prompt (string, 15-655k chars) | completion (string, 3-32.4k chars) | api (string, 8-52 chars)
---|---|---
import pytest
import numpy as np
from lumicks.pylake.kymotracker.detail.calibrated_images import CalibratedKymographChannel
from lumicks.pylake.kymotracker.detail.linalg_2d import eigenvalues_2d_symmetric, eigenvector_2d_symmetric
from lumicks.pylake.kymotracker.detail.geometry_2d import calculate_image_geometry, get_candidate_generator
from lumicks.pylake.kymotracker.detail.trace_line_2d import _traverse_line_direction, detect_lines
from lumicks.pylake.kymotracker.detail.stitch import distance_line_to_point
from lumicks.pylake.kymotracker.stitching import stitch_kymo_lines
from lumicks.pylake.kymotracker.kymotracker import track_greedy, track_lines, filter_lines
from lumicks.pylake.kymotracker.kymoline import KymoLine, KymoLineGroup
from lumicks.pylake.kymotracker.detail.trace_line_2d import KymoLineData
from lumicks.pylake.tests.data.mock_confocal import generate_kymo
from copy import deepcopy
from scipy.stats import norm
def test_eigen_2d():
def test_eigs(a, b, d):
a = np.array(a)
b = np.array(b)
d = np.array(d)
np_eigen_values, np_eigen_vectors = np.linalg.eig([[a, b], [b, d]])
pl_eigen_values = np.sort(eigenvalues_2d_symmetric(a, b, d))
# Test whether eigen values are correct
i = np.argsort(np_eigen_values)
np.testing.assert_allclose(np_eigen_values[i], pl_eigen_values, rtol=1e-6,
err_msg=f"Eigen values invalid. Calculated {pl_eigen_values}, "
f"expected: {np_eigen_vectors} ")
# Test whether eigen vectors are correct
vs = [np.array(eigenvector_2d_symmetric(a, b, d, x)) for x in pl_eigen_values]
np.testing.assert_allclose(abs(np.dot(vs[0], np_eigen_vectors[:, i[0]])), 1.0, rtol=1e-6,
err_msg="First eigen vector invalid")
np.testing.assert_allclose(abs(np.dot(vs[1], np_eigen_vectors[:, i[1]])), 1.0, rtol=1e-6,
err_msg="Second eigen vector invalid")
def np_eigenvalues(a, b, d):
eig1 = np.empty(a.shape)
eig2 = np.empty(a.shape)
ex = np.empty(a.shape)
ey = np.empty(a.shape)
for x in np.arange(a.shape[0]):
for y in np.arange(a.shape[1]):
np_eigen_values, np_eigen_vectors = np.linalg.eig(np.array([[a[x, y], b[x, y]], [b[x, y], d[x, y]]]))
idx = np_eigen_values.argsort()
np_eigen_values.sort()
eig1[x, y] = np_eigen_values[0]
eig2[x, y] = np_eigen_values[1]
ex[x, y] = np_eigen_vectors[0, idx[0]]
ey[x, y] = np_eigen_vectors[1, idx[0]]
return np.stack((eig1, eig2), axis=len(eig1.shape)), ex, ey
test_eigs(3, 4, 8)
test_eigs(3, 0, 4)
test_eigs(3, 4, 0)
test_eigs(0, 4, 0)
test_eigs(0, 0, 0)
test_eigs(1, 1, 0)
test_eigs(-0.928069046998319, 0.9020129898294712, -0.9280690469983189)
test_eigs(.000001, -1, .000001)
test_eigs(.000001, -.000001, .00001)
a = np.array([[3, 3, 3], [3, 0, 0], [3, 0, 0]])
b = np.array([[4, 0, 0], [4, 4, 4], [3, 0, 0]])
d = np.array([[8, 4, 4], [0, 0, 0], [3, 0, 0]])
eigenvalues = eigenvalues_2d_symmetric(a, b, d)
np_eigenvalues, np_eigenvector_x, np_eigenvector_y = np_eigenvalues(a, b, d)
eigenvalues.sort(axis=-1)
eigenvector_x, eigenvector_y = eigenvector_2d_symmetric(a, b, d, eigenvalues[:, :, 0])
# Given that there are some zeroes, we should include an absolute tolerance.
np.testing.assert_allclose(eigenvalues, np_eigenvalues, rtol=1e-6, atol=1e-14)
# Eigen vectors have to point in the same direction, but are not necessarily the same sign
np.testing.assert_allclose(np.abs(np_eigenvector_x*eigenvector_x + np_eigenvector_y*eigenvector_y), np.ones(a.shape))
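# Reference note (added for illustration, not part of the pylake test suite): for a
# symmetric 2x2 matrix [[a, b], [b, d]] the eigenvalues tested above have the closed form
# lambda = (a + d)/2 +/- sqrt(((a - d)/2)**2 + b**2). A minimal sketch:
def _reference_eigenvalues_2x2(a, b, d):
    """Closed-form eigenvalues of [[a, b], [b, d]], returned in ascending order."""
    mean = 0.5 * (a + d)
    radius = np.sqrt((0.5 * (a - d)) ** 2 + b ** 2)
    return np.array([mean - radius, mean + radius])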
@pytest.mark.parametrize("loc,scale,sig_x,sig_y,transpose", [
(25.25, 2, 3, 3, False),
(25.45, 2, 3, 3, False),
(25.65, 2, 3, 3, False),
(25.85, 2, 3, 3, False),
(25.25, 2, 3, 3, True),
(25.45, 2, 3, 3, True),
(25.65, 2, 3, 3, True),
(25.85, 2, 3, 3, True),
])
def test_position_determination(loc, scale, sig_x, sig_y, transpose, tol=1e-2):
data = np.tile(.0001 + norm.pdf(np.arange(0, 50, 1), loc=loc, scale=scale), (5, 1))
if transpose:
data = data.transpose()
max_derivative, normals, positions, inside = calculate_image_geometry(data, sig_x, sig_y)
if transpose:
assert np.abs(positions[round(loc), 3, 0] - (loc - round(loc))) < tol
assert inside[round(loc), 3] == 1
else:
assert np.abs(positions[3, round(loc), 1] - (loc - round(loc))) < tol
assert inside[3, round(loc)] == 1
def test_geometry():
sig_x = 2.0 / np.sqrt(3.0)
sig_y = 2.0 / np.sqrt(3.0)
# Test vectors obtained from the receptive fields
# First coordinate changes, hence we expect the normal to point in the direction of the second
data = np.zeros((5, 5))
data[1, 2] = 10
data[2, 2] = 10
data[3, 2] = 10
max_derivative, normals, positions, inside = calculate_image_geometry(data, sig_x, sig_y)
assert normals[2, 2][0] == 0
assert np.abs(normals[2, 2][1]) > 0
# Second coordinate changes, expect vector to point in direction of the first
data = np.zeros((5, 5))
data[2, 1] = 10
data[2, 2] = 10
data[2, 3] = 10
max_derivative, normals, positions, inside = calculate_image_geometry(data, sig_x, sig_y)
assert normals[2, 2][1] == 0
assert np.abs(normals[2, 2][0]) > 0
# Diagonal line y=x, expect normal's coordinates to have different signs
data = np.zeros((5, 5))
data[1, 1] = 10
data[2, 2] = 10
data[3, 3] = 10
max_derivative, normals, positions, inside = calculate_image_geometry(data, sig_x, sig_y)
np.testing.assert_allclose(normals[2, 2][1], -normals[2, 2][0])
# Diagonal line y=x, expect normal's coordinates to have same sign
data = np.zeros((5, 5))
data[3, 1] = 10
data[2, 2] = 10
data[1, 3] = 10
max_derivative, normals, positions, inside = calculate_image_geometry(data, sig_x, sig_y)
np.testing.assert_allclose(normals[2, 2][1], normals[2, 2][0])
def test_candidates():
candidates = get_candidate_generator()
normal_angle = (-22.4 - 90) * np.pi / 180
np.testing.assert_allclose(candidates(normal_angle)[0], np.array([1, -1]))
np.testing.assert_allclose(candidates(normal_angle)[1], np.array([1, 0]))
np.testing.assert_allclose(candidates(normal_angle)[2], np.array([1, 1]))
normal_angle = (22.4 - 90) * np.pi / 180
np.testing.assert_allclose(candidates(normal_angle)[0], np.array([1, -1]))
np.testing.assert_allclose(candidates(normal_angle)[1], np.array([1, 0]))
np.testing.assert_allclose(candidates(normal_angle)[2], np.array([1, 1]))
normal_angle = (-22.6 - 90) * np.pi / 180
assert not np.allclose(candidates(normal_angle)[0], np.array([1, -1]))
assert not np.allclose(candidates(normal_angle)[1], np.array([1, 0]))
assert not np.allclose(candidates(normal_angle)[2], np.array([1, 1]))
normal_angle = (22.6 - 90) * np.pi / 180
assert not np.allclose(candidates(normal_angle)[0], np.array([1, -1]))
assert not np.allclose(candidates(normal_angle)[1], np.array([1, 0]))
assert not np.allclose(candidates(normal_angle)[2], np.array([1, 1]))
for normal_angle in np.arange(-np.pi, np.pi, np.pi / 100):
options = candidates(normal_angle)
assert len(options) == 3
# Check if the options are adjacent to the center cell
assert np.max(np.max(np.abs(options))) == 1, print(options)
# Check if the options are perpendicular to the direction we were sent in.
# Normal will be at cos(angle), sin(angle). Rotate by 90 degrees, results in -sin(angle), cos(angle)
direction = np.array([-np.sin(normal_angle), np.cos(normal_angle)])
direction = np.sign(np.round(direction))
np.testing.assert_allclose(np.sort([np.max(np.abs(direction - option)) for option in options]),
[0, 1, 1], err_msg=f"Failed for normal angle {normal_angle} / direction {direction} => {options}")
def test_tracing():
"""Draw a pattern like this:
X
X
X X X X X
X
X
with appropriate normals and verify that lines are being traced correctly."""
n = 7
hx = int(n / 2)
a = -np.eye(n)
a[:hx, :hx] = -2 * np.eye(n - hx - 1)
a[int(n / 2), :] = -1
positions = np.zeros((n, n, 2))
normals = np.zeros((n, n, 2))
normals[:, :, 0] = - np.eye(n) * 1.0 / np.sqrt(2)
normals[:, :, 1] = np.eye(n) * 1.0 / np.sqrt(2)
normals[hx, :, 0] = 1
normals[hx, hx, 0] = - 1.0 / np.sqrt(2)
normals[hx, hx, 1] = 1.0 / np.sqrt(2)
candidates = get_candidate_generator()
np.testing.assert_allclose(_traverse_line_direction([0, 0], deepcopy(a), positions, normals, -0.5, 1, candidates, 1, True),
np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]]))
np.testing.assert_allclose(
_traverse_line_direction([n - 1, n - 1], deepcopy(a), positions, normals, -0.5, 1, candidates, -1, True),
np.array([[6, 6], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1], [0, 0]]))
np.testing.assert_allclose(_traverse_line_direction([hx, 0], deepcopy(a), positions, normals, -0.5, 1, candidates, 1, True),
np.array([[hx, 0], [hx, 1], [hx, 2], [hx, 3], [4, 4], [5, 5], [6, 6]]))
# Test whether the threshold is enforced
np.testing.assert_allclose(_traverse_line_direction([0, 0], deepcopy(a), positions, normals, -1.5, 1, candidates, 1, True),
np.array([[0, 0], [1, 1], [2, 2]]))
def test_uni_directional():
data = np.zeros((100, 100)) + .0001
for i in np.arange(634):
for j in np.arange(25, 35, .5):
data[int(50 + j * np.sin(.01 * i)), int(50 + j * np.cos(.01 * i))] = 1
def detect(min_length, force_dir):
lines = detect_lines(data, 6, max_lines=5, start_threshold=.005,
continuation_threshold=.095, angle_weight=1, force_dir=force_dir)
return [line for line in lines if len(line) > min_length]
assert len(detect(5, True)) == 2
assert len(detect(5, False)) == 1
def test_distance_line_to_point():
assert distance_line_to_point(np.array([0, 0]), np.array([0, 1]), np.array([0, 2])) == np.inf
assert distance_line_to_point(np.array([0, 0]), np.array([0, 2]), np.array([0, 2])) == 0.0
assert distance_line_to_point(np.array([0, 0]), np.array([1, 1]), np.array([0, 1])) == \
np.sqrt(0.5)
assert distance_line_to_point(np.array([0, 0]), np.array([1, 0]),
| np.array([0, 1]) | numpy.array |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_data_process.ipynb (unless otherwise specified).
__all__ = ['imgids_from_directory', 'imgids_testing', 'read_img', 'load_RGBY_image', 'save_image', 'CellSegmentator',
'load_segmentator', 'get_cellmask', 'encode_binary_mask', 'coco_rle_encode', 'rle_encode', 'rle_decode',
'mask2rles', 'rles2bboxes', 'segment_image', 'segment_images', 'resize_image', 'crop_image',
'remove_faint_greens', 'pad_to_square', 'load_seg_trn', 'split_cells', 'generate_crops', 'fill_targets',
'generate_meta', 'get_meta', 'create_split_file', 'create_random_split', 'load_match_info',
'generate_noleak_split', 'get_img_mean_std']
# Cell
import os
import ast
from pathlib import Path
from itertools import groupby
import functools
import mlcrate
from multiprocessing import Pool
from pycocotools import mask as mutils
from pycocotools import _mask as coco_mask
import numpy as np
import pandas as pd
import cv2, PIL
import zlib
import base64
import zipfile
import uuid
from sklearn.preprocessing import LabelEncoder
from .config.config import *
from .utils.common_util import *
# Cell
def imgids_from_directory(path):
if isinstance(path, str):
path = Path(path)
imgids = set(n.stem.split('_')[0] for n in path.iterdir())
return list(imgids)
# Cell
imgids_testing = [
'000a6c98-bb9b-11e8-b2b9-ac1f6b6435d0',
'001838f8-bbca-11e8-b2bc-ac1f6b6435d0',
'000c99ba-bba4-11e8-b2b9-ac1f6b6435d0',
'a34d8680-bb99-11e8-b2b9-ac1f6b6435d0',
'000a9596-bbc4-11e8-b2bc-ac1f6b6435d0']
# Cell
def read_img(dir_data, image_id, color, image_size=None, suffix='.png'):
filename = dir_data/f'{image_id}_{color}{suffix}'
assert filename.exists(), f'not found {filename}'
img = cv2.imread(str(filename), cv2.IMREAD_UNCHANGED)
if image_size is not None:
img = cv2.resize(img, (image_size, image_size))
if img.max() > 255:
# 16-bit image: rescale to the 8-bit range using its actual maximum before casting
img_max = img.max()
img = (img / img_max * 255).astype('uint8')
return img
def load_RGBY_image(dir_data, image_id,
rgb_only=False, suffix='.png', image_size=None):
red, green, blue = [
read_img(dir_data, image_id, color, image_size, suffix)
for color in ('red', 'green', 'blue')]
channels = [red, green, blue]
if not rgb_only:
yellow = read_img(
dir_data, image_id, "yellow", image_size, suffix)
channels.append(yellow)
stacked_images = np.transpose(np.array(channels), (1, 2, 0))
return stacked_images
# Cell
def save_image(dst, imgid, img):
dst = Path(dst)
for ch, color in enumerate(['red', 'green', 'blue', 'yellow']):
cv2.imwrite(str(dst / f'{imgid}_{color}.png'), img[..., ch])
# Cell
import hpacellseg.cellsegmentator as cellsegmentator
from hpacellseg.utils import label_cell, label_nuclei
from tqdm import tqdm
class CellSegmentator(cellsegmentator.CellSegmentator):
def __init__(self, nuc_model, cell_model, *args, **kwargs):
nuc_model = str(nuc_model)
cell_model = str(cell_model)
super().__init__(nuc_model, cell_model, *args, **kwargs)
def __call__(self, red, yellow, blue):
'''
`red`: list
Red images' file paths.
`yellow`: list
Yellow images' file paths.
`blue`: list
Blue images' file paths.
'''
assert len(red) == len(yellow) == len(blue)
if isinstance(red[0], Path):
red, yellow, blue = (
[str(n) for n in fns]
for fns in [red, yellow, blue])
segs_nucl = self.pred_nuclei(blue)
segs_cell = self.pred_cells([red, yellow, blue])
masks = []
for seg_nucl, seg_cell in zip(segs_nucl, segs_cell):
mask_nucl, mask_cell = label_cell(seg_nucl, seg_cell)
masks.append((mask_nucl, mask_cell))
return masks
def load_segmentator(
dir_segmentator_models, scale_factor=0.25, device="cuda",
padding=True, multi_channel_model=True):
model_nucl = dir_segmentator_models / 'nuclei-model.pth'
model_cell = dir_segmentator_models / 'cell-model.pth'
segmentator = CellSegmentator(
model_nucl, model_cell,
scale_factor=scale_factor, device=device, padding=padding,
multi_channel_model=multi_channel_model)
return segmentator
def get_cellmask(img, segmentator):
img_r, img_y, img_b = img[...,0], img[...,3], img[...,2]
masks = segmentator(red=[img_r], yellow=[img_y], blue=[img_b])
_, mask = masks[0]
return mask
# Cell
def encode_binary_mask(mask):
"""Converts a binary mask into OID challenge encoding ascii text."""
# check input mask --
if mask.dtype != bool:
raise ValueError(
"encode_binary_mask expects a binary mask, received dtype == %s" %
mask.dtype)
mask = np.squeeze(mask)
if len(mask.shape) != 2:
raise ValueError(
"encode_binary_mask expects a 2d mask, received shape == %s" %
mask.shape)
# convert input mask to expected COCO API input --
mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1)
mask_to_encode = mask_to_encode.astype(np.uint8)
mask_to_encode = np.asfortranarray(mask_to_encode)
# RLE encode mask --
encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]
# compress and base64 encoding --
binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
base64_str = base64.b64encode(binary_str)
return base64_str.decode()
def coco_rle_encode(bmask):
rle = {'counts': [], 'size': list(bmask.shape)}
counts = rle.get('counts')
for i, (value, elements) in enumerate(groupby(bmask.ravel(order='F'))):
if i == 0 and value == 1:
counts.append(0)
counts.append(len(list(elements)))
return rle
# Cell
def rle_encode(img, mask_val=1):
"""
Turns our masks into RLE encoding to easily store them
and feed them into models later on
https://en.wikipedia.org/wiki/Run-length_encoding
Args:
img (np.array): Segmentation array
mask_val (int): Which value to use to create the RLE
Returns:
RLE string
"""
dots = np.where(img.T.flatten() == mask_val)[0]
run_lengths = []
prev = -2
for b in dots:
if (b>prev+1): run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return ' '.join([str(x) for x in run_lengths])
def rle_decode(rle_string, height, width):
""" Convert RLE sttring into a binary mask
Args:
rle_string (rle_string): Run length encoding containing
segmentation mask information
height (int): Height of the original image the map comes from
width (int): Width of the original image the map comes from
Returns:
Numpy array of the binary segmentation mask for a given cell
"""
rows,cols = height,width
rle_numbers = [int(num_string) for num_string in rle_string.split(' ')]
rle_pairs = np.array(rle_numbers).reshape(-1,2)
img = np.zeros(rows*cols,dtype=np.uint8)
for index,length in rle_pairs:
index -= 1
img[index:index+length] = 255
img = img.reshape(cols,rows)
img = img.T
img = (img / 255).astype(np.uint8)
return img
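# A minimal round-trip sketch (illustration only, using the two helpers defined above):
# encode a toy 3x4 binary mask and decode it back to check that the functions agree.
def _example_rle_roundtrip():
    mask = np.zeros((3, 4), dtype=np.uint8)
    mask[1, 1:3] = 1
    rle = rle_encode(mask, mask_val=1)          # e.g. "5 1 8 1"
    recovered = rle_decode(rle, height=3, width=4)
    assert np.array_equal(mask, recovered)
    return rle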
# Cell
def mask2rles(mask):
'''
Args:
mask (np.array): 2-D array with discrete values each
representing a different class or object.
rles (list): COCO run-length encoding:
{'size': [height, width],
'counts': encoded RLE}
'''
ids_cell = np.unique(mask)
rles = []
for id in ids_cell:
if id == 0:
continue
bmask = np.where(mask == id, 1, 0)
bmask = np.asfortranarray(bmask).astype(np.uint8)
rle = mutils.encode(bmask)
rles.append(rle)
return rles
# Cell
def rles2bboxes(rles):
if len(rles) == 0:
return []
bboxes = mutils.toBbox(rles)
bboxes[:,2] += bboxes[:,0]
bboxes[:,3] += bboxes[:,1]
return bboxes
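# A minimal sketch (illustration only): run a toy labelled mask through mask2rles /
# rles2bboxes and decode one cell back with pycocotools to check the round trip.
def _example_mask_roundtrip():
    mask = np.zeros((6, 6), dtype=np.uint8)
    mask[1:3, 1:3] = 1   # cell id 1
    mask[4:6, 3:6] = 2   # cell id 2
    rles = mask2rles(mask)
    bboxes = rles2bboxes(rles)          # [x0, y0, x1, y1] per cell
    cell1 = mutils.decode(rles[0])      # binary mask of the first cell
    assert cell1.sum() == 4
    return rles, bboxes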
# Cell
def segment_image(dir_img=None, imgid=None, segmentator=None):
img = load_RGBY_image(dir_img, imgid)
mask = get_cellmask(img, segmentator)
rles = mask2rles(mask)
bboxes = rles2bboxes(rles)
ids = [f'{imgid}_{i}' for i in range(len(rles))]
df = pd.DataFrame(
{'Id': ids, 'rle': rles, 'bbox': list(bboxes)})
return df
def segment_images(dir_img, imgids, segmentator):
df = pd.DataFrame()
for imgid in tqdm(imgids, total=len(imgids)):
df_img = segment_image(dir_img, imgid, segmentator)
df = df.append(df_img, ignore_index=True)
return df
# Cell
def resize_image(img, sz):
return cv2.resize(img, (sz, sz), interpolation=cv2.INTER_LINEAR)
# Cell
def crop_image(img, bbox, bmask=None):
'''
Args:
img (np.array): Image to be cropped by ``bbox``.
bbox (np.array): Bounding box in terms of [x0, y0, x1, y1].
bmask (np.array, np.uint8): Binary mask for the cell.
'''
bbox = bbox.astype(np.int16)
x0, y0, x1, y1 = bbox
crop = img[y0:y1, x0:x1]
if bmask is not None:
crop = bmask[y0:y1, x0:x1][...,None] * crop
return crop
# Cell
def remove_faint_greens(xs, crops, green_thres=64):
assert len(xs) == len(crops)
xs_out = []
for x, crop in zip(xs, crops):
if crop[...,1].max() > green_thres:
xs_out.append(x)
return xs_out
# Cell
def pad_to_square(img):
'''
Pad an image to a square size, centering it as much as possible.
'''
h, w, c = img.shape
if h == w:
return img
elif h < w:
img_padded = np.zeros((w, w, c), dtype=img.dtype)
offset0 = (w - h) // 2
offset1 = (w - h) - offset0
img_padded[offset0:-offset1, :] = img.copy()
else:
img_padded = np.zeros((h, h, c), dtype=img.dtype)
offset0 = (h - w) // 2
offset1 = (h - w) - offset0
img_padded[:, offset0:-offset1] = img.copy()
return img_padded
# Cell
def load_seg_trn(pth_csv):
'''
Loads @dscettler8845's segmentation results for train set.
'''
df = pd.read_csv(pth_csv)
df['cell_masks'] = df['cell_masks'].apply(ast.literal_eval)
df['bboxes'] = (
df['bboxes'].apply(lambda o: np.array(ast.literal_eval(o)))
)
return df
# Cell
def _split_cells(df_img):
'''
Expand a row representing a segmented image into multiple rows
representing the cells in the image.
'''
imgid = df_img['ID'].item()
sz = df_img['dimension'].item()
rles = df_img['cell_masks'].item()
bboxes = list(df_img['bboxes'].item())
cellids = [f'{imgid}_{i}' for i in range(len(rles))]
rles = [
mutils.encode(rle_decode(rle, sz, sz)) for rle in rles]
df = pd.DataFrame(
{'Id': cellids, 'rle': rles, 'bbox': bboxes})
df['Target'] = df_img['Label'].item()
return df
def split_cells(df_seg):
'''
Args:
df_seg (pd.DataFrame): Each row is an image.
df_cells (pd.DataFrame): Each row is a cell.
'''
df_cells = (
df_seg.groupby('ID')
.apply(_split_cells).reset_index(drop=True)
)
return df_cells
# Cell
def generate_crops(df_cells, src, dst, out_sz=768):
'''
- Crop out each cell from its image.
- Resize the crop to a square and save to disk.
- Record the crop's maximum green channel value.
'''
df_cells = df_cells.copy(deep=True)
max_greens = []
imgids = df_cells['Id'].apply(lambda o: o.split('_')[0])
for imgid, df_img in df_cells.groupby(imgids):
img = load_RGBY_image(src, imgid)
for cellid, df_cell in df_img.groupby('Id'):
rle = df_cell['rle'].item()
bbox = df_cell['bbox'].item()
bmask = mutils.decode(rle)
crop = crop_image(img, bbox, bmask=bmask)
crop = pad_to_square(crop)
max_green =
| np.max(crop[..., COLOR_INDEXS['green']]) | numpy.max |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 10:47:01 2018
Note that the TomoPhantom package is released under Apache License, Version 2.0
@author: <NAME>
--- A class which simulates artifacts applied to sinogram (2D) data -----
currently available:
-- noise (Poisson or Gaussian)
-- zingers
-- stripes (rings)
"""
import numpy as np
import random
class ArtifactsClass:
def __init__(self, sinogram):
self.sinogram = np.copy(sinogram)
(self.anglesDim, self.DetectorsDim) =
| np.shape(sinogram) | numpy.shape |
import numpy as np
from io import BytesIO
import PIL.Image
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json
import glob, os
import cv2
from scipy.optimize import linear_sum_assignment as hungarian
from keras.models import model_from_json
from IPython.display import display, Image
def merge_solutions_by_batch(results):
final = dict()
for r in results:
final.update(r)
return final
def read_json(filename):
with open(filename,encoding='utf-8') as file:
detections= json.load(file)
return detections
def save_json(path,obj):
with open(path,'w') as file:
json.dump(obj,file,cls=NumpyEncoder)
def load_model(json_file,weights_file):
with open(json_file, encoding='utf-8') as file:
loaded_model = file.read()  # keras expects the architecture as a JSON string
model = model_from_json(loaded_model)
model.load_weights(weights_file)
return model
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.int64):
return int(obj)
elif isinstance(obj,np.float32):
return float(obj)
return json.JSONEncoder.default(self, obj)
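# A small usage sketch (illustration only): NumpyEncoder lets json.dumps serialize
# numpy arrays and scalars that the default encoder rejects.
def _example_numpy_json():
    payload = {'points': np.array([[1.0, 2.0], [3.0, 4.0]]), 'count': np.int64(2)}
    return json.dumps(payload, cls=NumpyEncoder)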
def rotate_bound2(image,x,y,angle, w,h):
# grab the dimensions of the image and then determine the
# center
(h0, w0) = image.shape[:2]
(pX, pY) = (x, y) # Rect center in input
(cX, cY) = (w / 2, h / 2) # Rect center in output
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0) # angle in degrees
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# adjust the rotation matrix to take into account translation
M[0, 2] += pX - cX
M[1, 2] += pY - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (w, h), flags=cv2.WARP_INVERSE_MAP,borderMode=cv2.BORDER_REPLICATE)
def distance_point(dt,gt,tl=1):
return ((dt[0]-gt[0])**2+(dt[1]-gt[1])**2)/tl
def distance_line_point(m,point):
import numpy as np
x1 =m[0][0]
y1 =m[0][1]
x2 =m[1][0]
y2 =m[1][1]
numerator = abs((y2-y1)*point[0]-(x2-x1)*point[1]+x2*y1-y2*x1)
denominator = np.sqrt((y2-y1)**2+(x2-x1)**2)
return numerator/denominator
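# Quick worked example (illustration only): the distance from the origin to the line
# through (0, 1) and (1, 0) is 1/sqrt(2) ~= 0.707.
def _example_distance_line_point():
    line = [[0, 1], [1, 0]]
    return distance_line_point(line, (0, 0))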
def showBGRimage(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 255))
a[:,:,[0,2]] = a[:,:,[2,0]] # swap B and R channels so PIL gets R,G,B order
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
def showmap(a, fmt='png'):
a = np.uint8(np.clip(a, 0, 255))
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
#def checkparam(param):
# octave = param['octave']
# starting_range = param['starting_range']
# ending_range = param['ending_range']
# assert starting_range <= ending_range, 'starting ratio should <= ending ratio'
# assert octave >= 1, 'octave should >= 1'
# return starting_range, ending_range, octave
def getJetColor(v, vmin, vmax):
c = np.zeros((3))
if (v < vmin):
v = vmin
if (v > vmax):
v = vmax
dv = vmax - vmin
if (v < (vmin + 0.125 * dv)):
c[0] = 256 * (0.5 + (v * 4)) #B: 0.5 ~ 1
elif (v < (vmin + 0.375 * dv)):
c[0] = 255
c[1] = 256 * (v - 0.125) * 4 #G: 0 ~ 1
elif (v < (vmin + 0.625 * dv)):
c[0] = 256 * (-4 * v + 2.5) #B: 1 ~ 0
c[1] = 255
c[2] = 256 * (4 * (v - 0.375)) #R: 0 ~ 1
elif (v < (vmin + 0.875 * dv)):
c[1] = 256 * (-4 * v + 3.5) #G: 1 ~ 0
c[2] = 255
else:
c[2] = 256 * (-4 * v + 4.5) #R: 1 ~ 0.5
return c
def colorize(gray_img):
out = np.zeros(gray_img.shape + (3,))
for y in range(out.shape[0]):
for x in range(out.shape[1]):
out[y,x,:] = getJetColor(gray_img[y,x], 0, 1)
return out
def padRightDownCorner(img, stride, padValue):
h = img.shape[0]
w = img.shape[1]
pad = 4 * [None]
pad[0] = 0 # up
pad[1] = 0 # left
pad[2] = 0 if (h%stride==0) else stride - (h % stride) # down
pad[3] = 0 if (w%stride==0) else stride - (w % stride) # right
img_padded = img
pad_up = np.tile(img_padded[0:1,:,:]*0 + padValue, (pad[0], 1, 1))
img_padded = np.concatenate((pad_up, img_padded), axis=0)
pad_left = np.tile(img_padded[:,0:1,:]*0 + padValue, (1, pad[1], 1))
img_padded = np.concatenate((pad_left, img_padded), axis=1)
pad_down = np.tile(img_padded[-2:-1,:,:]*0 + padValue, (pad[2], 1, 1))
img_padded = np.concatenate((img_padded, pad_down), axis=0)
pad_right = np.tile(img_padded[:,-2:-1,:]*0 + padValue, (1, pad[3], 1))
img_padded = np.concatenate((img_padded, pad_right), axis=1)
return img_padded, pad
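# A small usage sketch (illustration only): pad an image so both sides become a
# multiple of the network stride, recording how much padding was applied.
def _example_pad_to_stride():
    img = np.zeros((37, 45, 3), dtype=np.uint8)
    img_padded, pad = padRightDownCorner(img, stride=8, padValue=128)
    assert img_padded.shape[0] % 8 == 0 and img_padded.shape[1] % 8 == 0
    return img_padded.shape, pad   # ((40, 48, 3), [0, 0, 3, 3])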
def distance(dt,gt,tl=1):
return ((dt[0]-gt[0])**2+(dt[1]-gt[1])**2)/tl
def distance_tracks(dt,gt,tl=1):
return (np.sqrt((dt[0]-gt[0])**2+(dt[1]-gt[1])**2))/tl
def cost_matrix_tracks(ground_t,detections,threshold):
total = len(ground_t)+len(detections)
cost_m = np.zeros((total,total))
for i in range(total):
for j in range(total):
if i < len(ground_t) and j <len(detections):
cost_m[i][j] = distance_tracks(ground_t[i],detections[j])
else:
cost_m[i][j] = threshold
return cost_m
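# A minimal matching sketch (illustration only): build the padded cost matrix and let
# the Hungarian algorithm (scipy's linear_sum_assignment, imported above as `hungarian`)
# assign detections to ground-truth points; pairs at the threshold stay unmatched.
def _example_match_tracks(ground_t, detections, threshold=50):
    cost_m = cost_matrix_tracks(ground_t, detections, threshold)
    row_ind, col_ind = hungarian(cost_m)
    matches = [(i, j) for i, j in zip(row_ind, col_ind)
               if i < len(ground_t) and j < len(detections) and cost_m[i][j] < threshold]
    return matches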
def distance_tracks_wb(dt,gt,mapping):
d=[]
d.append(np.sqrt((dt[0]-gt[0])**2+(dt[1]-gt[1])**2))
for m in mapping:
if gt==m[0]:
d.append(np.sqrt((m[0][0]-gt[0])**2+(m[0][1]-gt[1])**2))
elif gt==m[1]:
d.append(np.sqrt((m[0][0]-gt[0])**2+(m[0][1]-gt[1])**2))
if len(d)==1:
d.append(300)
elif len(d)==2:
d.append(100)
return np.array(d).mean()
def cost_matrix_tracks_wb(ground_t,detections,new_mapping,threshold):
total = len(ground_t)+len(detections)
cost_m = np.zeros((total,total))
for i in range(total):
for j in range(total):
if i < len(ground_t) and j <len(detections):
cost_m[i][j] = distance_tracks_wb(ground_t[i],detections[j],new_mapping)
else:
cost_m[i][j] = threshold
return cost_m
def cost_matrix(ground_t,detections,threshold):
total = len(ground_t)+len(detections)
cost_m = np.zeros((total,total))
for i in range(total):
for j in range(total):
if i < len(ground_t) and j <len(detections):
cost_m[i][j] = distance(ground_t[i],detections[j],len(ground_t))
else:
cost_m[i][j] = threshold
return cost_m
def cost_matrix_mappings(ground_mappings,mapings,threshold):
total = len(ground_mappings)+len(mapings)
cost_m = np.zeros((total,total))
for i in range(total):
for j in range(total):
if i < len(ground_mappings) and j <len(mapings):
cost_m[i][j] = (distance(ground_mappings[i][0],mapings[j][0],len(ground_mappings)) + distance(ground_mappings[i][1],mapings[j][1],len(ground_mappings)))/2
else:
cost_m[i][j] = threshold*2
return cost_m
def dfs(graph, start):
visited, stack = set(), [start]
while stack:
vertex = stack.pop()
if vertex not in visited:
visited.add(vertex)
stack.extend(graph[vertex] - visited)
return visited
def clean_parts(parts,mappings):
new_parts=[]
excluded=set()
map1 = [tuple(m[0]) for m in mappings]
map2 = [tuple(m[1]) for m in mappings]
mapt = map1+map2
for i in range(len(parts)):
for j in range(len(parts)):
d=distance_tracks(parts[i],parts[j])
if d ==0 and tuple(parts[j]) not in excluded:
new_parts.append(parts[j])
elif tuple(parts[j]) not in mapt:
excluded.add(tuple(parts[j]))
#if d>0 and d<4 and tuple(parts[j]) not in excluded:
# excluded.add(tuple(parts[j]))
return new_parts
def clean_detections(detections):
keylist= list(detections.keys())
for i in range(len(keylist)):
detections[keylist[i]]['new_mapping']=[]
for mapping in detections[keylist[i]]['mapping']:
new_mapping=[[mapping[1][0],mapping[0][0]],[mapping[1][1],mapping[0][1]],mapping[2],mapping[3]]
detections[keylist[i]]['new_mapping'].append(new_mapping)
for k in detections[keylist[i]]['parts'].keys():
new_parts = clean_parts(detections[keylist[i]]['parts'][k],detections[keylist[i]]['new_mapping'])
detections[keylist[i]]['parts'][k]=new_parts
return detections
import math
def find_angle(part,mapping,typ=[1,3]):
for m in mapping:
if m[1]==part[:2]:
myradians = math.atan2(m[0][0]-m[1][0],m[0][1]-m[1][1])
mydegrees = math.degrees(myradians)
return (mydegrees-90)%360
return -1
def is_skeleton_zero_based(skeleton):
for limb in skeleton:
for part in limb:
if part == 0:
return True
return False
def to_one_based_skeleton(skeleton):
one_based_skeleton = list()
for part_a, part_b in skeleton:
one_based_skeleton.append([part_a + 1, part_b + 1])
return one_based_skeleton
def one_index_based_skeleton(skeleton):
if is_skeleton_zero_based(skeleton):
skeleton = to_one_based_skeleton(skeleton)
return skeleton
def get_skeleton_from_json(filename):
"""
Get the numparts and skeleton from the coco format json file.
Skeleton should be 1 based index.
Input:
filename: str
Output:
numparts: int
skeleton: list
"""
data = read_json(filename)
pose_info = data["categories"]
beepose_info = pose_info[0]
numparts = len(beepose_info["keypoints"])
skeleton = beepose_info["skeleton"]
# detect 0-based skeleton and change to one-based index
# if it is necessary.
skeleton = one_index_based_skeleton(skeleton)
return numparts, skeleton
def get_skeleton_mapIdx(numparts):
"""
Calculate the mapsIdx for each limb from the skeleton.
Input:
skeleton: list of part connection
Output:
list of ids for x and y for part
"""
connections_num = numparts
mapIdx = list()
for i in range(connections_num):
mapIdx.append([2 * i, (2 * i) + 1])
return mapIdx
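# Quick example (illustration only): with 3 limbs the PAF channels pair up as
# [[0, 1], [2, 3], [4, 5]].
def _example_mapidx():
    return get_skeleton_mapIdx(3)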
# NONMAXIMA SUPPRESSION FROM https://www.pyimagesearch.com/2014/11/17/non-maximum-suppression-object-detection-python/
# import the necessary packages
import numpy as np
def boxes2dets(boxes,size=20):
dets=[]
for b in boxes:
dets.append([b[0]+size,b[1]+size,b[-1]])
return dets
def dets2boxes(parts,size=20):
boxes=[]
for p in parts:
boxes.append([p[0]-size,p[1]-size,p[0]+size,p[1]+size,p[2]])
return np.array(boxes)
# Felzenszwalb et al.
def non_max_suppression_slow(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list, add the index
# value to the list of picked indexes, then initialize
# the suppression list (i.e. indexes that will be deleted)
# using the last index
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
suppress = [last]
# loop over all indexes in the indexes list
for pos in range(0, last):
# grab the current index
j = idxs[pos]
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = max(x1[i], x1[j])
yy1 = max(y1[i], y1[j])
xx2 = min(x2[i], x2[j])
yy2 = min(y2[i], y2[j])
# compute the width and height of the bounding box
w = max(0, xx2 - xx1 + 1)
h = max(0, yy2 - yy1 + 1)
# compute the ratio of overlap between the computed
# bounding box and the bounding box in the area list
overlap = float(w * h) / area[j]
# if there is sufficient overlap, suppress the
# current bounding box
if overlap > overlapThresh:
suppress.append(pos)
# delete all indexes from the index list that are in the
# suppression list
idxs = np.delete(idxs, suppress)
# return only the bounding boxes that were picked
return boxes[pick]
def boxes2peaks(boxes,size=15):
dets=[]
for b in boxes:
dets.append((b[0]+size,b[1]+size))
return dets
def peaks2boxes(parts,size=15):
boxes=[]
for p in parts:
boxes.append([p[0]-size,p[1]-size,p[0]+size,p[1]+size])
return np.array(boxes)
def non_max_suppression_op(peaks,overlap=0.6,size=15):
boxes= non_max_suppression_fast(peaks2boxes(peaks,size),overlap)
dets = boxes2peaks(boxes,size)
return dets
# Malisiewicz et al.
def non_max_suppression_fast(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 =
| np.maximum(x1[i], x1[idxs[:last]]) | numpy.maximum |
#!/usr/bin/env python
# /***************************************************************************
#
# @package: panda_simulator_examples
# @metapackage: panda_simulator
# @author: <NAME> <<EMAIL>>
#
# **************************************************************************/
# /***************************************************************************
# Copyright (c) 2019-2021, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************/
"""
This is a demo showing task-space control on the
simulator robot using the ROS topics and messages directly
from panda_simulator. The task-space force for the desired
pose is computed using a simple PD law, and the corresponding
joint torques are computed and sent to the robot.
Using this file you can set an equilibrium pose with the interactive marker. You can also
set the target by publishing to the topic "panda_simulator/equili_pose".
"""
import copy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point, TransformStamped,PoseStamped
from visualization_msgs.msg import *
from interactive_markers.interactive_marker_server import *
from franka_core_msgs.msg import EndPointState, JointCommand, RobotState
# -- add to pythonpath for finding rviz_markers.py
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# -------------------------------------------------
from multi_rviz_markers import RvizMarkers
# --------- Modify as required ------------
# Task-space controller parameters
# stiffness gains
P_pos = 50
P_ori = 0
# damping gains
D_pos = 1
D_ori = 0
# -----------------------------------------
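# A minimal sketch of the PD law described in the docstring (illustration only; the
# demo's actual control loop is not shown in this excerpt). Task-space force from pose
# error and end-effector velocity, mapped to joint torques through the Jacobian transpose.
def _pd_task_space_torques(pos_err, ori_err, lin_vel, ang_vel, jacobian):
    # F = P * error - D * velocity, stacked as [linear force; angular torque]
    force = P_pos * pos_err - D_pos * lin_vel
    torque = P_ori * ori_err - D_ori * ang_vel
    wrench = np.concatenate([force, torque])
    return jacobian.T.dot(wrench)   # joint torques: tau = J^T F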
publish_rate = 100
JACOBIAN = None
CARTESIAN_POSE = None
CARTESIAN_VEL = None
destination_marker = RvizMarkers()
def _on_robot_state(msg):
"""
Callback function for updating jacobian and EE velocity from robot state
"""
global JACOBIAN, CARTESIAN_VEL
JACOBIAN = np.asarray(msg.O_Jac_EE).reshape(6,7,order = 'F')
CARTESIAN_VEL = {
'linear': np.asarray([msg.O_dP_EE[0], msg.O_dP_EE[1], msg.O_dP_EE[2]]),
'angular': np.asarray([msg.O_dP_EE[3], msg.O_dP_EE[4], msg.O_dP_EE[5]]) }
def _on_endpoint_state(msg):
"""
Callback function to get current end-point state
"""
# pose message received is a vectorised column major transformation matrix
global CARTESIAN_POSE
cart_pose_trans_mat =
| np.asarray(msg.O_T_EE) | numpy.asarray |
"""
This library collects a bunch of Optimizers inspired by the paper
The older optimizers are stored in Optimizer.py. Those classes are equipped with a `step_simple` function taking in
scores and codes to generate the next batch of codes.
"""
# from matplotlib import use as use_backend
# use_backend("Agg")
import matplotlib.pylab as plt
# plt.ioff()
#
import os
import time
import sys
# import utils
import numpy as np
from numpy.linalg import norm
from numpy.random import randn
from numpy import sqrt, zeros, abs, floor, log, log2, eye, exp
from geometry_utils import ExpMap, VecTransport, radial_proj, orthogonalize, renormalize
orig_stdout = sys.stdout
#%% Classic Optimizers as Reference
class CholeskyCMAES:
""" Note this is a variant of CMAES Cholesky suitable for high dimensional optimization"""
def __init__(self, space_dimen, population_size=None, init_sigma=3.0, init_code=None, Aupdate_freq=10,
maximize=True, random_seed=None, optim_params={}):
N = space_dimen
self.space_dimen = space_dimen
# Overall control parameter
self.maximize = maximize # if the program is to maximize or to minimize
# Strategy parameter setting: Selection
if population_size is None:
self.lambda_ = int(4 + floor(3 * log2(N))) # population size, offspring number
# the relation between dimension and population size.
else:
self.lambda_ = population_size # use custom specified population size
mu = self.lambda_ / 2 # number of parents/points for recombination
# Select half the population size as parents
weights = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
self.mu = int(floor(mu))
self.weights = weights / sum(weights) # normalize recombination weights array
mueff = self.weights.sum() ** 2 / sum(self.weights ** 2) # variance-effectiveness of sum w_i x_i
self.weights.shape = (1, -1) # Add the 1st dim 1 to the weights mat
self.mueff = mueff # add to class variable
self.sigma = init_sigma # Note by default, sigma is None here.
print("Space dimension: %d, Population size: %d, Select size:%d, Optimization Parameters:\nInitial sigma: %.3f"
% (self.space_dimen, self.lambda_, self.mu, self.sigma))
# Strategy parameter setting: Adaptation
self.cc = 4 / (N + 4) # 0.0009756 by default
self.cs = sqrt(mueff) / (sqrt(mueff) + sqrt(N)) # 0.0499
self.c1 = 2 / (N + sqrt(2)) ** 2 # 1.1912701410022985e-07
if "cc" in optim_params.keys(): # if there is outside value for these parameter, overwrite them
self.cc = optim_params["cc"]
if "cs" in optim_params.keys():
self.cs = optim_params["cs"]
if "c1" in optim_params.keys():
self.c1 = optim_params["c1"]
self.damps = 1 + self.cs + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) # damping for sigma usually close to 1
print("cc=%.3f, cs=%.3f, c1=%.3f damps=%.3f" % (self.cc, self.cs, self.c1, self.damps))
if init_code is not None:
self.init_x = np.asarray(init_code)
self.init_x.shape = (1, N)
else:
self.init_x = None # FIXED Nov. 1st
self.xmean = zeros((1, N))
self.xold = zeros((1, N))
# Initialize dynamic (internal) strategy parameters and constants
self.pc = zeros((1, N))
self.ps = zeros((1, N)) # evolution paths for C and sigma
self.A = eye(N, N) # covariant matrix is represent by the factors A * A '=C
self.Ainv = eye(N, N)
self.eigeneval = 0 # track update of B and D
self.counteval = 0
if Aupdate_freq is None:
self.update_crit = self.lambda_ / self.c1 / N / 10
else:
self.update_crit = Aupdate_freq * self.lambda_
self.chiN = sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
# expectation of ||N(0,I)|| == norm(randn(N,1)) in 1/N expansion formula
self._istep = 0
def step_simple(self, scores, codes):
""" Taking scores and codes to return new codes, without generating images
Used in cases when the images are better handled in outer objects like Experiment object
"""
# Note it's important to decide which variable is to be saved in the `Optimizer` object
# Note to confirm with other code, this part is transposed.
# set short name for everything to simplify equations
N = self.space_dimen
lambda_, mu, mueff, chiN = self.lambda_, self.mu, self.mueff, self.chiN
cc, cs, c1, damps = self.cc, self.cs, self.c1, self.damps
sigma, A, Ainv, ps, pc, = self.sigma, self.A, self.Ainv, self.ps, self.pc,
# Sort by fitness and compute weighted mean into xmean
if self.maximize is False:
code_sort_index = np.argsort(scores) # ascending order (minimization); negate scores for maximization
else:
code_sort_index = np.argsort(-scores)
# scores = scores[code_sort_index] # Ascending order. minimization
if self._istep == 0:
# Population initialization: if no init_code was given, the first xmean is the weighted average of the supplied codes (e.g. codes of natural images)
if self.init_x is None:
select_n = len(code_sort_index[0:mu])
temp_weight = self.weights[:, :select_n] / np.sum(self.weights[:, :select_n]) # in case the codes is not enough
self.xmean = temp_weight @ codes[code_sort_index[0:mu], :]
else:
self.xmean = self.init_x
else:
self.xold = self.xmean
self.xmean = self.weights @ codes[code_sort_index[0:mu], :] # Weighted recombination, new mean value
# Cumulation statistics through steps: Update evolution paths
randzw = self.weights @ self.randz[code_sort_index[0:mu], :]
ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * randzw
pc = (1 - cc) * pc + sqrt(cc * (2 - cc) * mueff) * randzw @ A
# Adapt step size sigma
sigma = sigma * exp((cs / damps) * (norm(ps) / chiN - 1))
# self.sigma = self.sigma * exp((self.cs / self.damps) * (norm(ps) / self.chiN - 1))
print("sigma: %.2f" % sigma)
# Update A and Ainv with search path
if self.counteval - self.eigeneval > self.update_crit: # to achieve O(N ^ 2) do decomposition less frequently
self.eigeneval = self.counteval
t1 = time.time()
v = pc @ Ainv
normv = v @ v.T
# Directly update the A Ainv instead of C itself
A = sqrt(1 - c1) * A + sqrt(1 - c1) / normv * (
sqrt(1 + normv * c1 / (1 - c1)) - 1) * v.T @ pc # FIXME, dimension error, # FIXED aug.13th
Ainv = 1 / sqrt(1 - c1) * Ainv - 1 / sqrt(1 - c1) / normv * (
1 - 1 / sqrt(1 + normv * c1 / (1 - c1))) * Ainv @ v.T @ v
t2 = time.time()
print("A, Ainv update! Time cost: %.2f s" % (t2 - t1))
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.lambda_, N))
self.randz = randn(self.lambda_, N) # save the random number for generating the code.
for k in range(self.lambda_):
new_samples[k:k + 1, :] = self.xmean + sigma * (self.randz[k, :] @ A) # m + sig * Normal(0,C)
# Clever way to generate multivariate gaussian!!
# Stretch the Gaussian hypersphere with D and transform the
# ellipsoid by the B matrix (linear transform between coordinates)
self.counteval += 1
self.sigma, self.A, self.Ainv, self.ps, self.pc = sigma, A, Ainv, ps, pc,
self._istep += 1
return new_samples
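# A minimal usage sketch (illustration only, assuming scores are fitness values to
# maximize): repeatedly score the proposed codes on a toy objective and feed them back.
def _example_cholesky_cmaes(steps=5, dim=10):
    optim = CholeskyCMAES(space_dimen=dim, population_size=12, init_sigma=1.0,
                          init_code=np.zeros(dim))
    codes = np.random.randn(12, dim)
    for _ in range(steps):
        scores = -np.sum(codes ** 2, axis=1)   # toy objective: maximize -||x||^2
        codes = optim.step_simple(scores, codes)
    return codes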
#%% Optimizers that use pre-computed Hessian information
class HessCMAES:
""" Note this is a variant of CMAES Cholesky suitable for high dimensional optimization"""
def __init__(self, space_dimen, population_size=None, cutoff=None, init_sigma=3.0, init_code=None, Aupdate_freq=10, maximize=True, random_seed=None, optim_params={}):
if cutoff is None: cutoff = space_dimen
N = cutoff
self.code_len = space_dimen
self.space_dimen = cutoff # Overall control parameter
self.maximize = maximize # if the program is to maximize or to minimize
# Strategy parameter setting: Selection
if population_size is None:
self.lambda_ = int(4 + floor(3 * log2(N))) # population size, offspring number
# the relation between dimension and population size.
else:
self.lambda_ = population_size # use custom specified population size
mu = self.lambda_ / 2 # number of parents/points for recombination
# Select half the population size as parents
weights = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
self.mu = int(floor(mu))
self.weights = weights / sum(weights) # normalize recombination weights array
mueff = self.weights.sum() ** 2 / sum(self.weights ** 2) # variance-effectiveness of sum w_i x_i
self.weights.shape = (1, -1) # Add the 1st dim 1 to the weights mat
self.mueff = mueff # add to class variable
self.sigma = init_sigma # Note by default, sigma is None here.
print("Space dimension: %d, Population size: %d, Select size:%d, Optimization Parameters:\nInitial sigma: %.3f"
% (self.space_dimen, self.lambda_, self.mu, self.sigma))
# Strategy parameter setting: Adaptation
self.cc = 4 / (N + 4) # 0.0009756 by default
self.cs = sqrt(mueff) / (sqrt(mueff) + sqrt(N)) # 0.0499
self.c1 = 2 / (N + sqrt(2)) ** 2 # 1.1912701410022985e-07
if "cc" in optim_params.keys(): # if there is outside value for these parameter, overwrite them
self.cc = optim_params["cc"]
if "cs" in optim_params.keys():
self.cs = optim_params["cs"]
if "c1" in optim_params.keys():
self.c1 = optim_params["c1"]
self.damps = 1 + self.cs + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) # damping for sigma usually close to 1
print("cc=%.3f, cs=%.3f, c1=%.3f damps=%.3f" % (self.cc, self.cs, self.c1, self.damps))
if init_code is not None:
self.init_x = np.asarray(init_code).reshape(1,-1)
# if self.init_x.shape[1] == space_dimen:
# self.projection = True
# elif self.init_x.shape[1] == cutoff:
# self.projection = False
# else:
# raise ValueError
else:
self.init_x = None # FIXED Nov. 1st
self.xmean = zeros((1, N))
self.xold = zeros((1, N))
# Initialize dynamic (internal) strategy parameters and constants
self.pc = zeros((1, space_dimen))
self.ps = zeros((1, N)) # evolution paths for C and sigma
self.A = eye(N, space_dimen, ) # covariant matrix is represent by the factors A * A '=C
self.Ainv = eye(space_dimen, N, )
self.eigeneval = 0 # track update of B and D
self.counteval = 0
if Aupdate_freq is None:
self.update_crit = self.lambda_ / self.c1 / N / 10
else:
self.update_crit = Aupdate_freq * self.lambda_
self.chiN = sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
# expectation of ||N(0,I)|| == norm(randn(N,1)) in 1/N expansion formula
self._istep = 0
def set_Hessian(self, eigvals, eigvects, cutoff=None, expon=1/2.5):
if cutoff is None: cutoff = self.space_dimen
self.eigvals = eigvals[:cutoff]
self.eigvects = eigvects[:, :cutoff]
self.scaling = self.eigvals ** (-expon)
self.A = self.scaling[:,np.newaxis] * self.eigvects.T # cutoff by spacedimen
self.Ainv = (1 / self.scaling[np.newaxis,:]) * self.eigvects # spacedimen by cutoff
# if self.projection:
# self.init_x = self.init_x @ self.Ainv
def step_simple(self, scores, codes):
""" Taking scores and codes to return new codes, without generating images
Used in cases when the images are better handled in outer objects like Experiment object
"""
# Note it's important to decide which variable is to be saved in the `Optimizer` object
# Note to confirm with other code, this part is transposed.
# set short name for everything to simplify equations
N = self.space_dimen
lambda_, mu, mueff, chiN = self.lambda_, self.mu, self.mueff, self.chiN
cc, cs, c1, damps = self.cc, self.cs, self.c1, self.damps
sigma, A, Ainv, ps, pc, = self.sigma, self.A, self.Ainv, self.ps, self.pc,
# Sort by fitness and compute weighted mean into xmean
if self.maximize is False:
code_sort_index = np.argsort(scores) # ascending order (minimization); negate scores for maximization
else:
code_sort_index = np.argsort(-scores)
# scores = scores[code_sort_index] # Ascending order. minimization
if self._istep == 0:
# Population initialization: if no init_code was given, the first xmean is the weighted average of the supplied codes (e.g. codes of natural images)
if self.init_x is None:
select_n = len(code_sort_index[0:mu])
temp_weight = self.weights[:, :select_n] / np.sum(self.weights[:, :select_n]) # in case the codes is not enough
self.xmean = temp_weight @ codes[code_sort_index[0:mu], :]
else:
self.xmean = self.init_x
else:
self.xold = self.xmean
self.xmean = self.weights @ codes[code_sort_index[0:mu], :] # Weighted recombination, new mean value
# Cumulation statistics through steps: Update evolution paths
randzw = self.weights @ self.randz[code_sort_index[0:mu], :]
ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * randzw
pc = (1 - cc) * pc + sqrt(cc * (2 - cc) * mueff) * randzw @ A
# Adapt step size sigma
sigma = sigma * exp((cs / damps) * (norm(ps) / chiN - 1))
# self.sigma = self.sigma * exp((self.cs / self.damps) * (norm(ps) / self.chiN - 1))
print("sigma: %.2f" % sigma)
# Update A and Ainv with search path
if self.counteval - self.eigeneval > self.update_crit: # to achieve O(N ^ 2) do decomposition less frequently
self.eigeneval = self.counteval
t1 = time.time()
v = pc @ Ainv # (1, spacedimen) * (spacedimen, N) -> (1,N)
normv = v @ v.T
# Directly update the A Ainv instead of C itself
A = sqrt(1 - c1) * A + sqrt(1 - c1) / normv * (
sqrt(1 + normv * c1 / (1 - c1)) - 1) * v.T @ pc # FIXME, dimension error
Ainv = 1 / sqrt(1 - c1) * Ainv - 1 / sqrt(1 - c1) / normv * (
1 - 1 / sqrt(1 + normv * c1 / (1 - c1))) * Ainv @ v.T @ v
t2 = time.time()
print("A, Ainv update! Time cost: %.2f s" % (t2 - t1))
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.lambda_, N))
self.randz = randn(self.lambda_, N) # save the random number for generating the code.
new_samples = self.xmean + sigma * self.randz @ A
self.counteval += self.lambda_
# Clever way to generate multivariate gaussian!!
# Stretch the Gaussian hypersphere with D and transform the
# ellipsoid by the B matrix (linear transform between coordinates)
self.sigma, self.A, self.Ainv, self.ps, self.pc = sigma, A, Ainv, ps, pc,
self._istep += 1
return new_samples
#%% New Optimizers from the paper.
class HessAware_ADAM:
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, nu=0.9, maximize=True):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of estimating gradient
self.nu = nu # update rate for D
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.D = np.ones((1, self.dimen)) # running average of gradient square
self.Hdiag = np.ones((1, self.dimen)) # Diagonal of estimated Hessian
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self._istep == 0:
# First step: take the first row of codes as the starting point
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
else:
# self.xcur = self.xnew # should be same as following
self.xcur = codes[0:1, :]
self.weights = (scores - scores[0]) / self.mu
HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
if self.maximize is True:
self.xnew = self.xcur + self.lr * HAgrad # add - operator it will do maximization.
else:
self.xnew = self.xcur - self.lr * HAgrad
self.D = self.nu * self.D + (1 - self.nu) * HAgrad ** 2 # running average of gradient square # Missing square before
self.Hdiag = self.D / (1 - self.nu ** self._istep) # Diagonal of estimated Hessian
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # save the random number for generating the code.
self.outerV = self.innerU / sqrt(self.Hdiag) # H^{-1/2}U
new_samples[0:1, :] = self.xnew
new_samples[1: , :] = self.xnew + self.mu * self.outerV # m + sig * Normal(0,C)
self._istep += 1
return new_samples
#%%
class HessAware_Gauss:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5, maximize=True):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
def step_hessian(self, scores):
'''Currently only use part of the samples to estimate hessian, maybe need more '''
fbasis = scores[0]
fpos = scores[-2*self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs((fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda) )
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B+1, :]
scores = scores[:self.B+1]
self.hess_comp = False
if self._istep == 0:
# First step: take the first row of codes as the starting point
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
self.weights = (scores - scores[0]) / self.mu
# estimate gradient from the codes and scores
HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
print("Estimated Gradient Norm %f"%np.linalg.norm(HAgrad))
            if self.maximize is True:
                self.xnew = self.xcur + self.lr * HAgrad  # gradient ascent (maximization)
            else:
                self.xnew = self.xcur - self.lr * HAgrad  # gradient descent (minimization)
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + ((self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
new_samples[0:1, :] = self.xnew
new_samples[1: , :] = self.xnew + self.mu * self.outerV # m + sig * Normal(0,C)
if self._istep % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
return new_samples
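#%% Minimal usage sketch (illustrative, not part of the original code): drive HessAware_Gauss
# with an ask/tell loop on a toy quadratic objective whose maximum is at the origin.
# The parameter values below are arbitrary choices for the demo.
def _toy_objective(X):
    return -np.sum(X ** 2, axis=1)

_demo_opt = HessAware_Gauss(space_dimen=10, population_size=20, lr=0.5, mu=0.5)
_demo_samples = np.random.randn(1, 10)            # initial code (one row)
for _ in range(5):
    _demo_scores = _toy_objective(_demo_samples)  # score every proposed sample
    _demo_samples = _demo_opt.step_simple(_demo_scores, _demo_samples)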
def rankweight(lambda_, mu=None):
""" Rank weight inspired by CMA-ES code
mu is the cut off number, how many samples will be kept while `lambda_ - mu` will be ignore
"""
if mu is None:
mu = lambda_ / 2 # number of parents/points for recombination
# Defaultly Select half the population size as parents
weights = zeros(int(lambda_))
mu_int = int(floor(mu))
weights[:mu_int] = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
weights = weights / sum(weights)
return weights
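#%% Quick sanity check for rankweight (illustrative, not part of the original code):
# with a population of 8 and the default cutoff, only the top half receives positive,
# decreasing weights and the weights sum to 1.
_demo_w = rankweight(8)
assert abs(_demo_w.sum() - 1.0) < 1e-12
assert (_demo_w[:4] > 0).all() and (_demo_w[4:] == 0).all()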
# Major Classes.
class HessAware_Gauss_Spherical:
"""Gaussian Sampling method for estimating Hessian"""
    def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5,
                 sphere_norm=300, maximize=True, rankweight=False, select_cutoff=None):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.sphere_norm = sphere_norm
self.tang_codes = zeros((self.B, self.dimen))
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros(
(self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.rankweight = rankweight # Switch between using raw score as weight VS use rank weight as score
print(
"Spereical Space dimension: %d, Population size: %d, Optimization Parameters:\n Exploration: %.3f\n Learning rate: %.3f"
% (self.dimen, self.B, self.mu, self.lr))
if self.rankweight:
if select_cutoff is None:
self.select_cutoff = int(population_size / 2)
else:
self.select_cutoff = select_cutoff
print("Using rank weight, selection size: %d\n" % self.select_cutoff)
def step_hessian(self, scores):
        '''Estimate the Hessian from the extra paired samples (same scheme as in HessAware_Gauss).'''
fbasis = scores[0]
fpos = scores[-2 * self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs(
(fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda))
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B + 1, :]
scores = scores[:self.B + 1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
print('First generation\n')
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
# No reweighting as there should be a single code
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
if self.rankweight is False: # use the score difference as weight
                # normalize by B: a larger cohort of codes gives a better gradient estimate
self.weights = (scores[1:] - scores[0]) / self.B # / self.mu
else: # use a function of rank as weight, not really gradient.
                if self.maximize is False:  # for weighted recombination, the maximize flag acts here
                    code_rank = np.argsort(np.argsort(scores[1:]))   # ascending rank: lower scores get the better (smaller) rank
                else:
                    code_rank = np.argsort(np.argsort(-scores[1:]))  # descending rank: higher scores get the better (smaller) rank
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(scores) - 1, mu=self.select_cutoff)[
code_rank] # map the rank to the corresponding weight of recombination
# estimate gradient from the codes and scores
# HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
HAgrad = self.weights[np.newaxis, :] @ self.tang_codes
print("Estimated Gradient Norm %f" % np.linalg.norm(HAgrad))
if self.rankweight is False:
if self.maximize is True:
self.xnew = ExpMap(self.xcur, self.lr * HAgrad) # add - operator it will do maximization.
else:
self.xnew = ExpMap(self.xcur, - self.lr * HAgrad)
else:
self.xnew = ExpMap(self.xcur, self.lr * HAgrad)
# vtan_new = VecTransport(self.xcur, self.xnew, vtan_old)
# uni_vtan_old = vtan_old / np.linalg.norm(vtan_old);
# uni_vtan_new = vtan_new / np.linalg.norm(vtan_new); # uniform the tangent vector
# Generate new sample by sampling from Gaussian distribution
self.tang_codes = zeros((self.B, N)) # Tangent vectors of exploration
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + (
(self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
new_samples[0:1, :] = self.xnew
self.tang_codes[:, :] = self.mu * self.outerV # m + sig * Normal(0,C)
new_samples[1:, ] = ExpMap(self.xnew, self.tang_codes)
if (self._istep + 1) % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
self._curr_samples = new_samples / norm(new_samples, axis=1)[:, np.newaxis] * self.sphere_norm
return self._curr_samples
class HessAware_Gauss_Cylind:
""" Cylindrical Evolution, Both angular and radial. """
def __init__(self, space_dimen, population_size=40, population_kept=None, lr_norm=0.5, mu_norm=5, lr_sph=2,
mu_sph=0.005,
Lambda=1, Hupdate_freq=201, max_norm=300, maximize=True, rankweight=False):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr_norm = lr_norm # learning rate (step size) of moving along gradient
self.mu_norm = mu_norm # scale of the Gaussian distribution to estimate gradient
self.lr_sph = lr_sph
self.mu_sph = mu_sph
self.sphere_flag = True # initialize the whole system as linear?
self.max_norm = max_norm
self.tang_codes = zeros((self.B, self.dimen))
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros(
(self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.rankweight = rankweight # Switch between using raw score as weight VS use rank weight as score
print("Spereical Space dimension: %d, Population size: %d, Optimization Parameters:\n"
"Norm Exploration Range %.3f Learning rate: %.3f\n Angular Exploration Range:%.3f Learning Rate: %.3f"
% (self.dimen, self.B, self.mu_norm, self.lr_norm, self.mu_sph, self.lr_sph))
if rankweight:
self.BKeep = population_kept if population_kept is not None else int(self.B // 2)
print("Using rank based weights. Keep population size: %d" % (self.BKeep))
def step_hessian(self, scores):
''' Currently not implemented in Spherical Version. '''
raise NotImplementedError
# fbasis = scores[0]
# fpos = scores[-2 * self.HB:-self.HB]
# fneg = scores[-self.HB:]
# weights = abs(
# (fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
# C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# # H = C^TC + Lambda * I
# self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
# self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
# print("Hessian Samples Spectrum", self.HessD)
# print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda))
def step_simple(self, scores, codes):
''' Assume the 1st row of codes is the xnew new starting point '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
raise NotImplementedError
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B + 1, :]
scores = scores[:self.B + 1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
print('First generation\n')
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
# No reweighting as there should be a single code
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
if self.rankweight is False: # use the score difference as weight
                # normalize by B: a larger cohort of codes gives a better gradient estimate
self.weights = (scores[:] - scores[0]) / self.B # / self.mu
else: # use a function of rank as weight, not really gradient.
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort(scores[:])) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-scores[:]))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(scores), mu=self.BKeep)[code_rank]
# map the rank to the corresponding weight of recombination
# estimate gradient from the codes and scores
# HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
tang_codes_aug = np.concatenate((np.zeros((1, self.tang_codes.shape[1])), self.tang_codes), axis=0)
HAgrad = self.weights[np.newaxis,
:] @ tang_codes_aug # self.tang_codes # Changed to take the current location into account.
normgrad = self.weights[np.newaxis, 1:] @ (self.code_norms - norm(self.xcur)) # Recombine norms to get,
print("Estimated Angular Gradient Norm %f" % norm(HAgrad))
print("Estimated Radial Gradient Norm %f" % normgrad)
mov_sign = -1 if (not self.maximize) and (not self.rankweight) else 1
normnew = np.minimum(self.max_norm, norm(
self.xcur) + mov_sign * self.lr_norm * normgrad) # use the new norm to normalize ynew
self.xnew = ExpMap(self.xcur, mov_sign * self.lr_sph * HAgrad) # add - operator it will do maximization.
self.xnew = renormalize(self.xnew, normnew)
# Generate new sample by sampling from Gaussian distribution
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + (
(self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
self.tang_codes = self.mu_sph * self.outerV # m + sig * Normal(0,C)
self.tang_codes = orthogonalize(self.xnew, self.tang_codes) # Tangent vectors of exploration
new_norms = norm(self.xnew) + self.mu_norm * randn(self.B)
new_norms = np.minimum(self.max_norm, new_norms)
new_samples = zeros((self.B + 1, N))
new_samples[0:1, :] = self.xnew
new_samples[1:, ] = ExpMap(self.xnew, self.tang_codes)
new_samples[1:, ] = renormalize(new_samples[1:, ], new_norms)
print("norm of new samples", norm(new_samples, axis=1))
self.code_norms = new_norms # doesn't include the norm of the basis vector.
if (self._istep + 1) % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
            H_pos_samples = self.xnew + self.mu_sph * self.HinnerU
            H_neg_samples = self.xnew - self.mu_sph * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
return new_samples
#%
class HessEstim_Gauss:
"""Code to generate samples and estimate Hessian from it"""
def __init__(self, space_dimen):
self.dimen = space_dimen
self.HB = 0
self.std = 2
def GaussSampling(self, xmean, batch=100, std=2):
xmean = xmean.reshape(1, -1)
self.std = std
self.HB = batch
self.HinnerU = randn(self.HB, self.dimen) # / sqrt(self.dimen) # make it unit var along the code vector dimension
H_pos_samples = xmean + self.std * self.HinnerU
H_neg_samples = xmean - self.std * self.HinnerU
new_samples = np.concatenate((xmean, H_pos_samples, H_neg_samples), axis=0)
return new_samples
def HessEstim(self, scores):
fbasis = scores[0]
fpos = scores[-2 * self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs(
(fpos + fneg - 2 * fbasis) / 2 / self.std ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
# self.HessV.shape = (HB, HB); self.HessD.shape = (HB,), self.HessUC.shape = (HB, dimen)
# self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f" % ((self.HessD ** 2).sum()))
return self.HessV, self.HessD, self.HessUC
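#%% Illustrative end-to-end run of HessEstim_Gauss (not part of the original code):
# sample around a base point, score the probes with a toy concave quadratic, and recover
# the factorization H ~ C^T C; HessD holds the singular values of the weighted deviations.
_hdemo_estim = HessEstim_Gauss(5)
_hdemo_probe = _hdemo_estim.GaussSampling(np.zeros(5), batch=100, std=0.5)
_hdemo_scores = -np.einsum("ij,ij->i", _hdemo_probe, _hdemo_probe * np.arange(1.0, 6.0))
_hdemo_HV, _hdemo_HD, _hdemo_HUC = _hdemo_estim.HessEstim(_hdemo_scores)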
#%
class HessAware_Gauss_DC:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5,
maximize=True, max_norm=300, rankweight=False, nat_grad=False):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xnew = np.zeros((1, self.dimen)) # new base point
self.xscore = 0
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.code_stored = np.array([]).reshape((0, self.dimen))
self.score_stored = np.array([])
self.N_in_samp = 0
self.max_norm = max_norm
self.nat_grad = nat_grad # use the natural gradient definition, or normal gradient.
self.rankweight = rankweight
def new_generation(self, init_score, init_code):
self.xscore = init_score
self.score_stored = np.array([])
self.xnew = init_code
self.code_stored = np.array([]).reshape((0, self.dimen))
self.N_in_samp = 0
def compute_hess(self, scores, Lambda_Frac=100):
'''Currently only use part of the samples to estimate hessian, maybe need more '''
fbasis = self.xscore
fpos = scores[:self.HB]
fneg = scores[-self.HB:]
weights = abs((fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.Lambda = (self.HessD ** 2).sum() / Lambda_Frac
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda) )
def compute_grad(self, scores):
# add the new scores to storage
self.score_stored = np.concatenate((self.score_stored, scores), axis=0) if self.score_stored.size else scores
if self.rankweight is False: # use the score difference as weight
            # normalize by the number of stored codes: a larger cohort gives a better gradient estimate
self.weights = (self.score_stored - self.xscore) / self.score_stored.size # / self.mu
# assert(self.N_in_samp == self.score_stored.size)
else: # use a function of rank as weight, not really gradient.
# Note descent check **could be** built into ranking weight?
# If not better just don't give weights to that sample
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort( self.score_stored)) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-self.score_stored))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(self.score_stored), mu=20)[code_rank] # map the rank to the corresponding weight of recombination
# only keep the top 20 codes and recombine them.
if self.nat_grad: # if or not using the Hessian to rescale the codes
hagrad = self.weights @ (self.code_stored - self.xnew) # /self.mu
else:
Hdcode = self.Lambda * (self.code_stored - self.xnew) + (
((self.code_stored - self.xnew) @ self.HessUC.T) * self.HessD **2) @ self.HessUC
hagrad = self.weights @ Hdcode # /self.mu
print("Gradient Norm %.2f" % (
|
np.linalg.norm(hagrad)
|
numpy.linalg.norm
|
import attr
import argparse
import PIL.Image as image
import numpy
import nes
@attr.s
class Rect:
x0 = attr.ib()
y0 = attr.ib()
x1 = attr.ib()
y1 = attr.ib()
def __call__(self, arr):
return arr[self.y0:self.y1,self.x0:self.x1]
@attr.s
class Part:
xoff = attr.ib()
yoff = attr.ib()
sprite = attr.ib()
palette = attr.ib()
def find_sprite(arr):
"""Find bounding box of a single sprite in a 2D boolean array."""
rows = numpy.argwhere(numpy.amax(arr, axis=1))
cols = numpy.argwhere(numpy.amax(arr, axis=0))
return Rect(int(numpy.amin(cols)), int(numpy.amin(rows)),
int(numpy.amax(cols)) + 1, int(numpy.amax(rows)) + 1)
def find_sprites(arr):
"""Find bounding boxes of sprites in a 2D boolean array."""
rows, = numpy.amax(arr, axis=1),
rows = numpy.pad(rows, 1, 'constant')
ranges = numpy.nonzero(rows[:-1] != rows[1:])[0].reshape(-1, 2)
for ymin, ymax in ranges:
rarr = arr[ymin:ymax]
cols, = numpy.amax(rarr, axis=0),
cols = numpy.pad(cols, 1, 'constant')
cranges = numpy.nonzero(cols[:-1] != cols[1:])[0].reshape(-1, 2)
for xmin, xmax in cranges:
yield Rect(int(xmin),int(ymin),
int(xmax),int(ymax))
def find_mark(arr):
"""Find the coordinates of a sprite mark from a 2D boolean array."""
a = arr.astype(numpy.uint16)
xvals, = numpy.where(numpy.sum(a, axis=0) > 1)
yvals, = numpy.where(numpy.sum(a, axis=1) > 1)
if len(xvals) != 1 or len(yvals) != 1:
raise ValueError('cannot find mark')
return int(xvals[0]) + 1, int(yvals[0]) + 1
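def _demo_find_sprites():
    """Illustrative example (not part of the original tool): two filled blocks in a
    boolean mask are returned as two bounding Rects."""
    mask = numpy.zeros((8, 8), dtype=bool)
    mask[1:3, 2:5] = True
    mask[5:7, 0:2] = True
    # expected: [Rect(x0=2, y0=1, x1=5, y1=3), Rect(x0=0, y0=5, x1=2, y1=7)]
    return list(find_sprites(mask))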
class Sprites:
@classmethod
def load(class_, inpath):
img = image.open(inpath)
assert img.mode == 'P'
imgpalette = numpy.array(img.getpalette(), numpy.uint8).reshape((-1, 3))
img = numpy.array(img)
mark = img[0,0]
img[0,0] = 0
ncolors = numpy.amax(img)
imgpalette = imgpalette[:ncolors]
# Transparent must be palette index 0.
assert img[0,1] == 0
# Get map from palette colors to 0..3 indexes.
pcolors = img[:,0:3]
pcolors = pcolors[numpy.where(numpy.amin(pcolors, axis=1))[0]]
pcolors = numpy.pad(pcolors, ((0, 0), (1, 0)), 'constant')
pmap = numpy.zeros((len(pcolors), ncolors), numpy.uint8)
pmask = numpy.zeros((ncolors,), numpy.uint8)
pmap[:,1:] = 0xff
prange = numpy.arange(0, 4)
for i in range(len(pcolors)):
pmap[i][pcolors[i]] = prange
pmask[pcolors[i]] |= 1 << i
# Convert each sprite.
img = img[:,3:]
pats = []
sprites = []
for sp in find_sprites(img != 0):
# Sprite must be aligned vertically to 16 pixel grid.
assert sp.y0 & 15 == 0
sp = sp(img)
spmark = sp == mark
xorigin, yorigin = find_mark(spmark)
sp = numpy.where(spmark, 0, sp)
assert numpy.amax(sp[0]) == 0
sp = numpy.pad(sp, [(0, 0), (0, 7)], 'constant')
sprite = []
for y in numpy.arange(1, img.shape[0], 16):
rows = sp[y:y+16]
cols = numpy.argwhere(numpy.amax(rows, axis=0))
if cols.shape[0] == 0:
continue
xmin = int(numpy.min(cols))
xmax = int(numpy.max(cols)) + 1
nsprite = (xmax - xmin + 7) // 8
xmax = xmin + nsprite * 8
rows = rows[:,xmin:xmax]
smask = int(numpy.bitwise_and.reduce(
numpy.bitwise_and.reduce(pmask[rows])))
for i in range(len(pmap)):
if (smask >> i) & 1:
rows = pmap[i,rows]
palette = i
break
else:
raise ValueError('no matching palette')
nsprite = (rows.shape[1] + 7) // 8
for x in range(0, nsprite * 8, 8):
sprite.append(Part(
x + xmin - xorigin,
y - yorigin,
len(pats),
palette,
))
pats.append(rows[:,x:x+8])
sprites.append(sprite)
self = class_()
self.palettes = imgpalette[pcolors]
self.sprites = sprites
        self.patterns = numpy.array(pats)
import numpy as np
from scipy.stats import fisher_exact
import scipy.sparse as sp
import multiprocessing
from functools import partial
def perform_fisher(nn_gene_expression, binary_expression, p_value, odds_ratio=2):
p_value_nn = 1
if np.sum(nn_gene_expression) != 0:
input_fisher = np.array([[np.sum(nn_gene_expression), np.sum(binary_expression)-np.sum(nn_gene_expression)],
                                 [np.sum(~nn_gene_expression), np.sum(~binary_expression)-np.sum(~nn_gene_expression)]])
import sys
sys.path.insert(0, "../lib")
sys.path.insert(1, "../lib/x64")
import Leap
import os
import urx
import time
import math
import numpy as np
import math3d as m3d
from scipy.spatial.transform import Rotation as R
import traceback
import json
import pandas as pd
np.set_printoptions(precision=4, suppress=True)
def convert_tool_pose_to_transformation_matrix(tool_pose):
position_vector = np.array(tool_pose[:3]).reshape((3, 1))
    rotation_vector = np.array(tool_pose[3:])
import os
import sys
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from ..exec_model import ExecModel, ModelObject
from ..plotting import SensitivityOptions
from .util import SignalingMetric, dlnyi_dlnxj
@dataclass
class ReactionSensitivity(ExecModel, SignalingMetric):
"""Sensitivity for rate equations"""
model: ModelObject
create_metrics: Optional[Dict[str, Callable[[np.ndarray], Union[int, float]]]]
def __post_init__(self) -> None:
self._plotting: SensitivityOptions = self.model.viz.get_sensitivity_options()
self._coefficients: Callable[[str], str] = lambda metric: os.path.join(
self.model.path,
"sensitivity_coefficients",
"reaction",
f"{metric}.npy",
)
self._path_to_figs: Callable[[str], str] = lambda metric: os.path.join(
self.model.path,
"figure",
"sensitivity",
"reaction",
f"{metric}",
)
if self.create_metrics is not None:
for name, function in self.create_metrics.items():
self.quantification[name] = function
def _calc_sensitivity_coefficients(
self,
metric: str,
reaction_indices: List[int],
) -> np.ndarray:
"""Calculating Sensitivity Coefficients
Parameters
----------
metric : str
The signaling metric used for sensitivity analysis.
reaction_indices : list of int
List of reaction indices.
Returns
-------
sensitivity_coefficients : numpy array
"""
rate = 1.01 # 1% change
n_file = self.get_executable()
signaling_metric = np.full(
(
len(n_file),
len(reaction_indices) + 1,
len(self.model.observables),
len(self.model.problem.conditions),
),
np.nan,
)
for i, nth_paramset in enumerate(n_file):
optimized = self.load_param(nth_paramset)
for j, rxn_idx in enumerate(reaction_indices):
perturbation: Dict[int, float] = {}
for idx in reaction_indices:
perturbation[idx] = 1.0
perturbation[rxn_idx] = rate
if (
self.model.problem.simulate(optimized.params, optimized.initials, perturbation)
is None
):
for k, _ in enumerate(self.model.observables):
for l, _ in enumerate(self.model.problem.conditions):
signaling_metric[i, j, k, l] = self.quantification[metric](
self.model.problem.simulations[k, :, l]
)
sys.stdout.write(
"\r{:d} / {:d}".format(
i * len(reaction_indices) + j + 1,
len(n_file) * len(reaction_indices),
)
)
if self.model.problem.simulate(optimized.params, optimized.initials) is None:
for k, _ in enumerate(self.model.observables):
for l, _ in enumerate(self.model.problem.conditions):
signaling_metric[i, -1, k, l] = self.quantification[metric](
self.model.problem.simulations[k, :, l]
)
sensitivity_coefficients = dlnyi_dlnxj(
signaling_metric,
n_file,
reaction_indices,
self.model.observables,
self.model.problem.conditions,
rate,
)
return sensitivity_coefficients
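    # The coefficients computed above follow the usual log-derivative definition of
    # sensitivity: C_ij = d ln(metric_i) / d ln(v_j) ~ (ln m(perturbed) - ln m(baseline)) / ln(rate),
    # where v_j is the j-th reaction rate and rate = 1.01 corresponds to the 1% perturbation;
    # dlnyi_dlnxj is assumed to implement this finite-difference ratio.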
def _load_sc(
self,
metric: str,
reaction_indices: List[int],
) -> np.ndarray:
"""
Load (or calculate) sensitivity coefficients.
"""
if not os.path.isfile(self._coefficients(metric)):
os.makedirs(
os.path.join(
self.model.path,
"sensitivity_coefficients",
"reaction",
),
exist_ok=True,
)
sensitivity_coefficients = self._calc_sensitivity_coefficients(
metric, reaction_indices
)
np.save(self._coefficients(metric), sensitivity_coefficients)
else:
sensitivity_coefficients = np.load(self._coefficients(metric))
return sensitivity_coefficients
@staticmethod
def _draw_vertical_span(
biological_processes: List[List[int]],
width: float,
) -> None:
"""
Draw vertical span separating biological processes.
"""
if len(biological_processes) > 1:
left_end = 0
for i, proc in enumerate(biological_processes):
if i % 2 == 0:
plt.axvspan(
left_end - width,
left_end - width + len(proc),
facecolor="k",
alpha=0.1,
)
left_end += len(proc)
def _write_reaction_indices(
self,
reaction_indices: List[int],
average: np.ndarray,
stdev: np.ndarray,
width: float,
) -> None:
"""
Put reaction index on each bar.
"""
distance = np.max(average) * 0.05
for i, j in enumerate(reaction_indices):
xp = i + width * 0.5 * (len(self.model.problem.conditions) - 1)
yp = average[i, np.argmax(np.abs(average[i, :]))]
yerr = stdev[i, np.argmax(stdev[i, :])]
if yp > 0:
plt.text(
xp,
yp + yerr + distance,
str(j),
ha="center",
va="bottom",
fontsize=10,
rotation=90,
)
else:
plt.text(
xp,
yp - yerr - distance,
str(j),
ha="center",
va="top",
fontsize=10,
rotation=90,
)
def _barplot_sensitivity(
self,
metric: str,
sensitivity_coefficients: np.ndarray,
biological_processes: List[List[int]],
reaction_indices: List[int],
show_indices: bool,
) -> None:
"""
Visualize sensitivity coefficients using barplot.
"""
os.makedirs(os.path.join(self._path_to_figs(metric), "barplot"), exist_ok=True)
# rcParams
self.model.viz.set_sensitivity_rcParams()
if len(self._plotting.cmap) < len(self.model.problem.conditions):
raise ValueError(
"len(sensitivity_options['cmap']) must be equal to "
"or greater than len(problem.conditions)."
)
for k, obs_name in enumerate(self.model.observables):
plt.figure(figsize=self._plotting.figsize)
self._draw_vertical_span(biological_processes, self._plotting.width)
sensitivity_array = sensitivity_coefficients[:, :, k, :]
# Remove NaN
nan_idx = []
for i in range(sensitivity_array.shape[0]):
for j in range(sensitivity_array.shape[1]):
                    if np.isnan(sensitivity_array[i, j, :]).any():
                        nan_idx.append(i)
from astropy.coordinates import Angle, SkyCoord, get_sun
from astropy.constants import G, M_earth, R_earth
from astropy.time import Time
from astropy import units as u
import numpy as np
def apparent_to_absolute(magapp, jd, ra, dec):
""" Compute absolute magnitude from apparent magnitude
Parameters
----------
magapp: array
Apparent magnitudes of alerts
jd: array
Times (JD) of alerts
ra: array
RA of alerts (deg)
dec: array
Dec of alerts (deg)
Returns
----------
magabs: array
Absolute magnitude of alerts
"""
sun_pos = get_sun(Time(jd, format='jd'))
sun_dists = sun_pos.separation(SkyCoord(ra, dec, unit='deg')).rad
denominator = np.sin(sun_dists) + (np.pi - sun_dists) * np.cos(sun_dists)
magabs = magapp - 5. / 2. * np.log10(np.pi / denominator)
return magabs, sun_dists
def fake_lambertian_size(magabs, msun, distance, albedo):
""" Try to estimate the size of an object using Lambertian formula
Parameters
----------
magabs: array
Absolute magnitudes of alerts
msun: float
Apparent magnitude of the Sun
distance: array
Distance between the object and us [km]
albedo: float
Albedo - take 0.175 (2012.12549)
"""
    return 10**((msun - magabs)/5.) * distance * np.sqrt(6/albedo)
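def _demo_fake_lambertian_size():
    """ Illustrative example (not part of the original module): size estimate for a
    hypothetical object with absolute magnitude 30 at 1e5 km, using albedo 0.175 and
    msun ~ -26.74 (approximate apparent V magnitude of the Sun).
    """
    return fake_lambertian_size(magabs=30.0, msun=-26.74, distance=1e5, albedo=0.175)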
import torch
import math
import numpy as np
import pandas as pd
import torch.nn.functional as F
import statistics
from glob import glob
from utilities.images import load_images
from models.latent_optimizer import VGGFaceProcessing
from models.vgg_face2 import resnet50_scratch_dag
from models.regressors import ImageToLandmarks_batch, VGGToHist, LandMarksRegressor, CelebaRegressor
vgg_face_dag = resnet50_scratch_dag(
'./Trained_model/resnet50_scratch_dag.pth').cuda().eval()
image_directory = './celeba/data/'
filenames = sorted(glob(image_directory + "*.jpg"))
def feed_into_Vgg(generated_image):
features = vgg_face_dag(generated_image)
return features
def image_to_Vggencoder(img):
vgg_processing = VGGFaceProcessing()
generated_image = vgg_processing(img)
features = feed_into_Vgg(generated_image)
return features
def feed_into_Image_to_landmarksRegressor(img):
landmark_regressor = ImageToLandmarks_batch(landmark_num=68).cuda().eval()
weights_path = './Trained_model/Image_to_landmarks_Regressor_batchnorm_lr=0.001.pt'
landmark_regressor.load_state_dict(torch.load(weights_path))
target_size = 64
img = F.interpolate(img, target_size, mode='bilinear')
pred_landmarks = landmark_regressor(img)
return pred_landmarks
def generate_image_hist(image, bins=20):
hist_list = []
r = image[:, 0, :]
g = image[:, 1, :]
b = image[:, 2, :]
hist_r = torch.histc(r.float(), bins, min=0, max=255).cpu().detach().numpy()
hist_g = torch.histc(g.float(), bins, min=0, max=255).cpu().detach().numpy()
hist_b = torch.histc(b.float(), bins, min=0, max=255).cpu().detach().numpy()
hist = []
num_pix = 224 * 224
hist.append(hist_r / num_pix)
hist.append(hist_g / num_pix)
hist.append(hist_b / num_pix)
hist_list.append(hist)
hist_list = np.asarray(hist_list)
return hist_list
class SoftHistogram(torch.nn.Module):
def __init__(self, bins, min, max, sigma):
super(SoftHistogram, self).__init__()
self.bins = bins
self.min = min
self.max = max
self.sigma = sigma
self.delta = float(max - min) / float(bins)
self.centers = float(min) + self.delta * (torch.arange(bins).float() + 0.5)
self.centers = torch.nn.Parameter(self.centers, requires_grad=False)
def forward(self, x):
x = torch.unsqueeze(x, 0) - torch.unsqueeze(self.centers, 1)
x = torch.sigmoid(self.sigma * (x + self.delta / 2)) - torch.sigmoid(self.sigma * (x - self.delta / 2))
x = x.sum(dim=1)
return x
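def _demo_soft_histogram():
    """Illustrative sketch (not part of the original script): the soft histogram of
    uniform noise should closely track the hard histogram from torch.histc."""
    x = torch.rand(10000) * 255
    soft = SoftHistogram(bins=20, min=0, max=255, sigma=1.85)
    return soft(x), torch.histc(x, bins=20, min=0, max=255)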
def image_to_hist(croped_image, bins_num):
r = croped_image[:, 0, :]
g = croped_image[:, 1, :]
b = croped_image[:, 2, :]
softhist = SoftHistogram(bins_num, min=0, max=255, sigma=1.85).cuda() # sigma=0.04
r = r.flatten()
g = g.flatten()
b = b.flatten()
hist_r = softhist(r)
hist_g = softhist(g)
hist_b = softhist(b)
num_pix = 224 * 224
hist_r = hist_r / num_pix
hist_g = hist_g / num_pix
hist_b = hist_b / num_pix
hist_pred = torch.stack((hist_r, hist_g, hist_b))
return hist_pred
# calculate the Earth mover's distance (EMD) between batches of histograms
def EMDLoss(output, target):
# output and target must have size nbatch * nhist * nbins
# We will compute an EMD for each histogram for each batch element
loss = torch.zeros(output.shape[0], output.shape[1], output.shape[2] + 1) # loss: [batch_size, 3, bins_num]
for i in range(1, output.shape[2] + 1):
loss[:, :, i] = output[:, :, i - 1] + loss[:, :, i - 1] - target[:, :, i - 1] # loss:[32,3,20]
# Compute the EMD
loss = loss.abs().sum(dim=2) # loss: [32,3]
# Sum over histograms
loss = loss.sum(dim=1) # loss: [32]
# Average over batch
loss = loss.mean()
return loss
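def _demo_emd_loss():
    """Illustrative check (not part of the original script): identical normalized
    histograms have an Earth mover's distance of zero."""
    h = torch.rand(4, 3, 20)
    h = h / h.sum(dim=2, keepdim=True)
    return EMDLoss(h, h)  # tensor(0.)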
# compare the attribute rate predicted by the regressor with the true rate on the CelebA test set.
def Celeba_Regressor_Result_preddistribution():
label = 'Wearing_Hat' # Eyeglasses,Smiling,Mouth_Slightly_Open, Blurry, Wearing_Hat, Wearing_Necktie
vgg_processing = VGGFaceProcessing()
celeba_regressor = CelebaRegressor().cuda()
celeba_regressor.load_state_dict(torch.load('./Trained_model/celebaregressor_' + label + '.pt'))
celeba_regressor.eval()
pred_list = []
for i in filenames[-10000:]:
image = load_images([i])
image = torch.from_numpy(image).cuda()
image = vgg_processing(image)
vgg_descriptors = vgg_face_dag(image).cuda()
pred = celeba_regressor(vgg_descriptors)
pred_choice = (pred > 0.4).int()
pred_choice = torch.squeeze(pred_choice)
pred_list.append(pred_choice)
attribute_count = 0
for number in pred_list:
if number == 1:
attribute_count += 1
guess = attribute_count / 10000 #
save_path = "./Result/Celeba_Regressor_Realdistribution" + label + ".txt"
with open(save_path, 'w') as f:
f.write(str(guess))
data = pd.read_csv('./celeba/Anno/list_attr_celeba_name+glass+smiling_hat.csv').to_dict()
droped_table = pd.read_csv('./celeba/Anno/imglist_after_crop.csv')
leaved_img_name = droped_table['File_Name']
count = 0
for i in leaved_img_name[-10000:]:
i = i.split('.')[0]
i = int(i) - 1
result = data[label][i]
if result == 1:
count += 1
attribute_testsets = count / 10000
save_path = "./Result/Celeba_Regressor_Result_preddistribution" + label + ".txt"
with open(save_path, 'w') as f:
f.write(str(attribute_testsets))
# calculate the mean normalized error between the target 68-landmark points and the predicted 68-landmark points
def computeMNELandmark(target_landmark, pred_landmark):
sum = 0
count = 0
for target, pred in zip(target_landmark, pred_landmark):
for i in range(0, len(target), 2):
pred_x = int(pred[i])
pred_y = int(pred[i + 1])
target_x = int(target[i])
target_y = int(target[i + 1])
point_pred = np.array([pred_x, pred_y])
            point_target = np.array([target_x, target_y])
### This is run when you want to select the parameters from the parameters file
import transformers
import torch
import neptune
from knockknock import slack_sender
from transformers import *
import glob
from transformers import BertTokenizer
from transformers import BertForSequenceClassification, AdamW, BertConfig
import random
import pandas as pd
from transformers import BertTokenizer
from hateXplain.Models.utils import masked_cross_entropy,fix_the_random,format_time,save_normal_model,save_bert_model
from sklearn.metrics import accuracy_score,f1_score
from tqdm import tqdm
from hateXplain.TensorDataset.datsetSplitter import createDatasetSplit
from hateXplain.TensorDataset.dataLoader import combine_features
from hateXplain.Preprocess.dataCollect import collect_data,set_name
from sklearn.metrics import accuracy_score,f1_score,roc_auc_score,recall_score,precision_score
import matplotlib.pyplot as plt
import time
import os
from transformers import BertTokenizer
import GPUtil
from sklearn.utils import class_weight
import json
from hateXplain.Models.bertModels import *
from hateXplain.Models.otherModels import *
from hateXplain.Models.utils import return_params
import sys
import time
from waiting import wait
from sklearn.preprocessing import LabelEncoder
import numpy as np
import threading
import argparse
import ast
from datetime import datetime
NEPTUNE_API_TOKEN = os.environ.get('NEPTUNE_API_TOKEN')
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
    e_x = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e_x / e_x.sum(axis=0)
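# Example (illustrative): softmax(np.array([1.0, 2.0, 3.0])) ~ [0.090, 0.245, 0.665], which sums to 1.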
### gpu selection algo
def get_gpu():
print('There are %d GPU(s) available.' % torch.cuda.device_count())
while(1):
tempID = []
tempID = GPUtil.getAvailable(order = 'memory', limit = 1, maxLoad = 0.3, maxMemory = 0.2, includeNan=False, excludeID=[], excludeUUID=[])
if len(tempID) > 0:
print("Found a gpu")
print('We will use the GPU:',tempID[0],torch.cuda.get_device_name(tempID[0]))
deviceID=tempID
return deviceID
else:
time.sleep(5)
# return flag,deviceID
##### selects the type of model
def select_model(params,embeddings):
if(params['bert_tokens']):
if(params['what_bert']=='weighted'):
model = SC_weighted_BERT.from_pretrained(
params['path_files'], # Use the 12-layer BERT model, with an uncased vocab.
num_labels = params['num_classes'], # The number of output labels
output_attentions = True, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
hidden_dropout_prob=params['dropout_bert'],
params=params
)
else:
print("Error in bert model name!!!!")
return model
else:
text=params['model_name']
if(text=="birnn"):
model=BiRNN(params,embeddings)
elif(text == "birnnatt"):
model=BiAtt_RNN(params,embeddings,return_att=False,)
elif(text == "birnnscrat"):
model=BiAtt_RNN(params,embeddings,return_att=True)
elif(text == "cnn_gru"):
model=CNN_GRU(params,embeddings)
elif(text == "lstm_bad"):
model=LSTM_bad(params)
else:
print("Error in model name!!!!")
return model
@torch.no_grad()
def Eval_phase(params,which_files='test',model=None,test_dataloader=None,device=None):
if(params['is_model']==True):
print("model previously passed")
model.eval()
else:
return 1
# ### Have to modify in the final run
# model=select_model(params['what_bert'],params['path_files'],params['weights'])
# model.cuda()
# model.eval()
print("Running eval on ",which_files,"...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
# Tracking variables
true_labels=[]
pred_labels=[]
logits_all=[]
# Evaluate data for one epoch
for step, batch in tqdm(enumerate(test_dataloader)):
# Progress update every 40 batches.
if step % 40 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention vals
# [2]: attention mask
# [3]: labels
b_input_ids = batch[0].to(device)
b_att_val = batch[1].to(device)
b_input_mask = batch[2].to(device)
b_labels = batch[3].to(device)
# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
model.zero_grad()
outputs = model(b_input_ids,
attention_vals=b_att_val,
attention_mask=b_input_mask,
labels=None,device=device)
logits = outputs[0]
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences.
# Accumulate the total accuracy.
        pred_labels += list(np.argmax(logits, axis=1))
"""
Sufficient Dimensionality Reduction:
given an m-by-n matrix G of coocurrence statistics for discrete random variables X (m states) and Y (n states)
and a desired embedding size k, returns U and V, m-by-k and n-by-k matrices of embeddings for the states of
X and Y minimizing D_KL(G/Zg || exp(UV^T)/Z) where Z and Zg are constants that normalize both matrices
to be probability distributiions
This implementation uses Adagrad to solve the optimiztion problem, and uses
random features to make the gradient computation much more scalable than the algorithm given in the SDR paper.
see:
Globerson and Tishby. "Sufficient dimensionality reduction." JMLR, 2003
Gittens, Achlioptas, and Mahoney. "Skip-Gram - Zipf + Uniform = Vector Additivity". ACL, 2017
Le et al., "Fastfood --- Approximating Kernel Expansions in Loglinear Time", ICML 2013
"""
# Author: <NAME> <<EMAIL>>
# License: TBD
from typing import *
from primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from numpy.random import randn, random
from numpy import pi, exp, cos, sqrt, expand_dims, sum, ones, ones_like, log, hstack, ndarray, copy
from numpy.linalg import norm
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import svds
from scipy.optimize import brent
import sys
Input=ndarray
Output=ndarray
Params = NamedTuple('Params', [])
__all__ = ['SDR']
class SDR(UnsupervisedLearnerPrimitiveBase[Input, Output, Params]):
"""
Sufficient Dimensionality Reduction:
    given an m-by-n sparse matrix G of co-occurrence statistics for discrete random variables X (m states) and Y (n states)
and a desired embedding size k, returns U and V, m-by-k and n-by-k matrices of embeddings for the states of
X and Y minimizing D_KL(G/Zg || exp(UV^T)/Z) where Z and Zg are constants that normalize both matrices
to be probability distributions. Constrains the maximum row norms of U and V to be less than alpha.
This implementation uses Adagrad to solve the optimization problem, and uses
random features to make the gradient computation much more scalable than the algorithm given in the SDR paper.
Read docstring for __init__() to see hyperparameters, then use fit() and predict() (see their docstrings)
"""
def __init__(self, *, dim: int = 300, numrandfeats: int = 1000, alpha=5, tol: float = .01, stepsize: float = 0.1, maxIters: int = 100, eps: float = 0.001):
"""
inputs:
dim: the desired dimensionality of the embeddings (positive integer)
numrandfeats: size of the random feature map used in estimating the gradient (positive integer)
alpha: maximum euclidean norm of a feature vector
tol: stop when relative change (Frobenius norm) of embeddings is smaller than tol
stepsize: Adagrad stepsize
maxIters: maximum number of iterations
eps: Adagrad protection against division by zero, small constant
        Note: the larger the norm bound alpha on the feature vectors, the more random features you should use; otherwise you will get poor performance.
The runtime increases as numrandfeats increases.
"""
self.dim = dim
self.numrandfeats = numrandfeats
self.alpha = alpha
self.maxIters = maxIters
self.tol = tol
self.stepsize = stepsize
self.eps = eps
self.fitted = False
return
def set_training_data(self, *, inputs: Sequence[Input], outputs: None = None) -> None:
"""
Inputs:
X : array, shape = [n_rows, n_cols]
only takes one input
"""
self.G = inputs[0]
self.fitted = False
def fit(self, *, timeout: float = None, iterations: int = None) -> None:
"""
internally computes and sets U and V, the embeddings of the row and column entities, respectively
"""
if self.fitted:
return
m, n = self.G.shape
        if iterations is not None:
            self.maxIters = iterations
gNormalizer = sum(self.G)
# initialize with the PPMI: force sparsity if the data was not originally sparse
sparseData = coo_matrix(self.G)
i = sparseData.row
j = sparseData.col
vals = sparseData.data
numPairs = sum(vals) + 0.0
numRowEntities = sum(self.G, axis=1) + 0.0
numColEntities = sum(self.G, axis=0) + 0.0
print("Constructing PPMI")
def computePPMI(vals, i, j, numPairs, numRowEntities, numColEntities):
ppmiEntries=vals
for idx in range(len(vals)):
if idx % 1000 == 0:
sys.stdout.write(str(idx/1000.0)+'.')
ppmiEntries[idx] = max(log(numPairs*vals[idx]/(numRowEntities[i[idx]] * numColEntities[j[idx]])), 0)
return ppmiEntries
sparsePPMI = coo_matrix((computePPMI(vals, i, j, numPairs, numRowEntities, numColEntities), (i, j)), shape=self.G.shape)
self.U, _, vt = svds(sparsePPMI, k=self.dim-1)
self.V = vt.transpose()
gUhist = self.eps**2 * ones_like(self.U)
gVhist = self.eps**2 * ones_like(self.V)
def adaproj(G, X):
"""calculates the projection Pi_{||x||_2<=alpha}^D(y) for each row y of X,
where D=diag(g) for g, the corresponding row of G.
used for constrained Adagrad optimization (see the adagrad paper for the definition of Pi_X^A)"""
res = copy(X)
for rownum in range(X.shape[0]):
curg = G[rownum, :]
curvec = X[rownum, :]
if norm(curvec) <= self.alpha:
continue
else:
scaledrownormdiff = lambda sf: (norm(curg/(curg + sf)*curvec)**2 - self.alpha**2)**2
sf, fval, iters, funcalls = brent(scaledrownormdiff, brack=(0, sum(curg)), full_output=True)
assert(fval < 1e-3)
res[rownum, :] = curg/(curg + sf)*curvec
return res
print("Refining")
for iter in range(self.maxIters):
sys.stdout.write(str(iter)+'.')
sys.stdout.flush()
Unorms = expand_dims(norm(self.U, axis=1)**2, axis=1)
Vnorms = expand_dims(norm(self.V, axis=1)**2, axis=1)
W = randn(self.dim-1, self.numrandfeats)
phases =2*pi*random([1, self.numrandfeats])
ZU = exp(1.0/2*Unorms) * sqrt(2.0/self.numrandfeats)*cos(self.U.dot(W) + phases)
        ZV = exp(1.0/2*Vnorms) * sqrt(2.0/self.numrandfeats)*cos(self.V.dot(W) + phases)
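        # Random-feature identity used above (cf. Le et al. 2013): with the shared W ~ N(0, I)
        # and phases ~ U[0, 2*pi], E[sqrt(2/m)*cos(u.W + phase) * sqrt(2/m)*cos(v.W + phase)]
        # equals exp(-||u - v||^2 / 2), so together with the exp(||u||^2 / 2) and exp(||v||^2 / 2)
        # prefactors, ZU @ ZV.T is an unbiased estimate of exp(U V^T) without forming the
        # full m-by-n matrix.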
import sys
import datetime as dt
import pytest
import numpy as np
b = np.bool_()
u8 = np.uint64()
i8 = np.int64()
f8 = np.float64()
c16 = np.complex128()
U = np.str_()
S = np.bytes_()
# Construction
class D:
def __index__(self) -> int:
return 0
class C:
def __complex__(self) -> complex:
return 3j
class B:
def __int__(self) -> int:
return 4
class A:
def __float__(self) -> float:
return 4.0
np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")
np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")
if sys.version_info >= (3, 8):
np.uint64(D())
np.float32(D())
np.complex64(D())
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
import argparse
from keras.callbacks import ModelCheckpoint
import numpy as np
import pandas as pd
import pickle
import random
from scipy.io import arff
from model import model
np.random.seed(2018)
np.random.RandomState(2018)
random.seed(2018)
# default args
DATASET = './data/ECG5000_TEST.arff'
SAVE_PATH = './models/'
MODEL_NAME = 'seq2seq'
DATA_RANGE = [0,2627]
# data preprocessing
STANDARDIZED = False
MINMAX = False
CLIP = [99999]
# architecture
TIMESTEPS = 140 # length of 1 ECG
ENCODER_DIM = [20]
DECODER_DIM = [40]
OUTPUT_ACTIVATION = 'sigmoid'
# training
EPOCHS = 100
BATCH_SIZE = 32
LEARNING_RATE = .005
LOSS = 'mean_squared_error'
DROPOUT = 0.
VALIDATION_SPLIT = 0.2
SAVE = False
PRINT_PROGRESS = False
CONTINUE_TRAINING = False
LOAD_PATH = SAVE_PATH
def train(model,X,args):
""" Train seq2seq-LSTM model. """
# clip data per feature
for col,clip in enumerate(args.clip):
        X[:,:,col] = np.clip(X[:,:,col],-clip,clip)
# -*- coding: utf-8 -*-
#GSASII image calculations: ellipse fitting & image integration
########### SVN repository information ###################
# $Date: 2019-08-22 13:27:03 -0500 (Thu, 22 Aug 2019) $
# $Author: vondreele $
# $Revision: 4109 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIimage.py $
# $Id: GSASIIimage.py 4109 2019-08-22 18:27:03Z vondreele $
########### SVN repository information ###################
'''
*GSASIIimage: Image calc module*
================================
Ellipse fitting & image integration
'''
from __future__ import division, print_function
import math
import time
import numpy as np
import numpy.linalg as nl
import numpy.ma as ma
from scipy.optimize import leastsq
import scipy.interpolate as scint
import copy
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4109 $")
try:
import GSASIIplot as G2plt
except ImportError: # expected in scriptable w/o matplotlib and/or wx
pass
import GSASIIlattice as G2lat
import GSASIIpwd as G2pwd
import GSASIIspc as G2spc
import GSASIImath as G2mth
import GSASIIfiles as G2fil
# trig functions in degrees
sind = lambda x: math.sin(x*math.pi/180.)
asind = lambda x: 180.*math.asin(x)/math.pi
tand = lambda x: math.tan(x*math.pi/180.)
atand = lambda x: 180.*math.atan(x)/math.pi
atan2d = lambda y,x: 180.*math.atan2(y,x)/math.pi
cosd = lambda x: math.cos(x*math.pi/180.)
acosd = lambda x: 180.*math.acos(x)/math.pi
rdsq2d = lambda x,p: round(1.0/math.sqrt(x),p)
#numpy versions
npsind = lambda x: np.sin(x*np.pi/180.)
npasind = lambda x: 180.*np.arcsin(x)/np.pi
npcosd = lambda x: np.cos(x*np.pi/180.)
npacosd = lambda x: 180.*np.arccos(x)/np.pi
nptand = lambda x: np.tan(x*np.pi/180.)
npatand = lambda x: 180.*np.arctan(x)/np.pi
npatan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi
nxs = np.newaxis
debug = False
def pointInPolygon(pXY,xy):
    '''Return True if the point xy lies inside the closed polygon pXY (ray-casting test)'''
#pXY - assumed closed 1st & last points are duplicates
Inside = False
N = len(pXY)
p1x,p1y = pXY[0]
for i in range(N+1):
p2x,p2y = pXY[i%N]
if (max(p1y,p2y) >= xy[1] > min(p1y,p2y)) and (xy[0] <= max(p1x,p2x)):
if p1y != p2y:
xinters = (xy[1]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or xy[0] <= xinters:
Inside = not Inside
p1x,p1y = p2x,p2y
return Inside
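# Example (illustrative): for the closed unit square
#   sq = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
# pointInPolygon(sq, (0.5, 0.5)) returns True and pointInPolygon(sq, (1.5, 0.5)) returns False.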
def peneCorr(tth,dep,dist,tilt=0.,azm=0.):
    '''Compute the detector penetration (depth) correction that is added to the sample-to-detector distance; tth is two-theta in degrees and dep the penetration parameter'''
# return dep*(1.-npcosd(abs(tilt*npsind(azm))-tth*npcosd(azm))) #something wrong here
return dep*(1.-npcosd(tth))*dist**2/1000. #best one
# return dep*npsind(tth) #not as good as 1-cos2Q
def makeMat(Angle,Axis):
'''Make rotation matrix from Angle and Axis
:param float Angle: in degrees
:param int Axis: 0 for rotation about x, 1 for about y, etc.
'''
cs = npcosd(Angle)
ss = npsind(Angle)
M = np.array(([1.,0.,0.],[0.,cs,-ss],[0.,ss,cs]),dtype=np.float32)
return np.roll(np.roll(M,Axis,axis=0),Axis,axis=1)
def FitEllipse(xy):
    '''Fit an ellipse to the (x,y) points in xy with an algebraic (conic) least-squares fit;
    returns (center, rotation angle in deg, radii as [minor, major])'''
def ellipse_center(p):
''' gives ellipse center coordinates
'''
b,c,d,f,a = p[1]/2., p[2], p[3]/2., p[4]/2., p[0]
num = b*b-a*c
x0=(c*d-b*f)/num
y0=(a*f-b*d)/num
return np.array([x0,y0])
def ellipse_angle_of_rotation( p ):
''' gives rotation of ellipse major axis from x-axis
range will be -90 to 90 deg
'''
b,c,a = p[1]/2., p[2], p[0]
return 0.5*npatand(2*b/(a-c))
def ellipse_axis_length( p ):
''' gives ellipse radii in [minor,major] order
'''
b,c,d,f,g,a = p[1]/2., p[2], p[3]/2., p[4]/2, p[5], p[0]
up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
res1=np.sqrt(up/down1)
res2=np.sqrt(up/down2)
return np.array([ res2,res1])
xy = np.array(xy)
x = np.asarray(xy.T[0])[:,np.newaxis]
y = np.asarray(xy.T[1])[:,np.newaxis]
D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
S = np.dot(D.T,D)
C = np.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = nl.eig(np.dot(nl.inv(S), C))
n = np.argmax(np.abs(E))
a = V[:,n]
cent = ellipse_center(a)
phi = ellipse_angle_of_rotation(a)
radii = ellipse_axis_length(a)
phi += 90.
if radii[0] > radii[1]:
radii = [radii[1],radii[0]]
phi -= 90.
return cent,phi,radii
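def _demo_FitEllipse():
    '''Illustrative example (not part of GSAS-II): slightly noisy points sampled from a known,
    axis-aligned ellipse should be recovered with center ~(10, -3) and radii ~[2, 5] (minor, major).'''
    rng = np.random.default_rng(0)
    ang = np.linspace(0., 360., 73)
    x = 10. + 5.*npcosd(ang) + 0.01*rng.standard_normal(ang.size)
    y = -3. + 2.*npsind(ang) + 0.01*rng.standard_normal(ang.size)
    return FitEllipse(np.array([x, y]).T)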
def FitDetector(rings,varyList,parmDict,Print=True,covar=False):
'''Fit detector calibration parameters
:param np.array rings: vector of ring positions
:param list varyList: calibration parameters to be refined
:param dict parmDict: all calibration parameters
:param bool Print: set to True (default) to print the results
:param bool covar: set to True to return the covariance matrix (default is False)
:returns: [chisq,vals,sigList] unless covar is True, then
[chisq,vals,sigList,coVarMatrix] is returned
'''
def CalibPrint(ValSig,chisq,Npts):
print ('Image Parameters: chi**2: %12.3g, Np: %d'%(chisq,Npts))
ptlbls = 'names :'
ptstr = 'values:'
sigstr = 'esds :'
for name,value,sig in ValSig:
ptlbls += "%s" % (name.rjust(12))
if name == 'phi':
ptstr += Fmt[name] % (value%360.)
else:
ptstr += Fmt[name] % (value)
if sig:
sigstr += Fmt[name] % (sig)
else:
sigstr += 12*' '
print (ptlbls)
print (ptstr)
print (sigstr)
def ellipseCalcD(B,xyd,varyList,parmDict):
x,y,dsp = xyd
varyDict = dict(zip(varyList,B))
parms = {}
for parm in parmDict:
if parm in varyList:
parms[parm] = varyDict[parm]
else:
parms[parm] = parmDict[parm]
phi = parms['phi']-90. #get rotation of major axis from tilt axis
tth = 2.0*npasind(parms['wave']/(2.*dsp))
phi0 = npatan2d(y-parms['det-Y'],x-parms['det-X'])
dxy = peneCorr(tth,parms['dep'],parms['dist'],parms['tilt'],phi0)
stth = npsind(tth)
cosb = npcosd(parms['tilt'])
tanb = nptand(parms['tilt'])
tbm = nptand((tth-parms['tilt'])/2.)
tbp = nptand((tth+parms['tilt'])/2.)
d = parms['dist']+dxy
fplus = d*tanb*stth/(cosb+stth)
fminus = d*tanb*stth/(cosb-stth)
vplus = d*(tanb+(1+tbm)/(1-tbm))*stth/(cosb+stth)
vminus = d*(tanb+(1-tbp)/(1+tbp))*stth/(cosb-stth)
R0 = np.sqrt((vplus+vminus)**2-(fplus+fminus)**2)/2. #+minor axis
R1 = (vplus+vminus)/2. #major axis
zdis = (fplus-fminus)/2.
Robs = np.sqrt((x-parms['det-X'])**2+(y-parms['det-Y'])**2)
rsqplus = R0**2+R1**2
rsqminus = R0**2-R1**2
R = rsqminus*npcosd(2.*phi0-2.*phi)+rsqplus
Q = np.sqrt(2.)*R0*R1*np.sqrt(R-2.*zdis**2*npsind(phi0-phi)**2)
P = 2.*R0**2*zdis*npcosd(phi0-phi)
Rcalc = (P+Q)/R
M = (Robs-Rcalc)*25. #why 25? does make "chi**2" more reasonable
return M
names = ['dist','det-X','det-Y','tilt','phi','dep','wave']
fmt = ['%12.3f','%12.3f','%12.3f','%12.3f','%12.3f','%12.4f','%12.6f']
Fmt = dict(zip(names,fmt))
p0 = [parmDict[key] for key in varyList]
result = leastsq(ellipseCalcD,p0,args=(rings.T,varyList,parmDict),full_output=True,ftol=1.e-8)
chisq = np.sum(result[2]['fvec']**2)/(rings.shape[0]-len(p0)) #reduced chi^2 = M/(Nobs-Nvar)
parmDict.update(zip(varyList,result[0]))
vals = list(result[0])
if not len(vals):
sig = []
ValSig = []
sigList = []
else:
sig = list(np.sqrt(chisq*np.diag(result[1])))
sigList = np.zeros(7)
for i,name in enumerate(varyList):
sigList[i] = sig[varyList.index(name)]
ValSig = zip(varyList,vals,sig)
if Print:
if len(sig):
CalibPrint(ValSig,chisq,rings.shape[0])
else:
print(' Nothing refined')
if covar:
return [chisq,vals,sigList,result[1]]
else:
return [chisq,vals,sigList]
def FitMultiDist(rings,varyList,parmDict,Print=True,covar=False):
'''Fit detector calibration parameters with multi-distance data
:param np.array rings: vector of ring positions (x,y,dist,d-space)
:param list varyList: calibration parameters to be refined
:param dict parmDict: calibration parameters
:param bool Print: set to True (default) to print the results
:param bool covar: set to True to return the covariance matrix (default is False)
:returns: [chisq,vals,sigDict] unless covar is True, then
[chisq,vals,sigDict,coVarMatrix] is returned
'''
def CalibPrint(parmDict,sigDict,chisq,Npts):
ptlbls = 'names :'
ptstr = 'values:'
sigstr = 'esds :'
for d in sorted(set([i[5:] for i in parmDict.keys() if 'det-X' in i]),key=lambda x:int(x)):
fmt = '%12.3f'
for key in 'det-X','det-Y','delta':
name = key+d
if name not in parmDict: continue
ptlbls += "%12s" % name
ptstr += fmt % (parmDict[name])
if name in sigDict:
sigstr += fmt % (sigDict[name])
else:
sigstr += 12*' '
if len(ptlbls) > 68:
print()
print (ptlbls)
print (ptstr)
print (sigstr)
ptlbls = 'names :'
ptstr = 'values:'
sigstr = 'esds :'
if len(ptlbls) > 8:
print()
print (ptlbls)
print (ptstr)
print (sigstr)
print ('\nImage Parameters: chi**2: %12.3g, Np: %d'%(chisq,Npts))
ptlbls = 'names :'
ptstr = 'values:'
sigstr = 'esds :'
names = ['wavelength', 'dep', 'phi', 'tilt']
if 'deltaDist' in parmDict:
names += ['deltaDist']
for name in names:
if name == 'wavelength':
fmt = '%12.6f'
elif name == 'dep':
fmt = '%12.4f'
else:
fmt = '%12.3f'
ptlbls += "%s" % (name.rjust(12))
if name == 'phi':
ptstr += fmt % (parmDict[name]%360.)
else:
ptstr += fmt % (parmDict[name])
if name in sigDict:
sigstr += fmt % (sigDict[name])
else:
sigstr += 12*' '
print (ptlbls)
print (ptstr)
print (sigstr)
print()
def ellipseCalcD(B,xyd,varyList,parmDict):
x,y,dist,dsp = xyd
varyDict = dict(zip(varyList,B))
parms = {}
for parm in parmDict:
if parm in varyList:
parms[parm] = varyDict[parm]
else:
parms[parm] = parmDict[parm]
# create arrays with detector center values
detX = np.array([parms['det-X'+str(int(d))] for d in dist])
detY = np.array([parms['det-Y'+str(int(d))] for d in dist])
if 'deltaDist' in parms:
deltaDist = parms['deltaDist']
else:
deltaDist = np.array([parms['delta'+str(int(d))] for d in dist])
phi = parms['phi']-90. #get rotation of major axis from tilt axis
tth = 2.0*npasind(parms['wavelength']/(2.*dsp))
phi0 = npatan2d(y-detY,x-detX)
dxy = peneCorr(tth,parms['dep'],dist-deltaDist,parms['tilt'],phi0)
stth = npsind(tth)
cosb = npcosd(parms['tilt'])
tanb = nptand(parms['tilt'])
tbm = nptand((tth-parms['tilt'])/2.)
tbp = nptand((tth+parms['tilt'])/2.)
d = (dist-deltaDist)+dxy
fplus = d*tanb*stth/(cosb+stth)
fminus = d*tanb*stth/(cosb-stth)
vplus = d*(tanb+(1+tbm)/(1-tbm))*stth/(cosb+stth)
vminus = d*(tanb+(1-tbp)/(1+tbp))*stth/(cosb-stth)
R0 = np.sqrt((vplus+vminus)**2-(fplus+fminus)**2)/2. #+minor axis
R1 = (vplus+vminus)/2. #major axis
zdis = (fplus-fminus)/2.
Robs = np.sqrt((x-detX)**2+(y-detY)**2)
rsqplus = R0**2+R1**2
rsqminus = R0**2-R1**2
R = rsqminus*npcosd(2.*phi0-2.*phi)+rsqplus
Q = np.sqrt(2.)*R0*R1*np.sqrt(R-2.*zdis**2*npsind(phi0-phi)**2)
P = 2.*R0**2*zdis*npcosd(phi0-phi)
Rcalc = (P+Q)/R
return (Robs-Rcalc)*25. #why 25? does make "chi**2" more reasonable
p0 = [parmDict[key] for key in varyList]
result = leastsq(ellipseCalcD,p0,args=(rings.T,varyList,parmDict),full_output=True,ftol=1.e-8)
chisq = np.sum(result[2]['fvec']**2)/(rings.shape[0]-len(p0)) #reduced chi^2 = M/(Nobs-Nvar)
parmDict.update(zip(varyList,result[0]))
vals = list(result[0])
if chisq > 1:
sig = list(np.sqrt(chisq*np.diag(result[1])))
else:
sig = list(np.sqrt(np.diag(result[1])))
sigDict = {name:s for name,s in zip(varyList,sig)}
if Print:
CalibPrint(parmDict,sigDict,chisq,rings.shape[0])
if covar:
return [chisq,vals,sigDict,result[1]]
else:
return [chisq,vals,sigDict]
def ImageLocalMax(image,w,Xpix,Ypix):
    '''Find the pixel of maximum intensity in a 2w x 2w box centered on (Xpix,Ypix);
    returns the shifted pixel position, the maximum intensity and a (positive)
    estimate of the local background minimum from a 4w x 4w box, or zeros if the
    search box falls outside the image. For w == 0 a summed 4x4 patch is returned.
    '''
w2 = w*2
sizey,sizex = image.shape
xpix = int(Xpix) #get reference corner of pixel chosen
ypix = int(Ypix)
if not w:
ZMax = np.sum(image[ypix-2:ypix+2,xpix-2:xpix+2])
return xpix,ypix,ZMax,0.0001
if (w2 < xpix < sizex-w2) and (w2 < ypix < sizey-w2) and image[ypix,xpix]:
ZMax = image[ypix-w:ypix+w,xpix-w:xpix+w]
Zmax = np.argmax(ZMax)
ZMin = image[ypix-w2:ypix+w2,xpix-w2:xpix+w2]
Zmin = np.argmin(ZMin)
xpix += Zmax%w2-w
ypix += Zmax//w2-w
return xpix,ypix,np.ravel(ZMax)[Zmax],max(0.0001,np.ravel(ZMin)[Zmin]) #avoid neg/zero minimum
else:
return 0,0,0,0
def makeRing(dsp,ellipse,pix,reject,scalex,scaley,image,mul=1):
    '''Collect observed points on a powder ring: step around the predicted ellipse
    in ~1 mm increments, keep image local maxima whose peak/background ratio exceeds
    reject, and return them as [x,y,dsp] points plus their azimuths (deg).
    Both lists are returned empty if fewer than 10 points survive.
    '''
def ellipseC():
'compute estimate of ellipse circumference'
if radii[0] < 0: #hyperbola
# theta = npacosd(1./np.sqrt(1.+(radii[0]/radii[1])**2))
# print (theta)
return 0
apb = radii[1]+radii[0]
amb = radii[1]-radii[0]
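        # The return below is Ramanujan's approximation for the circumference of an
        # ellipse with semi-axes radii[0] and radii[1]; its error is negligible for
        # the mild eccentricities of powder rings.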
return np.pi*apb*(1+3*(amb/apb)**2/(10+np.sqrt(4-3*(amb/apb)**2)))
cent,phi,radii = ellipse
cphi = cosd(phi-90.) #convert to major axis rotation
sphi = sind(phi-90.)
ring = []
C = int(ellipseC())*mul #ring circumference in mm
azm = []
for i in range(0,C,1): #step around ring in 1mm increments
a = 360.*i/C
x = radii[1]*cosd(a-phi+90.) #major axis
y = radii[0]*sind(a-phi+90.)
X = (cphi*x-sphi*y+cent[0])*scalex #convert mm to pixels
Y = (sphi*x+cphi*y+cent[1])*scaley
X,Y,I,J = ImageLocalMax(image,pix,X,Y)
if I and J and float(I)/J > reject:
X += .5 #set to center of pixel
Y += .5
X /= scalex #convert back to mm
Y /= scaley
if [X,Y,dsp] not in ring: #no duplicates!
ring.append([X,Y,dsp])
azm.append(a)
if len(ring) < 10:
ring = []
azm = []
return ring,azm
def GetEllipse2(tth,dxy,dist,cent,tilt,phi):
'''uses Dandelin spheres to find ellipse or hyperbola parameters from detector geometry
on output
radii[0] (b-minor axis) set < 0. for hyperbola
'''
radii = [0,0]
stth = sind(tth)
cosb = cosd(tilt)
tanb = tand(tilt)
tbm = tand((tth-tilt)/2.)
tbp = tand((tth+tilt)/2.)
sinb = sind(tilt)
d = dist+dxy
if tth+abs(tilt) < 90.: #ellipse
fplus = d*tanb*stth/(cosb+stth)
fminus = d*tanb*stth/(cosb-stth)
vplus = d*(tanb+(1+tbm)/(1-tbm))*stth/(cosb+stth)
vminus = d*(tanb+(1-tbp)/(1+tbp))*stth/(cosb-stth)
radii[0] = np.sqrt((vplus+vminus)**2-(fplus+fminus)**2)/2. #+minor axis
radii[1] = (vplus+vminus)/2. #major axis
zdis = (fplus-fminus)/2.
else: #hyperbola!
f = d*abs(tanb)*stth/(cosb+stth)
v = d*(abs(tanb)+tand(tth-abs(tilt)))
delt = d*stth*(1.+stth*cosb)/(abs(sinb)*cosb*(stth+cosb))
eps = (v-f)/(delt-v)
radii[0] = -eps*(delt-f)/np.sqrt(eps**2-1.) #-minor axis
radii[1] = eps*(delt-f)/(eps**2-1.) #major axis
if tilt > 0:
zdis = f+radii[1]*eps
else:
zdis = -f
#NB: zdis is || to major axis & phi is rotation of minor axis
#thus shift from beam to ellipse center is [Z*sin(phi),-Z*cos(phi)]
elcent = [cent[0]+zdis*sind(phi),cent[1]-zdis*cosd(phi)]
return elcent,phi,radii
def GetEllipse(dsp,data):
'''uses Dandelin spheres to find ellipse or hyperbola parameters from detector geometry
as given in image controls dictionary (data) and a d-spacing (dsp)
'''
cent = data['center']
tilt = data['tilt']
phi = data['rotation']
dep = data.get('DetDepth',0.0)
tth = 2.0*asind(data['wavelength']/(2.*dsp))
dist = data['distance']
dxy = peneCorr(tth,dep,dist,tilt)
return GetEllipse2(tth,dxy,dist,cent,tilt,phi)
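# Minimal usage sketch for GetEllipse (the numbers below are hypothetical, not
# defaults): only the control entries read above are needed, e.g.
#   data = {'center':[102.3,98.7],'tilt':0.5,'rotation':12.0,'DetDepth':0.0,
#           'wavelength':0.4137,'distance':1000.0}
#   elcent,phi,radii = GetEllipse(2.0,data)    # predicted ring ellipse for d = 2.0 A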
def GetDetectorXY(dsp,azm,data):
'''Get detector x,y position from d-spacing (dsp), azimuth (azm,deg)
& image controls dictionary (data)
it seems to be only used in plotting
'''
elcent,phi,radii = GetEllipse(dsp,data)
phi = data['rotation']-90. #to give rotation of major axis
tilt = data['tilt']
dist = data['distance']
cent = data['center']
tth = 2.0*asind(data['wavelength']/(2.*dsp))
stth = sind(tth)
cosb = cosd(tilt)
if radii[0] > 0.:
sinb = sind(tilt)
tanb = tand(tilt)
fplus = dist*tanb*stth/(cosb+stth)
fminus = dist*tanb*stth/(cosb-stth)
zdis = (fplus-fminus)/2.
rsqplus = radii[0]**2+radii[1]**2
rsqminus = radii[0]**2-radii[1]**2
R = rsqminus*cosd(2.*azm-2.*phi)+rsqplus
Q = np.sqrt(2.)*radii[0]*radii[1]*np.sqrt(R-2.*zdis**2*sind(azm-phi)**2)
P = 2.*radii[0]**2*zdis*cosd(azm-phi)
radius = (P+Q)/R
xy = np.array([radius*cosd(azm),radius*sind(azm)])
xy += cent
else: #hyperbola - both branches (one is way off screen!)
sinb = abs(sind(tilt))
tanb = abs(tand(tilt))
f = dist*tanb*stth/(cosb+stth)
v = dist*(tanb+tand(tth-abs(tilt)))
delt = dist*stth*(1+stth*cosb)/(sinb*cosb*(stth+cosb))
ecc = (v-f)/(delt-v)
R = radii[1]*(ecc**2-1)/(1-ecc*cosd(azm))
if tilt > 0.:
offset = 2.*radii[1]*ecc+f #select other branch
xy = [-R*cosd(azm)-offset,-R*sind(azm)]
else:
offset = -f
xy = [-R*cosd(azm)-offset,R*sind(azm)]
xy = -np.array([xy[0]*cosd(phi)+xy[1]*sind(phi),xy[0]*sind(phi)-xy[1]*cosd(phi)])
xy += cent
return xy
def GetDetXYfromThAzm(Th,Azm,data):
    '''Computes a detector position from a 2theta angle and an azimuthal
angle (both in degrees) - apparently not used!
'''
dsp = data['wavelength']/(2.0*npsind(Th))
return GetDetectorXY(dsp,Azm,data)
def GetTthAzmDsp(x,y,data): #expensive
'''Computes a 2theta, etc. from a detector position and calibration constants - checked
OK for ellipses & hyperbola.
    :returns: np.array(tth,azm,G,dsp) where tth is 2theta, azm is the azimuthal angle,
    G is the geometric correction factor (1/cos(2theta)**2 for an untilted detector) and dsp is the d-space
'''
wave = data['wavelength']
cent = data['center']
tilt = data['tilt']
dist = data['distance']/cosd(tilt)
x0 = dist*tand(tilt)
phi = data['rotation']
dep = data.get('DetDepth',0.)
azmthoff = data['azmthOff']
dx = np.array(x-cent[0],dtype=np.float32)
dy = np.array(y-cent[1],dtype=np.float32)
D = ((dx-x0)**2+dy**2+dist**2) #sample to pixel distance
X = np.array(([dx,dy,np.zeros_like(dx)]),dtype=np.float32).T
X = np.dot(X,makeMat(phi,2))
Z = np.dot(X,makeMat(tilt,0)).T[2]
tth = npatand(np.sqrt(dx**2+dy**2-Z**2)/(dist-Z))
dxy = peneCorr(tth,dep,dist,tilt,npatan2d(dy,dx))
DX = dist-Z+dxy
DY = np.sqrt(dx**2+dy**2-Z**2)
tth = npatan2d(DY,DX)
dsp = wave/(2.*npsind(tth/2.))
azm = (npatan2d(dy,dx)+azmthoff+720.)%360.
G = D/dist**2 #for geometric correction = 1/cos(2theta)^2 if tilt=0.
return np.array([tth,azm,G,dsp])
def GetTth(x,y,data):
'Give 2-theta value for detector x,y position; calibration info in data'
return GetTthAzmDsp(x,y,data)[0]
def GetTthAzm(x,y,data):
'Give 2-theta, azimuth values for detector x,y position; calibration info in data'
return GetTthAzmDsp(x,y,data)[0:2]
def GetTthAzmG(x,y,data):
    '''Give 2-theta, azimuth & geometric corr. values for detector x,y position;
    calibration info in data - only used in integration.
    Checked OK for ellipses & hyperbola.
    '''
tilt = data['tilt']
dist = data['distance']/npcosd(tilt)
x0 = data['distance']*nptand(tilt)
MN = -np.inner(makeMat(data['rotation'],2),makeMat(tilt,0))
distsq = data['distance']**2
dx = x-data['center'][0]
dy = y-data['center'][1]
G = ((dx-x0)**2+dy**2+distsq)/distsq #for geometric correction = 1/cos(2theta)^2 if tilt=0.
Z = np.dot(np.dstack([dx.T,dy.T,np.zeros_like(dx.T)]),MN).T[2]
xyZ = dx**2+dy**2-Z**2
tth = npatand(np.sqrt(xyZ)/(dist-Z))
dxy = peneCorr(tth,data['DetDepth'],dist,tilt,npatan2d(dy,dx))
tth = npatan2d(np.sqrt(xyZ),dist-Z+dxy)
azm = (npatan2d(dy,dx)+data['azmthOff']+720.)%360.
return tth,azm,G
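# Sanity check of the geometric correction for an untilted detector (tilt = 0):
# then Z = 0, x0 = 0 and dist = data['distance'], so with r**2 = dx**2+dy**2 and
# tan(2-theta) = r/dist,
#   G = (r**2+dist**2)/dist**2 = 1 + tan(2-theta)**2 = 1/cos(2-theta)**2,
# the limit quoted in the comments above.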
def GetDsp(x,y,data):
'Give d-spacing value for detector x,y position; calibration info in data'
return GetTthAzmDsp(x,y,data)[3]
def GetAzm(x,y,data):
'Give azimuth value for detector x,y position; calibration info in data'
return GetTthAzmDsp(x,y,data)[1]
def meanAzm(a,b):
AZM = lambda a,b: npacosd(0.5*(npsind(2.*b)-npsind(2.*a))/(np.pi*(b-a)/180.))/2.
azm = AZM(a,b)
# quad = int((a+b)/180.)
# if quad == 1:
# azm = 180.-azm
# elif quad == 2:
# azm += 180.
# elif quad == 3:
# azm = 360-azm
return azm
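# Note on meanAzm: the expression in AZM is the analytic average of cos(2*azm) over
# the azimuth range [a,b] in degrees, so the value returned is the effective azimuth
# azm_eff satisfying cos(2*azm_eff) = <cos(2*azm)> - presumably intended for
# azimuth-dependent corrections such as polarization.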
def ImageCompress(image,scale):
''' Reduces size of image by selecting every n'th point
    :param np.array image: original image
    :param int scale: interval between selected points
    :returns: np.array: reduced size image
'''
if scale == 1:
return image
else:
return image[::scale,::scale]
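# For example, a 2048x2048 image compressed with scale=4 becomes 512x512, since
# image[::4,::4] keeps every 4th pixel in each direction.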
def checkEllipse(Zsum,distSum,xSum,ySum,dist,x,y):
    '''Check that the current ring values (dist, x-center, y-center) agree with the
    Zsum-weighted running averages to within 2%; returns a boolean array over
    [dist,x,y].
    '''
avg = np.array([distSum/Zsum,xSum/Zsum,ySum/Zsum])
curr = np.array([dist,x,y])
return abs(avg-curr)/avg < .02
def GetLineScan(image,data):
Nx,Ny = data['size']
pixelSize = data['pixelSize']
scalex = 1000./pixelSize[0] #microns --> 1/mm
scaley = 1000./pixelSize[1]
wave = data['wavelength']
numChans = data['outChannels']
    LUtth = np.array(data['IOtth'],dtype=np.float64)
azm = data['linescan'][1]-data['azmthOff']
Tx = np.array([tth for tth in np.linspace(LUtth[0],LUtth[1],numChans+1)])
Ty = np.zeros_like(Tx)
dsp = wave/(2.0*npsind(Tx/2.0))
xy = np.array([GetDetectorXY(d,azm,data) for d in dsp]).T
xy[1] *= scalex
xy[0] *= scaley
xy = np.array(xy,dtype=int)
Xpix = ma.masked_outside(xy[1],0,Ny-1)
Ypix = ma.masked_outside(xy[0],0,Nx-1)
xpix = Xpix[~(Xpix.mask+Ypix.mask)].compressed()
ypix = Ypix[~(Xpix.mask+Ypix.mask)].compressed()
Ty = image[xpix,ypix]
Tx = ma.array(Tx,mask=Xpix.mask+Ypix.mask).compressed()
return [Tx,Ty]
def EdgeFinder(image,data):
    '''this makes a list of all x,y where I>edgeMin, suitable for an ellipse search?
    Not currently used but might be useful in the future?
'''
import numpy.ma as ma
Nx,Ny = data['size']
pixelSize = data['pixelSize']
edgemin = data['edgemin']
scalex = pixelSize[0]/1000.
scaley = pixelSize[1]/1000.
tay,tax = np.mgrid[0:Nx,0:Ny]
tax = np.asfarray(tax*scalex,dtype=np.float32)
tay = np.asfarray(tay*scaley,dtype=np.float32)
tam = ma.getmask(ma.masked_less(image.flatten(),edgemin))
tax = ma.compressed(ma.array(tax.flatten(),mask=tam))
tay = ma.compressed(ma.array(tay.flatten(),mask=tam))
return zip(tax,tay)
def MakeFrameMask(data,frame):
import polymask as pm
pixelSize = data['pixelSize']
scalex = pixelSize[0]/1000.
scaley = pixelSize[1]/1000.
blkSize = 512
Nx,Ny = data['size']
nXBlks = (Nx-1)//blkSize+1
nYBlks = (Ny-1)//blkSize+1
tam = ma.make_mask_none(data['size'])
for iBlk in range(nXBlks):
iBeg = iBlk*blkSize
iFin = min(iBeg+blkSize,Nx)
for jBlk in range(nYBlks):
jBeg = jBlk*blkSize
jFin = min(jBeg+blkSize,Ny)
nI = iFin-iBeg
nJ = jFin-jBeg
tax,tay = np.mgrid[iBeg+0.5:iFin+.5,jBeg+.5:jFin+.5] #bin centers not corners
tax = np.asfarray(tax*scalex,dtype=np.float32)
tay = np.asfarray(tay*scaley,dtype=np.float32)
tamp = ma.make_mask_none((1024*1024))
tamp = ma.make_mask(pm.polymask(nI*nJ,tax.flatten(),
tay.flatten(),len(frame),frame,tamp)[:nI*nJ])^True #switch to exclude around frame
if tamp.shape:
tamp = np.reshape(tamp[:nI*nJ],(nI,nJ))
tam[iBeg:iFin,jBeg:jFin] = ma.mask_or(tamp[0:nI,0:nJ],tam[iBeg:iFin,jBeg:jFin])
else:
tam[iBeg:iFin,jBeg:jFin] = True
return tam.T
def ImageRecalibrate(G2frame,ImageZ,data,masks,getRingsOnly=False):
'''Called to repeat the calibration on an image, usually called after
calibration is done initially to improve the fit.
:param G2frame: The top-level GSAS-II frame or None, to skip plotting
:param np.Array ImageZ: the image to calibrate
:param dict data: the Controls dict for the image
:param dict masks: a dict with masks
:returns: a list containing vals,varyList,sigList,parmDict,covar or rings
(with an array of x, y, and d-space values) if getRingsOnly is True
or an empty list, in case of an error
'''
import ImageCalibrants as calFile
if not getRingsOnly:
G2fil.G2Print ('Image recalibration:')
time0 = time.time()
pixelSize = data['pixelSize']
scalex = 1000./pixelSize[0]
scaley = 1000./pixelSize[1]
pixLimit = data['pixLimit']
cutoff = data['cutoff']
data['rings'] = []
data['ellipses'] = []
if data['DetDepth'] > 0.5: #patch - redefine DetDepth
data['DetDepth'] /= data['distance']
if not data['calibrant']:
G2fil.G2Print ('warning: no calibration material selected')
return []
skip = data['calibskip']
dmin = data['calibdmin']
if data['calibrant'] not in calFile.Calibrants:
G2fil.G2Print('Warning: %s not in local copy of image calibrants file'%data['calibrant'])
return []
calibrant = calFile.Calibrants[data['calibrant']]
Bravais,SGs,Cells = calibrant[:3]
HKL = []
for bravais,sg,cell in zip(Bravais,SGs,Cells):
A = G2lat.cell2A(cell)
if sg:
SGData = G2spc.SpcGroup(sg)[1]
hkl = G2pwd.getHKLpeak(dmin,SGData,A,Inst=None,nodup=True)
HKL += list(hkl)
else:
hkl = G2lat.GenHBravais(dmin,bravais,A)
HKL += list(hkl)
if len(calibrant) > 5:
absent = calibrant[5]
else:
absent = ()
HKL = G2lat.sortHKLd(HKL,True,False)
varyList = [item for item in data['varyList'] if data['varyList'][item]]
parmDict = {'dist':data['distance'],'det-X':data['center'][0],'det-Y':data['center'][1],
'setdist':data.get('setdist',data['distance']),
'tilt':data['tilt'],'phi':data['rotation'],'wave':data['wavelength'],'dep':data['DetDepth']}
Found = False
wave = data['wavelength']
frame = masks['Frames']
tam = ma.make_mask_none(ImageZ.shape)
if frame:
tam = ma.mask_or(tam,MakeFrameMask(data,frame))
for iH,H in enumerate(HKL):
if debug: print (H)
dsp = H[3]
tth = 2.0*asind(wave/(2.*dsp))
if tth+abs(data['tilt']) > 90.:
G2fil.G2Print ('next line is a hyperbola - search stopped')
break
ellipse = GetEllipse(dsp,data)
if iH not in absent and iH >= skip:
Ring = makeRing(dsp,ellipse,pixLimit,cutoff,scalex,scaley,ma.array(ImageZ,mask=tam))[0]
else:
Ring = makeRing(dsp,ellipse,pixLimit,1000.0,scalex,scaley,ma.array(ImageZ,mask=tam))[0]
if Ring:
if iH not in absent and iH >= skip:
data['rings'].append(np.array(Ring))
data['ellipses'].append(copy.deepcopy(ellipse+('r',)))
Found = True
elif not Found: #skipping inner rings, keep looking until ring found
continue
else: #no more rings beyond edge of detector
data['ellipses'].append([])
continue
if not data['rings']:
G2fil.G2Print ('no rings found; try lower Min ring I/Ib',mode='warn')
return []
rings = np.concatenate((data['rings']),axis=0)
if getRingsOnly:
return rings,HKL
[chisq,vals,sigList,covar] = FitDetector(rings,varyList,parmDict,True,True)
data['wavelength'] = parmDict['wave']
data['distance'] = parmDict['dist']
data['center'] = [parmDict['det-X'],parmDict['det-Y']]
data['rotation'] = np.mod(parmDict['phi'],360.0)
data['tilt'] = parmDict['tilt']
data['DetDepth'] = parmDict['dep']
data['chisq'] = chisq
N = len(data['ellipses'])
data['ellipses'] = [] #clear away individual ellipse fits
for H in HKL[:N]:
ellipse = GetEllipse(H[3],data)
data['ellipses'].append(copy.deepcopy(ellipse+('b',)))
G2fil.G2Print ('calibration time = %.3f'%(time.time()-time0))
if G2frame:
G2plt.PlotImage(G2frame,newImage=True)
return [vals,varyList,sigList,parmDict,covar]
def ImageCalibrate(G2frame,data):
'''Called to perform an initial image calibration after points have been
selected for the inner ring.
'''
import ImageCalibrants as calFile
G2fil.G2Print ('Image calibration:')
time0 = time.time()
ring = data['ring']
pixelSize = data['pixelSize']
scalex = 1000./pixelSize[0]
scaley = 1000./pixelSize[1]
pixLimit = data['pixLimit']
cutoff = data['cutoff']
varyDict = data['varyList']
if varyDict['dist'] and varyDict['wave']:
G2fil.G2Print ('ERROR - you can not simultaneously calibrate distance and wavelength')
return False
if len(ring) < 5:
G2fil.G2Print ('ERROR - not enough inner ring points for ellipse')
return False
#fit start points on inner ring
data['ellipses'] = []
data['rings'] = []
outE = FitEllipse(ring)
fmt = '%s X: %.3f, Y: %.3f, phi: %.3f, R1: %.3f, R2: %.3f'
fmt2 = '%s X: %.3f, Y: %.3f, phi: %.3f, R1: %.3f, R2: %.3f, chi**2: %.3f, Np: %d'
if outE:
G2fil.G2Print (fmt%('start ellipse: ',outE[0][0],outE[0][1],outE[1],outE[2][0],outE[2][1]))
ellipse = outE
else:
return False
#setup 360 points on that ring for "good" fit
data['ellipses'].append(ellipse[:]+('g',))
Ring = makeRing(1.0,ellipse,pixLimit,cutoff,scalex,scaley,G2frame.ImageZ)[0]
if Ring:
ellipse = FitEllipse(Ring)
Ring = makeRing(1.0,ellipse,pixLimit,cutoff,scalex,scaley,G2frame.ImageZ)[0] #do again
ellipse = FitEllipse(Ring)
else:
G2fil.G2Print ('1st ring not sufficiently complete to proceed',mode='warn')
return False
if debug:
G2fil.G2Print (fmt2%('inner ring: ',ellipse[0][0],ellipse[0][1],ellipse[1],
ellipse[2][0],ellipse[2][1],0.,len(Ring))) #cent,phi,radii
data['ellipses'].append(ellipse[:]+('r',))
data['rings'].append(np.array(Ring))
G2plt.PlotImage(G2frame,newImage=True)
#setup for calibration
data['rings'] = []
if not data['calibrant']:
G2fil.G2Print ('Warning: no calibration material selected')
return True
skip = data['calibskip']
dmin = data['calibdmin']
#generate reflection set
calibrant = calFile.Calibrants[data['calibrant']]
Bravais,SGs,Cells = calibrant[:3]
HKL = []
for bravais,sg,cell in zip(Bravais,SGs,Cells):
A = G2lat.cell2A(cell)
if sg:
SGData = G2spc.SpcGroup(sg)[1]
hkl = G2pwd.getHKLpeak(dmin,SGData,A,Inst=None,nodup=True)
#G2fil.G2Print(hkl)
HKL += list(hkl)
else:
hkl = G2lat.GenHBravais(dmin,bravais,A)
HKL += list(hkl)
HKL = G2lat.sortHKLd(HKL,True,False)[skip:]
#set up 1st ring
elcent,phi,radii = ellipse #from fit of 1st ring
dsp = HKL[0][3]
G2fil.G2Print ('1st ring: try %.4f'%(dsp))
if varyDict['dist']:
wave = data['wavelength']
tth = 2.0*asind(wave/(2.*dsp))
else: #varyDict['wave']!
dist = data['distance']
tth = npatan2d(radii[0],dist)
data['wavelength'] = wave = 2.0*dsp*sind(tth/2.0)
Ring0 = makeRing(dsp,ellipse,3,cutoff,scalex,scaley,G2frame.ImageZ)[0]
ttth = nptand(tth)
ctth = npcosd(tth)
#1st estimate of tilt; assume ellipse - don't know sign though
if varyDict['tilt']:
tilt = npasind(np.sqrt(max(0.,1.-(radii[0]/radii[1])**2))*ctth)
if not tilt:
G2fil.G2Print ('WARNING - selected ring was fitted as a circle')
G2fil.G2Print (' - if detector was tilted we suggest you skip this ring - WARNING')
else:
tilt = data['tilt']
#1st estimate of dist: sample to detector normal to plane
if varyDict['dist']:
data['distance'] = dist = radii[0]**2/(ttth*radii[1])
else:
dist = data['distance']
if varyDict['tilt']:
#ellipse to cone axis (x-ray beam); 2 choices depending on sign of tilt
zdisp = radii[1]*ttth*tand(tilt)
zdism = radii[1]*ttth*tand(-tilt)
#cone axis position; 2 choices. Which is right?
#NB: zdisp is || to major axis & phi is rotation of minor axis
#thus shift from beam to ellipse center is [Z*sin(phi),-Z*cos(phi)]
centp = [elcent[0]+zdisp*sind(phi),elcent[1]-zdisp*cosd(phi)]
centm = [elcent[0]+zdism*sind(phi),elcent[1]-zdism*cosd(phi)]
#check get same ellipse parms either way
#now do next ring; estimate either way & do a FitDetector each way; best fit is correct one
fail = True
i2 = 1
while fail:
dsp = HKL[i2][3]
G2fil.G2Print ('2nd ring: try %.4f'%(dsp))
tth = 2.0*asind(wave/(2.*dsp))
ellipsep = GetEllipse2(tth,0.,dist,centp,tilt,phi)
G2fil.G2Print (fmt%('plus ellipse :',ellipsep[0][0],ellipsep[0][1],ellipsep[1],ellipsep[2][0],ellipsep[2][1]))
Ringp = makeRing(dsp,ellipsep,3,cutoff,scalex,scaley,G2frame.ImageZ)[0]
parmDict = {'dist':dist,'det-X':centp[0],'det-Y':centp[1],
'tilt':tilt,'phi':phi,'wave':wave,'dep':0.0}
varyList = [item for item in varyDict if varyDict[item]]
if len(Ringp) > 10:
chip = FitDetector(np.array(Ring0+Ringp),varyList,parmDict,True)[0]
tiltp = parmDict['tilt']
phip = parmDict['phi']
centp = [parmDict['det-X'],parmDict['det-Y']]
fail = False
else:
chip = 1e6
ellipsem = GetEllipse2(tth,0.,dist,centm,-tilt,phi)
G2fil.G2Print (fmt%('minus ellipse:',ellipsem[0][0],ellipsem[0][1],ellipsem[1],ellipsem[2][0],ellipsem[2][1]))
Ringm = makeRing(dsp,ellipsem,3,cutoff,scalex,scaley,G2frame.ImageZ)[0]
if len(Ringm) > 10:
parmDict['tilt'] *= -1
chim = FitDetector(np.array(Ring0+Ringm),varyList,parmDict,True)[0]
tiltm = parmDict['tilt']
phim = parmDict['phi']
centm = [parmDict['det-X'],parmDict['det-Y']]
fail = False
else:
chim = 1e6
if fail:
i2 += 1
if chip < chim:
data['tilt'] = tiltp
data['center'] = centp
data['rotation'] = phip
else:
data['tilt'] = tiltm
data['center'] = centm
data['rotation'] = phim
data['ellipses'].append(ellipsep[:]+('b',))
data['rings'].append(np.array(Ringp))
data['ellipses'].append(ellipsem[:]+('r',))
data['rings'].append(np.array(Ringm))
G2plt.PlotImage(G2frame,newImage=True)
if data['DetDepth'] > 0.5: #patch - redefine DetDepth
data['DetDepth'] /= data['distance']
parmDict = {'dist':data['distance'],'det-X':data['center'][0],'det-Y':data['center'][1],
'tilt':data['tilt'],'phi':data['rotation'],'wave':data['wavelength'],'dep':data['DetDepth']}
varyList = [item for item in varyDict if varyDict[item]]
data['rings'] = []
data['ellipses'] = []
for i,H in enumerate(HKL):
dsp = H[3]
tth = 2.0*asind(wave/(2.*dsp))
if tth+abs(data['tilt']) > 90.:
G2fil.G2Print ('next line is a hyperbola - search stopped')
break
if debug: print ('HKLD:'+str(H[:4])+'2-theta: %.4f'%(tth))
elcent,phi,radii = ellipse = GetEllipse(dsp,data)
data['ellipses'].append(copy.deepcopy(ellipse+('g',)))
if debug: print (fmt%('predicted ellipse:',elcent[0],elcent[1],phi,radii[0],radii[1]))
Ring = makeRing(dsp,ellipse,pixLimit,cutoff,scalex,scaley,G2frame.ImageZ)[0]
if Ring:
data['rings'].append(np.array(Ring))
rings = np.concatenate((data['rings']),axis=0)
if i:
chisq = FitDetector(rings,varyList,parmDict,False)[0]
data['distance'] = parmDict['dist']
data['center'] = [parmDict['det-X'],parmDict['det-Y']]
data['rotation'] = parmDict['phi']
data['tilt'] = parmDict['tilt']
data['DetDepth'] = parmDict['dep']
data['chisq'] = chisq
elcent,phi,radii = ellipse = GetEllipse(dsp,data)
if debug: print (fmt2%('fitted ellipse: ',elcent[0],elcent[1],phi,radii[0],radii[1],chisq,len(rings)))
data['ellipses'].append(copy.deepcopy(ellipse+('r',)))
# G2plt.PlotImage(G2frame,newImage=True)
else:
if debug: print ('insufficient number of points in this ellipse to fit')
# break
G2plt.PlotImage(G2frame,newImage=True)
fullSize = len(G2frame.ImageZ)/scalex
if 2*radii[1] < .9*fullSize:
G2fil.G2Print ('Are all usable rings (>25% visible) used? Try reducing Min ring I/Ib')
N = len(data['ellipses'])
if N > 2:
FitDetector(rings,varyList,parmDict)[0]
data['wavelength'] = parmDict['wave']
data['distance'] = parmDict['dist']
data['center'] = [parmDict['det-X'],parmDict['det-Y']]
data['rotation'] = parmDict['phi']
data['tilt'] = parmDict['tilt']
data['DetDepth'] = parmDict['dep']
for H in HKL[:N]:
ellipse = GetEllipse(H[3],data)
data['ellipses'].append(copy.deepcopy(ellipse+('b',)))
G2fil.G2Print ('calibration time = %.3f'%(time.time()-time0))
G2plt.PlotImage(G2frame,newImage=True)
return True
def Make2ThetaAzimuthMap(data,iLim,jLim): #most expensive part of integration!
    '''Transforms a 2D image block (rows iLim, columns jLim) from x,y space to
    2-theta,azimuth space based on detector orientation; returns the 2-theta,
    azimuth & geometric correction arrays.
    '''
pixelSize = data['pixelSize']
scalex = pixelSize[0]/1000.
scaley = pixelSize[1]/1000.
tay,tax = np.mgrid[iLim[0]+0.5:iLim[1]+.5,jLim[0]+.5:jLim[1]+.5] #bin centers not corners
tax = np.asfarray(tax*scalex,dtype=np.float32).flatten()
tay = np.asfarray(tay*scaley,dtype=np.float32).flatten()
nI = iLim[1]-iLim[0]
nJ = jLim[1]-jLim[0]
TA = np.array(GetTthAzmG(np.reshape(tax,(nI,nJ)),np.reshape(tay,(nI,nJ)),data)) #includes geom. corr. as dist**2/d0**2 - most expensive step
TA[1] = np.where(TA[1]<0,TA[1]+360,TA[1])
return TA #2-theta, azimuth & geom. corr. arrays
def MakeMaskMap(data,masks,iLim,jLim,tamp):
import polymask as pm
pixelSize = data['pixelSize']
scalex = pixelSize[0]/1000.
scaley = pixelSize[1]/1000.
tay,tax = np.mgrid[iLim[0]+0.5:iLim[1]+.5,jLim[0]+.5:jLim[1]+.5] #bin centers not corners
tax = np.asfarray(tax*scalex,dtype=np.float32).flatten()
tay = np.asfarray(tay*scaley,dtype=np.float32).flatten()
nI = iLim[1]-iLim[0]
nJ = jLim[1]-jLim[0]
#make position masks here
frame = masks['Frames']
tam = ma.make_mask_none((nI*nJ))
if frame:
tam = ma.mask_or(tam,ma.make_mask(pm.polymask(nI*nJ,tax,
tay,len(frame),frame,tamp)[:nI*nJ])^True)
polygons = masks['Polygons']
for polygon in polygons:
if polygon:
tam = ma.mask_or(tam,ma.make_mask(pm.polymask(nI*nJ,tax,
tay,len(polygon),polygon,tamp)[:nI*nJ]))
for X,Y,rsq in masks['Points'].T:
tam = ma.mask_or(tam,ma.getmask(ma.masked_less((tax-X)**2+(tay-Y)**2,rsq)))
if tam.shape:
tam = np.reshape(tam,(nI,nJ))
else:
tam = ma.make_mask_none((nI,nJ))
for xline in masks.get('Xlines',[]): #a y pixel position
if iLim[0] <= xline <= iLim[1]:
tam[xline-iLim[0],:] = True
for yline in masks.get('Ylines',[]): #a x pixel position
if jLim[0] <= yline <= jLim[1]:
tam[:,yline-jLim[0]] = True
return tam #position mask
def Fill2ThetaAzimuthMap(masks,TA,tam,image):
    '''Apply the ring, arc & threshold masks to an image block and return the
    surviving pixels as flat arrays: azimuth, 2-theta, intensity, dist**2/d0**2
    and a ones array (later used for the absorption correction).
    '''
Zlim = masks['Thresholds'][1]
rings = masks['Rings']
arcs = masks['Arcs']
TA = np.dstack((ma.getdata(TA[1]),ma.getdata(TA[0]),ma.getdata(TA[2]))) #azimuth, 2-theta, dist
tax,tay,tad = np.dsplit(TA,3) #azimuth, 2-theta, dist**2/d0**2
for tth,thick in rings:
tam = ma.mask_or(tam.flatten(),ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.)))
for tth,azm,thick in arcs:
tamt = ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.))
tama = ma.getmask(ma.masked_inside(tax.flatten(),azm[0],azm[1]))
tam = ma.mask_or(tam.flatten(),tamt*tama)
taz = ma.masked_outside(image.flatten(),int(Zlim[0]),Zlim[1])
tabs = np.ones_like(taz)
tam = ma.mask_or(tam.flatten(),ma.getmask(taz))
tax = ma.compressed(ma.array(tax.flatten(),mask=tam)) #azimuth
tay = ma.compressed(ma.array(tay.flatten(),mask=tam)) #2-theta
taz = ma.compressed(ma.array(taz.flatten(),mask=tam)) #intensity
tad = ma.compressed(ma.array(tad.flatten(),mask=tam)) #dist**2/d0**2
tabs = ma.compressed(ma.array(tabs.flatten(),mask=tam)) #ones - later used for absorption corr.
return tax,tay,taz,tad,tabs
def MakeUseTA(data,blkSize=128):
Nx,Ny = data['size']
nXBlks = (Nx-1)//blkSize+1
nYBlks = (Ny-1)//blkSize+1
useTA = []
for iBlk in range(nYBlks):
iBeg = iBlk*blkSize
iFin = min(iBeg+blkSize,Ny)
useTAj = []
for jBlk in range(nXBlks):
jBeg = jBlk*blkSize
jFin = min(jBeg+blkSize,Nx)
            TA = Make2ThetaAzimuthMap(data,(iBeg,iFin),(jBeg,jFin)) #2-theta & azimuth arrays for this block
useTAj.append(TA)
useTA.append(useTAj)
return useTA
def MakeUseMask(data,masks,blkSize=128):
Masks = copy.deepcopy(masks)
Masks['Points'] = np.array(Masks['Points']).T #get spots as X,Y,R arrays
if np.any(masks['Points']):
Masks['Points'][2] = np.square(Masks['Points'][2]/2.)
Nx,Ny = data['size']
nXBlks = (Nx-1)//blkSize+1
nYBlks = (Ny-1)//blkSize+1
useMask = []
tamp = ma.make_mask_none((1024*1024)) #NB: this array size used in the fortran histogram2d
for iBlk in range(nYBlks):
iBeg = iBlk*blkSize
iFin = min(iBeg+blkSize,Ny)
useMaskj = []
for jBlk in range(nXBlks):
jBeg = jBlk*blkSize
jFin = min(jBeg+blkSize,Nx)
            mask = MakeMaskMap(data,Masks,(iBeg,iFin),(jBeg,jFin),tamp) #position mask for this block
useMaskj.append(mask)
useMask.append(useMaskj)
return useMask
def ImageIntegrate(image,data,masks,blkSize=128,returnN=False,useTA=None,useMask=None):
'Integrate an image; called from OnIntegrateAll and OnIntegrate in G2imgGUI' #for q, log(q) bins need data['binType']
import histogram2d as h2d
G2fil.G2Print ('Begin image integration; image range: %d %d'%(np.min(image),np.max(image)))
CancelPressed = False
LUtth = np.array(data['IOtth'])
LRazm = np.array(data['LRazimuth'],dtype=np.float64)
numAzms = data['outAzimuths']
numChans = (data['outChannels']//4)*4
Dazm = (LRazm[1]-LRazm[0])/numAzms
if '2-theta' in data.get('binType','2-theta'):
lutth = LUtth
elif 'log(q)' in data['binType']:
lutth = np.log(4.*np.pi*npsind(LUtth/2.)/data['wavelength'])
elif 'q' == data['binType'].lower():
lutth = 4.*np.pi*npsind(LUtth/2.)/data['wavelength']
dtth = (lutth[1]-lutth[0])/numChans
muT = data.get('SampleAbs',[0.0,''])[0]
if data['DetDepth'] > 0.5: #patch - redefine DetDepth
data['DetDepth'] /= data['distance']
if 'SASD' in data['type']:
muT = -np.log(muT)/2. #Transmission to 1/2 thickness muT
Masks = copy.deepcopy(masks)
Masks['Points'] = np.array(Masks['Points']).T #get spots as X,Y,R arrays
if np.any(masks['Points']):
Masks['Points'][2] = np.square(Masks['Points'][2]/2.)
NST = np.zeros(shape=(numAzms,numChans),order='F',dtype=np.float32)
H0 = np.zeros(shape=(numAzms,numChans),order='F',dtype=np.float32)
H2 = np.linspace(lutth[0],lutth[1],numChans+1)
Nx,Ny = data['size']
nXBlks = (Nx-1)//blkSize+1
nYBlks = (Ny-1)//blkSize+1
tbeg = time.time()
times = [0,0,0,0,0]
tamp = ma.make_mask_none((1024*1024)) #NB: this array size used in the fortran histogram2d
for iBlk in range(nYBlks):
iBeg = iBlk*blkSize
iFin = min(iBeg+blkSize,Ny)
for jBlk in range(nXBlks):
jBeg = jBlk*blkSize
jFin = min(jBeg+blkSize,Nx)
# next is most expensive step!
t0 = time.time()
if useTA:
TA = useTA[iBlk][jBlk]
else:
                TA = Make2ThetaAzimuthMap(data,(iBeg,iFin),(jBeg,jFin)) #2-theta & azimuth arrays for this block
times[1] += time.time()-t0
t0 = time.time()
if useMask:
tam = useMask[iBlk][jBlk]
else:
tam = MakeMaskMap(data,Masks,(iBeg,iFin),(jBeg,jFin),tamp)
Block = image[iBeg:iFin,jBeg:jFin]
tax,tay,taz,tad,tabs = Fill2ThetaAzimuthMap(Masks,TA,tam,Block) #and apply masks
pol = G2pwd.Polarization(data['PolaVal'][0],tay,tax-90.)[0] #for pixel pola correction
times[0] += time.time()-t0
t0 = time.time()
tax = np.where(tax > LRazm[1],tax-360.,tax) #put azm inside limits if possible
tax = np.where(tax < LRazm[0],tax+360.,tax)
if data.get('SampleAbs',[0.0,''])[1]:
if 'Cylind' in data['SampleShape']:
muR = muT*(1.+npsind(tax)**2/2.)/(npcosd(tay)) #adjust for additional thickness off sample normal
tabs = G2pwd.Absorb(data['SampleShape'],muR,tay)
elif 'Fixed' in data['SampleShape']: #assumes flat plate sample normal to beam
tabs = G2pwd.Absorb('Fixed',muT,tay)
if 'log(q)' in data.get('binType',''):
tay = np.log(4.*np.pi*npsind(tay/2.)/data['wavelength'])
elif 'q' == data.get('binType','').lower():
tay = 4.*np.pi*npsind(tay/2.)/data['wavelength']
times[2] += time.time()-t0
t0 = time.time()
taz = np.array((taz*tad/tabs),dtype='float32')/pol
if any([tax.shape[0],tay.shape[0],taz.shape[0]]):
NST,H0 = h2d.histogram2d(len(tax),tax,tay,taz,
numAzms,numChans,LRazm,lutth,Dazm,dtth,NST,H0)
times[3] += time.time()-t0
G2fil.G2Print('End integration loops')
t0 = time.time()
#prepare masked arrays of bins with pixels for interpolation setup
H2msk = [ma.array(H2[:-1],mask=np.logical_not(nst)) for nst in NST]
H0msk = [ma.array(np.divide(h0,nst),mask=np.logical_not(nst)) for nst,h0 in zip(NST,H0)]
#make linear interpolators; outside limits give NaN
H0int = [scint.interp1d(h2msk.compressed(),h0msk.compressed(),bounds_error=False) for h0msk,h2msk in zip(H0msk,H2msk)]
#do interpolation on all points - fills in the empty bins; leaves others the same
H0 = np.array([h0int(H2[:-1]) for h0int in H0int])
H0 = np.nan_to_num(H0)
if 'log(q)' in data.get('binType',''):
H2 = 2.*npasind(np.exp(H2)*data['wavelength']/(4.*np.pi))
elif 'q' == data.get('binType','').lower():
H2 = 2.*npasind(H2*data['wavelength']/(4.*np.pi))
if Dazm:
H1 = np.array([azm for azm in np.linspace(LRazm[0],LRazm[1],numAzms+1)])
else:
H1 = LRazm
if 'SASD' not in data['type']:
H0 *= np.array(G2pwd.Polarization(data['PolaVal'][0],H2[:-1],0.)[0])
H0 /= npcosd(H2[:-1]) #**2? I don't think so, **1 is right for powders
if 'SASD' in data['type']:
H0 /= npcosd(H2[:-1]) #one more for small angle scattering data?
if data['Oblique'][1]:
H0 /= G2pwd.Oblique(data['Oblique'][0],H2[:-1])
times[4] += time.time()-t0
G2fil.G2Print ('Step times: \n apply masks %8.3fs xy->th,azm %8.3fs fill map %8.3fs \
\n binning %8.3fs cleanup %8.3fs'%(times[0],times[1],times[2],times[3],times[4]))
G2fil.G2Print ("Elapsed time:","%8.3fs"%(time.time()-tbeg))
G2fil.G2Print ('Integration complete')
if returnN: #As requested by <NAME>
return H0,H1,H2,NST,CancelPressed
else:
return H0,H1,H2,CancelPressed
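# Note on the ImageIntegrate return values: H0 is the (numAzms x numChans) array of
# integrated intensities, H2 holds the numChans+1 bin edges (converted back to
# 2-theta when q or log(q) binning was used), H1 holds the numAzms+1 azimuth bin
# edges (or just LRazm for a single azimuth bin), and NST is the per-bin pixel
# count returned when returnN is True.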
def MakeStrStaRing(ring,Image,Controls):
ellipse = GetEllipse(ring['Dset'],Controls)
pixSize = Controls['pixelSize']
scalex = 1000./pixSize[0]
scaley = 1000./pixSize[1]
Ring = np.array(makeRing(ring['Dset'],ellipse,ring['pixLimit'],ring['cutoff'],scalex,scaley,Image)[0]).T #returns x,y,dsp for each point in ring
if len(Ring):
ring['ImxyObs'] = copy.copy(Ring[:2])
TA = GetTthAzm(Ring[0],Ring[1],Controls) #convert x,y to tth,azm
TA[0] = Controls['wavelength']/(2.*npsind(TA[0]/2.)) #convert 2th to d
ring['ImtaObs'] = TA
ring['ImtaCalc'] = np.zeros_like(ring['ImtaObs'])
Ring[0] = TA[0]
Ring[1] = TA[1]
return Ring,ring
else:
ring['ImxyObs'] = [[],[]]
ring['ImtaObs'] = [[],[]]
ring['ImtaCalc'] = [[],[]]
return [],[] #bad ring; no points found
def FitStrSta(Image,StrSta,Controls):
    '''Fit the strain tensor (Emat) for each d-zero ring in StrSta from the ring
    positions observed on Image, using the image calibration in Controls; the ring
    entries in StrSta are updated in place.
    '''
StaControls = copy.deepcopy(Controls)
phi = StrSta['Sample phi']
wave = Controls['wavelength']
pixelSize = Controls['pixelSize']
scalex = 1000./pixelSize[0]
scaley = 1000./pixelSize[1]
StaType = StrSta['Type']
StaControls['distance'] += StrSta['Sample z']*cosd(phi)
for ring in StrSta['d-zero']: #get observed x,y,d points for the d-zeros
dset = ring['Dset']
Ring,R = MakeStrStaRing(ring,Image,StaControls)
if len(Ring):
ring.update(R)
p0 = ring['Emat']
val,esd,covMat = FitStrain(Ring,p0,dset,wave,phi,StaType)
ring['Emat'] = val
ring['Esig'] = esd
ellipse = FitEllipse(R['ImxyObs'].T)
ringxy,ringazm = makeRing(ring['Dcalc'],ellipse,0,0.,scalex,scaley,Image)
ring['ImxyCalc'] = np.array(ringxy).T[:2]
ringint = np.array([float(Image[int(x*scalex),int(y*scaley)]) for y,x in np.array(ringxy)[:,:2]])
ringint /=
|
np.mean(ringint)
|
numpy.mean
|
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for tdmprogram.py"""
import pytest
import numpy as np
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.tdm import tdmprogram
# make test deterministic
np.random.seed(42)
def singleloop(r, alpha, phi, theta, copies, shift="default", hbar=2):
"""Single delay loop with program.
Args:
r (float): squeezing parameter
alpha (Sequence[float]): beamsplitter angles
phi (Sequence[float]): rotation angles
theta (Sequence[float]): homodyne measurement angles
hbar (float): value in appearing in the commutation relation
Returns:
(list): homodyne samples from the single loop simulation
"""
prog = tdmprogram.TDMProgram(N=2)
with prog.context(alpha, phi, theta, copies=copies, shift=shift) as (p, q):
ops.Sgate(r, 0) | q[1]
ops.BSgate(p[0]) | (q[0], q[1])
ops.Rgate(p[1]) | q[1]
ops.MeasureHomodyne(p[2]) | q[0]
eng = sf.Engine("gaussian")
result = eng.run(prog, hbar=hbar)
return result.samples[0]
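# Example invocation mirroring the tests below (parameter values are illustrative only):
#   samples = singleloop(1.0, [0, np.pi/4]*4, [np.pi/2, 0]*4, [0, np.pi/2]*4, copies=1)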
def test_number_of_copies_must_be_integer():
"""Checks number of copies is integer"""
sq_r = 1.0
N = 3
c = 4
copies = 1 / 137
alpha = [0, np.pi / 4] * c
phi = [np.pi / 2, 0] * c
theta = [0, 0] + [0, np.pi / 2] + [np.pi / 2, 0] + [np.pi / 2, np.pi / 2]
with pytest.raises(TypeError, match="Number of copies must be a positive integer"):
singleloop(sq_r, alpha, phi, theta, copies)
def test_gates_equal_length():
"""Checks gate list parameters have same length"""
sq_r = 1.0
N = 3
c = 4
copies = 10
alpha = [0, np.pi / 4] * c
phi = [np.pi / 2, 0] * c
theta = [0, 0] + [0, np.pi / 2] + [np.pi / 2, 0] + [np.pi / 2]
with pytest.raises(ValueError, match="Gate-parameter lists must be of equal length."):
singleloop(sq_r, alpha, phi, theta, copies)
def test_at_least_one_measurement():
"""Checks circuit has at least one measurement operator"""
sq_r = 1.0
N = 3
copies = 1
alpha = [0] * 4
phi = [0] * 4
prog = tdmprogram.TDMProgram(N=3)
with pytest.raises(ValueError, match="Must be at least one measurement."):
with prog.context(alpha, phi, copies=copies, shift="default") as (p, q):
ops.Sgate(sq_r, 0) | q[2]
ops.BSgate(p[0]) | (q[1], q[2])
ops.Rgate(p[1]) | q[2]
eng = sf.Engine("gaussian")
result = eng.run(prog)
def test_spatial_modes_number_of_measurements_match():
"""Checks number of spatial modes matches number of measurements"""
sq_r = 1.0
N = 3
copies = 1
alpha = [0] * 4
phi = [0] * 4
theta = [0] * 4
with pytest.raises(ValueError, match="Number of measurement operators must match number of spatial modes."):
prog = tdmprogram.TDMProgram(N=[3, 3])
with prog.context(alpha, phi, theta, copies=copies) as (p, q):
ops.Sgate(sq_r, 0) | q[2]
ops.BSgate(p[0]) | (q[1], q[2])
ops.Rgate(p[1]) | q[2]
ops.MeasureHomodyne(p[2]) | q[0]
eng = sf.Engine("gaussian")
result = eng.run(prog)
def test_shift_by_specified_amount():
"""Checks that shifting by 1 is equivalent to shift='end' for a program
with one spatial mode"""
np.random.seed(42)
sq_r = 1.0
N = 3
copies = 1
alpha = [0] * 4
phi = [0] * 4
theta = [0] * 4
np.random.seed(42)
x = singleloop(sq_r, alpha, phi, theta, copies)
np.random.seed(42)
y = singleloop(sq_r, alpha, phi, theta, copies, shift=1)
assert np.allclose(x, y)
def test_str_tdm_method():
"""Testing the string method"""
prog = tdmprogram.TDMProgram(N=1)
assert prog.__str__() == "<TDMProgram: concurrent modes=1, time bins=0, spatial modes=0>"
def test_epr():
"""Generates an EPR state and checks that the correct correlations (noise reductions) are observed
from the samples"""
np.random.seed(42)
sq_r = 1.0
c = 4
copies = 200
    # This will generate c EPR states per copy. I chose c = 4 because it allows us to make 4 EPR pairs per copy that can each be measured in different basis permutations.
alpha = [np.pi / 4, 0] * c
phi = [0, np.pi / 2] * c
# Measurement of 4 subsequent EPR states in XX, XP, PX, PP to investigate nearest-neighbour correlations in all basis permutations
theta = [0, 0] + [0, np.pi / 2] + [np.pi / 2, 0] + [np.pi / 2, np.pi / 2] #
x = singleloop(sq_r, alpha, phi, theta, copies)
X0 = x[0::8]
X1 = x[1::8]
X2 = x[2::8]
P0 = x[3::8]
P1 = x[4::8]
X3 = x[5::8]
P2 = x[6::8]
P3 = x[7::8]
atol = 5 / np.sqrt(copies)
minusstdX1X0 = (X1 - X0).std() / np.sqrt(2)
plusstdX1X0 = (X1 + X0).std() /
|
np.sqrt(2)
|
numpy.sqrt
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import *
import matplotlib.pyplot as plt
num_neurons = 100
num_inputs = 1
num_outputs = 1
symbol = 'goog'
epochs = 500
seq_len = 20
learning_rate = 0.001
f = open(symbol + '.txt', 'r').read()
data = f.split('\n')[:-1] # get rid of the last '' so float(n) works
data.reverse()
d = [float(n) for n in data]
result = []
for i in range(len(d) - seq_len - 1):
result.append(d[i: i + seq_len + 1])
result = np.array(result)
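# Each row of result holds seq_len+1 consecutive prices; the first seq_len values
# become an input window and the same window shifted by one step becomes its target
# below, so result has shape (len(d)-seq_len-1, seq_len+1).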
row = int(round(0.9 * result.shape[0]))
train = result[:row, :]
test = result[row:, :]
# normally you would randomly shuffle your data
# before splitting it into a training and test set.
np.random.shuffle(train)
X_train = train[:, :-1] # all rows with all columns except the last one
X_test = test[:, :-1] # remaining 10% used for testing
y_train = train[:, 1:]
y_test = test[:, 1:]
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], num_inputs))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], num_inputs))
y_train = np.reshape(y_train, (y_train.shape[0], y_train.shape[1], num_outputs))
y_test =
|
np.reshape(y_test, (y_test.shape[0], y_test.shape[1], num_outputs))
|
numpy.reshape
|
import numpy as np
import cv2
import cv2.aruco as aruco
import pyrealsense2 as rs
import copy
import threading
from datetime import datetime
import sys, time
from threading import Timer
import math
#from numpy import cross, eye, dot
from scipy.linalg import expm, norm
diff_w = np.asarray([0.01,-0.085,-0.1])
def M(axis, theta):
return expm(np.cross(np.eye(3), axis/norm(axis)*theta))
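# M() is the exponential-map rotation: np.cross(np.eye(3), v) builds the skew-symmetric
# cross-product matrix of v, so expm of it rotates by theta radians about axis,
# e.g. M([0, 0, 1], np.pi/2) maps the x axis onto the y axis (up to rounding).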
class Wheelchair():
def __init__(self):
#To do: change num1, num2 to a list of num
self.aruco_num1 = 0
self.aruco_num2 = 12
self.in_camera = False
self.num_xyz = {'num1': None , 'num2': None }
def wheelchair_dec(self,ids,corners,depth_img):
num = int(len(ids))
for id in range(num):
if ids[id] == self.aruco_num1:
self.num_xyz['num1'] = get_xyz(corners[id], depth_img)
self.in_camera = True
if ids[id] == self.aruco_num2:
self.num_xyz['num2'] = get_xyz(corners[id], depth_img)
#self.in_camera = True
#if self.num_xyz['num1'] != None and self.num_xyz['num2'] != None :
#print(self.num_xyz['num1'],self.num_xyz['num2'])
def unit_vector(vector):
""" Returns the unit vector of the vector."""
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
"""Finds angle between two vectors"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
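# e.g. angle_between((1, 0, 0), (0, 1, 0)) returns roughly np.pi/2 (orthogonal vectors).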
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
print("rotation_matrix compute",axis,theta)
class obj():
def __init__(self,num1,num2,num3):
self.aruco_num1 = num1
self.aruco_num2 = num2
self.aruco_num3 = num3
self.in_camera = False
self.xyz1 = None
self.xyz2 = None
self.xyz3 = None
def obj_dec(self,ids,corners,depth_img):
for id in range(len(ids)):
if ids[id] == self.aruco_num1:
self.xyz1 = get_xyz(corners[id], depth_img)
self.in_camera = True
#print("obj",self.aruco_num,"xyz=",self.xyz)
if ids[id] == self.aruco_num2:
self.xyz2 = get_xyz(corners[id], depth_img)
if ids[id] == self.aruco_num3:
self.xyz3 = get_xyz(corners[id], depth_img)
#self.in_camera = True
#print("obj",self.aruco_num,"xyz=",self.xyz)
def compute_obj2wheelchair_base(self,wheelchair):
w_Q1 = np.asarray(wheelchair.num_xyz['num1'])
w_Q2 = np.asarray(wheelchair.num_xyz['num2'])
o_Q1 =
|
np.asarray(self.xyz1)
|
numpy.asarray
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-02-18 at 10:59
@author: cook
"""
from astropy.io import fits
import numpy as np
import os
from astropy.table import Table
from collections import OrderedDict
import pandas as pd
import sys
import threading
import warnings
# try to deal with python 2/3 compatibility
if sys.version_info.major > 2:
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import tkinter.font as tkFont
from tkinter import filedialog
else:
import Tkinter as tk
import tkFont
import ttk
import tkFileDialog as filedialog
from apero.core import constants
from apero.core import math as mp
from apero import core
from apero.tools.module.gui import widgets
from apero.tools.module.setup import drs_processing
from apero import plotting
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'file_explorer.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# Get Logging function
WLOG = core.wlog
# -----------------------------------------------------------------------------
# define the program name
PROGRAM_NAME = 'APERO File Explorer'
# define the default path
ALLOWED_PATHS = ['DRS_DATA_WORKING', 'DRS_DATA_REDUC', 'DRS_DATA_RAW']
# define min column width
MIN_TABLE_COL_WIDTH = 25
# =============================================================================
# Define classes
# =============================================================================
class Navbar:
"""
Navigation bar widget
"""
def __init__(self, master):
"""
Navigation bar constructor
:param master: tk.TK parent app/frame
:type master: tk.TK
"""
self.master = master
self.menubar = tk.Menu(master)
# ---------------------------------------------------------------------
# add file menu
self.filemenu = tk.Menu(self.menubar, tearoff=0)
self.filemenu.add_command(label='Export', command=self.export)
self.filemenu.add_command(label='Quit', command=self.quit)
self.menubar.add_cascade(label='File', menu=self.filemenu)
self.clickmenu = tk.Menu(self.menubar, tearoff=0)
# ---------------------------------------------------------------------
# add settings menu
self.master.command_plot = tk.BooleanVar()
self.master.command_ds9 = tk.BooleanVar()
self.settingsmenu = tk.Menu(self.menubar, tearoff=0)
self.settingsmenu.add_cascade(label='Select',
menu=self.clickmenu)
self.clickmenu.add_checkbutton(label='plot',
onvalue=True,
offvalue=False,
variable=self.master.command_plot,
command=self.activate_plot_check)
self.clickmenu.add_checkbutton(label='ds9',
onvalue=True,
offvalue=False,
variable=self.master.command_ds9,
command=self.activate_ds9_check)
self.menubar.add_cascade(label='Settings', menu=self.settingsmenu)
# set initial value of command_plot to True
self.master.command_plot.set(True)
# ---------------------------------------------------------------------
# add help menu
self.helpmenu = tk.Menu(self.menubar, tearoff=0)
self.helpmenu.add_command(label='About', command=self.about)
self.menubar.add_cascade(label='Help', menu=self.helpmenu)
def activate_plot_check(self):
print('Clicked plot check')
if self.master.command_plot.get():
self.master.command_ds9.set(False)
def activate_ds9_check(self):
print('Clicked ds9 check')
if self.master.command_ds9.get():
self.master.command_plot.set(False)
def about(self):
"""
Make the about message box
:return:
"""
# set title
abouttitle = 'About {0}'.format(PROGRAM_NAME)
# write about message
message = ('File Explorer for the DRS. \n'
'Choose the Location to explore \n'
'Choose filters to filter the files by.')
# make message box
messagebox.showinfo(abouttitle, message)
def export(self):
listfiletypes = ['.csv', '.fits', '.txt']
filetypes = [['csv', '.csv'],
['fits-table', '.fits'],
['ascii', '.txt']]
# set up kwargs
kwargs = dict()
kwargs['initialdir'] = os.getcwd()
kwargs['title'] = 'Save file'
kwargs['filetypes'] = filetypes
# set initial cond
cond = True
# loop until cond broken
while cond:
# get filename
filename = filedialog.asksaveasfilename(**kwargs)
# --------------------------------------------------------------
# check if string
if isinstance(filename, str):
# ----------------------------------------------------------
# write file
if filename.endswith('csv'):
self.master.datastore.write(filename, 'csv')
cond = False
elif filename.endswith('txt'):
self.master.datastore.write(filename, 'ascii')
cond = False
elif filename.endswith('fits'):
self.master.datastore.write(filename, 'fits')
cond = False
else:
alltypes = ', '.join(listfiletypes)
emsg = 'Extension must be: {0}'.format(alltypes)
messagebox.showerror('Error', emsg)
# --------------------------------------------------------------
else:
cond = False
def quit(self):
"""
Quits the app
:return:
"""
self.master.destroy()
class LocationSection:
def __init__(self, parent, master):
self.master = master
# set up frames
self.frame1 = tk.Frame(parent)
self.frame2 = tk.Frame(parent)
# -----------------------------------------------------------------
# add instrument element
self.label1 = tk.Label(self.frame1, text='Instrument: ',
anchor=tk.W)
self.label1.pack(side=tk.LEFT, anchor=tk.W)
# define choices
choices = self.master.datastore.params['DRS_INSTRUMENTS']
self.box1 = ttk.Combobox(self.frame1, values=choices,
state="readonly", width=20)
self.box1.current(0)
self.box1.bind('<<ComboboxSelected>>', self.on_drop_instrument)
self.box1.pack(side=tk.LEFT, anchor=tk.W)
# -----------------------------------------------------------------
# add location element
self.label2 = tk.Label(self.frame2, text='Location: ', anchor=tk.W)
self.label2.pack(side=tk.LEFT, anchor=tk.W)
# -----------------------------------------------------------------
# define choices
choices = []
for path in ALLOWED_PATHS:
choices.append(self.master.datastore.params[path])
# -----------------------------------------------------------------
self.box2 = ttk.Combobox(self.frame2, values=choices,
state="readonly", width=75)
self.box2.current(0)
self.box2.bind('<<ComboboxSelected>>', self.on_drop_location)
self.box2.pack(side=tk.LEFT, anchor=tk.W)
# -----------------------------------------------------------------
# add frames
self.frame1.pack(padx=10, pady=10, fill=tk.BOTH, expand=tk.YES,
side=tk.TOP)
self.frame2.pack(padx=10, pady=10, fill=tk.BOTH, expand=tk.YES,
side=tk.TOP)
def on_drop_instrument(self, *args):
# update status
self.master.status_bar.status.set('Changing instruments...')
# get the value
value = self.box1.get()
# update the data
self.master.instrument = value
self.master.datastore.instrument = value
# update the instrument
self.update_instrument()
# unpopulate table
if self.master.datastore.success:
self.master.table_element.unpopulate_table()
self.master.table_element.populate_table()
self.master.filter_element.remove_filters()
self.master.datastore.apply_filters()
self.master.datastore.calculate_lengths()
self.master.filter_element.add_filters()
else:
self.master.table_element.unpopulate_table()
self.master.filter_element.remove_filters()
def update_instrument(self):
def update():
print('UPDATE INSTRUMENT')
self.master.datastore.get_data()
self.master.datastore.combine_files()
# update mask
tprocess = threading.Thread(target=update)
#self.master.config(cursor="wait")
self.master.progress.pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)
self.master.progress.start()
tprocess.start()
while tprocess.is_alive():
tprocess.join(0.1)
#self.master.config(cursor="")
self.master.progress.stop()
self.master.progress.pack_forget()
# reset status
self.master.status_bar.status.set('')
# set title
self.master.set_title()
def on_drop_location(self, *args):
# update status
self.master.status_bar.status.set('Changing locations...')
# update the data
self.update_location()
# unpopulate table
if self.master.datastore.success:
self.master.table_element.unpopulate_table()
self.master.table_element.populate_table()
self.master.filter_element.remove_filters()
self.master.datastore.apply_filters()
self.master.datastore.calculate_lengths()
self.master.filter_element.add_filters()
else:
self.master.table_element.unpopulate_table()
self.master.filter_element.remove_filters()
def update_location(self):
# get the value
value = self.box2.get()
def update():
print('UPDATE LOCATION')
self.master.datastore.get_data(path=value)
self.master.datastore.combine_files()
# update mask
tprocess = threading.Thread(target=update)
#self.master.config(cursor="wait")
self.master.progress.pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)
self.master.progress.start()
tprocess.start()
while tprocess.is_alive():
self.master.progress.step(2)
self.master.update_idletasks()
tprocess.join(0.1)
#self.master.config(cursor="")
# reset status
self.master.progress.stop()
self.master.progress.pack_forget()
self.master.status_bar.status.set('')
class FilterSection:
def __init__(self, parent, master):
self.master = master
self.frame = tk.Frame(parent)
# do not populate if datastore is empty
if self.master.datastore.success:
# -----------------------------------------------------------------
self.label = tk.Label(self.frame, text='Filters: ', anchor=tk.W)
self.label.pack(side=tk.TOP, anchor=tk.W)
# fill buttons
self.add_filters()
# pack frame
self.frame.pack(padx=10, pady=10, fill=tk.Y, side=tk.LEFT)
def add_filters(self):
# set up filter frame
self.filter_frame = tk.Frame(self.frame)
self.filter_frame.propagate(False)
self.filter_frame.pack(padx=10, pady=10, fill=tk.Y, expand=tk.YES,
side=tk.LEFT)
# get data and mask
cols = self.master.datastore.cols
sets = self.master.datastore.entries
# grid depends on number of columns
# rowlabels, collabels, rowbox, colbox = self.get_grid_positions()
# define dropdownbox storage
self.boxes = dict()
# loop around columns and add to filter grid
for it, col in enumerate(cols):
# set up choices and string variable
choices = ['None'] + list(np.sort(list(sets[col])))
label = tk.Label(self.filter_frame, text=col)
dbox = ttk.Combobox(self.filter_frame, values=choices,
state="readonly")
dbox.current(0)
label.grid(row=it, column=0, sticky=tk.W)
dbox.grid(row=it, column=1)
dbox.bind('<<ComboboxSelected>>', self.on_drop)
self.boxes[col] = dbox
def remove_filters(self):
"""
        Remove all filter widgets (with widget.destroy())
:return: None
"""
for widget in self.frame.winfo_children():
widget.destroy()
def on_drop(self, *args):
# update status
        self.master.status_bar.status.set('Applying filters...')
# get data and mask
cols = self.master.datastore.cols
for col in cols:
value = self.boxes[col].get()
if value is None or value == 'None':
self.master.datastore.options[col] = None
else:
self.master.datastore.options[col] = [value]
# update
self.update_filters()
# unpopulate table
if self.master.datastore.success:
self.master.table_element.unpopulate_table()
self.master.table_element.populate_table()
def update_filters(self):
def update():
print('UPDATE FILTERS')
self.master.datastore.apply_filters()
self.master.datastore.calculate_lengths()
# update mask
tprocess = threading.Thread(target=update)
#self.master.config(cursor="wait")
self.master.progress.pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)
self.master.progress.start()
tprocess.start()
while tprocess.is_alive():
self.master.progress.step(2)
self.master.update_idletasks()
tprocess.join(0.1)
#self.master.config(cursor="")
self.master.progress.stop()
self.master.progress.pack_forget()
# reset status
self.master.status_bar.status.set('')
def get_grid_positions(self):
FILTER_COLS = 6
        n_tot = len(self.master.datastore.cols)
n_rows = int(np.ceil(n_tot / FILTER_COLS))
# set up
rowlabels = np.repeat(np.arange(0, n_rows), FILTER_COLS)
collabels = np.tile(np.arange(0, FILTER_COLS * 2, 2), n_rows)
rowboxs = np.repeat(np.arange(0, n_rows), FILTER_COLS)
colboxs = np.tile(np.arange(1, FILTER_COLS * 2, 2), n_rows)
# return
return rowlabels, collabels, rowboxs, colboxs
class TableSection:
def __init__(self, parent, master):
self.master = master
parent.update_idletasks()
self.width = parent.winfo_width()
self.frame = tk.Frame(parent)
# pack frame
self.frame.pack(padx=10, pady=10, fill=tk.BOTH, expand=tk.YES,
side=tk.LEFT)
# do not populate if datastore is empty
if self.master.datastore.success:
self.label = tk.Label(self.frame, text='Table: ', anchor=tk.W)
self.label.pack(side=tk.TOP, anchor=tk.W)
# fill table
self.populate_table()
def populate_table(self):
self.tableframe = tk.Frame(self.frame)
self.tableframe.propagate(False)
self.tableframe.pack(padx=10, pady=10, fill=tk.BOTH, expand=tk.YES,
side=tk.TOP)
# get data, cols and mask
data = self.master.datastore.data
cols = self.master.datastore.cols
mask = self.master.datastore.mask
# ---------------------------------------------------------------------
# work out the column widths
max_column_widths = self.get_widths()
# ---------------------------------------------------------------------
# mask data
masked_data = np.array(data)[mask]
# make a style for the table
style = ttk.Style()
style.configure('file_explorer.Treeview',
borderwidth=2,
relief=tk.SUNKEN)
# ---------------------------------------------------------------------
# make table
self.tree = ttk.Treeview(self.tableframe, height=len(data),
style=('file_explorer.Treeview'))
# ---------------------------------------------------------------------
# add scroll bar
ysb = ttk.Scrollbar(self.tableframe, orient='vertical',
command=self.tree.yview)
xsb = ttk.Scrollbar(self.tableframe, orient='horizontal',
command=self.tree.xview)
self.tree.configure(yscrollcommand=ysb.set,
xscrollcommand=xsb.set)
# ---------------------------------------------------------------------
# set up columns
self.tree['columns'] = [''] + list(cols)
# add id column
self.tree.heading('#0')
self.tree.column('#0', stretch=tk.NO, width=50)
# add data columns
for c_it, col in enumerate(list(cols)):
col_id = '#{0}'.format(c_it + 1)
self.tree.heading(col_id, text=col)
self.tree.column(col_id, stretch=tk.YES,
width=max_column_widths[c_it])
# ---------------------------------------------------------------------
# insert data
for row in range(len(masked_data)):
tags = []
if row % 2 == 0:
tags.append("oddrow")
else:
tags.append("evenrow")
self.tree.insert("", row, text=str(row),
values=tuple(masked_data[row]),
tags=tags)
# ---------------------------------------------------------------------
# style for tagged elements
self.tree.tag_configure('oddrow', background='#E8E8E8')
self.tree.tag_configure('evenrow', background='#99CCFF')
# ---------------------------------------------------------------------
# pack into frame
ysb.pack(fill=tk.Y, side=tk.RIGHT)
xsb.pack(fill=tk.X, side=tk.BOTTOM)
self.tree.pack(fill=tk.BOTH)
self.tree.bind("<Double-1>", self.on_double_click)
def on_double_click(self, event):
# get data, cols and mask
data = np.array(self.master.datastore.data)
mask = self.master.datastore.mask
path = self.master.datastore.path
# ---------------------------------------------------------------------
# get item
item = self.tree.identify('item', event.x, event.y)
rownumber = self.tree.item(item, 'text')
# ---------------------------------------------------------------------
# try to get absolute path
try:
row = int(rownumber)
night = data[mask][row][0]
filename = data[mask][row][1]
# check path exists
if os.path.exists(filename):
abspath = filename
elif os.path.exists(os.path.join(night, filename)):
abspath = os.path.join(night, filename)
elif os.path.exists(os.path.join(path, night, filename)):
abspath = os.path.join(path, night, filename)
else:
emsg = 'Could not construct filename from {0}/{1}/{2}'
eargs = [path, night, filename]
raise ValueError(emsg.format(*eargs))
except Exception as e:
print('Error constructing absolute path for row={0}'
''.format(rownumber))
print('\tError {0}: {1}'.format(type(e), e))
return 0
# ---------------------------------------------------------------------
# if open in ds9 open in ds9
if self.master.command_ds9.get():
self.open_ds9(abspath)
if self.master.command_plot.get():
self.open_plot(abspath)
def open_ds9(self, abspath):
# id_plot_file
plotid = _id_plot_file(abspath)
# can only open images in ds9
if plotid != 'image':
return
# -------------------------------------------------------------
# update status
self.master.status_bar.status.set('Opening DS9...')
# construct command
ds9path = self.master.datastore.params['DRS_DS9_PATH']
if ds9path in [None, 'None', '']:
print('ds9 not found. Define path in DRS_DS9_PATH')
return
command = '{0} {1} &'.format(ds9path, abspath)
try:
os.system(command)
except Exception as e:
print('Cannot run command:')
print('\t{0}'.format(command))
print('\tError {0}: {1}'.format(type(e), e))
# reset status
self.master.status_bar.status.set('')
# TODO: Move plot to plotting
def open_plot(self, abspath):
# get params
params = self.master.datastore.params
# id_plot_file
plotid = _id_plot_file(abspath)
# can only open images and s1d in plot
if plotid is None:
return
# -----------------------------------------------------------------
# update status
self.master.status_bar.status.set('Opening Plot interface...')
# -----------------------------------------------------------------
# try to print graph
try:
# --------------------------------------------------------------
# plot s1d
if plotid == 's1d':
# load table
table = Table.read(abspath)
header = fits.getheader(abspath, ext=1)
# get data
x = table['wavelength']
y = table['flux']
# scale data (by percentiles)
with warnings.catch_warnings(record=True) as _:
mask = y > np.nanpercentile(y, 5)
mask &= y < np.nanpercentile(y, 95)
x, y = x[mask], y[mask]
# set arguments
pkwargs = dict()
pkwargs['x'] = x
pkwargs['y'] = y
pkwargs['xlabel'] = 'Wavelength [nm]'
pkwargs['ylabel'] = 'Flux'
# set name
name = 'PLOT'
# --------------------------------------------------------------
# plot image
else:
# load data
image, header = fits.getdata(abspath, header=True)
# set argument
pkwargs = dict()
pkwargs['image'] = image
pkwargs['vmin'] = np.nanpercentile(image, 5)
pkwargs['vmax'] = np.nanpercentile(image, 95)
# set name
name = 'IMAGE'
# --------------------------------------------------------------
# add title
title = '{0}\n'.format(os.path.basename(abspath))
if 'OBJECT' in header:
title += 'OBJECT={0} '.format(header['OBJECT'])
if 'DPRTYPE' in header:
title += 'DPRTYPE={0}'.format(header['DPRTYPE'])
pkwargs['title'] = title
# --------------------------------------------------------------
plotting.main(params, name, **pkwargs)
# --------------------------------------------------------------
# else print the error and move on
except Exception as e:
WLOG(params, '', 'Error cannot plot {0}'.format(abspath),
colour='red')
WLOG(params, '', '\tError {0}: {1}'.format(type(e), e))
# reset status
self.master.status_bar.status.set('')
def get_widths(self):
cols = self.master.datastore.cols
lens = self.master.datastore.lengths
# define font
self.myFont = tkFont.Font(self.frame, font='TkDefaultFont')
# loop around columns and work out width
max_column_widths = [0] * len(cols)
for it, col in enumerate(cols):
test_string = '_'*lens[col]
new_length1 = self.myFont.measure(str(test_string))
new_length2 = self.myFont.measure(str(col))
new_length3 = MIN_TABLE_COL_WIDTH
new_length = mp.nanmax([new_length1, new_length2, new_length3])
if new_length > max_column_widths[it]:
max_column_widths[it] = int(new_length * 1.10)
return max_column_widths
def unpopulate_table(self):
"""
Unpopulate the table (with widget.destroy())
:return: None
"""
for widget in self.frame.winfo_children():
widget.destroy()
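# Hedged sketch (not part of the original module): the double-click handler in
# TableSection above resolves a file path by trying, in order, the filename
# itself, then night/filename, then path/night/filename. The helper below just
# restates that fallback order as a standalone, illustrative function.
def _resolve_path_example(path, night, filename):
    candidates = [filename,
                  os.path.join(night, filename),
                  os.path.join(path, night, filename)]
    for candidate in candidates:
        # return the first candidate that exists on disk
        if os.path.exists(candidate):
            return candidate
    emsg = 'Could not construct filename from {0}/{1}/{2}'
    raise ValueError(emsg.format(path, night, filename))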
class App(tk.Tk):
"""
Main Application for file explorer
"""
def __init__(self, datastore, *args, **kwargs):
"""
Main application constructor
:param datastore: LoadData instance, storage of the indexed database
and python code line references
:param args: arguments to pass to tk.Tk.__init__
:param kwargs: keyword arguments to pass to tk.Tk.__init__
:type datastore: LoadData
:returns None:
"""
# run the super
tk.Tk.__init__(self, *args, **kwargs)
# self.style = ttk.Style()
# self.style.theme_use('alt')
# set minimum size
self.minsize(1024, 768)
# set application title
self.set_title()
# update the height and width(s) - need to update idle tasks to make
# sure we have correct height/width
self.update_idletasks()
self.height = self.winfo_height()
self.width = self.winfo_width()
# ---------------------------------------------------------------------
# add full frames
self.main_top = tk.Frame(self)
self.main_middle = tk.Frame(self, relief=tk.RAISED)
self.main_bottom = tk.Frame(self)
self.main_end = tk.Frame(self)
# ---------------------------------------------------------------------
# set the location of main frames
self.main_top.grid(column=0, row=0, columnspan=2,
sticky=(tk.E, tk.W, tk.N, tk.S))
self.main_middle.grid(column=0, row=1, sticky=(tk.E, tk.W, tk.N, tk.S))
self.main_bottom.grid(column=1, row=1, sticky=(tk.E, tk.W, tk.N, tk.S))
self.main_end.grid(column=0, row=2, columnspan=2,
sticky=(tk.E, tk.W, tk.N, tk.S))
# ---------------------------------------------------------------------
# add status bar
self.status_bar = widgets.StatusBar(self.main_end)
# ---------------------------------------------------------------------
# add progress bar to status bar
self.progress = ttk.Progressbar(self.status_bar.frame,
orient=tk.HORIZONTAL,
mode='indeterminate')
# add nav bar
self.navbar = Navbar(self)
# add menu master
self.config(menu=self.navbar.menubar)
# ---------------------------------------------------------------------
# set up the grid weights (to make it expand to full size)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=1)
self.grid_rowconfigure(2, weight=0)
#self.grid_rowconfigure(2, weight=1)
self.grid_columnconfigure(0, weight=0)
self.grid_columnconfigure(1, weight=1)
# ---------------------------------------------------------------------
# save datastore
self.datastore = datastore
# now load in the data
self.update_data()
# set the instrument
self.instrument = self.datastore.instrument
# set application title
self.set_title()
# add other elements
self.loc_element = LocationSection(self.main_top, self)
self.filter_element = FilterSection(self.main_middle, self)
self.table_element = TableSection(self.main_bottom, self)
self.frames = [self.main_middle, self.main_bottom]
def set_title(self):
if hasattr(self, 'instrument'):
self.title('{0} ({1})'.format(PROGRAM_NAME, self.instrument))
else:
self.title('{0}'.format(PROGRAM_NAME))
def update_data(self):
# update status
self.status_bar.status.set('Loading data...')
def update():
print('UPDATE DATA')
# update data now
self.datastore.get_data()
# combine table
self.datastore.combine_files()
# update mask
tprocess = threading.Thread(target=update)
#self.config(cursor="wait")
tprocess.start()
self.progress.pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)
self.progress.start()
while tprocess.is_alive():
self.progress.step(2)
self.update_idletasks()
tprocess.join(0.1)
#self.config(cursor="")
# finish
self.progress.stop()
self.progress.pack_forget()
# reset status
self.status_bar.status.set('')
# =============================================================================
# Worker functions
# =============================================================================
class LoadData:
def __init__(self, instrument, recipe=None, params=None):
self.instrument = instrument
# define empty storage
self.params = params
self.recipe = recipe
self.pconstant = None
self.path = None
self.index_filename = None
self.index_files = []
self.data = None
self.mask = None
self.cols = []
self.entries = OrderedDict()
self.lengths = OrderedDict()
self.options = OrderedDict()
self.success = False
def get_data(self, path=None):
# empty storage
self.params = None
self.pconstant = None
self.path = None
self.index_filename = None
self.index_files = []
self.data = None
self.mask = None
self.cols = []
self.entries = OrderedDict()
self.lengths = OrderedDict()
self.options = OrderedDict()
# get parameters from apero
self.params = constants.load(self.instrument)
self.pconstant = constants.pload(self.instrument)
# set path from parameters
if (path is None) and (self.path is None):
self.path = self.params[ALLOWED_PATHS[0]]
elif (path is None) and (self.path is not None):
pass
else:
self.path = path
self.index_filename = self.pconstant.INDEX_OUTPUT_FILENAME()
# get index files
self.get_index_files()
def get_index_files(self):
# raw is a special case
if self.path == self.params['DRS_DATA_RAW']:
# get run path
runpath = self.params['DRS_DATA_RUN']
runfile = self.params['REPROCESS_RAWINDEXFILE']
# construct absolute path
abspath = os.path.join(runpath, runfile)
# add to index files if index file exists
if os.path.exists(abspath):
self.index_files.append(abspath)
else:
_, _ = drs_processing.find_raw_files(self.params, self.recipe)
else:
# walk through all sub-directories
for root, dirs, files in os.walk(self.path, followlinks=True):
# loop around files in current sub-directory
for filename in files:
# only save index files
if filename == self.index_filename:
# construct absolute path
abspath = os.path.join(root, filename)
# add to index files if index file exists
if os.path.exists(abspath):
# append to storage
self.index_files.append(abspath)
# sort index files
self.index_files = np.sort(self.index_files)
def combine_files(self):
# define storage
storage = OrderedDict()
storage['SOURCE'] = []
# print that we are indexing
print('Reading all index files (N={0})'.format(len(self.index_files)))
# loop around file names
for it, filename in enumerate(self.index_files):
# get data from table
data = Table.read(filename)
# loop around columns and add to storage
for col in data.colnames:
if col not in storage:
storage[col] = list(np.array(data[col], dtype=str))
else:
storage[col] += list(np.array(data[col], dtype=str))
# full path
abspath = os.path.dirname(filename)
# get common source
common = os.path.commonpath([abspath, self.path]) + os.sep
outdir = filename.split(common)[-1]
# remove the index filename
outdir = outdir.split(self.index_filename)[0]
# append source to file
storage['SOURCE'] += [outdir] * len(data)
# deal with having a night name column (source column)
nightcols = ['NIGHTNAME', '__NIGHTNAME']
for nightcol in nightcols:
if nightcol in storage:
storage['SOURCE'] = np.array(storage[nightcol])
del storage[nightcol]
# remove hidden columns
keys = list(storage.keys())
for col in keys:
if col.startswith('__'):
del storage[col]
        # deal with columns having different lengths
current_length = 0
for col in storage.keys():
if current_length == 0:
current_length = len(storage[col])
if len(storage[col]) != current_length:
print('Index columns have wrong lengths')
self.data = None
self.clean_data = OrderedDict()
self.mask = None
self.cols = []
self.success = False
return 0
# store storage as pandas dataframe
self.data = pd.DataFrame(data=storage)
self.clean_data = OrderedDict()
self.mask = np.ones(len(self.data), dtype=bool)
# get column names
self.cols = list(self.data.columns)
# get unique column entries
for col in self.cols:
# get clean data
clean_list = list(map(self.clean, self.data[col]))
            self.clean_data[col] = np.array(clean_list)
import pdb
import gc
import sys
import argparse
import time
import yaml
import pandas as pd
import numpy as np
import torch
import optuna
import pickle
import random
from sklearn.preprocessing import StandardScaler
from pathlib import Path
from tqdm import tqdm
# Your own functions
from datasets.make_dataset import make_datasets
from trainer.train_and_eval import define_model, train_wrapper, test, objective, objective_2d, objective_fix
from utils.preprocess import preprocess_wrapper
from utils.torch_preprocess import torch_dataloaders
from utils.validation import validate_results
from utils.utils import set_logger, timer, dir_maker, update_args
from utils.ymlfun import get_args, get_parser
# for debugging, you could delete this later
if __name__ == '__main__':
"""Main file for running all the experiments for decoding.
You may need to modify some of the variables and paths to run the code.
    Configurations are read from the config.yaml file, and the argparse arguments
    are automatically generated from the YAML file, so you can specify the args
    from the CLI as long as they are defined in the config file.
"""
# Load configuration file
DIR_CURRENT = Path(__file__).parent
args = get_args(get_parser(str(DIR_CURRENT / "config.yaml")))
args.config = None # No longer needed
# ===== Define paths =====
# - DATA_EEG: The directory EEG .csv files as an input
# - DATA_KIN: The directory Kinematics .csv files as a target
# - DIR_DATA_ALL: The directory to save all the above processed data in .npz compressed format
# - DIR_DATA_EACH: The directory to save .npz compressed processed data for each trial
# - DIR_LOG: Where you want to save the .log files
# - DIR_CHECK: Where you want to save the trained model parameters
# - DIR_PRED_CSV: Where you want to save the predicted kinematics.csv
# - DIR_RESULTS_SUMMARY: Where you want to save the overall summary
# - DIR_RESULTS_SUMMARY_EACH: Where you want to save the parameters used after optimization.
# - DIR_FIGURES: Where you want to save the figures
DATA_EEG = DIR_CURRENT / 'data' / 'raw' / 'avatar' / 'eeg' / args.data_path
DATA_KIN = DIR_CURRENT / 'data' / 'raw' / 'avatar' / 'kin' / 'SL'
DIR_DATA_ALL = DIR_CURRENT / 'data' / 'processed' / args.data_compressed
DIR_DATA_EACH = DIR_CURRENT / 'data' / \
'processed' / args.data_compressed / 'each_trial'
DIR_LOG = DIR_CURRENT / 'results' / 'logs'
folder_name = str(args.exp + "_" + args.decode_type +
"_" + str(args.tap_size))
DIR_CHECK = DIR_CURRENT / 'results' / 'checkpoints' / folder_name
DIR_PRED_CSV = DIR_CURRENT / 'results' / 'predictions' / folder_name
DIR_RESULTS_SUMMARY = DIR_CURRENT / 'results' / 'summary' / folder_name
DIR_RESULTS_SUMMARY_EACH = DIR_CURRENT / 'results' / \
'summary' / folder_name / 'each_trial'
DIR_FIGURES = DIR_CURRENT / 'results' / 'figures' / 'loss' / folder_name
# Define log file
log_fname = args.exp + "_" + args.decode_type + \
"_" + str(args.tap_size) + args.name_log
log = set_logger(str(DIR_LOG), log_fname)
# Check the directories to save files and create if it doesn't exist
dir_maker(DIR_DATA_ALL)
dir_maker(DIR_DATA_EACH)
dir_maker(DIR_LOG)
dir_maker(DIR_CHECK)
dir_maker(DIR_PRED_CSV)
dir_maker(DIR_RESULTS_SUMMARY)
dir_maker(DIR_RESULTS_SUMMARY_EACH)
dir_maker(DIR_FIGURES)
# Something for smoke test
if args.smoke_test == 1:
args.num_epochs = 1
args.tap_size = 1
args.rnn_num_hidden = 4
# use GPU if available
args.cuda = torch.cuda.is_available()
# Just for checking purposes
log.info("========================================")
log.info(f"Parameters: {yaml.dump(vars(args), None, default_flow_style=False)}")
log.info(f"Smoke test: {args.smoke_test}")
log.info(f"Decode type: {args.decode_type}")
log.info(f"GPU available: {args.cuda}")
log.info(f"Batch size: {args.batch_size}")
log.info(f"Tap size: {args.tap_size}")
log.info(f"Learning rate: {args.optim_lr}")
log.info(f"Number of epochs: {args.num_epochs}")
log.info("========================================")
# set the random seed for reproducible experiments
# https://pytorch.org/docs/master/notes/randomness.html
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
if args.cuda:
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# ========== 1) Import and sort data ==========
log.info("Loading data, create datasets")
start = time.time()
# TODO: Make this part into a module "make_dataset.py"
dataset_name = "avatar8subs.npz"
files_number = 24 # There should be 24 files (8 subjects x 3 trials)
make_datasets(args, DATA_EEG, DATA_KIN, DIR_DATA_ALL,
DIR_DATA_EACH, dataset_name, files_number)
log.info("Finished loading and creating datasets")
end = time.time()
hours, mins, seconds = timer(start, end)
log.info(f"Data loading: {int(hours)} Hours {int(mins)} minutes {round(seconds, 3)} seconds")
# Define some variables to keep track of the results
num_eeg_train = len(list(DIR_DATA_EACH.glob('*.npz')))
key_eeg_train = [i.stem for i in DIR_DATA_EACH.glob('*.npz')]
sep_fraction = 0.8 # If 0.8, 80% for train and 20% validation
seg_len = 200
joints = ('hip', 'knee', 'ankle')
dsets = ('train', 'valid', 'test')
# Initialize metrics for results logging
mse_all = {}
r2_all = {}
median_rvals_all = {}
# ========== 2) Training for each trial ==========
log.info("Start training each trial")
start = time.time()
for subID in tqdm(range(num_eeg_train)):
log.info(f"Finished processing {subID} / {num_eeg_train}")
# Trial name
trial_ID = key_eeg_train[subID]
# Check if the file exists
if args.decode_type in args.input_2d_decoders:
file_name = trial_ID + ".sav"
elif args.decode_type in args.input_3d_decoders:
file_name = trial_ID + ".pth.tar"
# So that you won't run the training again
my_file = Path(DIR_CHECK / file_name)
if my_file.is_file():
log.info("The file already exists, skipping.")
else:
log.debug(f" Processing: {trial_ID}")
# ===== 2.1) Load the data =====
dataset_name = trial_ID + ".npz"
data = np.load(str(DIR_DATA_EACH / dataset_name))
# ===== 2.2) Preparing the data =====
# Define a class for eeg and kin to standardize
sc_eeg, sc_kin = StandardScaler(), StandardScaler()
# Create datasets for training, validation, and testing
X, X_2d, Y, sc_kin = preprocess_wrapper(args,
dsets,
sep_fraction,
sc_eeg,
sc_kin,
data)
del data
gc.collect()
for d in dsets:
# Just for logging purposes
log.debug(f" Chunked {d} data: {X[d].shape}")
log.debug(f" Chunked {d} data 2D: {X_2d[d].shape}")
log.debug(f" Chunked {d} target: {Y[d].shape}")
# Define dataloaders for PyTorch
loaders = torch_dataloaders(args, dsets, X, Y)
# ===== Optional: Hyperparameter tuning =====
# If you are doing hyperparameter optimization using optuna
if args.optuna_do:
study_name = args.exp + "_" + trial_ID
# For non-neural networks
if args.decode_type in args.input_2d_decoders:
study = optuna.create_study(study_name=study_name,
pruner=optuna.pruners.SuccessiveHalvingPruner())
study.optimize(lambda trial: objective_2d(trial, args, X_2d, Y, DIR_CHECK),
n_trials=args.optuna_trials)
# For neural networks
elif args.decode_type in args.input_3d_decoders:
study = optuna.create_study(
study_name=study_name, pruner=optuna.pruners.SuccessiveHalvingPruner())
if args.fix_do:
study.optimize(lambda trial: objective_fix(trial, args, loaders, DIR_CHECK),
n_trials=args.optuna_trials)
else:
study.optimize(lambda trial: objective(trial, args, loaders, DIR_CHECK),
n_trials=args.optuna_trials)
# Extract the best optimized parameters and log them
best_params = study.best_params
best_error = study.best_value
log.info(f"Best parameters are: {best_params}")
log.info(f"Best error_rate is: {best_error:.4f}")
if args.decode_type in args.input_2d_decoders:
# Load the best model
full_path = str(
Path(DIR_CHECK / str(study.best_trial.trial_id)))+".sav"
with open(full_path, 'rb') as file_name:
model = pickle.load(file_name)
elif args.decode_type in args.input_3d_decoders:
# Load the parameters from the best trial
args = update_args(args, best_params)
model = define_model(args)
full_path = str(
Path(DIR_CHECK / str(study.best_trial.trial_id)))+".pth.tar"
model.load_state_dict(torch.load(full_path))
del study
gc.collect()
else: # Not using optuna, just define the model
# ===== 2.3) Define the model =====
model = define_model(args)
# ===== 2.4) Train and Validate the model =====
log.info(" --------------------------------------")
log.info(" Start training")
log.info(" --------------------------------------")
# For models that takes 2D input (ML)
if args.decode_type in args.input_2d_decoders:
# This is the same for all the above models
model.train(X_2d['train'], Y['train'])
# Now start to validate
log.info(" Run validation")
if (args.decode_type == "KF") or (args.decode_type == "UKF"):
# prediction = model.process(X_2d['valid'], Y['valid'][0, :].T)
print("Skip for now")
else:
# prediction = model.predict(X_2d['valid'])
print("Skip for now")
# For models that takes 3D input (DL)
elif args.decode_type in args.input_3d_decoders:
# Calling your own function for training and validating
train_wrapper(args, model, DIR_FIGURES, trial_ID, loaders)
log.info(" --------------------------------------")
log.info(" Finished training")
log.info(" --------------------------------------")
# ===== 2.5) Test the model =====
log.info(" --------------------------------------")
log.info(" Start testing")
log.info(" --------------------------------------")
if args.decode_type in args.input_2d_decoders:
if (args.decode_type == "KF") or (args.decode_type == "UKF"):
prediction = model.process(X_2d['test'], Y['test'][0, :].T)
else:
prediction = model.predict(X_2d['test'])
elif args.decode_type in args.input_3d_decoders:
if args.decode_type in args.rnn_decoders:
prediction = test(model, X['test'], rnn_flag=True)
elif args.decode_type in args.cnn_decoders:
prediction = test(model, X['test'])
log.info(" --------------------------------------")
log.info(" Finished testing")
log.info(" --------------------------------------")
# ===== 2.6) Post processing the results =====
target = Y['test']
            # Scale both the prediction and the target using the same scaler used during training
if args.standardize_do:
prediction = sc_kin.transform(prediction)
target = sc_kin.transform(target)
# extract right hip, knee, ankle
actual = target[:, 0:3]
            act_size = np.array(actual)
# MIT License
# Copyright (c) 2021 <NAME>
# Copyright (c) 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Union, Tuple
import numpy as np
from numpy.core.fromnumeric import cumsum
from numpy.lib import interp, polyval
from numpy.lib.scimath import sqrt
def histcounts(x: np.ndarray, edge: np.ndarray):
"""Returns the same as the Matlab code:
``` matlab
r = histcounts(X,edge,'Normalization','probability')';
```
Normalization: probability
Specify 'Normalization' as 'probability' to normalize the bin counts so that sum(N) is 1.
That is, each bin count represents the probability that an observation falls within that bin.
Args:
X (np.ndarray): input data.
edge (np.ndarray): edges used to determine the bins.
Returns:
np.ndarray: Array of probabilities that an observation falls within the i-th bin.
"""
probs = np.zeros(len(edge))
map_to_bins = np.digitize(x, edge)
for i in map_to_bins:
probs[i - 1] += 1
# Normalization
probs = probs / sum(probs)
# The last one is not meaningful since #bins will be one less than len(edges)
return probs[:-1]
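# Hedged usage sketch (not part of the original module): with random data that
# falls strictly inside the bin edges, the probabilities returned by
# `histcounts` sum to one, mirroring Matlab's 'Normalization','probability'.
def _histcounts_example():
    rng = np.random.RandomState(0)
    x = rng.rand(100)                  # values in [0, 1)
    edges = np.linspace(0.0, 1.0, 11)  # 10 equal-width bins
    probs = histcounts(x, edges)
    assert np.isclose(probs.sum(), 1.0)
    return probs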
def fuzzy_bunching(
X: np.ndarray,
X0: Union[np.ndarray, None],
x_l: float,
x_min: float,
x_max: float,
degree: int,
friction=True,
noise=True,
fig=False,
) -> Tuple[float, float]:
r"""Fuzzy bunching
See e.g., [<NAME> Waseem (2013)](https://doi.org/10.1093/qje/qjt004) for bunching estimation,
and [<NAME> (2020)](https://dx.doi.org/10.2139/ssrn.3611447) for fuzzy bunching.
Note:
This code is adapted from Xiao's Matlab code, available at [his website](https://sites.google.com/site/kairongxiao/).
Args:
X (np.ndarray): bunching sample
X0 (Union[np.ndarray, None]): non-bunching sample if exists, skip otherwise by setting it to `None`.
x_l (float): threshold
x_min (float): excluded range lower bound
x_max (float): excluded range upper bound
        degree (int): degree of polynomial for counterfactual density
friction (bool, optional): whether to allow optimization friction. Defaults to True.
noise (bool, optional): whether to allow noise in the data. Defaults to True.
fig (bool, optional): whether to create a figure of CDF. Defaults to False.
Returns:
Tuple[float, float]: (dx_hat, alpha_hat), where `dx_hat` is bunching range, $\Delta q$, and `alpha_hat` is non-optimizing share, $\alpha$.
Examples:
>>> from frds.algorithms.bunching import fuzzy_bunching
>>> import numpy as np
Generate a sample.
>>> N = 1000
>>> x_l = 0.5
>>> dx = 0.1
>>> alpha = 0
>>> rng = np.random.RandomState(0)
>>> Z0 = rng.rand(N)
>>> Z = Z0.copy()
>>> Z[(Z0 > x_l) & (Z0 <= x_l + dx)] = x_l
>>> Z[: round(alpha * N)] = Z0[: round(alpha * N)]
>>> u = 0.0
>>> U = u * np.random.randn(N)
>>> X = np.add(Z, U)
>>> X0 = np.add(Z0, U)
>>> x_max = x_l + 1.1 * dx
>>> x_min = x_l * 0.99
Fuzzy bunching estimation
>>> fuzzy_bunching(X, X0, x_l, x_min, x_max, degree=5, friction=True, noise=True, fig=False)
(0.032179950901143374, 0.0)
References:
* [<NAME> Xiao (2020)](https://dx.doi.org/10.2139/ssrn.3611447), Fuzzy bunching, *SSRN*.
* [<NAME> Waseem (2013)](https://doi.org/10.1093/qje/qjt004),
          Using notches to uncover optimization frictions and structural elasticities: Theory and evidence from Pakistan, *The Quarterly Journal of Economics*, 128(2), 669-723.
Todo:
- [ ] Option to specify output plot path.
- [ ] Plot styling.
"""
G = 10 ** 4
edge = np.linspace(x_min, x_max, G + 1)
grid = edge[:-1]
f = histcounts(X, edge)
if X0 is None:
N = len(X)
J = round(N / 10) # J bins, with 10 obs in each bin
# y: unconditional probability for the whole sample.
#
# Try the best to reproduce the same behaviour as in Xiao's Matlab code:
# ``` matlab
        # y, x = histcounts(X, J, 'Normalization', 'probability');
# ```
# This variant of Matlab `histcounts` produces J bins with equal sizes.
# The computation of bins is automatic and no details provided.
# So, here I use numpy `histogram` function to compute the optimal bins (edges).
_, edges = np.histogram(X, J)
        # Then, use my `histcounts` with the calculated bin edges.
y = histcounts(X, edges)
# Because ('Normalization','probability') is specified in the Matlab code,
# the returned `x`, i.e. edges, from the Matlab code above is always the same as
# `linspace(0,1,J+1)`, since it's probability.
x = np.linspace(0, 1, J + 1)
x = x[:-1]
w = (x >= x_min) & (x <= x_max)
p = np.polyfit(x[~w], y[~w], degree)
f0 = np.polyval(p, grid) / sum(np.polyval(p, grid))
else:
N = len(X0)
J = round(N / 10)
# Again, I use numpy `histogram` to compute the optimal bins (edges).
_, edges = np.histogram(X0, J)
y = histcounts(X0, edges)
x = np.linspace(0, 1, J + 1)
x = x[:-1]
p = np.polyfit(x, y, degree)
f0 = histcounts(X0, edge)
# In Numpy, np.trapz(y, x); in Matlab, trapz(x, y).
# Special attention to the order of parameters.
    A = np.trapz(cumsum(f)
"""Define the default Transfer class."""
from __future__ import division
from itertools import product, chain
from six import iteritems, itervalues
import numpy as np
from openmdao.vectors.vector import INT_DTYPE
from openmdao.vectors.transfer import Transfer
from openmdao.utils.array_utils import convert_neg
_empty_idx_array = np.array([], dtype=INT_DTYPE)
class DefaultTransfer(Transfer):
"""
Default NumPy transfer.
"""
@staticmethod
def _setup_transfers(group, recurse=True):
"""
Compute all transfers that are owned by our parent group.
Parameters
----------
group : <Group>
Parent group.
recurse : bool
Whether to call this method in subsystems.
"""
group._transfers = {}
iproc = group.comm.rank
rev = group._mode == 'rev' or group._mode == 'auto'
def merge(indices_list):
if len(indices_list) > 0:
return np.concatenate(indices_list)
else:
return _empty_idx_array
if recurse:
for subsys in group._subgroups_myproc:
subsys._setup_transfers(recurse)
# Pre-compute map from abs_names to the index of the containing subsystem
abs2isub = {}
for subsys, isub in zip(group._subsystems_myproc, group._subsystems_myproc_inds):
for type_ in ['input', 'output']:
for abs_name in subsys._var_allprocs_abs_names[type_]:
abs2isub[abs_name] = isub
abs2meta = group._var_abs2meta
allprocs_abs2meta = group._var_allprocs_abs2meta
transfers = group._transfers
vectors = group._vectors
for vec_name in group._lin_rel_vec_name_list:
relvars, _ = group._relevant[vec_name]['@all']
relvars_in = relvars['input']
relvars_out = relvars['output']
# Initialize empty lists for the transfer indices
nsub_allprocs = len(group._subsystems_allprocs)
xfer_in = {}
xfer_out = {}
fwd_xfer_in = [{} for i in range(nsub_allprocs)]
fwd_xfer_out = [{} for i in range(nsub_allprocs)]
if rev:
rev_xfer_in = [{} for i in range(nsub_allprocs)]
rev_xfer_out = [{} for i in range(nsub_allprocs)]
for set_name_in in group._num_var_byset[vec_name]['input']:
for set_name_out in group._num_var_byset[vec_name]['output']:
key = (set_name_in, set_name_out)
xfer_in[key] = []
xfer_out[key] = []
for isub in range(nsub_allprocs):
fwd_xfer_in[isub][key] = []
fwd_xfer_out[isub][key] = []
if rev:
rev_xfer_in[isub][key] = []
rev_xfer_out[isub][key] = []
allprocs_abs2idx_byset = group._var_allprocs_abs2idx_byset[vec_name]
sizes_byset_in = group._var_sizes_byset[vec_name]['input']
sizes_byset_out = group._var_sizes_byset[vec_name]['output']
# Loop through all explicit / implicit connections owned by this system
for abs_in, abs_out in iteritems(group._conn_abs_in2out):
if abs_out not in relvars_out or abs_in not in relvars_in:
continue
# Only continue if the input exists on this processor
if abs_in in abs2meta and abs_in in relvars['input']:
# Get meta
meta_in = abs2meta[abs_in]
meta_out = allprocs_abs2meta[abs_out]
# Get varset info
set_name_in = meta_in['var_set']
set_name_out = meta_out['var_set']
idx_byset_in = allprocs_abs2idx_byset[abs_in]
idx_byset_out = allprocs_abs2idx_byset[abs_out]
# Get the sizes (byset) array
sizes_in = sizes_byset_in[set_name_in]
sizes_out = sizes_byset_out[set_name_out]
# Read in and process src_indices
shape_in = meta_in['shape']
shape_out = meta_out['shape']
global_size_out = meta_out['global_size']
src_indices = meta_in['src_indices']
if src_indices is None:
pass
elif src_indices.ndim == 1:
src_indices = convert_neg(src_indices, global_size_out)
else:
if len(shape_out) == 1 or shape_in == src_indices.shape:
src_indices = src_indices.flatten()
src_indices = convert_neg(src_indices, global_size_out)
else:
# TODO: this duplicates code found
# in System._setup_scaling.
entries = [list(range(x)) for x in shape_in]
                        cols = np.vstack([src_indices[i] for i in product(*entries)])
dimidxs = [convert_neg(cols[:, i], shape_out[i])
for i in range(cols.shape[1])]
src_indices = np.ravel_multi_index(dimidxs, shape_out)
# 1. Compute the output indices
if src_indices is None:
                offset = np.sum(sizes_out[iproc, :idx_byset_out])
import logging
import numpy as np
import galsim
LOGGER = logging.getLogger(__name__)
def render_objs_with_psf_shear(
*,
objs, psf_function, uv_offsets, uv_cen,
wcs, img_dim, method, g1, g2, shear_scene):
"""Render objects into a scene with some PSF function, shear, and WCS.
Parameters
----------
objs : list of galsim.GSObjects
The list of objects to be rendered.
psf_function : callable
A callable with signature `psf_function(*, x, y)` that returns the
PSF at a given location in the image.
uv_offsets : list of galsim.PositionD
The offset from the center of the image for each object in u,v. The
        units of u,v are usually arcseconds.
uv_cen : galsim.PositionD
        The center of the image in u,v. The units of u,v are usually arcseconds.
wcs : galsim.BaseWCS or one if its subclasses
The WCS function to use for the image.
img_dim : int
The size of the image in pixels.
method : string
The method used to render the image. This should usually be 'auto'
unless you are doing something special.
g1 : float
The 1-component of the shear.
g2 : float
The 2-component of the shear.
shear_scene : bool
If True, the object positions and their shapes are sheared. Otherwise,
only the object shapes are sheared.
Returns
-------
se_image : galsim.ImageD
The rendered image.
"""
shear_mat = galsim.Shear(g1=g1, g2=g2).getMatrix()
se_im = galsim.ImageD(
nrow=img_dim, ncol=img_dim, xmin=0, ymin=0)
for obj, uv_offset, in zip(objs, uv_offsets):
# shear object and maybe position
sobj = obj.shear(g1=g1, g2=g2)
if shear_scene:
            sdu, sdv = np.dot(shear_mat, np.array([uv_offset.x, uv_offset.y]))
# Created by <NAME>
# All right reserved
# Department of Computer Science
# the University of Warwick
# <EMAIL>
import itertools as it
import math
import random
import sys
from concurrent import futures
from copy import deepcopy
from os import remove
from os.path import abspath
import category_encoders as ce
import dill
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib.widgets import Slider
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import OneHotEncoder
from torch.autograd import Variable
from torch.distributions import Categorical
from torch.multiprocessing import Pool
from dbestclient.ml.integral import approx_count, prepare_reg_density_data
from dbestclient.ml.embedding import columns2sentences,WordEmbedding
# https://www.katnoria.com/mdn/
# https://github.com/sagelywizard/pytorch-mdn
"""A module for a mixture density network layer
For more info on MDNs, see _Mixture Density Networks_ by Bishop, 1994.
"""
class MDN(nn.Module):
"""A mixture density network layer
The input maps to the parameters of a MoG probability distribution, where
each Gaussian has O dimensions and diagonal covariance.
Arguments:
in_features (int): the number of dimensions in the input
out_features (int): the number of dimensions in the output
num_gaussians (int): the number of Gaussians per output dimensions
Input:
minibatch (BxD): B is the batch size and D is the number of input
dimensions.
Output:
(pi, sigma, mu) (BxG, BxGxO, BxGxO): B is the batch size, G is the
number of Gaussians, and O is the number of dimensions for each
Gaussian. Pi is a multinomial distribution of the Gaussians. Sigma
is the standard deviation of each Gaussian. Mu is the mean of each
Gaussian.
"""
def __init__(self, in_features, out_features, num_gaussians, device):
super(MDN, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.num_gaussians = num_gaussians
self.pi = nn.Sequential(
nn.Linear(in_features, num_gaussians),
nn.Softmax(dim=1)
)
self.sigma = nn.Linear(in_features, out_features * num_gaussians)
self.mu = nn.Linear(in_features, out_features * num_gaussians)
self.pi = self.pi.to(device)
self.mu = self.mu.to(device)
self.sigma = self.sigma.to(device)
def forward(self, minibatch):
pi = self.pi(minibatch)
sigma = torch.exp(self.sigma(minibatch))
sigma = sigma.view(-1, self.num_gaussians, self.out_features)
mu = self.mu(minibatch)
mu = mu.view(-1, self.num_gaussians, self.out_features)
return pi, sigma, mu
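# Hedged shape-check sketch (not part of the original module): the MDN layer
# above maps a (B, in_features) batch to pi (B, G), sigma (B, G, O) and
# mu (B, G, O), as described in the class docstring. Toy sizes, CPU device.
def _mdn_shape_example():
    device = torch.device("cpu")
    batch, in_features, out_features, num_gaussians = 4, 8, 1, 5
    mdn = MDN(in_features, out_features, num_gaussians, device)
    x = torch.randn(batch, in_features)
    pi, sigma, mu = mdn(x)
    assert pi.shape == (batch, num_gaussians)
    assert sigma.shape == (batch, num_gaussians, out_features)
    assert mu.shape == (batch, num_gaussians, out_features)
    return pi, sigma, mu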
# ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
def gaussian_probability(sigma, mu, data):
"""Returns the probability of `data` given MoG parameters `sigma` and `mu`.
Arguments:
sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
size, G is the number of Gaussians, and O is the number of
dimensions per Gaussian.
mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
number of Gaussians, and O is the number of dimensions per Gaussian.
data (BxI): A batch of data. B is the batch size and I is the number of
input dimensions.
Returns:
        probabilities (BxG): The probability of each data point under the
            Gaussian component with the corresponding sigma/mu index.
"""
data = data.unsqueeze(1).expand_as(sigma)
ret = 1.0 / math.sqrt(2 * math.pi) * torch.exp(-0.5 *
((data - mu) / sigma) ** 2) / sigma
return torch.prod(ret, 2)
def mdn_loss(pi, sigma, mu, target, device):
"""Calculates the error, given the MoG parameters and the target
The loss is the negative log likelihood of the data given the MoG
parameters.
"""
prob = pi * gaussian_probability(sigma, mu, target)
nll = -torch.log(torch.sum(prob, dim=1)).to(device)
return torch.mean(nll)
def sample(pi, sigma, mu):
"""Draw samples from a MoG.
"""
categorical = Categorical(pi)
pis = list(categorical.sample().data)
sample = Variable(sigma.data.new(sigma.size(0), sigma.size(2)).normal_())
for i, idx in enumerate(pis):
sample[i] = sample[i].mul(sigma[i, idx]).add(mu[i, idx])
return sample
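# Hedged end-to-end sketch (not part of the original module): one forward pass
# through the MDN defined above, followed by the negative log-likelihood loss
# and a draw from the resulting mixture, all on CPU with toy data.
def _mdn_loss_and_sample_example():
    device = torch.device("cpu")
    mdn = MDN(in_features=8, out_features=1, num_gaussians=5, device=device)
    x = torch.randn(16, 8)
    target = torch.randn(16, 1)
    pi, sigma, mu = mdn(x)
    loss = mdn_loss(pi, sigma, mu, target, device)   # scalar NLL over the batch
    draws = sample(pi, sigma, mu)                    # one (16, 1) draw from the MoG
    return loss, draws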
def gaussion_predict(weights: list, mus: list, sigmas: list, xs: list, n_jobs=1):
if n_jobs == 1:
result = np.array([np.multiply(stats.norm(mus, sigmas).pdf(x),
weights).sum(axis=1).tolist() for x in xs]).transpose()
else:
with Pool(processes=n_jobs) as pool:
instances = []
results = []
for x in xs:
i = pool.apply_async(
gaussion_predict, (weights, mus, sigmas, [x], 1))
instances.append(i)
for i in instances:
result = i.get()
# print("partial result", result)
results.append(result)
result = np.concatenate(results, axis=1)
# with futures.ThreadPoolExecutor() as executor:
# for x in xs:
# future = executor.submit(
# gaussion_predict, weights, mus, sigmas, [x], 1)
# results.append(future.result())
# result = np.concatenate(results, axis=1)
return result
def gm(weights: list, mus: list, vars: list, x: list, b_plot=False, n_division=100):
""" given a list of points, calculate the gaussian mixture probability
Args:
weights (list): weights
        mus (list): the centroids of the Gaussians.
vars (list): the variances.
x (list): the targeting points.
b_plot (bool, optional): whether return the value for plotting. Defaults to False.
n_division (int, optional): number of division, if b_plot=True. Defaults to 100.
Returns:
float: the pdf of a gaussian mixture.
"""
if not b_plot:
result = [stats.norm(mu_i, vars_i).pdf(
x)*weights_i for mu_i, vars_i, weights_i in zip(mus, vars, weights)]
result = sum(result)
# result = 0
# for index in range(len(weights)):
# result += stats.norm(mus[index], vars[index]
# ).pdf(x) * weights[index]
# print(result)
return result
else:
xs = np.linspace(-1, 1, n_division)
# ys = [gm(weights, mus, vars, xi, b_plot=False) for xi in xs]
ys = gm(weights, mus, vars, xs, b_plot=False)
return xs, ys
# plt.plot(xs, ys)
# plt.show()
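# Hedged usage sketch (not part of the original module): evaluate a
# two-component Gaussian mixture pdf at a couple of points with `gm`. Note
# that the `vars` argument is passed to `stats.norm` as its scale parameter.
def _gm_example():
    weights = [0.3, 0.7]
    mus = [-0.5, 0.5]
    scales = [0.1, 0.2]
    points = [0.0, 0.5]
    return gm(weights, mus, scales, points, b_plot=False)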
def normalize(x_point: float, mean: float, width: float) -> float:
"""normalize the data point
Args:
x (float): the data point
mean (float): the mean value
width (float): the width
Returns:
float: the normalized value
"""
return (x_point - mean) / width * 2
def denormalize(x_point: float, mean: float, width: float) -> float:
"""de-normalize the data point
Args:
x (float): the data point
mean (float): the mean value
width (float): the width
Returns:
float: the de-normalized value
"""
return 0.5 * width * x_point + mean
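# Hedged sketch (not part of the original module): `normalize` maps values in
# [mean - width/2, mean + width/2] onto [-1, 1] and `denormalize` inverts it.
def _normalize_roundtrip_example():
    mean, width = 50.0, 20.0
    x = 57.5
    x_norm = normalize(x, mean, width)         # (57.5 - 50) / 20 * 2 = 0.75
    x_back = denormalize(x_norm, mean, width)  # 0.5 * 20 * 0.75 + 50 = 57.5
    assert abs(x_back - x) < 1e-9
    return x_norm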
def de_serialize(file: str):
"""de-serialize the model from a file.
Args:
file (str): the file path.
Returns:
Callable: the model.
"""
with open(file, 'rb') as f:
return dill.load(f)
class GenericMdn:
def __init__(self, config):
self.meanx = None
self.widthx = None
self.config = config
def fit(self, runtime_config):
raise NotImplementedError("Method fit() is not implemented.")
def fit_grid_search(self, runtime_config):
raise NotImplementedError(
"Method fit_grid_search() is not implemented.")
def predict(self, runtime_config):
raise NotImplementedError("Method predict() is not implemented.")
def normalize(self, xs: np.array):
"""normalize the data
Args:
x (list): the data points to be normalized.
mean (float): the mean value of x.
width (float): the range of x.
Returns:
list: the normalized data.
"""
return (xs - self.meanx) / self.widthx * 2
def denormalize(self, xs):
"""de-normalize the data
Args:
x (list): the data points to be de-normalized.
mean (float): the mean value of x.
width (float): the range of x.
Returns:
list: the de-normalized data.
"""
return 0.5 * self.widthx * xs + self.meanx
class RegMdnGroupBy():
""" This class implements the regression using mixture density network for group by queries.
"""
def __init__(self, config, b_store_training_data=False, b_normalize_data=True):
if b_store_training_data:
self.x_points = None # query range
self.y_points = None # aggregate value
            self.z_points = None  # group by value
self.sample_x = None # used in the score() function
self.sample_g = None
self.sample_average_y = None
self.b_store_training_data = b_store_training_data
self.meanx = None
self.widthx = None
self.meany = None
self.widthy = None
self.model = None
self.last_xs = None
self.last_pi = None
self.last_mu = None
self.last_sigma = None
self.config = config
self.b_normalize_data = b_normalize_data
self.enc = None
def fit(self, z_group: list, x_points: list, y_points: list, runtime_config, lr: float = 0.001, n_workers=0):
"""fit the MDN regression model.
Args:
z_group (list): group by values
x_points (list): x points
y_points (list): y points
n_epoch (int, optional): number of epochs for training. Defaults to 100.
            n_gaussians (int, optional): the number of Gaussians. Defaults to 5.
n_hidden_layer (int, optional): the number of hidden layers. Defaults to 1.
n_mdn_layer_node (int, optional): the node number in the hidden layer. Defaults to 10.
lr (float, optional): the learning rate of the MDN network for training. Defaults to 0.001.
Raises:
ValueError: The hidden layer should be 1 or 2.
Returns:
RegMdnGroupBy: The regression model.
"""
n_epoch = self.config.config["n_epoch"]
n_gaussians = self.config.config["n_gaussians_reg"]
n_hidden_layer = self.config.config["n_hidden_layer"]
n_mdn_layer_node = self.config.config["n_mdn_layer_node_reg"]
b_grid_search = self.config.config["b_grid_search"]
encoder = self.config.config["encoder"]
device = runtime_config["device"]
if not b_grid_search:
if encoder == "onehot":
self.enc = OneHotEncoder(handle_unknown='ignore')
zs_encoded = z_group
zs_encoded = self.enc.fit_transform(zs_encoded).toarray()
elif encoder == "binary":
# print(z_group)
# prepare column names for binary encoding
columns = list(range(len(z_group[0])))
self.enc = ce.BinaryEncoder(cols=columns)
zs_encoded = self.enc.fit_transform(z_group).to_numpy()
elif encoder == "embedding":
sentences = columns2sentences(z_group, x_points, y_points)
self.enc = WordEmbedding()
self.enc.fit(sentences, gbs=["gb"],dim=self.config.config["n_embedding_dim"])
gbs_data = z_group.reshape(1,-1)[0]
zs_encoded = self.enc.predicts(gbs_data)
# raise TypeError("embedding is not supported yet.")
if self.b_normalize_data:
self.meanx = (np.max(x_points) + np.min(x_points)) / 2
self.widthx = np.max(x_points) - np.min(x_points)
self.meany = (np.max(y_points) + np.min(y_points)) / 2
self.widthy = np.max(y_points) - np.min(y_points)
x_points = np.array([normalize(i, self.meanx, self.widthx)
for i in x_points])
y_points = np.array([normalize(i, self.meany, self.widthy)
for i in y_points])
if self.b_store_training_data:
self.x_points = x_points
self.y_points = y_points
self.z_points = z_group
else:
# delete the previous stored data in grid search, to save space.
self.x_points = None
self.y_points = None
self.z_points = None
if encoder in ["onehot", "binary", "embedding"]:
xs_encoded = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[xs_encoded, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
else:
xzs = [[x_point, z_point]
for x_point, z_point in zip(x_points, z_group)]
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs]) # transform to torch tensors
y_points = y_points[:, np.newaxis]
tensor_ys = torch.stack([torch.Tensor(i) for i in y_points])
# move variables to cuda
tensor_xzs = tensor_xzs.to(device)
tensor_ys = tensor_ys.to(device)
my_dataset = torch.utils.data.TensorDataset(
                tensor_xzs, tensor_ys)  # create your dataset
my_dataloader = torch.utils.data.DataLoader(
my_dataset, batch_size=self.config.config["batch_size"], shuffle=True, num_workers=n_workers)
if encoder == "onehot":
input_dim = sum([len(i) for i in self.enc.categories_]) + 1
elif encoder == "binary":
input_dim = len(self.enc.base_n_encoder.feature_names) + 1
elif encoder == "embedding":
input_dim = self.enc.dim + 1
else:
raise ValueError("Encoding should be binary or onehot")
# initialize the model
if n_hidden_layer == 1:
self.model = nn.Sequential(
nn.Linear(input_dim, n_mdn_layer_node),
nn.Tanh(),
nn.Dropout(0.1),
MDN(n_mdn_layer_node, 1, n_gaussians, device)
)
elif n_hidden_layer == 2:
self.model = nn.Sequential(
nn.Linear(input_dim, n_mdn_layer_node),
nn.Tanh(),
nn.Linear(n_mdn_layer_node, n_mdn_layer_node),
nn.Tanh(),
nn.Dropout(0.1),
MDN(n_mdn_layer_node, 1, n_gaussians, device)
)
else:
raise ValueError(
"The hidden layer should be 1 or 2, but you provided "+str(n_hidden_layer))
self.model = self.model.to(device)
optimizer = optim.Adam(self.model.parameters(), lr=lr)
decay_rate = 0.96
my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer=optimizer, gamma=decay_rate)
for epoch in range(n_epoch):
if runtime_config["v"]:
if epoch % 1 == 0:
print("< Epoch {}".format(epoch))
# train the model
for minibatch, labels in my_dataloader:
minibatch.to(device)
labels.to(device)
self.model.zero_grad()
pi, sigma, mu = self.model(minibatch)
loss = mdn_loss(pi, sigma, mu, labels, device)
loss.backward()
optimizer.step()
my_lr_scheduler.step()
self.model.eval()
print("Finish regression training.")
return self
else:
return self.fit_grid_search(z_group, x_points, y_points, runtime_config)
def fit_grid_search(self, z_group: list, x_points: list, y_points: list, runtime_config):
"""use grid search to tune the hyper parameters.
Args:
z_group (list): group by values
x_points (list): independent values
y_points (list): dependent values
Returns:
RegMdnGroupBy: the fitted model
"""
param_grid = {'epoch': [5], 'lr': [0.001], 'node': [
5, 10, 20], 'hidden': [1, 2], 'gaussian_reg': [3, 5]}
# param_grid = {'epoch': [5], 'lr': [0.001], 'node': [
# 5], 'hidden': [1], 'gaussian': [3]}
errors = []
combinations = it.product(*(param_grid[Name] for Name in param_grid))
combinations = list(combinations)
combs = []
for combination in combinations:
idx = 0
comb = {}
# print(combination)
for key in param_grid:
comb[key] = combination[idx]
idx += 1
combs.append(comb)
self.b_store_training_data = True
for para in combs:
print("Grid search for parameter set :", para)
config = self.config.copy()
config.config["n_gaussians_reg"] = para['gaussian_reg']
# config.config["n_gaussians_density"] = para['gaussian_density']
config.config["n_epoch"] = para['epoch']
config.config["n_hidden_layer"] = para['hidden']
config.config["n_mdn_layer_node_reg"] = para['node']
config.config["b_grid_search"] = False
instance = RegMdnGroupBy(config, b_store_training_data=True).fit(z_group, x_points, y_points,
runtime_config, lr=para['lr'])
errors.append(instance.score(runtime_config))
print("errors for grid search ", errors)
index = errors.index(min(errors))
para = combs[index]
print("Finding the best configuration for the network", para)
self.b_store_training_data = False
# release space
self.x_points = None
self.y_points = None
self.z_points = None
self.sample_x = None
self.sample_g = None
self.sample_average_y = None
config = self.config.copy()
config.config["n_gaussians_reg"] = para['gaussian_reg']
# config.config["n_gaussians_density"] = para['gaussian_density']
# config.config["n_epoch"] = para['epoch']
config.config["n_hidden_layer"] = para['hidden']
config.config["n_mdn_layer_node_reg"] = para['node']
config.config["b_grid_search"] = False
instance = RegMdnGroupBy(config).fit(z_group, x_points, y_points,
runtime_config, lr=para['lr'])
print("-"*80)
return instance
def predict(self, z_group: list, x_points: list, runtime_config, b_plot=False) -> list:
"""provide predictions for given groups and points.
Args:
z_group (list): the group by values
x_points (list): the corresponding x points
            b_plot (bool, optional): whether to plot the data. Defaults to False.
Raises:
Exception: [description]
Returns:
list: the predictions.
"""
# torch.set_num_threads(4)
# check input data type, and convert to np.array
if type(z_group) is list:
z_group = np.array(z_group)
if type(x_points) is list:
x_points = np.array(x_points)
encoder = self.config.config["encoder"]
device = runtime_config["device"]
if encoder == 'no':
convert2float = True
if convert2float:
try:
zs_float = []
for item in z_group:
if item[0] == "":
zs_float.append([0.0])
else:
zs_float.append([(float)(item[0])])
z_group = zs_float
except:
raise Exception
if self.b_normalize_data:
x_points = normalize(x_points, self.meanx, self.widthx)
if encoder == "onehot":
# zs_encoded = z_group # [:, np.newaxis]
zs_encoded = self.enc.transform(z_group).toarray()
x_points = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[x_points, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
elif encoder == "binary":
zs_encoded = self.enc.transform(z_group).to_numpy()
x_points = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[x_points, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
elif encoder == "embedding":
zs_transformed = z_group.reshape(1,-1)[0]
zs_encoded = self.enc.predicts(zs_transformed)
x_points = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[x_points, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
else:
xzs = [[x_point, z_point]
for x_point, z_point in zip(x_points, z_group)]
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs])
tensor_xzs = tensor_xzs.to(device)
self.model = self.model.to(device)
pis, sigmas, mus = self.model(tensor_xzs)
if not b_plot:
pis = pis.cpu().detach().numpy() # [0]
# sigmas = sigmas.detach().numpy().reshape(len(sigmas), -1)[0]
mus = mus.cpu().detach().numpy().reshape(len(z_group), -1) # [0]
predictions = np.sum(np.multiply(pis, mus), axis=1)
if self.b_normalize_data:
predictions = [denormalize(pred, self.meany, self.widthy)
for pred in predictions]
return predictions
else:
samples = sample(pis, sigmas, mus).data.numpy().reshape(-1)
if self.b_normalize_data:
samples = [denormalize(pred, self.meany, self.widthy)
for pred in samples]
# plt.scatter(z_group, x_points, samples)
# plt.show()
# return samples
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if len(self.x_points) > 2000:
idx = np.random.randint(0, len(self.x_points), 2000)
if self.b_normalize_data:
x_samples = [denormalize(i, self.meanx, self.widthx)
for i in self.x_points[idx]]
y_samples = [denormalize(i, self.meany, self.widthy)
for i in self.y_points[idx]]
ax.scatter(x_samples,
self.z_points[idx], y_samples)
else:
ax.scatter(self.x_points, self.z_points, self.y_points)
if self.b_normalize_data:
x_points = denormalize(x_points, self.meanx, self.widthx)
if len(samples) > 2000:
idx = np.random.randint(0, len(x_points), 2000)
ax.scatter(np.array(x_points)[idx], np.array(
z_group)[idx], np.array(samples)[idx])
else:
ax.scatter(x_points, z_group, samples)
ax.set_xlabel('query range attribute')
ax.set_ylabel('group by attribute')
ax.set_zlabel('aggregate attribute')
plt.show()
return samples
def score(self, runtime_config) -> float:
""" evaluate the error for this model. currenltly,
it is the sum of all absolute errors, for a random sample of points.
Raises:
ValueError: b_store_training_data must be set to True to enable the score() function.
Returns:
float: the absolute error
"""
gs = ["g1", "g2", "g3", "g4", "g5"]
if not self.b_store_training_data:
raise ValueError(
"b_store_training_data must be set to True to enable the score() function.")
else:
# groups = self.enc.categories_[0]
if self.sample_x is None:
# process group by values
data = {gs[i]: [row[i] for row in self.z_points]
for i in range(len(self.z_points[0]))}
# append x y values
data['x'] = denormalize(self.x_points, self.meanx, self.widthx)
data['y'] = denormalize(self.y_points, self.meany, self.widthy)
df = pd.DataFrame(data)
columns = list(df.columns.values)
columns.remove("y")
# df = pd.DataFrame(
# {'g': self.z_points, 'x': denormalize(self.x_points, self.meanx, self.widthx), 'y': denormalize(self.y_points, self.meany, self.widthy)})
# mean_y = df.groupby(['g', 'x'])['y'].mean() # .reset_index()
mean_y = df.groupby(columns)['y'].mean() # .reset_index()
# print(df)
# raise Exception
# make the same index here
df = df.set_index(columns) # df = df.set_index(['g', 'x'])
df['mean_y'] = mean_y
# print(df)
df = df.reset_index() # to take the hierarchical index off again
df = df.sample(
n=min(1000, len(self.x_points)), random_state=1, replace=False)
self.sample_x = df["x"].values
# for g in columns[:-1]:
# self.sample_g = df["g"].values
self.sample_g = df[columns[:-1]].values
# raise Exception
self.sample_average_y = df["mean_y"].values
predictions = self.predict(
self.sample_g, self.sample_x, runtime_config)
errors = [abs(pred-tru)
for pred, tru in zip(predictions, self.sample_average_y)]
errors = sum(sorted(errors)[10:-10])
return errors
class RegMdn():
""" This class implements the regression using mixture density network.
"""
# , n_mdn_layer_node=20, b_one_hot=True
def __init__(self, config, dim_input, b_store_training_data=False):
if b_store_training_data:
self.xs = None # query range
self.ys = None # aggregate value
            self.zs = None  # group by value
self.b_store_training_data = b_store_training_data
self.meanx = None
self.widthx = None
self.meany = None
self.widthy = None
self.meanz = None
self.widthz = None
self.model = None
self.is_normalized = False
self.dim_input = dim_input
self.is_training_data_denormalized = False
self.last_xs = None
self.last_pi = None
self.last_mu = None
self.last_sigma = None
self.enc = None
self.config = config
# num_epoch=400, num_gaussians=5
def fit(self, xs, ys, runtime_config, b_show_plot=False, b_normalize=True):
""" fit a regression y= R(x)"""
if len(xs.shape) != 2:
raise Exception("xs should be 2-d, but got unexpected shape.")
if self.dim_input == 1:
return self.fit2d(xs, ys, runtime_config, b_show_reg_plot=b_show_plot,
b_normalize=b_normalize, )
elif self.dim_input == 2:
return self.fit3d(xs[:, 0], xs[:, 1], ys, runtime_config, b_show_plot=b_show_plot,
b_normalize=b_normalize, )
else:
print("dimension mismatch")
sys.exit(0)
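# Illustrative usage sketch (hypothetical data/config objects, not part of this file):
#   reg = RegMdn(config, dim_input=2)
#   reg.fit(np.column_stack([range_attr, group_attr]), agg_attr, runtime_config)
#   predictions = reg.predict(np.column_stack([range_attr, group_attr]), runtime_config)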
def predict(self, xs, runtime_config, b_show_plot=False):
""" make predictions"""
if self.dim_input == 1:
return self.predict2d(xs, runtime_config, b_show_plot=b_show_plot)
elif self.dim_input == 2:
return self.predict3d(xs[:, 0], xs[:, 1], runtime_config, b_show_plot=b_show_plot)
else:
print("dimension mismatch")
sys.exit(0)
def fit3d(self, xs, zs, ys, runtime_config, b_show_plot=False, b_normalize=True, n_workers=0):
""" fit a regression y = R(x,z)
Args:
xs ([float]): query range attribute
zs ([float]): group by attribute
ys ([float]): aggregate attribute
b_show_plot (bool, optional): whether to show the plot. Defaults to False.
"""
b_one_hot = True
device = runtime_config["device"]
n_mdn_layer_node = self.config.config["n_mdn_layer_node"]
num_gaussians = self.config.config["n_gaussions"]
num_epoch = self.config.config["n_epoch"]
if b_one_hot:
self.enc = OneHotEncoder(handle_unknown='ignore')
zs_onehot = zs[:, np.newaxis]
zs_onehot = self.enc.fit_transform(zs_onehot).toarray()
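# The group-by attribute is one-hot encoded here and later concatenated with the
# (normalised) range attribute to form the network input.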
if b_normalize:
self.meanx = (np.max(xs) + np.min(xs)) / 2
self.widthx = np.max(xs) - np.min(xs)
self.meany = (np.max(ys) + np.min(ys)) / 2
self.widthy = np.max(ys) - np.min(ys)
# self.meanz = np.mean(zs)
# self.widthz = np.max(zs)-np.min(zs)
# s= [(i-meanx)/1 for i in x]
xs = np.array([self.normalize(i, self.meanx, self.widthx)
for i in xs])
ys = np.array([self.normalize(i, self.meany, self.widthy)
for i in ys])
# zs = np.array([self.normalize(i, self.meanz, self.widthz)
# for i in zs])
self.is_normalized = True
if self.b_store_training_data:
self.xs = xs
self.ys = ys
self.zs = zs
if b_show_plot:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs, zs, ys)
ax.set_xlabel('query range attribute')
ax.set_ylabel('group by attribute')
ax.set_zlabel('aggregate attribute')
plt.show()
if b_one_hot:
xs_onehot = xs[:, np.newaxis]
xzs_onehot = np.concatenate(
[xs_onehot, zs_onehot], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_onehot]) # transform to torch tensors
else:
xzs = [[xs[i], zs[i]] for i in range(len(xs))]
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs]) # transform to torch tensors
ys = ys[:, np.newaxis]
tensor_ys = torch.stack([torch.Tensor(i) for i in ys])
# move variables to cuda
tensor_xzs = tensor_xzs.to(device)
tensor_ys = tensor_ys.to(device)
my_dataset = torch.utils.data.TensorDataset(
tensor_xzs, tensor_ys) # create your datset
# , num_workers=8) # create your dataloader
my_dataloader = torch.utils.data.DataLoader(
my_dataset, batch_size=self.config.config["batch_size"], shuffle=False, num_workers=n_workers)
input_dim = len(self.enc.categories_[0]) + 1
# initialize the model
self.model = nn.Sequential(
nn.Linear(input_dim, n_mdn_layer_node), # self.dim_input
nn.Tanh(),
nn.Dropout(0.01),
MDN(n_mdn_layer_node, 1, num_gaussians, device)
)
self.model = self.model.to(device)
optimizer = optim.Adam(self.model.parameters())
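# Training loop: each minibatch minimises the mixture density loss of the labels
# under the predicted mixture parameters (pi, sigma, mu).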
for epoch in range(num_epoch):
if epoch % 100 == 0:
print("< Epoch {}".format(epoch))
# train the model
for minibatch, labels in my_dataloader:
minibatch = minibatch.to(device)  # .to() is not in-place; rebind so the move actually takes effect
labels = labels.to(device)
self.model.zero_grad()
pi, sigma, mu = self.model(minibatch)
loss = mdn_loss(pi, sigma, mu, labels, device)
loss.backward()
optimizer.step()
return self
def fit3d_grid_search(self, xs: list, zs: list, ys: list, runtime_config, b_normalize=True):
""" fit the regression, using grid search to find the optimal parameters.
Args:
xs (list): x points.
zs (list): group by attributes
ys (list): y values.
b_normalize (bool, optional): whether the values should be normalized
for training. Defaults to True.
Returns:
RegMdn: the model.
"""
param_grid = {'epoch': [5], 'lr': [0.001, 0.0001], 'node': [
5, 10, 20], 'hidden': [1, 2], 'gaussian': [2, 4]}
# param_grid = {'epoch': [2], 'lr': [0.001], 'node': [4, 12], 'hidden': [1, 2], 'gaussian': [10]}
errors = []
combinations = it.product(*(param_grid[Name] for Name in param_grid))
combinations = list(combinations)
combs = []
for combination in combinations:
idx = 0
comb = {}
for key in param_grid:
comb[key] = combination[idx]
idx += 1
combs.append(comb)
self.b_store_training_data = True
# for para in combs:
# print("Grid search for parameter set :", para)
# instance = self.fit(zs, xs, b_normalize=b_normalize, num_gaussians=para['gaussian'], num_epoch=para['epoch'],
# n_mdn_layer_node=para['node'], lr=para['lr'], hidden=para['hidden'], b_grid_search=False)
# errors.append(instance.score())
# index = errors.index(min(errors))
# para = combs[index]
# print("Finding the best configuration for the network", para)
# self.b_store_training_data = False
# instance = self.fit(zs, xs, b_normalize=True, num_gaussians=para['gaussian'], num_epoch=20,
# n_mdn_layer_node=para['node'], lr=para['lr'], hidden=para['hidden'], b_grid_search=False)
# return instance
def fit2d(self, xs, ys, runtime_config, b_show_reg_plot=False, b_normalize=True,
b_show_density_plot=False, n_workers=0):
""" fit a regression y = R(x)
Args:
xs([float]): query range attribute
ys([float]): aggregate attribute
b_show_plot(bool, optional): whether to show the plot. Defaults to False.
"""
n_mdn_layer_node = self.config.config["n_mdn_layer_node"]
num_epoch = self.config.config["n_epoch"]
if b_normalize:
self.meanx = (np.max(xs) + np.min(xs)) / 2
self.widthx = np.max(xs) - np.min(xs)
self.meany = (np.max(ys) +
|
np.min(ys)
|
numpy.min
|
import unittest
import numpy as np
from pltools.data import LoadSample, LoadSampleLabel
from pltools.data.load_fn import norm_zero_mean_unit_std
class TestLoadsample(unittest.TestCase):
@staticmethod
def load_dummy_label(path):
return {'label': 42}
@staticmethod
def load_dummy_data(path):
return np.random.rand(1, 256, 256) * np.random.randint(2, 20) + \
np.random.randint(20)
def test_load_sample(self):
# check loading of a single sample
sample_fn = LoadSample({'data': ['data', 'data', 'data'],
'seg': ['data'],
'data2': ['data', 'data', 'data']},
self.load_dummy_data,
dtype={'seg': 'uint8'},
normalize=('data2',))
sample = sample_fn('load')
assert not np.isclose(np.mean(sample['data']), 0)
assert not np.isclose(np.mean(sample['seg']), 0)
assert sample['seg'].dtype == 'uint8'
assert np.isclose(sample['data2'].max(), 1)
assert np.isclose(sample['data2'].min(), -1)
def test_load_sample_zero_mean_norm(self):
# check different normalization function
sample_fn = LoadSample({'data': ['data', 'data', 'data']},
self.load_dummy_data,
normalize=('data',),
norm_fn=norm_zero_mean_unit_std)
sample = sample_fn('load')
assert np.isclose(np.mean(sample['data']), 0)
assert np.isclose(
|
np.std(sample['data'])
|
numpy.std
|
import numpy as np
import typing
import warnings
import slippy
import functools
__all__ = ['guess_loads_from_displacement', 'bccg', 'plan_convolve', 'plan_multi_convolve', 'plan_coupled_convolve',
'polonsky_and_keer']
def guess_loads_from_displacement(displacements_z: np.array, zz_component: np.array) -> np.array:
"""
Defines the starting point for the default loads from displacement method
Parameters
----------
displacements_z: np.array
The point wise displacement
zz_component: np.array
The zz component of the influence matrix
Returns
-------
guess_of_loads: np.array
An array containing the initial guess of the loads
"""
max_im = max(zz_component)
return displacements_z / max_im
try:
import cupy as cp
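# Helper used below: round a length up to the next power of two (e.g. 5 -> 8) when padding FFT shapes.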
def n_pow_2(a):
return 2 ** int(np.ceil(np.log2(a)))
def _plan_cuda_convolve(loads: np.ndarray, im: np.ndarray, domain: np.ndarray,
circular: typing.Sequence[bool], no_shape_check: bool):
"""Plans an FFT convolution, returns a function to carry out the convolution
CUDA implementation
Parameters
----------
loads: np.ndarray
An example of a loads array, this is not altered or stored
im: np.ndarray
The influence matrix component for the transformation. This is not altered, but its FFT is stored to
save time during convolution. This must be larger in every dimension than the loads array
domain: np.ndarray, optional
Array with same shape as loads filled with boolean values. If supplied this function will return a
function which first fills the supplied loads into the domain then computes the convolution.
This is typically used for finding loads from set displacements as the displacements are often not set
over the whole surface.
circular: Sequence[bool], optional (False)
If True the circular convolution will be calculated, to be used for periodic simulations
Returns
-------
function
A function which takes a single input of loads and returns the result of the convolution with the original
influence matrix. If a domain was not supplied the input to the returned function must be exactly the same
shape as the loads array used in this function. If a domain was specified the length of the loads input to
the returned function must be the same as the number of non zero elements in domain.
Notes
-----
This function uses CUDA to run on a GPU. If your computer doesn't have cupy installed this should not
have loaded; if it has for some reason, this can be manually overridden by first importing slippy and then setting the CUDA
variable to False:
>>> import slippy
>>> slippy.CUDA = False
>>> import slippy.contact
>>> ...
Examples
--------
>>> import numpy as np
>>> import slippy.contact as c
>>> result = c.hertz_full([1,1], [np.inf, np.inf], [200e9, 200e9], [0.3, 0.3], 1e4)
>>> X,Y = np.meshgrid(*[np.linspace(-0.005,0.005,256)]*2)
>>> grid_spacing = X[1][1]-X[0][0]
>>> loads = result['pressure_f'](X,Y)
>>> disp_analytical = result['surface_displacement_b_f'][0](X,Y)['uz']
>>> im = c.elastic_influence_matrix('zz', (512,512), (grid_spacing,grid_spacing), 200e9/(2*(1+0.3)), 0.3)
>>> convolve_func = plan_convolve(loads, im, None, [False, False])
>>> disp_numerical = convolve_func(loads)
"""
loads = cp.asarray(loads)
im = cp.asarray(im)
im_shape_orig = im.shape
if domain is not None:
domain = cp.asarray(domain)
input_shape = []
for i in range(2):
if circular[i]:
if not no_shape_check:
assert loads.shape[i] == im.shape[i], "For circular convolution loads and im must be same shape"
input_shape.append(loads.shape[i])
else:
if not no_shape_check:
msg = "For non circular convolution influence matrix must be double loads"
assert loads.shape[i] == im.shape[i] // 2, msg
input_shape.append(n_pow_2(max(loads.shape[i], im.shape[i])))
input_shape = tuple(input_shape)
forward_trans = functools.partial(cp.fft.fft2, s=input_shape)
backward_trans = functools.partial(cp.fft.ifft2, s=input_shape)
shape_diff = [[0, (b - a)] for a, b in zip(im.shape, input_shape)]
norm_inv = (input_shape[0] * input_shape[1]) ** 0.5
norm = 1 / norm_inv
im = cp.pad(im, shape_diff, mode='constant')
im = cp.roll(im, tuple(-((sz - 1) // 2) for sz in im_shape_orig), (-2, -1))
fft_im = forward_trans(im) * norm
shape = loads.shape
dtype = loads.dtype
def inner_with_domain(sub_loads, ignore_domain=False):
full_loads = cp.zeros(shape, dtype=dtype)
full_loads[domain] = sub_loads
fft_loads = forward_trans(full_loads)
full = norm_inv * cp.real(backward_trans(fft_loads * fft_im))
full = full[:full_loads.shape[0], :full_loads.shape[1]]
if ignore_domain:
return full
return full[domain]
def inner_no_domain(full_loads):
full_loads = cp.asarray(full_loads)
if full_loads.shape == shape:
flat = False
else:
full_loads = cp.reshape(full_loads, loads.shape)
flat = True
fft_loads = forward_trans(full_loads)
full = norm_inv * cp.real(backward_trans(fft_loads * fft_im))
full = full[:full_loads.shape[0], :full_loads.shape[1]]
if flat:
full = full.flatten()
return full
if domain is None:
return inner_no_domain
else:
return inner_with_domain
def _plan_cuda_multi_convolve(loads: np.ndarray, ims: np.ndarray, domain: np.ndarray = None,
circular: typing.Sequence[bool] = (False, False)):
"""Plans an FFT convolution, returns a function to carry out the convolution
CUDA implementation
Parameters
----------
loads: np.ndarray
An example of a loads array, this is not altered or stored
ims: np.ndarray
The influence matrix components for the transformation. These are not altered, but their FFTs are stored to
save time during convolution. Each must be larger in every dimension than the loads array
domain: np.ndarray, optional
Array with same shape as loads filled with boolean values. If supplied this function will return a
function which first fills the supplied loads into the domain then computes the convolution.
This is typically used for finding loads from set displacements as the displacements are often not set
over the whole surface.
circular: Sequence[bool], optional (False)
If True the circular convolution will be calculated, to be used for periodic simulations
Returns
-------
function
A function which takes a single input of loads and returns the result of the convolution with the original
influence matrix. If a domain was not supplied the input to the returned function must be exactly the same
shape as the loads array used in this function. If a domain was specified the length of the loads input to
the returned function must be the same as the number of non zero elements in domain.
Notes
-----
This function uses CUDA to run on a GPU. If your computer doesn't have cupy installed this should not
have loaded; if it has for some reason, this can be manually overridden by first importing slippy and then setting the CUDA
variable to False:
>>> import slippy
>>> slippy.CUDA = False
>>> import slippy.contact
>>> ...
Examples
--------
>>> import numpy as np
>>> import slippy.contact as c
>>> result = c.hertz_full([1,1], [np.inf, np.inf], [200e9, 200e9], [0.3, 0.3], 1e4)
>>> X,Y = np.meshgrid(*[np.linspace(-0.005,0.005,256)]*2)
>>> grid_spacing = X[1][1]-X[0][0]
>>> loads = result['pressure_f'](X,Y)
>>> disp_analytical = result['surface_displacement_b_f'][0](X,Y)['uz']
>>> im = c.elastic_influence_matrix('zz', (512,512), (grid_spacing,grid_spacing), 200e9/(2*(1+0.3)), 0.3)
>>> convolve_func = plan_convolve(loads, im, None, [False, False])
>>> disp_numerical = convolve_func(loads)
"""
loads = cp.asarray(loads)
im = cp.asarray(ims[0])
im_shape_orig = im.shape
if domain is not None:
domain = cp.asarray(domain)
input_shape = []
for i in range(2):
if circular[i]:
assert loads.shape[i] == im.shape[i], "For circular convolution loads and im must be same shape"
input_shape.append(loads.shape[i])
else:
msg = "For non circular convolution influence matrix must be double loads"
assert loads.shape[i] == im.shape[i] // 2, msg
input_shape.append(n_pow_2(max(loads.shape[i], im.shape[i])))
input_shape = tuple(input_shape)
forward_trans = functools.partial(cp.fft.fft2, s=input_shape)
backward_trans = functools.partial(cp.fft.ifft2, s=input_shape)
shape_diff = [[0, (b - a)] for a, b in zip(im.shape, input_shape)]
norm_inv = (input_shape[0] * input_shape[1]) ** 0.5
norm = 1 / norm_inv
fft_ims = cp.zeros((len(ims), *input_shape), dtype=cp.complex128)
for i in range(len(ims)):
im = cp.asarray(ims[i])
im = cp.pad(im, shape_diff, mode='constant')
im = cp.roll(im, tuple(-((sz - 1) // 2) for sz in im_shape_orig), (-2, -1))
fft_ims[i] = forward_trans(im) * norm
shape = loads.shape
dtype = loads.dtype
def inner_no_domain(full_loads):
full_loads = cp.asarray(full_loads)
all_results = cp.zeros((len(fft_ims), *full_loads.shape))
if full_loads.shape == shape:
flat = False
else:
full_loads = cp.reshape(full_loads, loads.shape)
flat = True
fft_loads = forward_trans(full_loads)
for i in range(len(ims)):
full = norm_inv * cp.real(backward_trans(fft_loads * fft_ims[i]))
full = full[:full_loads.shape[0], :full_loads.shape[1]]
if flat:
full = full.flatten()
all_results[i] = full
return all_results
def inner_with_domain(sub_loads, ignore_domain=False):
full_loads = cp.zeros(shape, dtype=dtype)
full_loads[domain] = sub_loads
if ignore_domain:
all_results = cp.zeros((len(fft_ims), *full_loads.shape))
else:
all_results = cp.zeros((len(fft_ims), *sub_loads.shape))
fft_loads = forward_trans(full_loads)
for i in range(len(ims)):
full = norm_inv * cp.real(backward_trans(fft_loads * fft_ims[i]))
full = full[:full_loads.shape[0], :full_loads.shape[1]]
if ignore_domain:
all_results[i] = full
else:
all_results[i] = full[domain]
return all_results
if domain is None:
return inner_no_domain
else:
return inner_with_domain
def _cuda_polonsky_and_keer(f: typing.Callable, p0: typing.Sequence, just_touching_gap: typing.Sequence,
target_load: float, grid_spacing: typing.Sequence[float], eps_0: float = 1e-6,
max_it: int = None):
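# Polonsky & Keer style conjugate-gradient solver: the pressure field ``pij`` is updated
# iteratively so that the deformed gap ``gij = f(pij) + just_touching_gap`` approaches zero
# where pressure is positive, while the total load is rescaled to ``target_load`` each iteration.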
just_touching_gap = cp.array(just_touching_gap)
p0 = cp.array(p0)
if max_it is None:
max_it = just_touching_gap.size
# init
pij = p0 / cp.mean(p0) * target_load
delta = 0
g_big_old = 1
tij = 0
it_num = 0
element_area = grid_spacing[0] * grid_spacing[1]
while True:
uij = f(pij)
gij = uij + just_touching_gap
current_touching = pij > 0
g_bar = cp.mean(gij[current_touching])
gij = gij - g_bar
g_big = cp.sum(gij[current_touching] ** 2)
if it_num == 0:
tij = gij
else:
tij = gij + delta * (g_big / g_big_old) * tij
tij[cp.logical_not(current_touching)] = 0
g_big_old = g_big
rij = f(tij)
r_bar = cp.mean(rij[current_touching])
rij = rij - r_bar
if not cp.linalg.norm(rij):
tau = 0
else:
tau = (cp.dot(gij[current_touching], tij[current_touching]) /
cp.dot(rij[current_touching], tij[current_touching]))
pij_old = pij
pij = pij - tau * tij
pij = cp.clip(pij, 0, cp.inf)
iol = cp.logical_and(pij == 0, gij < 0)
if cp.any(iol):
delta = 0
pij[iol] = pij[iol] - tau * gij[iol]
else:
delta = 1
p_big = element_area * cp.sum(pij)
pij = pij / p_big * target_load
eps = (element_area / target_load) * cp.sum(cp.abs(pij - pij_old))
if eps < eps_0:
failed = False
break
it_num += 1
if it_num > max_it:
failed = True
break
if cp.any(cp.isnan(pij)):
failed = True
break
return failed, pij, gij
def _cuda_bccg(f: typing.Callable, b: typing.Sequence, tol: float, max_it: int, x0: typing.Sequence,
min_pressure: float = 0.0, max_pressure: typing.Union[float, typing.Sequence] = cp.inf,
k_inn=1) -> typing.Tuple[cp.ndarray, bool]:
"""
The Bound-Constrained Conjugate Gradient Method for Non-negative Matrices
CUDA implementation
Parameters
----------
f: Callable
A function equivalent to multiplication by a non negative n by n matrix must work with cupy arrays.
Typically this function will be generated by slippy.contact.plan_convolve, this will guarantee
compatibility with different versions of this function (FFTW and CUDA).
b: array
1 by n array of displacements
tol: float
The tolerance on the result
max_it: int
The maximum number of iterations used
x0: array
An initial guess of the solution
min_pressure: float, optional (0)
The minimum allowable pressure at each node, defaults to 0
max_pressure: float, optional (inf)
The maximum allowable pressure at each node, defaults to inf, for purely elastic contacts
k_inn: int
Returns
-------
x: cp.array
The solution to the system f(x)-b = 0 with the constraints applied.
Notes
-----
This function uses the method described in the reference below, with some modification.
Firstly, this method allows both a minimum and maximum force to be set simulating quasi plastic regimes. The
code has also been optimised in several places and importantly this version has also been modified to run
on a GPU through cupy.
If you do not have a CUDA compatible GPU, slippy can be imported while falling back to the fftw version
by first importing slippy then patching the CUDA variable to False:
>>> import slippy
>>> slippy.CUDA = False
>>> import slippy.contact
>>> ...
Though this should happen automatically if you don't have cupy installed.
References
----------
Vollebregt, E.A.H. The Bound-Constrained Conjugate Gradient Method for Non-negative Matrices. J Optim
Theory Appl 162, 931–953 (2014). https://doi.org/10.1007/s10957-013-0499-x
Examples
--------
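Illustrative sketch only (``convolve_func`` and ``set_disp`` are hypothetical
placeholders; ``convolve_func`` would typically be produced by ``plan_convolve``):

>>> x0 = cp.ones(set_disp.shape)
>>> loads, failed = _cuda_bccg(convolve_func, set_disp, tol=1e-6, max_it=100, x0=x0)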
"""
# if you use np or most built ins in this function at all it will slow it down a lot!
try:
float(max_pressure)
max_is_float = True
except TypeError:
max_is_float = False
max_pressure = cp.array(max_pressure)
try:
float(min_pressure)
min_is_float = True
except TypeError:
min_is_float = False
min_pressure = cp.array(min_pressure)
# initialize
b = cp.asarray(b)
x = cp.clip(cp.asarray(x0), min_pressure, max_pressure)
g = f(x) - b
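# Active-set masks: nodes held at the lower bound (zero pressure with non-negative gradient)
# or at the upper bound (max pressure with non-positive gradient) are excluded from the CG update.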
msk_bnd_0 = cp.logical_and(x <= 0, g >= 0)
msk_bnd_max = cp.logical_and(x >= max_pressure, g <= 0)
n_bound = cp.sum(msk_bnd_0) + cp.sum(msk_bnd_max)
n = b.size
n_free = n - n_bound
small = 1e-14
it = 0
it_inn = 0
rho_prev = cp.nan
rho = 0.0
r, p, r_prev = 0, 0, 0
failed = False
while True:
it += 1
it_inn += 1
x_prev = x
if it > 1:
r_prev = r
rho_prev = rho
r = -g
r[msk_bnd_0] = 0
r[msk_bnd_max] = 0
rho = cp.dot(r, r)
if it > 1:
beta_pr = (rho - cp.dot(r, r_prev)) / rho_prev
p = r + max([beta_pr, 0]) * p
else:
p = r
p[msk_bnd_0] = 0
p[msk_bnd_max] = 0
# compute tildex optimisation ignoring the bounds
q = f(p)
if it_inn < k_inn:
q[msk_bnd_0] = cp.nan
q[msk_bnd_max] = cp.nan
alpha = cp.dot(r, p) / cp.dot(p, q)
x = x + alpha * p
rms_xk = cp.linalg.norm(x) / cp.sqrt(n_free)
rms_upd = cp.linalg.norm(x - x_prev) / cp.sqrt(n_free)
upd = rms_upd / rms_xk
# project onto feasible domain
changed = False
outer_it = it_inn >= k_inn or upd < tol
if outer_it:
msk_prj_0 = x < min_pressure - small
if cp.any(msk_prj_0):
if min_is_float:
x[msk_prj_0] = min_pressure
else:
x[msk_prj_0] = min_pressure[msk_prj_0]
msk_bnd_0[msk_prj_0] = True
changed = True
msk_prj_max = x >= max_pressure * (1 + small)
if cp.any(msk_prj_max):
if max_is_float:
x[msk_prj_max] = max_pressure
else:
x[msk_prj_max] = max_pressure[msk_prj_max]
msk_bnd_max[msk_prj_max] = True
changed = True
if changed or (outer_it and k_inn > 1):
g = f(x) - b
else:
g = g + alpha * q
check_grad = outer_it
if check_grad:
msk_rel = cp.logical_or(cp.logical_and(msk_bnd_0, g < -small), cp.logical_and(msk_bnd_max, g > small))
if cp.any(msk_rel):
msk_bnd_0[msk_rel] = False
msk_bnd_max[msk_rel] = False
changed = True
if changed:
n_free = n - cp.sum(msk_bnd_0) - cp.sum(msk_bnd_max)
if not n_free:
print("No free nodes")
warnings.warn("No free nodes for BCCG iterations")
failed = True
break
if outer_it:
it_inn = 0
if it > max_it:
print("Max iterations")
warnings.warn("Bound constrained conjugate gradient iterations failed to converge")
failed = True
break
if outer_it and (not changed) and upd < tol:
break
return x, bool(failed)
except ImportError:
_plan_cuda_convolve = None
_plan_cuda_multi_convolve = None
_cuda_bccg = None
try:
import pyfftw
def _plan_fftw_convolve(loads: np.ndarray, im: np.ndarray, domain: np.ndarray, circular: typing.Sequence[bool],
no_shape_check: bool):
"""Plans an FFT convolution, returns a function to carry out the convolution
FFTW implementation
Parameters
----------
loads: np.ndarray
An example of a loads array, this is not altered or stored
im: np.ndarray
The influence matrix component for the transformation. This is not altered, but its FFT is stored to
save time during convolution. This must be larger in every dimension than the loads array
domain: np.ndarray, optional (None)
Array with same shape as loads filled with boolean values. If supplied this function will return a
function which first fills the supplied loads into the domain then computes the convolution.
This is typically used for finding loads from set displacements as the displacements are often not set
over the whole surface.
circular: Sequence[bool]
If True the circular convolution will be calculated, to be used for periodic simulations
Returns
-------
function
A function which takes a single input of loads and returns the result of the convolution with the original
influence matrix. If a domain was not supplied the input to the returned function must be exactly the same
shape as the loads array used in this function. If a domain was specified the length of the loads input to
the returned function must be the same as the number of non zero elements in domain.
Notes
-----
This function uses FFTW, if you want to use the CUDA implementation make sure that cupy is installed and
importable. If cupy can be imported slippy will use the CUDA implementations by default
Examples
--------
>>> import numpy as np
>>> import slippy.contact as c
>>> result = c.hertz_full([1,1], [np.inf, np.inf], [200e9, 200e9], [0.3, 0.3], 1e4)
>>> X,Y = np.meshgrid(*[np.linspace(-0.005,0.005,256)]*2)
>>> grid_spacing = X[1][1]-X[0][0]
>>> loads = result['pressure_f'](X,Y)
>>> disp_analytical = result['surface_displacement_b_f'][0](X,Y)['uz']
>>> im = c.elastic_influence_matrix('zz', (512,512), (grid_spacing,grid_spacing), 200e9/(2*(1+0.3)), 0.3)
>>> convolve_func = plan_convolve(loads, im, None, [False, False])
>>> disp_numerical = convolve_func(loads)
"""
loads = np.asarray(loads)
im = np.asarray(im)
im_shape_orig = im.shape
if domain is not None:
domain = np.asarray(domain, dtype=bool)  # np.bool is deprecated; use the builtin bool
input_shape = []
for i in range(2):
if circular[i]:
if not no_shape_check:
assert loads.shape[i] == im.shape[i], "For circular convolution loads and im must be same shape"
input_shape.append(loads.shape[i])
else:
if not no_shape_check:
msg = "For non circular convolution influence matrix must be double loads"
assert loads.shape[i] == im.shape[i] // 2, msg
input_shape.append(pyfftw.next_fast_len(im.shape[i]))
input_shape = tuple(input_shape)
fft_shape = [input_shape[0], input_shape[1] // 2 + 1]
in_empty = pyfftw.empty_aligned(input_shape, dtype=loads.dtype)
out_empty = pyfftw.empty_aligned(fft_shape, dtype='complex128')
ret_empty = pyfftw.empty_aligned(input_shape, dtype=loads.dtype)
forward_trans = pyfftw.FFTW(in_empty, out_empty, axes=(0, 1),
direction='FFTW_FORWARD', threads=slippy.CORES)
backward_trans = pyfftw.FFTW(out_empty, ret_empty, axes=(0, 1),
direction='FFTW_BACKWARD', threads=slippy.CORES)
norm_inv = forward_trans.N ** 0.5
norm = 1 / norm_inv
shape_diff = [[0, (b - a)] for a, b in zip(im.shape, input_shape)]
im = np.pad(im, shape_diff, 'constant')
im = np.roll(im, tuple(-((sz - 1) // 2) for sz in im_shape_orig), (-2, -1))
fft_im = forward_trans(im) * norm
shape_diff_loads = [[0, (b - a)] for a, b in zip(loads.shape, input_shape)]
shape = loads.shape
dtype = loads.dtype
def inner_no_domain(full_loads):
if full_loads.shape == shape:
flat = False
else:
full_loads = np.reshape(full_loads, loads.shape)
flat = True
loads_pad = np.pad(full_loads, shape_diff_loads, 'constant')
full = backward_trans(forward_trans(loads_pad) * fft_im)
full = norm_inv * full[:full_loads.shape[0], :full_loads.shape[1]]
if flat:
full = full.flatten()
return full
def inner_with_domain(sub_loads, ignore_domain=False):
full_loads = np.zeros(shape, dtype=dtype)
full_loads[domain] = sub_loads
loads_pad = np.pad(full_loads, shape_diff_loads, 'constant')
full = backward_trans(forward_trans(loads_pad) * fft_im)
same = norm_inv * full[:full_loads.shape[0], :full_loads.shape[1]]
if ignore_domain:
return same
return same[domain]
if domain is None:
return inner_no_domain
else:
return inner_with_domain
def _plan_fftw_multi_convolve(loads: np.ndarray, ims: np.ndarray, domain: np.ndarray = None,
circular: typing.Sequence[bool] = (False, False)):
"""Plans an FFT convolution, returns a function to carry out the convolution
FFTW implementation
Parameters
----------
loads: np.ndarray
An example of a loads array, this is not altered or stored
ims: np.ndarray
The influence matrix components for the transformation. These are not altered, but their FFTs are stored to
save time during convolution. Each must be larger in every dimension than the loads array
domain: np.ndarray, optional (None)
Array with same shape as loads filled with boolean values. If supplied this function will return a
function which first fills the supplied loads into the domain then computes the convolution.
This is typically used for finding loads from set displacements as the displacements are often not set
over the whole surface.
circular: Sequence[bool]
If True the circular convolution will be calculated, to be used for periodic simulations
Returns
-------
function
A function which takes a single input of loads and returns the result of the convolution with the original
influence matrix. If a domain was not supplied the input to the returned function must be exactly the same
shape as the loads array used in this function. If a domain was specified the length of the loads input to
the returned function must be the same as the number of non zero elements in domain.
Notes
-----
This function uses FFTW, if you want to use the CUDA implementation make sure that cupy is installed and
importable. If cupy can be imported slippy will use the CUDA implementations by default
Examples
--------
>>> import numpy as np
>>> import slippy.contact as c
>>> result = c.hertz_full([1,1], [np.inf, np.inf], [200e9, 200e9], [0.3, 0.3], 1e4)
>>> X,Y = np.meshgrid(*[np.linspace(-0.005,0.005,256)]*2)
>>> grid_spacing = X[1][1]-X[0][0]
>>> loads = result['pressure_f'](X,Y)
>>> disp_analytical = result['surface_displacement_b_f'][0](X,Y)['uz']
>>> im = c.elastic_influence_matrix('zz', (512,512), (grid_spacing,grid_spacing), 200e9/(2*(1+0.3)), 0.3)
>>> convolve_func = plan_convolve(loads, im, None, [False, False])
>>> disp_numerical = convolve_func(loads)
"""
loads = slippy.asnumpy(loads)
im = np.asarray(ims[0])
im_shape_orig = im.shape
if domain is not None:
domain = slippy.asnumpy(domain)
input_shape = []
for i in range(2):
if circular[i]:
assert loads.shape[i] == im.shape[i], "For circular convolution loads and im must be same shape"
input_shape.append(loads.shape[i])
else:
msg = "For non circular convolution influence matrix must be double loads"
assert loads.shape[i] == im.shape[i] // 2, msg
input_shape.append(pyfftw.next_fast_len(im.shape[i]))
input_shape = (len(ims),) + tuple(input_shape)
fft_shape = [input_shape[0], input_shape[1], input_shape[2] // 2 + 1]
ims_in_empty = pyfftw.empty_aligned(input_shape, dtype='float64')
ims_out_empty = pyfftw.empty_aligned(fft_shape, dtype='complex128')
loads_in_empty = pyfftw.empty_aligned(input_shape[-2:], dtype='float64')
loads_out_empty = pyfftw.empty_aligned(fft_shape[-2:], dtype='complex128')
ret_empty = pyfftw.empty_aligned(input_shape, dtype='float64')
forward_trans_ims = pyfftw.FFTW(ims_in_empty, ims_out_empty, axes=(1, 2),
direction='FFTW_FORWARD', threads=slippy.CORES)
forward_trans_loads = pyfftw.FFTW(loads_in_empty, loads_out_empty, axes=(0, 1),
direction='FFTW_FORWARD', threads=slippy.CORES)
backward_trans_ims = pyfftw.FFTW(ims_out_empty, ret_empty, axes=(1, 2),
direction='FFTW_BACKWARD', threads=slippy.CORES)
norm_inv = forward_trans_loads.N ** 0.5
norm = 1 / norm_inv
shape_diff = [[0, (b - a)] for a, b in zip((len(ims),) + im.shape, input_shape)]
ims = np.pad(ims, shape_diff, 'constant')
ims = np.roll(ims, tuple(-((sz - 1) // 2) for sz in im_shape_orig), (-2, -1))
fft_ims = forward_trans_ims(ims) * norm
shape_diff_loads = [[0, (b - a)] for a, b in zip(loads.shape, input_shape[1:])]
shape = loads.shape
dtype = loads.dtype
def inner_no_domain(full_loads):
if not isinstance(full_loads, np.ndarray):
full_loads = slippy.asnumpy(full_loads)
if full_loads.shape == shape:
flat = False
else:
full_loads = np.reshape(full_loads, shape)
flat = True
loads_pad = np.pad(full_loads, shape_diff_loads, 'constant')
fft_loads = np.expand_dims(forward_trans_loads(loads_pad), 0)
full = backward_trans_ims(fft_loads * fft_ims)
full = norm_inv * full[:, :full_loads.shape[0], :full_loads.shape[1]]
if flat:
return full.reshape((len(fft_ims), -1))
return full
def inner_with_domain(sub_loads, ignore_domain=False):
if not isinstance(sub_loads, np.ndarray):
sub_loads = slippy.asnumpy(sub_loads)
full_loads = np.zeros(shape, dtype=dtype)
full_loads[domain] = sub_loads
loads_pad = np.pad(full_loads, shape_diff_loads, 'constant')
fft_loads = np.expand_dims(forward_trans_loads(loads_pad), 0)
full = backward_trans_ims(fft_loads * fft_ims)
same = norm_inv * full[:, :full_loads.shape[0], :full_loads.shape[1]]
if ignore_domain:
return same
else:
return same[:, domain]
if domain is None:
return inner_no_domain
else:
return inner_with_domain
def _fftw_polonsky_and_keer(f: typing.Callable, p0: typing.Sequence, just_touching_gap: typing.Sequence,
target_load: float, grid_spacing: typing.Sequence[float], eps_0: float = 1e-6,
max_it: int = None):
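# NumPy/FFTW counterpart of the CUDA solver above: the same Polonsky & Keer CG iteration,
# terminating once the normalised pressure update ``eps`` falls below ``eps_0`` or ``max_it`` is exceeded.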
just_touching_gap = np.array(just_touching_gap)
p0 = np.array(p0)
if max_it is None:
max_it = just_touching_gap.size
# init
pij = p0 / np.mean(p0) * target_load
delta = 0
g_big_old = 1
tij = 0
it_num = 0
element_area = grid_spacing[0] * grid_spacing[1]
while True:
uij = f(pij)
gij = uij + just_touching_gap
current_touching = pij > 0
g_bar = np.mean(gij[current_touching])
gij = gij - g_bar
g_big = np.sum(gij[current_touching] ** 2)
if it_num == 0:
tij = gij
else:
tij = gij + delta * (g_big / g_big_old) * tij
tij[np.logical_not(current_touching)] = 0
g_big_old = g_big
rij = f(tij)
r_bar = np.mean(rij[current_touching])
rij = rij - r_bar
if not np.linalg.norm(rij):
tau = 0
else:
tau = (np.dot(gij[current_touching], tij[current_touching]) /
np.dot(rij[current_touching], tij[current_touching]))
pij_old = pij
pij = pij - tau * tij
pij = np.clip(pij, 0, np.inf)
iol = np.logical_and(pij == 0, gij < 0)
if np.any(iol):
delta = 0
pij[iol] = pij[iol] - tau * gij[iol]
else:
delta = 1
p_big = element_area * np.sum(pij)
pij = pij / p_big * target_load
eps = (element_area / target_load) * np.sum(np.abs(pij - pij_old))
if eps < eps_0:
failed = False
break
it_num += 1
if it_num > max_it:
failed = True
break
if np.any(np.isnan(pij)):
failed = True
break
return failed, pij, gij
def _fftw_bccg(f: typing.Callable, b: np.ndarray, tol: float, max_it: int, x0: np.ndarray,
min_pressure: float = 0, max_pressure: typing.Union[float, typing.Sequence] = np.inf,
k_inn=1) -> typing.Tuple[np.ndarray, bool]:
"""
The Bound-Constrained Conjugate Gradient Method for Non-negative Matrices
FFTW implementation
Parameters
----------
f: Callable
A function equivalent to multiplication by a non negative n by n matrix; must work with numpy arrays.
Typically this function will be generated by slippy.contact.plan_convolve, this will guarantee
compatibility with different versions of this function (FFTW and CUDA).
b: array
1 by n array of displacements
tol: float
The tolerance on the result
max_it: int
The maximum number of iterations used
x0: array
An initial guess of the solution must be 1 by n
min_pressure: float, optional (0)
The minimum allowable pressure at each node, defaults to 0
max_pressure: float, optional (inf)
The maximum allowable pressure at each node, defaults to inf, for purely elastic contacts
k_inn: int, optional (1)
Returns
-------
x: np.array
The solution to the system f(x)-b = 0 with the constraints applied.
Notes
-----
This function uses the method described in the reference below, with some modification.
Firstly, this method allows both a minimum and maximum force to be set simulating quasi plastic regimes. The
code has also been optimised in several places and updated to allow fft convolution in place of the large matrix
multiplication step.
References
----------
Vollebregt, E.A.H. The Bound-Constrained Conjugate Gradient Method for Non-negative Matrices. J Optim
Theory Appl 162, 931–953 (2014). https://doi.org/10.1007/s10957-013-0499-x
Examples
--------
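Illustrative sketch only (``convolve_func`` and ``set_disp`` are hypothetical
placeholders; ``convolve_func`` would typically be produced by ``plan_convolve``):

>>> x0 = np.ones(set_disp.shape)
>>> loads, failed = _fftw_bccg(convolve_func, set_disp, tol=1e-6, max_it=100, x0=x0)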
"""
try:
float(max_pressure)
max_is_float = True
except TypeError:
max_is_float = False
max_pressure = np.array(max_pressure)
try:
float(min_pressure)
min_is_float = True
except TypeError:
min_is_float = False
min_pressure = np.array(min_pressure)
# initialize
x = np.clip(x0, min_pressure, max_pressure)
g = f(x) - b
msk_bnd_0 = np.logical_and(x <= 0, g >= 0)
msk_bnd_max =
|
np.logical_and(x >= max_pressure, g <= 0)
|
numpy.logical_and
|
# 13 March 2018 <NAME>
# Python bootcamp, lesson 34: Seaborn and data display
# Import modules
import numpy as np
import pandas as pd
# This is how we import the module of Matplotlib we'll be using
import matplotlib.pyplot as plt
# Some pretty Seaborn settings
import seaborn as sns
rc={'lines.linewidth': 2, 'axes.labelsize': 18, 'axes.titlesize': 18}
sns.set(rc=rc)
##############################
# Close all open figures
plt.close('all')
# Load the data of the frog
df = pd.read_csv('data/frog_tongue_adhesion.csv', comment='#')
# Rename impact force column
df = df.rename(columns={'impact force (mN)': 'impf'})
# Mean impact force of frog I
np.mean(df.loc[df['ID']=='I', 'impf'])
# Calculate the means and SEMs of all four frogs
# For loop for mean and standard error of the mean
mean_impf =
|
np.empty(4)
|
numpy.empty
|
#!/usr/bin/env python3
"""
The plotting wrappers that add functionality to various `~matplotlib.axes.Axes`
methods. "Wrapped" `~matplotlib.axes.Axes` methods accept the additional
arguments documented in the wrapper function.
"""
# NOTE: Two possible workflows are 1) make horizontal functions use wrapped
# vertical functions, then flip things around inside apply_cycle or by
# creating undocumented 'plot', 'scatter', etc. methods in Axes that flip
# arguments around by reading a 'orientation' key or 2) make separately
# wrapped chains of horizontal functions and vertical functions whose 'extra'
# wrappers jointly refer to a hidden helper function and create documented
# 'plotx', 'scatterx', etc. that flip arguments around before sending to
# superclass 'plot', 'scatter', etc. Opted for the latter approach.
import functools
import inspect
import re
import sys
from numbers import Integral
import matplotlib.artist as martist
import matplotlib.axes as maxes
import matplotlib.cm as mcm
import matplotlib.collections as mcollections
import matplotlib.colors as mcolors
import matplotlib.container as mcontainer
import matplotlib.contour as mcontour
import matplotlib.font_manager as mfonts
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.patheffects as mpatheffects
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import numpy as np
import numpy.ma as ma
from .. import colors as pcolors
from .. import constructor
from .. import ticker as pticker
from ..config import rc
from ..internals import ic # noqa: F401
from ..internals import (
_dummy_context,
_getattr_flexible,
_not_none,
_pop_props,
_state_context,
docstring,
warnings,
)
from ..utils import edges, edges2d, to_rgb, to_xyz, units
try:
from cartopy.crs import PlateCarree
except ModuleNotFoundError:
PlateCarree = object
__all__ = [
'default_latlon',
'default_transform',
'standardize_1d',
'standardize_2d',
'indicate_error',
'apply_cmap',
'apply_cycle',
'colorbar_extras',
'legend_extras',
'text_extras',
'vlines_extras',
'hlines_extras',
'scatter_extras',
'scatterx_extras',
'bar_extras',
'barh_extras',
'fill_between_extras',
'fill_betweenx_extras',
'boxplot_extras',
'violinplot_extras',
]
# Positional args that can be passed as out-of-order keywords. Used by standardize_1d
# NOTE: The 'barh' interpretation represent a breaking change from default
# (y, width, height, left) behavior. Want to have consistent interpretation
# of vertical or horizontal bar 'width' with 'width' key or 3rd positional arg.
# Internal hist() func uses positional arguments when calling bar() so this is fine.
KEYWORD_TO_POSITIONAL_INSERT = {
'fill_between': ('x', 'y1', 'y2'),
'fill_betweenx': ('y', 'x1', 'x2'),
'vlines': ('x', 'ymin', 'ymax'),
'hlines': ('y', 'xmin', 'xmax'),
'bar': ('x', 'height'),
'barh': ('y', 'height'),
'parametric': ('x', 'y', 'values'),
'boxplot': ('positions',), # use as x-coordinates during wrapper processing
'violinplot': ('positions',),
}
KEYWORD_TO_POSITIONAL_APPEND = {
'bar': ('width', 'bottom'),
'barh': ('width', 'left'),
}
# Consistent keywords for cmap plots. Used by apply_cmap to pass correct plural
# or singular form to matplotlib function.
STYLE_ARGS_TRANSLATE = {
'contour': ('colors', 'linewidths', 'linestyles'),
'tricontour': ('colors', 'linewidths', 'linestyles'),
'pcolor': ('edgecolors', 'linewidth', 'linestyle'),
'pcolormesh': ('edgecolors', 'linewidth', 'linestyle'),
'pcolorfast': ('edgecolors', 'linewidth', 'linestyle'),
'tripcolor': ('edgecolors', 'linewidth', 'linestyle'),
'parametric': ('color', 'linewidth', 'linestyle'),
'hexbin': ('edgecolors', 'linewidths', 'linestyles'),
'hist2d': ('edgecolors', 'linewidths', 'linestyles'),
'barbs': ('barbcolor', 'linewidth', 'linestyle'),
'quiver': ('color', 'linewidth', 'linestyle'), # applied to arrow *outline*
'streamplot': ('color', 'linewidth', 'linestyle'),
'spy': ('color', 'linewidth', 'linestyle'),
'matshow': ('color', 'linewidth', 'linestyle'),
}
docstring.snippets['axes.autoformat'] = """
data : dict-like, optional
A dict-like dataset container (e.g., `~pandas.DataFrame` or `~xarray.DataArray`).
If passed, positional arguments must be valid `data` keys and the arrays used for
plotting are retrieved with ``data[key]``. This is a native `matplotlib feature \
<https://matplotlib.org/stable/gallery/misc/keyword_plotting.html>`__
previously restricted to just `~matplotlib.axes.Axes.plot`
and `~matplotlib.axes.Axes.scatter`.
autoformat : bool, optional
Whether *x* axis labels, *y* axis labels, axis formatters, axes titles,
legend labels, and colorbar labels are automatically configured when
a `~pandas.Series`, `~pandas.DataFrame` or `~xarray.DataArray` is passed
to the plotting command. Default is :rc:`autoformat`.
"""
docstring.snippets['axes.cmap_norm'] = """
cmap : colormap spec, optional
The colormap specifer, passed to the `~proplot.constructor.Colormap`
constructor.
cmap_kw : dict-like, optional
Passed to `~proplot.constructor.Colormap`.
norm : normalizer spec, optional
The colormap normalizer, used to warp data before passing it
to `~proplot.colors.DiscreteNorm`. This is passed to the
`~proplot.constructor.Norm` constructor.
norm_kw : dict-like, optional
Passed to `~proplot.constructor.Norm`.
extend : {{'neither', 'min', 'max', 'both'}}, optional
Whether to assign unique colors to out-of-bounds data and draw
"extensions" (triangles, by default) on the colorbar.
"""
docstring.snippets['axes.levels_values'] = """
N
Shorthand for `levels`.
levels : int or list of float, optional
The number of level edges or a list of level edges. If the former,
`locator` is used to generate this many level edges at "nice" intervals.
If the latter, the levels should be monotonically increasing or
decreasing (note that decreasing levels will only work with ``pcolor``
plots, not ``contour`` plots). Default is :rc:`image.levels`.
values : int or list of float, optional
The number of level centers or a list of level centers. If the former,
`locator` is used to generate this many level centers at "nice" intervals.
If the latter, levels are inferred using `~proplot.utils.edges`.
This will override any `levels` input.
discrete : bool, optional
If ``False``, the `~proplot.colors.DiscreteNorm` is not applied to the
colormap when ``levels=N`` or ``levels=array_of_values`` are not
explicitly requested. Instead, the number of levels in the colormap will be
roughly controlled by :rcraw:`image.lut`. This has a similar effect
to using `levels=large_number` but it may improve rendering speed.
By default, this is ``False`` only for `~matplotlib.axes.Axes.imshow`,
`~matplotlib.axes.Axes.matshow`, `~matplotlib.axes.Axes.spy`,
`~matplotlib.axes.Axes.hexbin`, and `~matplotlib.axes.Axes.hist2d` plots.
"""
docstring.snippets['axes.vmin_vmax'] = """
vmin, vmax : float, optional
Used to determine level locations if `levels` or `values` is an integer.
Actual levels may not fall exactly on `vmin` and `vmax`, but the minimum
level will be no smaller than `vmin` and the maximum level will be
no larger than `vmax`. If `vmin` or `vmax` are not provided, the
minimum and maximum data values are used.
"""
docstring.snippets['axes.auto_levels'] = """
inbounds : bool, optional
If ``True`` (the default), when automatically selecting levels in the presence
of hard *x* and *y* axis limits (i.e., when `~matplotlib.axes.Axes.set_xlim`
or `~matplotlib.axes.Axes.set_ylim` have been called previously), only the
in-bounds data is sampled. Default is :rc:`image.inbounds`.
locator : locator-spec, optional
The locator used to determine level locations if `levels` or `values`
is an integer and `vmin` and `vmax` were not provided. Passed to the
`~proplot.constructor.Locator` constructor. Default is
`~matplotlib.ticker.MaxNLocator` with ``levels`` integer levels.
locator_kw : dict-like, optional
Passed to `~proplot.constructor.Locator`.
symmetric : bool, optional
If ``True``, automatically generated levels are symmetric
about zero.
positive : bool, optional
If ``True``, automatically generated levels are positive
with a minimum at zero.
negative : bool, optional
If ``True``, automatically generated levels are negative
with a maximum at zero.
nozero : bool, optional
If ``True``, ``0`` is removed from the level list. This is
mainly useful for `~matplotlib.axes.Axes.contour` plots.
"""
_lines_docstring = """
Support overlaying and stacking successive columns of data and support
different colors for "negative" and "positive" lines.
Important
---------
This function wraps `~matplotlib.axes.Axes.{prefix}lines`.
Parameters
----------
*args : ({y}1,), ({x}, {y}1), or ({x}, {y}1, {y}2)
The *{x}* and *{y}* coordinates. If `{x}` is not provided, it will be
inferred from `{y}1`. If `{y}1` and `{y}2` are provided, this function will
draw lines between these points. If `{y}1` or `{y}2` are 2D, this
function is called with each column. The default value for `{y}2` is ``0``.
stack, stacked : bool, optional
Whether to "stack" successive columns of the `{y}1` array. If this is
``True`` and `{y}2` was provided, it will be ignored.
negpos : bool, optional
Whether to color lines greater than zero with `poscolor` and lines less
than zero with `negcolor`.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive lines. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
color, colors : color-spec or list thereof, optional
The line color(s).
linestyle, linestyles : linestyle-spec or list thereof, optional
The line style(s).
lw, linewidth, linewidths : linewidth-spec or list thereof, optional
The line width(s).
See also
--------
standardize_1d
apply_cycle
"""
docstring.snippets['axes.vlines'] = _lines_docstring.format(
x='x', y='y', prefix='v', orientation='vertical',
)
docstring.snippets['axes.hlines'] = _lines_docstring.format(
x='y', y='x', prefix='h', orientation='horizontal',
)
_scatter_docstring = """
Support `apply_cmap` features and support style keywords that are
consistent with `~{package}.axes.Axes.plot{suffix}` keywords.
Important
---------
This function wraps `~{package}.axes.Axes.scatter{suffix}`.
Parameters
----------
*args : {y} or {x}, {y}
The input *{y}* or *{x}* and *{y}* coordinates. If only *{y}* is provided,
*{x}* will be inferred from *{y}*.
s, size, markersize : float or list of float, optional
The marker size(s). The units are optionally scaled by
`smin` and `smax`.
smin, smax : float, optional
The minimum and maximum marker size in units ``points^2`` used to scale
`s`. If not provided, the marker sizes are equivalent to the values in `s`.
c, color, markercolor : color-spec or list thereof, or array, optional
The marker fill color(s). If this is an array of scalar values, colors
will be generated using the colormap `cmap` and normalizer `norm`.
%(axes.vmin_vmax)s
%(axes.cmap_norm)s
%(axes.levels_values)s
%(axes.auto_levels)s
lw, linewidth, linewidths, markeredgewidth, markeredgewidths \
: float or list thereof, optional
The marker edge width.
edgecolors, markeredgecolor, markeredgecolors \
: color-spec or list thereof, optional
The marker edge color.
Other parameters
----------------
**kwargs
Passed to `~{package}.axes.Axes.scatter{suffix}`.
See also
--------
{package}.axes.Axes.bar{suffix}
standardize_1d
indicate_error
apply_cycle
"""
docstring.snippets['axes.scatter'] = docstring.add_snippets(
_scatter_docstring.format(x='x', y='y', suffix='', package='matplotlib')
)
docstring.snippets['axes.scatterx'] = docstring.add_snippets(
_scatter_docstring.format(x='y', y='x', suffix='x', package='proplot')
)
_fill_between_docstring = """
Support overlaying and stacking successive columns of data and support
different colors for "negative" and "positive" regions.
Important
---------
This function wraps `~matplotlib.axes.Axes.fill_between{suffix}` and
`~proplot.axes.Axes.area{suffix}`.
Parameters
----------
*args : ({y}1,), ({x}, {y}1), or ({x}, {y}1, {y}2)
The *{x}* and *{y}* coordinates. If `{x}` is not provided, it will be
inferred from `{y}1`. If `{y}1` and `{y}2` are provided, this function will
shade between these points. If `{y}1` or `{y}2` are 2D, this function
is called with each column. The default value for `{y}2` is ``0``.
stack, stacked : bool, optional
Whether to "stack" successive columns of the `{y}1` array. If this is
``True`` and `{y}2` was provided, it will be ignored.
negpos : bool, optional
Whether to shade where ``{y}1 >= {y}2`` with `poscolor` and where ``{y}1 < {y}2``
with `negcolor`. For example, to shade positive values red and negative values
blue, simply use ``ax.fill_between{suffix}({x}, {y}, negpos=True)``.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive shaded regions. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
where : ndarray, optional
Boolean ndarray mask for points you want to shade. See `this example \
<https://matplotlib.org/stable/gallery/pyplots/whats_new_98_4_fill_between.html>`__.
lw, linewidth : float, optional
The edge width for the area patches.
edgecolor : color-spec, optional
The edge color for the area patches.
Other parameters
----------------
**kwargs
Passed to `~matplotlib.axes.Axes.fill_between`.
See also
--------
matplotlib.axes.Axes.fill_between{suffix}
proplot.axes.Axes.area{suffix}
standardize_1d
apply_cycle
"""
docstring.snippets['axes.fill_between'] = _fill_between_docstring.format(
x='x', y='y', suffix='',
)
docstring.snippets['axes.fill_betweenx'] = _fill_between_docstring.format(
x='y', y='x', suffix='x',
)
_bar_docstring = """
Support grouping and stacking successive columns of data, specifying bar widths
relative to coordinate spacing, and using different colors for "negative" and
"positive" bar heights.
Important
---------
This function wraps `~matplotlib.axes.Axes.bar{suffix}`.
Parameters
----------
{x}, height, width, {bottom} : float or list of float, optional
The dimensions of the bars. If the *{x}* coordinates are not provided,
they are set to ``np.arange(0, len(height))``. The units for width
are *relative* by default.
absolute_width : bool, optional
Whether to make the units for width *absolute*. This restores
the default matplotlib behavior.
stack, stacked : bool, optional
Whether to stack columns of the input array or plot the bars
side-by-side in groups.
negpos : bool, optional
Whether to shade bars greater than zero with `poscolor` and bars less
than zero with `negcolor`.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive bars. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
lw, linewidth : float, optional
The edge width for the bar patches.
edgecolor : color-spec, optional
The edge color for the bar patches.
Other parameters
----------------
**kwargs
Passed to `~matplotlib.axes.Axes.bar{suffix}`.
See also
--------
matplotlib.axes.Axes.bar{suffix}
standardize_1d
indicate_error
apply_cycle
"""
docstring.snippets['axes.bar'] = _bar_docstring.format(
x='x', bottom='bottom', suffix='',
)
docstring.snippets['axes.barh'] = _bar_docstring.format(
x='y', bottom='left', suffix='h',
)
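# Illustrative sketch of the negpos behaviour documented above (hypothetical proplot axes
# ``ax`` and made-up data):
#   data = np.random.randn(20)
#   ax.bar(data, negpos=True)  # bars >= 0 use :rc:`poscolor`, bars < 0 use :rc:`negcolor`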
def _load_objects():
"""
Delay loading expensive modules. We just want to detect if *input
arrays* belong to these types -- and if this is the case, it means the
module has already been imported! So, we only try loading these classes
within autoformat calls. This saves >~500ms of import time.
"""
global DataArray, DataFrame, Series, Index, ndarray
ndarray = np.ndarray
DataArray = getattr(sys.modules.get('xarray', None), 'DataArray', ndarray)
DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)
Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)
Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)
_load_objects()
def _is_number(data):
"""
Test whether input is numeric array rather than datetime or strings.
"""
return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)
def _is_string(data):
"""
Test whether input is array of strings.
"""
return len(data) and isinstance(_to_ndarray(data).flat[0], str)
def _to_arraylike(data):
"""
Convert list of lists to array-like type.
"""
_load_objects()
if data is None:
raise ValueError('Cannot convert None data.')
if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):
data = np.asarray(data)
if not np.iterable(data):
data = np.atleast_1d(data)
return data
def _to_ndarray(data):
"""
Convert arbitrary input to ndarray cleanly. Returns a masked
array if input is a masked array.
"""
return np.atleast_1d(getattr(data, 'values', data))
def _mask_array(mask, *args):
"""
Apply the mask to the input arrays. Values matching ``False`` are
set to `np.nan`.
"""
invalid = ~mask # True if invalid
args_masked = []
for arg in args:
if arg.size > 1 and arg.shape != invalid.shape:
raise ValueError('Shape mismatch between mask and array.')
arg_masked = arg.astype(np.float64)
if arg.size == 1:
pass
elif invalid.size == 1:
arg_masked = np.nan if invalid.item() else arg_masked
elif arg.size > 1:
arg_masked[invalid] = np.nan
args_masked.append(arg_masked)
return args_masked[0] if len(args_masked) == 1 else args_masked
def default_latlon(self, *args, latlon=True, **kwargs):
"""
Make ``latlon=True`` the default for `~proplot.axes.BasemapAxes` plots.
This means you no longer have to pass ``latlon=True`` if your data
coordinates are longitude and latitude.
Important
---------
This function wraps {methods} for `~proplot.axes.BasemapAxes`.
"""
method = kwargs.pop('_method')
return method(self, *args, latlon=latlon, **kwargs)
def default_transform(self, *args, transform=None, **kwargs):
"""
Make ``transform=cartopy.crs.PlateCarree()`` the default for
`~proplot.axes.CartopyAxes` plots. This means you no longer have to
pass ``transform=cartopy.crs.PlateCarree()`` if your data
coordinates are longitude and latitude.
Important
---------
This function wraps {methods} for `~proplot.axes.CartopyAxes`.
"""
# Apply default transform
# TODO: Do some cartopy methods reset backgroundpatch or outlinepatch?
# Deleted comment reported this issue
method = kwargs.pop('_method')
if transform is None:
transform = PlateCarree()
return method(self, *args, transform=transform, **kwargs)
def _basemap_redirect(self, *args, **kwargs):
"""
Decorator that calls the basemap version of the function of the
same name. This must be applied as the innermost decorator.
"""
method = kwargs.pop('_method')
name = method.__name__
if getattr(self, 'name', None) == 'proplot_basemap':
return getattr(self.projection, name)(*args, ax=self, **kwargs)
else:
return method(self, *args, **kwargs)
def _basemap_norecurse(self, *args, called_from_basemap=False, **kwargs):
"""
Decorator to prevent recursion in basemap method overrides.
See `this post <https://stackoverflow.com/a/37675810/4970632>`__.
"""
method = kwargs.pop('_method')
name = method.__name__
if called_from_basemap:
return getattr(maxes.Axes, name)(self, *args, **kwargs)
else:
return method(self, *args, called_from_basemap=True, **kwargs)
def _get_data(data, *args):
"""
Try to convert positional `key` arguments to `data[key]`. If argument is string
it could be a valid positional argument like `fmt` so do not raise error.
"""
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, str):
try:
array = data[arg]
except KeyError:
pass
else:
args[i] = array
return args
def _get_label(obj):
"""
Return a valid non-placeholder artist label from the artist or a tuple of
artists destined for a legend. Prefer final artist (drawn last and on top).
"""
# NOTE: BarContainer and StemContainer are instances of tuple
while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:
obj = obj[-1]
label = getattr(obj, 'get_label', lambda: None)()
return label if label and label[:1] != '_' else None
def _get_labels(data, axis=0, always=True):
"""
Return the array-like "labels" along axis `axis` from an array-like
object. These might be an xarray `DataArray` or pandas `Index`. If
`always` is ``False`` we return ``None`` for simple ndarray input.
"""
# NOTE: Previously inferred 'axis 1' metadata of 1D variable using the
# data values metadata but that is incorrect. The paradigm for 1D plots
# is we have row coordinates representing x, data values representing y,
# and column coordinates representing individual series.
if axis not in (0, 1, 2):
raise ValueError(f'Invalid axis {axis}.')
labels = None
_load_objects()
if isinstance(data, ndarray):
if not always:
pass
elif axis < data.ndim:
labels = np.arange(data.shape[axis])
else: # requesting 'axis 1' on a 1D array
labels = np.array([0])
# Xarray object
# NOTE: Even if coords not present .coords[dim] auto-generates indices
elif isinstance(data, DataArray):
if axis < data.ndim:
labels = data.coords[data.dims[axis]]
elif not always:
pass
else:
labels = np.array([0])
# Pandas object
elif isinstance(data, (DataFrame, Series, Index)):
if axis == 0 and isinstance(data, (DataFrame, Series)):
labels = data.index
elif axis == 1 and isinstance(data, (DataFrame,)):
labels = data.columns
elif not always:
pass
else: # beyond dimensionality
labels = np.array([0])
# Everything else
# NOTE: We ensure data is at least 1D in _to_arraylike so this covers everything
else:
raise ValueError(f'Unrecognized array type {type(data)}.')
return labels
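# Illustrative sketch (not part of the library): label inference from pandas
# input. Assumes pandas is imported as pd and numpy as np.
#
#   df = pd.DataFrame(np.ones((4, 3)), index=list('abcd'), columns=list('xyz'))
#   _get_labels(df, axis=0)  # -> Index(['a', 'b', 'c', 'd'])  (row coordinates)
#   _get_labels(df, axis=1)  # -> Index(['x', 'y', 'z'])       (column coordinates)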
def _get_title(data, units=True):
"""
Return the "title" associated with an array-like object with metadata. This
might be a pandas `DataFrame` `name` or a name constructed from xarray `DataArray`
attributes. In the latter case we search for `long_name` and `standard_name`,
preferring the former, and append `(units)` if `units` is ``True``. If no
names are available but units are available we just use the units string.
"""
title = None
_load_objects()
if isinstance(data, ndarray):
pass
# Xarray object with possible long_name, standard_name, and units attributes.
# Output depends on if units is True
elif isinstance(data, DataArray):
title = getattr(data, 'name', None)
for key in ('standard_name', 'long_name'):
title = data.attrs.get(key, title)
if units:
units = data.attrs.get('units', None)
if title and units:
title = f'{title} ({units})'
elif units:
title = units
# Pandas object. Note DataFrame has no native name attribute but user can add one
# See: https://github.com/pandas-dev/pandas/issues/447
elif isinstance(data, (DataFrame, Series, Index)):
title = getattr(data, 'name', None) or None
# Standardize result
if title is not None:
title = str(title).strip()
return title
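# Illustrative sketch (not part of the library): title construction from xarray
# metadata. Assumes xarray is imported as xr and numpy as np.
#
#   da = xr.DataArray(np.zeros(3), attrs={'long_name': 'temperature', 'units': 'K'})
#   _get_title(da)               # -> 'temperature (K)'
#   _get_title(da, units=False)  # -> 'temperature'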
def _parse_string_coords(*args, which='x', **kwargs):
"""
Convert string arrays and lists to index coordinates.
"""
# NOTE: Why FixedLocator and not IndexLocator? The latter requires plotting
# lines or else error is raised... very strange.
# NOTE: Why IndexFormatter and not FixedFormatter? The former ensures labels
# correspond to indices while the latter can mysteriously truncate labels.
res = []
for arg in args:
arg = _to_arraylike(arg)
if _is_string(arg) and arg.ndim > 1:
raise ValueError('Non-1D string coordinate input is unsupported.')
if not _is_string(arg):
res.append(arg)
continue
idx = np.arange(len(arg))
kwargs.setdefault(which + 'locator', mticker.FixedLocator(idx))
kwargs.setdefault(which + 'formatter', pticker._IndexFormatter(_to_ndarray(arg))) # noqa: E501
kwargs.setdefault(which + 'minorlocator', mticker.NullLocator())
res.append(idx)
return *res, kwargs
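# Illustrative sketch (not part of the library): string coordinates become
# index coordinates plus locator/formatter settings later passed to format().
#
#   idx, kw = _parse_string_coords(['cat', 'dog', 'bird'], which='x')
#   # idx -> array([0, 1, 2]); kw gains 'xlocator', 'xformatter', and
#   # 'xminorlocator' entries that label the ticks 'cat', 'dog', 'bird'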
def _auto_format_1d(
self, x, *ys, name='plot', autoformat=False,
label=None, values=None, labels=None, **kwargs
):
"""
Try to retrieve default coordinates from array-like objects and apply default
formatting. Also update the keyword arguments.
"""
# Parse input
projection = hasattr(self, 'projection')
parametric = name in ('parametric',)
scatter = name in ('scatter',)
hist = name in ('hist',)
box = name in ('boxplot', 'violinplot')
pie = name in ('pie',)
vert = kwargs.get('vert', True) and kwargs.get('orientation', None) != 'horizontal'
vert = vert and name not in ('plotx', 'scatterx', 'fill_betweenx', 'barh')
stem = name in ('stem',)
nocycle = name in ('stem', 'hexbin', 'hist2d', 'parametric')
labels = _not_none(
label=label,
values=values,
labels=labels,
colorbar_kw_values=kwargs.get('colorbar_kw', {}).pop('values', None),
legend_kw_labels=kwargs.get('legend_kw', {}).pop('labels', None),
)
# Retrieve the x coords
# NOTE: Allow for "ragged array" input to boxplot and violinplot.
# NOTE: Where columns represent distributions, like for box and violin plots or
# where we use 'means' or 'medians', columns coords (axis 1) are 'x' coords.
# Otherwise, columns represent e.g. lines, and row coords (axis 0) are 'x' coords
dists = box or any(kwargs.get(s) for s in ('mean', 'means', 'median', 'medians'))
ragged = any(getattr(y, 'dtype', None) == 'object' for y in ys)
xaxis = 1 if dists and not ragged else 0
if x is None and not hist:
x = _get_labels(ys[0], axis=xaxis) # infer from rows or columns
# Default legend or colorbar labels and title. We want default legend
# labels if this is an object with 'title' metadata and/or coords are string
# WARNING: Confusing terminology differences here -- for box and violin plots
# 'labels' refer to indices along the x axis and get interpreted that way down the line.
if autoformat and not stem:
# The inferred labels and title
title = None
if labels is not None:
title = _get_title(labels)
else:
yaxis = xaxis if box or pie else xaxis + 1
labels = _get_labels(ys[0], axis=yaxis, always=False)
title = _get_title(labels) # e.g. if labels is a Series
if labels is None:
pass
elif not title and not any(isinstance(_, str) for _ in labels):
labels = None
# Apply the title
if title:
kwargs.setdefault('colorbar_kw', {}).setdefault('title', title)
if not nocycle:
kwargs.setdefault('legend_kw', {}).setdefault('title', title)
# Apply the labels
if labels is not None:
if not nocycle:
kwargs['labels'] = _to_ndarray(labels)
elif parametric:
values, colorbar_kw = _parse_string_coords(labels, which='')
kwargs['values'] = _to_ndarray(values)
kwargs.setdefault('colorbar_kw', {}).update(colorbar_kw)
# The basic x and y settings
if not projection:
# Apply label
# NOTE: Do not overwrite existing labels!
sx, sy = 'xy' if vert else 'yx'
sy = sx if hist else sy # histogram 'y' values end up along 'x' axis
kw_format = {}
if autoformat: # 'y' axis
title = _get_title(ys[0])
if title and not getattr(self, f'get_{sy}label')():
kw_format[sy + 'label'] = title
if autoformat and not hist: # 'x' axis
title = _get_title(x)
if title and not getattr(self, f'get_{sx}label')():
kw_format[sx + 'label'] = title
# Handle string-type coordinates
if not pie and not hist:
x, kw_format = _parse_string_coords(x, which=sx, **kw_format)
if not hist and not box and not pie:
*ys, kw_format = _parse_string_coords(*ys, which=sy, **kw_format)
if not hist and not scatter and not parametric and x.ndim == 1 and x.size > 1 and x[1] < x[0]: # noqa: E501
kw_format[sx + 'reverse'] = True # auto reverse
# Apply
if kw_format:
self.format(**kw_format)
# Finally strip metadata
# WARNING: Most methods that accept 2D arrays use columns of data, but when
# pandas DataFrame specifically is passed to hist, boxplot, or violinplot, rows
# of data assumed! Converting to ndarray necessary.
return _to_ndarray(x), *map(_to_ndarray, ys), kwargs
def _basemap_1d(x, *ys, projection=None):
"""
Fix basemap geographic 1D data arrays.
"""
xmin, xmax = projection.lonmin, projection.lonmax
x_orig, ys_orig = x, ys
ys = []
for y_orig in ys_orig:
x, y = _fix_span(*_fix_coords(x_orig, y_orig), xmin, xmax)
ys.append(y)
return x, *ys
def _fix_coords(x, y):
"""
Ensure longitudes are monotonic and make `~numpy.ndarray` copies so the
contents can be modified. Ignores 2D coordinate arrays.
"""
if x.ndim != 1 or all(x < x[0]): # skip 2D arrays and monotonic backwards data
return x, y
lon1 = x[0]
filter_ = x < lon1
while filter_.sum():
filter_ = x < lon1
x[filter_] += 360
return x, y
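# Illustrative sketch (not part of the library): longitudes that wrap around the
# dateline are shifted by +360 until they are monotonic. Assumes numpy as np.
#
#   lon = np.array([350.0, 0.0, 10.0, 20.0])
#   data = np.zeros(4)
#   lon, data = _fix_coords(lon, data)   # data is returned unchanged here
#   # lon -> [350., 360., 370., 380.]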
def _fix_span(x, y, xmin, xmax):
"""
Ensure data for basemap plots is restricted between the minimum and
maximum longitude of the projection. Input is the ``x`` and ``y``
coordinates. The ``y`` coordinates are rolled along the rightmost axis.
"""
if x.ndim != 1:
return x, y
# Roll in same direction if some points on right-edge extend
# more than 360 above min longitude; *they* should be on left side
lonroll = np.where(x > xmin + 360)[0] # tuple of ids
if lonroll.size: # non-empty
roll = x.size - lonroll.min()
x = np.roll(x, roll)
y = np.roll(y, roll, axis=-1)
x[:roll] -= 360 # make monotonic
# Set NaN where data not in range xmin, xmax. Must be done
# for regional smaller projections or get weird side-effects due
# to having valid data way outside of the map boundaries
y = y.copy()
if x.size - 1 == y.shape[-1]: # test western/eastern grid cell edges
y[..., (x[1:] < xmin) | (x[:-1] > xmax)] = np.nan
elif x.size == y.shape[-1]: # test the centers and pad by one for safety
where = np.where((x < xmin) | (x > xmax))[0]
y[..., where[1:-1]] = np.nan
return x, y
@docstring.add_snippets
def standardize_1d(self, *args, data=None, autoformat=None, **kwargs):
"""
Interpret positional arguments for all "1D" plotting commands so the syntax
is consistent. The arguments are standardized as follows:
* If a 2D array is passed, the corresponding plot command is called for
each column of data (except for ``boxplot`` and ``violinplot``, in which
case each column is interpreted as a distribution).
* If *x* and *y* or *latitude* and *longitude* coordinates were not provided,
and a `~pandas.DataFrame` or `~xarray.DataArray` was passed, we try to infer
them from the metadata. Otherwise, ``np.arange(0, data.shape[0])`` is used.
Important
---------
This function wraps {methods}
Parameters
----------
%(axes.autoformat)s
See also
--------
apply_cycle
indicate_error
"""
method = kwargs.pop('_method')
name = method.__name__
bar = name in ('bar', 'barh')
box = name in ('boxplot', 'violinplot')
hist = name in ('hist',)
parametric = name in ('parametric',)
onecoord = name in ('hist',)
twocoords = name in ('vlines', 'hlines', 'fill_between', 'fill_betweenx')
allowempty = name in ('fill', 'plot', 'plotx',)
autoformat = _not_none(autoformat, rc['autoformat'])
# Find and translate input args
args = list(args)
keys = KEYWORD_TO_POSITIONAL_INSERT.get(name, {})
for idx, key in enumerate(keys):
if key in kwargs:
args.insert(idx, kwargs.pop(key))
if data is not None:
args = _get_data(data, *args)
if not args:
if allowempty:
return [] # match matplotlib behavior
else:
raise TypeError('Positional arguments are required.')
# Translate between 'orientation' and 'vert' for flexibility
# NOTE: Users should only pass these to hist, boxplot, or violinplot. To change
# the bar plot orientation users should use 'bar' and 'barh'. Internally,
# matplotlib has a single central bar function whose behavior is configured
# by the 'orientation' key, so critical not to strip the argument here.
vert = kwargs.pop('vert', None)
orientation = kwargs.pop('orientation', None)
if orientation is not None:
vert = _not_none(vert=vert, orientation=(orientation == 'vertical'))
if orientation not in (None, 'horizontal', 'vertical'):
raise ValueError("Orientation must be either 'horizontal' or 'vertical'.")
if vert is None:
pass
elif box:
kwargs['vert'] = vert
elif bar or hist:
kwargs['orientation'] = 'vertical' if vert else 'horizontal' # used internally
else:
raise TypeError("Unexpected keyword argument(s) 'vert' and 'orientation'.")
# Parse positional args
if parametric and len(args) == 3: # allow positional values
kwargs['values'] = args.pop(2)
if parametric and 'c' in kwargs: # handle aliases
kwargs['values'] = kwargs.pop('c')
if onecoord or len(args) == 1: # allow hist() positional bins
x, ys, args = None, args[:1], args[1:]
elif twocoords:
x, ys, args = args[0], args[1:3], args[3:]
else:
x, ys, args = args[0], args[1:2], args[2:]
if x is not None:
x = _to_arraylike(x)
ys = tuple(map(_to_arraylike, ys))
# Append remaining positional args
# NOTE: This is currently just used for bar and barh. More convenient to pass
# 'width' as positional so that matplotlib native 'barh' sees it as 'height'.
keys = KEYWORD_TO_POSITIONAL_APPEND.get(name, {})
for key in keys:
if key in kwargs:
args.append(kwargs.pop(key))
# Automatic formatting and coordinates
# NOTE: For 'hist' the 'x' coordinate remains None then is ignored in apply_cycle.
x, *ys, kwargs = _auto_format_1d(
self, x, *ys, name=name, autoformat=autoformat, **kwargs
)
# Ensure data is monotonic and falls within map bounds
if getattr(self, 'name', None) == 'proplot_basemap' and kwargs.get('latlon', None):
x, *ys = _basemap_1d(x, *ys, projection=self.projection)
# Call function
if box:
kwargs.setdefault('positions', x) # *this* is how 'x' is passed to boxplot
return method(self, x, *ys, *args, **kwargs)
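# Illustrative sketch (not part of the library): with standardize_1d applied,
# coordinates and labels come from metadata. Assumes a proplot axes 'ax',
# pandas imported as pd, and numpy as np.
#
#   df = pd.DataFrame(np.random.rand(10, 3), columns=['a', 'b', 'c'])
#   ax.plot(df)  # x taken from df.index; legend labels from df.columns
#                # (when autoformat is enabled)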
def _auto_format_2d(self, x, y, *zs, name=None, order='C', autoformat=False, **kwargs):
"""
Try to retrieve default coordinates from array-like objects and apply default
formatting. Also apply optional transpose and update the keyword arguments.
"""
# Retrieve coordinates
allow1d = name in ('barbs', 'quiver') # these also allow 1D data
projection = hasattr(self, 'projection')
if x is None and y is None:
z = zs[0]
if z.ndim == 1:
x = _get_labels(z, axis=0)
y = np.zeros(z.shape) # default barb() and quiver() behavior in matplotlib
else:
x = _get_labels(z, axis=1)
y = _get_labels(z, axis=0)
if order == 'F':
x, y = y, x
# Check coordinate and data shapes
shapes = tuple(z.shape for z in zs)
if any(len(_) != 2 and not (allow1d and len(_) == 1) for _ in shapes):
raise ValueError(f'Data arrays must be 2d, but got shapes {shapes}.')
shapes = set(shapes)
if len(shapes) > 1:
raise ValueError(f'Data arrays must have same shape, but got shapes {shapes}.')
if any(_.ndim not in (1, 2) for _ in (x, y)):
raise ValueError('x and y coordinates must be 1d or 2d.')
if x.ndim != y.ndim:
raise ValueError('x and y coordinates must have same dimensionality.')
if order == 'F': # TODO: double check this
x, y = x.T, y.T # in case they are 2-dimensional
zs = tuple(z.T for z in zs)
# The labels and XY axis settings
if not projection:
# Apply labels
# NOTE: Do not overwrite existing labels!
kw_format = {}
if autoformat:
for s, d in zip('xy', (x, y)):
title = _get_title(d)
if title and not getattr(self, f'get_{s}label')():
kw_format[s + 'label'] = title
# Handle string-type coordinates
x, kw_format = _parse_string_coords(x, which='x', **kw_format)
y, kw_format = _parse_string_coords(y, which='y', **kw_format)
for s, d in zip('xy', (x, y)):
if d.size > 1 and d.ndim == 1 and _to_ndarray(d)[1] < _to_ndarray(d)[0]:
kw_format[s + 'reverse'] = True
# Apply formatting
if kw_format:
self.format(**kw_format)
# Default colorbar label
# WARNING: This will fail for any funcs wrapped by standardize_2d but not
# wrapped by apply_cmap. So far there are none.
if autoformat:
kwargs.setdefault('colorbar_kw', {})
title = _get_title(zs[0])
if title:
kwargs['colorbar_kw'].setdefault('label', title)
# Finally strip metadata
return _to_ndarray(x), _to_ndarray(y), *map(_to_ndarray, zs), kwargs
def _add_poles(y, z):
"""
Add data points on the poles as the average of highest latitude data.
"""
# Get means
with np.errstate(all='ignore'):
p1 = z[0, :].mean() # pole 1, make sure is not 0D DataArray!
p2 = z[-1, :].mean() # pole 2
if hasattr(p1, 'item'):
p1 = p1.item()  # np.asscalar() is deprecated; this happens with DataArrays
if hasattr(p2, 'item'):
p2 = p2.item()
# Concatenate
ps = (-90, 90) if (y[0] < y[-1]) else (90, -90)
z1 = np.repeat(p1, z.shape[1])[None, :]
z2 = np.repeat(p2, z.shape[1])[None, :]
y = ma.concatenate((ps[:1], y, ps[1:]))
z = ma.concatenate((z1, z, z2), axis=0)
return y, z
def _enforce_centers(x, y, z):
"""
Enforce that coordinates are centers. Convert from edges if possible.
"""
xlen, ylen = x.shape[-1], y.shape[0]
if z.ndim == 2 and z.shape[1] == xlen - 1 and z.shape[0] == ylen - 1:
# Get centers given edges
if all(arr.ndim == 1 and arr.size > 1 and _is_number(arr) for arr in (x, y)):
x = 0.5 * (x[1:] + x[:-1])
y = 0.5 * (y[1:] + y[:-1])
else:
if (
x.ndim == 2 and x.shape[0] > 1 and x.shape[1] > 1
and _is_number(x)
):
x = 0.25 * (x[:-1, :-1] + x[:-1, 1:] + x[1:, :-1] + x[1:, 1:])
if (
y.ndim == 2 and y.shape[0] > 1 and y.shape[1] > 1
and _is_number(y)
):
y = 0.25 * (y[:-1, :-1] + y[:-1, 1:] + y[1:, :-1] + y[1:, 1:])
elif z.shape[-1] != xlen or z.shape[0] != ylen:
# Helpful error message
raise ValueError(
f'Input shapes x {x.shape} and y {y.shape} '
f'must match z centers {z.shape} '
f'or z borders {tuple(i+1 for i in z.shape)}.'
)
return x, y
def _enforce_edges(x, y, z):
"""
Enforce that coordinates are edges. Convert from centers if possible.
"""
xlen, ylen = x.shape[-1], y.shape[0]
if z.ndim == 2 and z.shape[1] == xlen and z.shape[0] == ylen:
# Get edges given centers
if all(arr.ndim == 1 and arr.size > 1 and _is_number(arr) for arr in (x, y)):
x = edges(x)
y = edges(y)
else:
if (
x.ndim == 2 and x.shape[0] > 1 and x.shape[1] > 1
and _is_number(x)
):
x = edges2d(x)
if (
y.ndim == 2 and y.shape[0] > 1 and y.shape[1] > 1
and _is_number(y)
):
y = edges2d(y)
elif z.shape[-1] != xlen - 1 or z.shape[0] != ylen - 1:
# Helpful error message
raise ValueError(
f'Input shapes x {x.shape} and y {y.shape} must match '
f'array centers {z.shape} or '
f'array borders {tuple(i + 1 for i in z.shape)}.'
)
return x, y
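# Illustrative sketch (not part of the library): pcolor-style commands need
# coordinate edges, one more than the number of centers. With 1D numeric
# centers the edges() helper is used, which behaves roughly like:
#
#   x = np.array([1.0, 2.0, 3.0])   # centers
#   # edges(x) -> [0.5, 1.5, 2.5, 3.5]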
def _cartopy_2d(x, y, *zs, globe=False):
"""
Fix cartopy 2D geographic data arrays.
"""
# Fix coordinates
x, y = _fix_coords(x, y)
# Fix data
x_orig, y_orig, zs_orig = x, y, zs
zs = []
for z_orig in zs_orig:
# Bail for 2D coordinates
if not globe or x_orig.ndim > 1 or y_orig.ndim > 1:
zs.append(z_orig)
continue
# Fix holes over poles by *interpolating* there
y, z = _add_poles(y_orig, z_orig)
# Fix seams by ensuring circular coverage (cartopy can plot over map edges)
if x_orig[0] % 360 != (x_orig[-1] + 360) % 360:
x = ma.concatenate((x_orig, [x_orig[0] + 360]))
z = ma.concatenate((z, z[:, :1]), axis=1)
zs.append(z)
return x, y, *zs
def _basemap_2d(x, y, *zs, globe=False, projection=None):
"""
Fix basemap 2D geographic data arrays.
"""
# Fix coordinates
x, y = _fix_coords(x, y)
# Fix data
xmin, xmax = projection.lonmin, projection.lonmax
x_orig, y_orig, zs_orig = x, y, zs
zs = []
for z_orig in zs_orig:
# Ensure data is within map bounds
x, z_orig = _fix_span(x_orig, z_orig, xmin, xmax)
# Bail for 2D coordinates
if not globe or x_orig.ndim > 1 or y_orig.ndim > 1:
zs.append(z_orig)
continue
# Fix holes over poles by *interpolating* there
y, z = _add_poles(y_orig, z_orig)
# Fix seams at map boundary
if x[0] == xmin and x.size - 1 == z.shape[1]: # scenario 1
# Edges (e.g. pcolor) fit perfectly against seams. Size is unchanged.
pass
elif x.size - 1 == z.shape[1]: # scenario 2
# Edges (e.g. pcolor) do not fit perfectly. Size augmented by 1.
x = ma.append(xmin, x)
x[-1] = xmin + 360
z = ma.concatenate((z[:, -1:], z), axis=1)
elif x.size == z.shape[1]: # scenario 3
# Centers (e.g. contour) must be interpolated to edge. Size augmented by 2.
xi = np.array([x[-1], x[0] + 360])
if xi[0] == xi[1]: # impossible to interpolate
pass
else:
zq = ma.concatenate((z[:, -1:], z[:, :1]), axis=1)
xq = xmin + 360
zq = (zq[:, :1] * (xi[1] - xq) + zq[:, 1:] * (xq - xi[0])) / (xi[1] - xi[0]) # noqa: E501
x = ma.concatenate(([xmin], x, [xmin + 360]))
z = ma.concatenate((zq, z, zq), axis=1)
else:
raise ValueError('Unexpected shapes of coordinates or data arrays.')
zs.append(z)
# Convert coordinates
if x.ndim == 1 and y.ndim == 1:
x, y = np.meshgrid(x, y)
x, y = projection(x, y)
return x, y, *zs
@docstring.add_snippets
def standardize_2d(
self, *args, data=None, autoformat=None, order='C', globe=False, **kwargs
):
"""
Interpret positional arguments for all "2D" plotting commands so the syntax is
consistent. The arguments are standardized as follows:
* If *x* and *y* or *latitude* and *longitude* coordinates were not
provided, and a `~pandas.DataFrame` or `~xarray.DataArray` is passed, we
try to infer them from the metadata. Otherwise, ``np.arange(0, data.shape[0])``
and ``np.arange(0, data.shape[1])`` are used.
* For ``pcolor`` and ``pcolormesh``, coordinate *edges* are calculated
if *centers* were provided. This uses the `~proplot.utils.edges` and
`~proplot.utils.edges2d` functions. For all other methods, coordinate
*centers* are calculated if *edges* were provided.
Important
---------
This function wraps {methods}
Parameters
----------
%(axes.autoformat)s
order : {{'C', 'F'}}, optional
If ``'C'``, arrays should be shaped ``(y, x)``. If ``'F'``, arrays
should be shaped ``(x, y)``. Default is ``'C'``.
globe : bool, optional
Whether to ensure global coverage for `~proplot.axes.GeoAxes` plots.
Default is ``False``. When set to ``True`` this does the following:
#. Interpolates input data to the North and South poles by setting the data
values at the poles to the mean from latitudes nearest each pole.
#. Makes meridional coverage "circular", i.e. the last longitude coordinate
equals the first longitude coordinate plus 360\N{DEGREE SIGN}.
#. For `~proplot.axes.BasemapAxes`, 1D longitude vectors are also cycled to
fit within the map edges. For example, if the projection central longitude
is 90\N{DEGREE SIGN}, the data is shifted so that it spans
-90\N{DEGREE SIGN} to 270\N{DEGREE SIGN}.
See also
--------
apply_cmap
proplot.utils.edges
proplot.utils.edges2d
"""
method = kwargs.pop('_method')
name = method.__name__
pcolor = name in ('pcolor', 'pcolormesh', 'pcolorfast')
allow1d = name in ('barbs', 'quiver') # these also allow 1D data
autoformat = _not_none(autoformat, rc['autoformat'])
# Find and translate input args
if data is not None:
args = _get_data(data, *args)
if not args:
raise TypeError('Positional arguments are required.')
# Parse input args
if len(args) > 2:
x, y, *args = args
else:
x = y = None
if x is not None:
x = _to_arraylike(x)
if y is not None:
y = _to_arraylike(y)
zs = tuple(map(_to_arraylike, args))
# Automatic formatting
x, y, *zs, kwargs = _auto_format_2d(
self, x, y, *zs, name=name, order=order, autoformat=autoformat, **kwargs
)
# Standardize coordinates
if pcolor:
x, y = _enforce_edges(x, y, zs[0])
else:
x, y = _enforce_centers(x, y, zs[0])
# Cartopy projection axes
if (
not allow1d and getattr(self, 'name', None) == 'proplot_cartopy'
and isinstance(kwargs.get('transform', None), PlateCarree)
):
x, y, *zs = _cartopy_2d(x, y, *zs, globe=globe)
# Basemap projection axes
elif (
not allow1d and getattr(self, 'name', None) == 'proplot_basemap'
and kwargs.get('latlon', None)
):
x, y, *zs = _basemap_2d(x, y, *zs, globe=globe, projection=self.projection)
kwargs['latlon'] = False
# Call function
return method(self, x, y, *zs, **kwargs)
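# Illustrative sketch (not part of the library): with standardize_2d applied,
# 2D coordinates are inferred from metadata and converted between centers and
# edges as needed. Assumes a proplot axes 'ax', xarray as xr, and numpy as np.
#
#   da = xr.DataArray(
#       np.random.rand(20, 30), dims=('lat', 'lon'),
#       coords={'lat': np.linspace(-85, 85, 20), 'lon': np.linspace(0, 348, 30)},
#   )
#   ax.pcolormesh(da)  # x/y taken from the coords; centers converted to edges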
def _get_error_data(
data, y, errdata=None, stds=None, pctiles=None,
stds_default=None, pctiles_default=None,
reduced=True, absolute=False, label=False,
):
"""
Return values that can be passed to the `~matplotlib.axes.Axes.errorbar`
`xerr` and `yerr` keyword args.
"""
# Parse stds arguments
# NOTE: Have to guard against "truth value of an array is ambiguous" errors
if stds is True:
stds = stds_default
elif stds is False or stds is None:
stds = None
else:
stds = np.atleast_1d(stds)
if stds.size == 1:
stds = sorted((-stds.item(), stds.item()))
elif stds.size != 2:
raise ValueError('Expected scalar or length-2 stdev specification.')
# Parse pctiles arguments
if pctiles is True:
pctiles = pctiles_default
elif pctiles is False or pctiles is None:
pctiles = None
else:
pctiles = np.atleast_1d(pctiles)
if pctiles.size == 1:
delta = (100 - pctiles.item()) / 2.0
pctiles = sorted((delta, 100 - delta))
elif pctiles.size != 2:
raise ValueError('Expected scalar or length-2 pctiles specification.')
# Incompatible settings
if stds is not None and pctiles is not None:
warnings._warn_proplot(
'You passed both a standard deviation range and a percentile range for '
'drawing error indicators. Using the former.'
)
pctiles = None
if not reduced and (stds is not None or pctiles is not None):
raise ValueError(
'To automatically compute standard deviations or percentiles on columns '
'of data you must pass means=True or medians=True.'
)
if reduced and errdata is not None:
stds = pctiles = None
warnings._warn_proplot(
'You explicitly provided the error bounds but also requested '
'automatically calculating means or medians on data columns. '
'It may make more sense to use the "stds" or "pctiles" keyword args '
'and have *proplot* calculate the error bounds.'
)
# Compute error data in format that can be passed to maxes.Axes.errorbar()
# NOTE: Include option to pass symmetric deviation from central points
if errdata is not None:
# Manual error data
if y.ndim != 1:
raise ValueError(
'errdata with 2D y coordinates is not yet supported.'
)
label_default = 'uncertainty'
err = _to_ndarray(errdata)
if (
err.ndim not in (1, 2)
or err.shape[-1] != y.size
or err.ndim == 2 and err.shape[0] != 2
):
raise ValueError(
f'errdata must have shape (2, {y.shape[-1]}), but got {err.shape}.'
)
if err.ndim == 1:
abserr = err
err = np.empty((2, err.size))
err[0, :] = y - abserr # translated back to absolute deviations below
err[1, :] = y + abserr
elif stds is not None:
# Standard deviations
label_default = fr'{abs(stds[1])}$\sigma$ range'
err = y + np.nanstd(data, axis=0)[None, :] * _to_ndarray(stds)[:, None]
elif pctiles is not None:
# Percentiles
label_default = f'{pctiles[1] - pctiles[0]}% range'
err = np.nanpercentile(data, pctiles, axis=0)
else:
raise ValueError('You must provide error bounds.')
# Return label possibly
if label is True:
label = label_default
elif not label:
label = None
# Make relative data for maxes.Axes.errorbar() ingestion
if not absolute:
err = err - y
err[0, :] *= -1 # absolute deviations from central points
# Return data with legend entry
return err, label
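# Illustrative sketch (not part of the library): building errorbar() input from
# columns of data. Assumes numpy is imported as np.
#
#   data = np.random.rand(50, 4)   # 50 samples at each of 4 points
#   y = np.nanmean(data, axis=0)
#   err, label = _get_error_data(
#       data, y, stds=True, stds_default=(-3, 3), reduced=True, label=True,
#   )
#   # err has shape (2, 4) with distances below/above y, suitable for
#   # errorbar(x, y, yerr=err); label -> '3$\sigma$ range'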
def indicate_error(
self, *args,
mean=None, means=None, median=None, medians=None,
barstd=None, barstds=None, barpctile=None, barpctiles=None, bardata=None,
boxstd=None, boxstds=None, boxpctile=None, boxpctiles=None, boxdata=None,
shadestd=None, shadestds=None, shadepctile=None, shadepctiles=None, shadedata=None,
fadestd=None, fadestds=None, fadepctile=None, fadepctiles=None, fadedata=None,
boxmarker=None, boxmarkercolor='white',
boxcolor=None, barcolor=None, shadecolor=None, fadecolor=None,
shadelabel=False, fadelabel=False, shadealpha=0.4, fadealpha=0.2,
boxlinewidth=None, boxlw=None, barlinewidth=None, barlw=None, capsize=None,
boxzorder=2.5, barzorder=2.5, shadezorder=1.5, fadezorder=1.5,
**kwargs
):
"""
Support on-the-fly error bars and error shading. Use the input error data or
optionally interpret columns of data as distributions, pass the column
means or medians to the relevant plotting command, and draw error
indications from the specified standard deviation or percentile range.
Important
---------
This function wraps {methods}
Parameters
----------
*args
The input data.
mean, means : bool, optional
Whether to plot the means of each column in the input data. If no other
arguments specified, this also sets ``barstd=True`` (and ``boxstd=True``
for violin plots).
median, medians : bool, optional
Whether to plot the medians of each column in the input data. If no other
arguments specified, this also sets ``barstd=True`` (and ``boxstd=True``
for violin plots).
barstd, barstds : float, (float, float), or bool, optional
Standard deviation multiples for *thin error bars* with optional whiskers
(i.e. caps). If scalar, then +/- that number is used. If ``True``, the
default of +/-3 standard deviations is used. This argument is only valid
if `means` or `medians` is ``True``.
barpctile, barpctiles : float, (float, float) or bool, optional
As with `barstd`, but instead using *percentiles* for the error bars. The
percentiles are calculated with `numpy.percentile`. If scalar, that width
surrounding the 50th percentile is used (e.g. ``90`` shows the 5th to 95th
percentiles). If ``True``, the default percentile range of 0 to 100 is
used. This argument is only valid if `means` or `medians` is ``True``.
bardata : 2 x N array or 1D array, optional
If shape is 2 x N these are the lower and upper bounds for the thin error bars.
If array is 1D these are the absolute, symmetric deviations from the central
points. This should be used if `means` and `medians` are both ``False`` (i.e.
you did not provide dataset columns from which statistical properties can be
calculated automatically).
boxstd, boxstds, boxpctile, boxpctiles, boxdata : optional
As with `barstd`, `barpctile`, and `bardata`, but for *thicker error bars*
representing a smaller interval than the thin error bars. If `boxstds` is
``True``, the default standard deviation range of +/-1 is used. If `boxpctiles`
is ``True``, the default percentile range of 25 to 75 is used (i.e. the
interquartile range). When "boxes" and "bars" are combined, this has the effect
of drawing miniature box-and-whisker plots.
shadestd, shadestds, shadepctile, shadepctiles, shadedata : optional
As with `barstd`, `barpctile`, and `bardata`, but using *shading* to indicate
the error range. If `shadestds` is ``True``, the default standard deviation
range of +/-2 is used. If `shadepctiles` is ``True``, the default
percentile range of 10 to 90 is used. Shading is generally useful for
`~matplotlib.axes.Axes.plot` plots.
fadestd, fadestds, fadepctile, fadepctiles, fadedata : optional
As with `shadestd`, `shadepctile`, and `shadedata`, but for an additional,
more faded, *secondary* shaded region. If `fadestds` is ``True``, the default
standard deviation range of +/-3 is used. If `fadepctiles` is ``True``,
the default percentile range of 0 to 100 is used.
barcolor, boxcolor, shadecolor, fadecolor : color-spec, optional
Colors for the different error indicators. For error bars, the default is
``'k'``. For shading, the default behavior is to inherit color from the
primary `~matplotlib.artist.Artist`.
shadelabel, fadelabel : bool or str, optional
Labels for the shaded regions to be used as separate legend entries. To toggle
labels "on" and apply a *default* label, use e.g. ``shadelabel=True``. To apply
a *custom* label, use e.g. ``shadelabel='label'``. Otherwise, the shading is
drawn underneath the line and/or marker in the legend entry.
barlinewidth, boxlinewidth, barlw, boxlw : float, optional
Line widths for the thin and thick error bars, in points. The defaults
are ``barlw=0.8`` and ``boxlw=4 * barlw``.
boxmarker : bool, optional
Whether to draw a small marker in the middle of the box denoting the mean or
median position. Ignored if `boxes` is ``False``.
boxmarkercolor : color-spec, optional
Color for the `boxmarker` marker. Default is ``'w'``.
capsize : float, optional
The cap size for thin error bars in points.
barzorder, boxzorder, shadezorder, fadezorder : float, optional
The "zorder" for the thin error bars, thick error bars, and shading.
Returns
-------
h, err1, err2, ...
The original plot object and the error bar or shading objects.
"""
method = kwargs.pop('_method')
name = method.__name__
bar = name in ('bar',)
flip = name in ('barh', 'plotx', 'scatterx') or kwargs.get('vert') is False
plot = name in ('plot', 'scatter')
violin = name in ('violinplot',)
means = _not_none(mean=mean, means=means)
medians = _not_none(median=median, medians=medians)
barstds = _not_none(barstd=barstd, barstds=barstds)
boxstds = _not_none(boxstd=boxstd, boxstds=boxstds)
shadestds = _not_none(shadestd=shadestd, shadestds=shadestds)
fadestds = _not_none(fadestd=fadestd, fadestds=fadestds)
barpctiles = _not_none(barpctile=barpctile, barpctiles=barpctiles)
boxpctiles = _not_none(boxpctile=boxpctile, boxpctiles=boxpctiles)
shadepctiles = _not_none(shadepctile=shadepctile, shadepctiles=shadepctiles)
fadepctiles = _not_none(fadepctile=fadepctile, fadepctiles=fadepctiles)
bars = any(_ is not None for _ in (bardata, barstds, barpctiles))
boxes = any(_ is not None for _ in (boxdata, boxstds, boxpctiles))
shade = any(_ is not None for _ in (shadedata, shadestds, shadepctiles))
fade = any(_ is not None for _ in (fadedata, fadestds, fadepctiles))
if means and medians:
warnings._warn_proplot('Cannot have both means=True and medians=True. Using former.') # noqa: E501
# Get means or medians while preserving metadata for autoformat
# TODO: Permit 3D array with error dimension coming first
# NOTE: Previously went to great pains to preserve metadata but now retrieval
# of default legend handles moved to _auto_format_1d so can strip.
x, y, *args = args
data = y
if means or medians:
if data.ndim != 2:
raise ValueError(f'Expected 2D array for means=True. Got {data.ndim}D.')
if not any((bars, boxes, shade, fade)):
bars = barstds = True
if violin:
boxes = boxstds = True
if means:
y = np.nanmean(data, axis=0)
elif medians:
y = np.nanpercentile(data, 50, axis=0)
# Parse keyword args and apply defaults
# NOTE: Should not use plot() 'linewidth' for bar elements
# NOTE: violinplot_extras passes some invalid keyword args with expectation
# that indicate_error pops them and uses them for error bars.
getter = kwargs.pop if violin else kwargs.get if bar else lambda *args: None
boxmarker = _not_none(boxmarker, True if violin else False)
capsize = _not_none(capsize, 3.0)
linewidth = _not_none(getter('linewidth', None), getter('lw', None), 1.0)
barlinewidth = _not_none(barlinewidth=barlinewidth, barlw=barlw, default=linewidth)
boxlinewidth = _not_none(boxlinewidth=boxlinewidth, boxlw=boxlw, default=4 * barlinewidth) # noqa: E501
edgecolor = _not_none(getter('edgecolor', None), 'k')
barcolor = _not_none(barcolor, edgecolor)
boxcolor = _not_none(boxcolor, barcolor)
shadecolor_infer = shadecolor is None
fadecolor_infer = fadecolor is None
shadecolor = _not_none(shadecolor, kwargs.get('color'), kwargs.get('facecolor'), edgecolor) # noqa: E501
fadecolor = _not_none(fadecolor, shadecolor)
# Draw dark and light shading
getter = kwargs.pop if plot else kwargs.get
eobjs = []
fill = self.fill_betweenx if flip else self.fill_between
if fade:
edata, label = _get_error_data(
data, y, errdata=fadedata, stds=fadestds, pctiles=fadepctiles,
stds_default=(-3, 3), pctiles_default=(0, 100), absolute=True,
reduced=means or medians, label=fadelabel,
)
eobj = fill(
x, *edata, linewidth=0, label=label,
color=fadecolor, alpha=fadealpha, zorder=fadezorder,
)
eobjs.append(eobj)
if shade:
edata, label = _get_error_data(
data, y, errdata=shadedata, stds=shadestds, pctiles=shadepctiles,
stds_default=(-2, 2), pctiles_default=(10, 90), absolute=True,
reduced=means or medians, label=shadelabel,
)
eobj = fill(
x, *edata, linewidth=0, label=label,
color=shadecolor, alpha=shadealpha, zorder=shadezorder,
)
eobjs.append(eobj)
# Draw thin error bars and thick error boxes
sy = 'x' if flip else 'y' # yerr
ex, ey = (y, x) if flip else (x, y)
if boxes:
edata, _ = _get_error_data(
data, y, errdata=boxdata, stds=boxstds, pctiles=boxpctiles,
stds_default=(-1, 1), pctiles_default=(25, 75),
reduced=means or medians,
)
if boxmarker:
self.scatter(
ex, ey, s=boxlinewidth, marker='o', color=boxmarkercolor, zorder=5
)
eobj = self.errorbar(
ex, ey, color=boxcolor, linewidth=boxlinewidth, linestyle='none',
capsize=0, zorder=boxzorder, **{sy + 'err': edata}
)
eobjs.append(eobj)
if bars: # now impossible to make thin bar width different from cap width!
edata, _ = _get_error_data(
data, y, errdata=bardata, stds=barstds, pctiles=barpctiles,
stds_default=(-3, 3), pctiles_default=(0, 100),
reduced=means or medians,
)
eobj = self.errorbar(
ex, ey, color=barcolor, linewidth=barlinewidth, linestyle='none',
markeredgecolor=barcolor, markeredgewidth=barlinewidth,
capsize=capsize, zorder=barzorder, **{sy + 'err': edata}
)
eobjs.append(eobj)
# Call main function
# NOTE: Provide error objects for inclusion in legend, but *only* provide
# the shading. Never want legend entries for error bars.
xy = (x, data) if violin else (x, y)
kwargs.setdefault('_errobjs', eobjs[:int(shade + fade)])
res = obj = method(self, *xy, *args, **kwargs)
# Apply inferred colors to objects
i = 0
if isinstance(res, tuple): # pull out patch from e.g. BarContainers
obj = res[0]
for b, infer in zip((fade, shade), (fadecolor_infer, shadecolor_infer)):
if not b or not infer:
continue
if hasattr(obj, 'get_facecolor'):
color = obj.get_facecolor()
elif hasattr(obj, 'get_color'):
color = obj.get_color()
else:
color = None
if color is not None:
eobjs[i].set_facecolor(color)
i += 1
# Return objects
# NOTE: For now 'errobjs' can only be returned with 1D y coordinates
# NOTE: Avoid expanding matplotlib collections that are list subclasses here
if not eobjs:
return res
elif isinstance(res, tuple) and not isinstance(res, mcontainer.Container):
return ((*res, *eobjs),) # for plot()
else:
return (res, *eobjs)
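# Illustrative sketch (not part of the library): on-the-fly error indications.
# Assumes a proplot axes 'ax' and numpy imported as np.
#
#   data = np.random.rand(20, 8)   # 20 samples at each of 8 x positions
#   ax.plot(data, means=True, barstds=True, shadestds=True)
#   # plots the column means with +/-3 sigma error bars and +/-2 sigma shading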
def _apply_plot(self, *args, cmap=None, values=None, **kwargs):
"""
Apply the plot or plotx command.
"""
# Deprecated functionality
if cmap is not None:
warnings._warn_proplot(
'Drawing "parametric" plots with ax.plot(x, y, values=values, cmap=cmap) '
'is deprecated and will be removed in the next major release. Please use '
'ax.parametric(x, y, values, cmap=cmap) instead.'
)
return self.parametric(*args, cmap=cmap, values=values, **kwargs)
# Plot line(s)
method = kwargs.pop('_method')
name = method.__name__
sx = 'y' if 'x' in name else 'x' # i.e. plotx
objs = []
args = list(args)
while args:
# Support e.g. x1, y1, fmt, x2, y2, fmt2 input
# NOTE: Copied from _process_plot_var_args.__call__ to avoid relying
# on public API. ProPlot already supports passing extra positional
# arguments beyond x, y so can feed (x1, y1, fmt) through wrappers.
# Instead represent (x2, y2, fmt, ...) as successive calls to plot().
iargs, args = args[:2], args[2:]
if args and isinstance(args[0], str):
iargs.append(args[0])
args = args[1:]
# Call function
iobjs = method(self, *iargs, values=values, **kwargs)
# Add sticky edges
# NOTE: Skip edges when error bars present or caps are flush against axes edge
lines = all(isinstance(obj, mlines.Line2D) for obj in iobjs)
if lines and not getattr(self, '_no_sticky_edges', False):
for obj in iobjs:
data = getattr(obj, 'get_' + sx + 'data')()
if not data.size:
continue
convert = getattr(self, 'convert_' + sx + 'units')
edges = getattr(obj.sticky_edges, sx)
edges.append(convert(min(data)))
edges.append(convert(max(data)))
objs.extend(iobjs)
return tuple(objs)
def _plot_extras(self, *args, **kwargs):
"""
Pre-processing for `plot`.
"""
return _apply_plot(self, *args, **kwargs)
def _plotx_extras(self, *args, **kwargs):
"""
Pre-processing for `plotx`.
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_plot(self, *args, **kwargs)
def _stem_extras(
self, *args, linefmt=None, basefmt=None, markerfmt=None, **kwargs
):
"""
Make ``use_line_collection=True`` the default to suppress the warning message.
"""
# Set default colors
# NOTE: 'fmt' strings can only be 2 to 3 characters and include color shorthands
# like 'r' or cycle colors like 'C0'. Cannot use full color names.
# NOTE: Matplotlib defaults try to make a 'reddish' color the base and 'bluish'
# color the stems. To make this more robust we temporarily replace the cycler
# with a negcolor/poscolor cycler.
method = kwargs.pop('_method')
fmts = (linefmt, basefmt, markerfmt)
if not any(isinstance(_, str) and re.match(r'\AC[0-9]', _) for _ in fmts):
cycle = constructor.Cycle((rc['negcolor'], rc['poscolor']), name='_neg_pos')
context = rc.context({'axes.prop_cycle': cycle})
else:
context = _dummy_context()
# Add stem lines with bluish stem color and reddish base color
with context:
kwargs['linefmt'] = linefmt = _not_none(linefmt, 'C0-')
kwargs['basefmt'] = _not_none(basefmt, 'C1-')
kwargs['markerfmt'] = _not_none(markerfmt, linefmt[:-1] + 'o')
kwargs.setdefault('use_line_collection', True)
try:
return method(self, *args, **kwargs)
except TypeError:
del kwargs['use_line_collection'] # older version
return method(self, *args, **kwargs)
def _parametric_extras(self, x, y, c=None, *, values=None, interp=0, **kwargs):
"""
Interpolate the parametric coordinates and values before plotting.
"""
# Parse input
# NOTE: Critical to put this here instead of parametric() so that the
# interpolated 'values' are used to select colormap levels in apply_cmap.
method = kwargs.pop('_method')
c = _not_none(c=c, values=values)
if c is None:
raise ValueError('Values must be provided.')
c = _to_ndarray(c)
ndim = tuple(_.ndim for _ in (x, y, c))
size = tuple(_.size for _ in (x, y, c))
if any(_ != 1 for _ in ndim):
raise ValueError(f'Input coordinates must be 1D. Instead got dimensions {ndim}.') # noqa: E501
if any(_ != size[0] for _ in size):
raise ValueError(f'Input coordinates must have identical size. Instead got sizes {size}.') # noqa: E501
# Interpolate values to allow for smooth gradations between values
# (interp=False) or color switchover halfway between points
# (interp=True). Then optionally interpolate the colormap values.
# NOTE: The 'extras' wrapper handles input before ingestion by other wrapper
# functions. *This* method is analogous to a native matplotlib method.
if interp > 0:
x_orig, y_orig, v_orig = x, y, c
x, y, c = [], [], []
for j in range(x_orig.shape[0] - 1):
idx = slice(None)
if j + 1 < x_orig.shape[0] - 1:
idx = slice(None, -1)
x.extend(np.linspace(x_orig[j], x_orig[j + 1], interp + 2)[idx].flat)
y.extend(np.linspace(y_orig[j], y_orig[j + 1], interp + 2)[idx].flat)
c.extend(np.linspace(v_orig[j], v_orig[j + 1], interp + 2)[idx].flat) # noqa: E501
x, y, c = np.array(x), np.array(y), np.array(c)
return method(self, x, y, values=c, **kwargs)
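# Illustrative sketch (not part of the library): effect of interp on a
# parametric line. Assumes numpy is imported as np.
#
#   x = np.array([0.0, 1.0]); y = np.array([0.0, 1.0]); c = np.array([0.0, 1.0])
#   # with interp=1 one interior point is added per segment before plotting:
#   # x -> [0.0, 0.5, 1.0], y -> [0.0, 0.5, 1.0], values -> [0.0, 0.5, 1.0]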
def _check_negpos(name, **kwargs):
"""
Issue warnings if we are ignoring arguments for "negpos" plots.
"""
for key, arg in kwargs.items():
if arg is None:
continue
warnings._warn_proplot(
f'{name}() argument {key}={arg!r} is incompatible with '
'negpos=True. Ignoring.'
)
def _apply_lines(
self, *args,
stack=None, stacked=None,
negpos=False, negcolor=None, poscolor=None,
color=None, colors=None,
linestyle=None, linestyles=None,
lw=None, linewidth=None, linewidths=None,
**kwargs
):
"""
Apply hlines or vlines command. Support default "minima" at zero.
"""
# Parse input arguments
method = kwargs.pop('_method')
name = method.__name__
stack = _not_none(stack=stack, stacked=stacked)
colors = _not_none(color=color, colors=colors)
linestyles = _not_none(linestyle=linestyle, linestyles=linestyles)
linewidths = _not_none(lw=lw, linewidth=linewidth, linewidths=linewidths)
args = list(args)
if len(args) > 3:
raise ValueError(f'Expected 1-3 positional args, got {len(args)}.')
if len(args) == 3 and stack:
warnings._warn_proplot(
f'{name}() cannot have three positional arguments with stack=True. '
'Ignoring second argument.'
)
if len(args) == 2: # empty possible
args.insert(1, np.array([0.0])) # default base
# Support "negative" and "positive" lines
x, y1, y2, *args = args # standardized
if not negpos:
# Plot basic lines
kwargs['stack'] = stack
if colors is not None:
kwargs['colors'] = colors
result = method(self, x, y1, y2, *args, **kwargs)
objs = (result,)
else:
# Plot negative and positive colors
_check_negpos(name, stack=stack, colors=colors)
y1neg, y2neg = _mask_array(y2 < y1, y1, y2)
color = _not_none(negcolor, rc['negcolor'])
negobj = method(self, x, y1neg, y2neg, color=color, **kwargs)
y1pos, y2pos = _mask_array(y2 >= y1, y1, y2)
color = _not_none(poscolor, rc['poscolor'])
posobj = method(self, x, y1pos, y2pos, color=color, **kwargs)
objs = result = (negobj, posobj)
# Apply formatting unavailable in matplotlib
for obj in objs:
if linewidths is not None:
obj.set_linewidth(linewidths) # LineCollection setters
if linestyles is not None:
obj.set_linestyle(linestyles)
return result
@docstring.add_snippets
def vlines_extras(self, *args, **kwargs):
"""
%(axes.vlines)s
"""
return _apply_lines(self, *args, **kwargs)
@docstring.add_snippets
def hlines_extras(self, *args, **kwargs):
"""
%(axes.hlines)s
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_lines(self, *args, **kwargs)
def _apply_scatter(
self, *args,
vmin=None, vmax=None, smin=None, smax=None,
cmap=None, cmap_kw=None, norm=None, norm_kw=None,
extend='neither', levels=None, N=None, values=None,
locator=None, locator_kw=None, discrete=None,
symmetric=False, positive=False, negative=False, nozero=False, inbounds=None,
**kwargs
):
"""
Apply scatter or scatterx markers. Permit up to 4 positional arguments
including `s` and `c`.
"""
# Manage input arguments
method = kwargs.pop('_method')
props = _pop_props(kwargs, 'lines')
c = props.pop('color', None)
s = props.pop('markersize', None)
args = list(args)
if len(args) > 4:
raise ValueError(f'Expected 1-4 positional arguments, got {len(args)}.')
if len(args) == 4:
c = _not_none(c_positional=args.pop(-1), c=c)
if len(args) == 3:
s = _not_none(s_positional=args.pop(-1), s=s)
# Get colormap
cmap_kw = cmap_kw or {}
if cmap is not None:
cmap_kw.setdefault('luminance', 90) # matches to_listed behavior
cmap = constructor.Colormap(cmap, **cmap_kw)
# Get normalizer and levels
ticks = None
carray = np.atleast_1d(c)
discrete = _not_none(
getattr(self, '_image_discrete', None),
discrete,
rc['image.discrete'],
True
)
if (
discrete and np.issubdtype(carray.dtype, np.number)
and not (carray.ndim == 2 and carray.shape[1] in (3, 4))
):
carray = carray.ravel()
levels = _not_none(levels=levels, N=N)
norm, cmap, _, ticks = _build_discrete_norm(
self, carray, # sample data for getting suitable levels
levels=levels, values=values,
cmap=cmap, norm=norm, norm_kw=norm_kw, extend=extend,
vmin=vmin, vmax=vmax, locator=locator, locator_kw=locator_kw,
symmetric=symmetric, positive=positive, negative=negative,
nozero=nozero, inbounds=inbounds,
)
# Fix 2D arguments but still support scatter(x_vector, y_2d) usage
# NOTE: numpy.ravel() preserves masked arrays
# NOTE: Since we are flattening vectors the coordinate metadata is meaningless,
# so converting to ndarray and stripping metadata is no problem.
if len(args) == 2 and all(_to_ndarray(arg).squeeze().ndim > 1 for arg in args):
args = tuple(np.ravel(arg) for arg in args)
# Scale s array
if np.iterable(s) and (smin is not None or smax is not None):
smin_true, smax_true = min(s), max(s)
if smin is None:
smin = smin_true
if smax is None:
smax = smax_true
s = smin + (smax - smin) * (np.array(s) - smin_true) / (smax_true - smin_true)
# Call function
obj = objs = method(self, *args, c=c, s=s, cmap=cmap, norm=norm, **props, **kwargs)
if not isinstance(objs, tuple):
objs = (obj,)
for iobj in objs:
iobj._colorbar_extend = extend
iobj._colorbar_ticks = ticks
return obj
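# Illustrative sketch (not part of the library): how smin/smax rescale an
# iterable of marker sizes.
#
#   s = [1, 4, 9]; smin, smax = 10, 100
#   # scaled = smin + (smax - smin) * (s - min(s)) / (max(s) - min(s))
#   # -> [10.0, 43.75, 100.0]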
@docstring.add_snippets
def scatter_extras(self, *args, **kwargs):
"""
%(axes.scatter)s
"""
return _apply_scatter(self, *args, **kwargs)
@docstring.add_snippets
def scatterx_extras(self, *args, **kwargs):
"""
%(axes.scatterx)s
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_scatter(self, *args, **kwargs)
def _apply_fill_between(
self, *args, where=None, negpos=None, negcolor=None, poscolor=None,
lw=None, linewidth=None, color=None, facecolor=None,
stack=None, stacked=None, **kwargs
):
"""
Apply `fill_between` or `fill_betweenx` shading. Permit up to 4
positional arguments including `where`.
"""
# Parse input arguments
method = kwargs.pop('_method')
name = method.__name__
sx = 'y' if 'x' in name else 'x' # i.e. fill_betweenx
sy = 'x' if sx == 'y' else 'y'
stack = _not_none(stack=stack, stacked=stacked)
color = _not_none(color=color, facecolor=facecolor)
linewidth = _not_none(lw=lw, linewidth=linewidth, default=0)
args = list(args)
if len(args) > 4:
raise ValueError(f'Expected 1-4 positional args, got {len(args)}.')
if len(args) == 4:
where = _not_none(where_positional=args.pop(3), where=where)
if len(args) == 3 and stack:
warnings._warn_proplot(
f'{name}() cannot have three positional arguments with stack=True. '
'Ignoring second argument.'
)
if len(args) == 2: # empty possible
args.insert(1, np.array([0.0])) # default base
# Draw patches with default edge width zero
x, y1, y2 = args
kwargs['linewidth'] = linewidth
if not negpos:
# Plot basic patches
kwargs.update({'where': where, 'stack': stack})
if color is not None:
kwargs['color'] = color
result = method(self, x, y1, y2, **kwargs)
objs = (result,)
else:
# Plot negative and positive patches
if y1.ndim > 1 or y2.ndim > 1:
raise ValueError(f'{name}() arguments with negpos=True must be 1D.')
kwargs.setdefault('interpolate', True)
_check_negpos(name, where=where, stack=stack, color=color)
color = _not_none(negcolor, rc['negcolor'])
negobj = method(self, x, y1, y2, where=(y2 < y1), facecolor=color, **kwargs)
color = _not_none(poscolor, rc['poscolor'])
posobj = method(self, x, y1, y2, where=(y2 >= y1), facecolor=color, **kwargs)
result = objs = (posobj, negobj) # may be tuple of tuples due to apply_cycle
# Add sticky edges in x-direction, and sticky edges in y-direction
# *only* if one of the y limits is scalar. This should satisfy most users.
# NOTE: Could also retrieve data from PolyCollection but that's tricky.
# NOTE: Standardize function guarantees ndarray input by now
if not getattr(self, '_no_sticky_edges', False):
xsides = (np.min(x), np.max(x))
ysides = []
for y in (y1, y2):
if y.size == 1:
ysides.append(y.item())
objs = tuple(obj for _ in objs for obj in (_ if isinstance(_, tuple) else (_,)))
for obj in objs:
for s, sides in zip((sx, sy), (xsides, ysides)):
convert = getattr(self, 'convert_' + s + 'units')
edges = getattr(obj.sticky_edges, s)
edges.extend(convert(sides))
return result
@docstring.add_snippets
def fill_between_extras(self, *args, **kwargs):
"""
%(axes.fill_between)s
"""
return _apply_fill_between(self, *args, **kwargs)
@docstring.add_snippets
def fill_betweenx_extras(self, *args, **kwargs):
"""
%(axes.fill_betweenx)s
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_fill_between(self, *args, **kwargs)
def _convert_bar_width(x, width=1, ncols=1):
"""
Convert bar plot widths from relative to coordinate spacing. Relative
widths are much more convenient for users.
"""
# WARNING: This will fail for non-numeric non-datetime64 singleton
# datatypes but this is good enough for vast majority of cases.
x_test = np.atleast_1d(_to_ndarray(x))
if len(x_test) >= 2:
x_step = x_test[1:] - x_test[:-1]
x_step = np.concatenate((x_step, x_step[-1:]))
elif x_test.dtype == np.datetime64:
x_step = np.timedelta64(1, 'D')
else:
x_step = np.array(0.5)
if np.issubdtype(x_test.dtype, np.datetime64):
# Avoid integer timedelta truncation
x_step = x_step.astype('timedelta64[ns]')
return width * x_step / ncols
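# Illustrative sketch (not part of the library): relative widths are scaled by
# the x spacing and divided among grouped columns. Assumes numpy as np.
#
#   x = np.array([0.0, 2.0, 4.0, 6.0])
#   _convert_bar_width(x, width=0.8, ncols=2)
#   # x spacing is 2, so each of the 2 side-by-side bars is 0.8 * 2 / 2 = 0.8 wide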
def _apply_bar(
self, *args, stack=None, stacked=None,
lw=None, linewidth=None, color=None, facecolor=None, edgecolor='black',
negpos=False, negcolor=None, poscolor=None, absolute_width=False, **kwargs
):
"""
Apply bar or barh command. Support default "minima" at zero.
"""
# Parse args
# TODO: Stacked feature is implemented in `apply_cycle`, but makes more
# sense to document here. Figure out a way to move it here?
method = kwargs.pop('_method')
name = method.__name__
stack = _not_none(stack=stack, stacked=stacked)
color = _not_none(color=color, facecolor=facecolor)
linewidth = _not_none(lw=lw, linewidth=linewidth, default=rc['patch.linewidth'])
kwargs.update({'linewidth': linewidth, 'edgecolor': edgecolor})
args = list(args)
if len(args) > 4:
raise ValueError(f'Expected 1-4 positional args, got {len(args)}.')
if len(args) == 4 and stack:
warnings._warn_proplot(
f'{name}() cannot have four positional arguments with stack=True. '
'Ignoring fourth argument.' # i.e. ignore default 'bottom'
)
if len(args) == 2:
args.append(np.array([0.8])) # default width
if len(args) == 3:
args.append(np.array([0.0])) # default base
# Call func after converting bar width
x, h, w, b = args
reduce = any(kwargs.get(s) for s in ('mean', 'means', 'median', 'medians'))
absolute_width = absolute_width or getattr(self, '_bar_absolute_width', False)
if not stack and not absolute_width:
ncols = 1 if reduce or h.ndim == 1 else h.shape[1]
w = _convert_bar_width(x, w, ncols=ncols)
if not negpos:
# Draw simple bars
kwargs['stack'] = stack
if color is not None:
kwargs['color'] = color
return method(self, x, h, w, b, **kwargs)
else:
# Draw negative and positive bars
_check_negpos(name, stack=stack, color=color)
if x.ndim > 1 or h.ndim > 1:
raise ValueError(f'{name}() arguments with negpos=True must be 1D.')
hneg = _mask_array(h < b, h)
color = _not_none(negcolor, rc['negcolor'])
negobj = method(self, x, hneg, w, b, facecolor=color, **kwargs)
hpos = _mask_array(h >= b, h)
color = _not_none(poscolor, rc['poscolor'])
posobj = method(self, x, hpos, w, b, facecolor=color, **kwargs)
return (negobj, posobj)
@docstring.add_snippets
def bar_extras(self, *args, **kwargs):
"""
%(axes.bar)s
"""
return _apply_bar(self, *args, **kwargs)
@docstring.add_snippets
def barh_extras(self, *args, **kwargs):
"""
%(axes.barh)s
"""
return _apply_bar(self, *args, **kwargs)
def boxplot_extras(
self, *args,
mean=None, means=None,
fill=True, fillcolor=None, fillalpha=None,
lw=None, linewidth=None,
color=None, edgecolor=None,
boxcolor=None, boxlw=None, boxlinewidth=None,
capcolor=None, caplw=None, caplinewidth=None,
whiskercolor=None, whiskerlw=None, whiskerlinewidth=None,
fliercolor=None, flierlw=None, flierlinewidth=None, # fliers have no line width
meancolor=None, meanlw=None, meanlinewidth=None,
mediancolor=None, medianlw=None, medianlinewidth=None,
meanls=None, meanlinestyle=None, medianls=None, medianlinestyle=None,
marker=None, markersize=None,
**kwargs
):
"""
Support convenient keyword arguments and change the default boxplot style.
Important
---------
This function wraps `~matplotlib.axes.Axes.boxplot`.
Parameters
----------
*args : 1D or 2D ndarray
The data array.
vert : bool, optional
If ``False``, box plots are drawn horizontally. Otherwise, box plots
are drawn vertically.
orientation : {{None, 'vertical', 'horizontal'}}, optional
Alternative to the native `vert` keyword arg. Added for
consistency with the rest of matplotlib.
mean, means : bool, optional
If ``True``, this passes ``showmeans=True`` and ``meanline=True`` to
`~matplotlib.axes.Axes.boxplot`.
fill : bool, optional
Whether to fill the box with a color.
fillcolor : color-spec, list, optional
The fill color for the boxes. Default is the next color cycler color. If
a list, it should be the same length as the number of objects.
fillalpha : float, optional
The opacity of the boxes. Default is ``0.7``. If a list, should be
the same length as the number of objects.
lw, linewidth : float, optional
The linewidth of all objects. Default is ``0.8``.
color, edgecolor : color-spec, list, optional
The color of all objects. Default is ``'black'``. If a list, it should
be the same length as the number of objects.
meanls, medianls, meanlinestyle, medianlinestyle : line style-spec, optional
The line style for the mean and median lines drawn horizontally
across the box.
boxcolor, capcolor, whiskercolor, fliercolor, meancolor, mediancolor \
: color-spec, list, optional
The color of various boxplot components. If a list, it should be the
same length as the number of objects. These are shorthands so you don't
have to pass e.g. a ``boxprops`` dictionary.
boxlw, caplw, whiskerlw, flierlw, meanlw, medianlw, boxlinewidth, caplinewidth, \
meanlinewidth, medianlinewidth, whiskerlinewidth, flierlinewidth : float, optional
The line width of various boxplot components. These are shorthands so
you don't have to pass e.g. a ``boxprops`` dictionary.
marker : marker-spec, optional
Marker style for the 'fliers', i.e. outliers.
markersize : float, optional
Marker size for the 'fliers', i.e. outliers.
Other parameters
----------------
**kwargs
Passed to `~matplotlib.axes.Axes.boxplot`.
See also
--------
matplotlib.axes.Axes.boxplot
proplot.axes.Axes.boxes
standardize_1d
indicate_error
apply_cycle
"""
# Parse keyword args
# NOTE: For some reason native violinplot() uses _get_lines for
# property cycler. We do the same here.
method = kwargs.pop('_method')
fill = fill is True or fillcolor is not None or fillalpha is not None
fillalpha = _not_none(fillalpha, default=0.7)
if fill and fillcolor is None:
cycler = next(self._get_lines.prop_cycler)
fillcolor = cycler.get('color', None)
color = _not_none(color=color, edgecolor=edgecolor, default='black')
linewidth = _not_none(lw=lw, linewidth=linewidth, default=0.8)
boxlinewidth = _not_none(boxlw=boxlw, boxlinewidth=boxlinewidth)
caplinewidth = _not_none(caplw=caplw, caplinewidth=caplinewidth)
whiskerlinewidth = _not_none(whiskerlw=whiskerlw, whiskerlinewidth=whiskerlinewidth)
flierlinewidth = _not_none(flierlw=flierlw, flierlinewidth=flierlinewidth)
meanlinewidth = _not_none(meanlw=meanlw, meanlinewidth=meanlinewidth)
medianlinewidth = _not_none(medianlw=medianlw, medianlinewidth=medianlinewidth)
meanlinestyle = _not_none(meanls=meanls, meanlinestyle=meanlinestyle)
medianlinestyle = _not_none(medianls=medianls, medianlinestyle=medianlinestyle)
means = _not_none(mean=mean, means=means, showmeans=kwargs.get('showmeans'))
if means:
kwargs['showmeans'] = kwargs['meanline'] = True
# Call function
obj = method(self, *args, **kwargs)
# Modify artist settings
# TODO: Pass props keyword args instead? Maybe does not matter.
for key, icolor, ilinewidth, ilinestyle in (
('boxes', boxcolor, boxlinewidth, None),
('caps', capcolor, caplinewidth, None),
('whiskers', whiskercolor, whiskerlinewidth, None),
('fliers', fliercolor, flierlinewidth, None),
('means', meancolor, meanlinewidth, meanlinestyle),
('medians', mediancolor, medianlinewidth, medianlinestyle),
):
if key not in obj: # possible if not rendered
continue
artists = obj[key]
icolor = _not_none(icolor, color)
ilinewidth = _not_none(ilinewidth, linewidth)
if not isinstance(fillalpha, list):
fillalpha = [fillalpha] * len(artists)
if not isinstance(fillcolor, list):
fillcolor = [fillcolor] * len(artists)
for i, artist in enumerate(artists):
# Lines used for boxplot components
jcolor = icolor
if isinstance(icolor, list):
jcolor = icolor[i // 2 if key in ('caps', 'whiskers') else i]
if ilinestyle is not None:
artist.set_linestyle(ilinestyle)
if ilinewidth is not None:
artist.set_linewidth(ilinewidth)
artist.set_markeredgewidth(ilinewidth)
if jcolor is not None:
artist.set_color(jcolor)
artist.set_markeredgecolor(jcolor)
# "Filled" boxplot by adding patch beneath line path
if fill and key == 'boxes':
patch = mpatches.PathPatch(
artist.get_path(), linewidth=0,
facecolor=fillcolor[i], alpha=fillalpha[i]
)
self.add_artist(patch)
# Outlier markers
if key == 'fliers':
if marker is not None:
artist.set_marker(marker)
if markersize is not None:
artist.set_markersize(markersize)
return obj
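# A minimal usage sketch for the wrapper above (hypothetical names: assumes a
# proplot figure created with ``fig, ax = pplt.subplots()`` and a 2D array
# ``data`` with one box per column):
#
#     ax.boxplot(data, fill=True, fillalpha=0.5, mean=True,
#                color='black', lw=1, marker='x', markersize=4)
#
# The shorthand keywords (``fillalpha``, ``mean``, ``marker`` and friends) stand
# in for the ``boxprops``/``meanprops``/``flierprops`` dictionaries that a native
# matplotlib ``boxplot`` call would otherwise require.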
def violinplot_extras(
self, *args, fillcolor=None, fillalpha=None,
lw=None, linewidth=None, color=None, edgecolor=None, **kwargs
):
"""
Support convenient keyword arguments and change the default violinplot style
to match `this matplotlib example \
<https://matplotlib.org/stable/gallery/statistics/customized_violin.html>`__.
It is also no longer possible to show minima and maxima with whiskers --
while this is useful for `~matplotlib.axes.Axes.boxplot`\\ s it is
redundant for `~matplotlib.axes.Axes.violinplot`\\ s.
Important
---------
This function wraps `~matplotlib.axes.Axes.violinplot`.
Parameters
----------
*args : 1D or 2D ndarray
The data array.
vert : bool, optional
If ``False``, violin plots are drawn horizontally. Otherwise,
violin plots are drawn vertically.
orientation : {{None, 'vertical', 'horizontal'}}, optional
Alternative to the native `vert` keyword arg.
Added for consistency with the rest of matplotlib.
fillcolor : color-spec, list, optional
The violin plot fill color. Default is the next color cycler color. If
a list, it should be the same length as the number of objects.
fillalpha : float, optional
The opacity of the violins. Default is ``0.7``. If a list, it
should be the same length as the number of objects.
lw, linewidth : float, optional
The linewidth of the line objects. Default is ``0.8``.
color, edgecolor : color-spec, list, optional
The edge color for the violin patches. Default is ``'black'``. If a
list, it should be the same length as the number of objects.
Other parameters
----------------
**kwargs
Passed to `~matplotlib.axes.Axes.violinplot`.
See also
--------
matplotlib.axes.Axes.violinplot
proplot.axes.Axes.violins
standardize_1d
indicate_error
apply_cycle
"""
# Parse keyword args
method = kwargs.pop('_method')
color = _not_none(color=color, edgecolor=edgecolor, default='black')
linewidth = _not_none(lw=lw, linewidth=linewidth, default=0.8)
fillalpha = _not_none(fillalpha, default=0.7)
kwargs.setdefault('capsize', 0) # caps are redundant for violin plots
kwargs.setdefault('means', kwargs.pop('showmeans', None)) # for indicate_error
kwargs.setdefault('medians', kwargs.pop('showmedians', None))
if kwargs.pop('showextrema', None):
warnings._warn_proplot('Ignoring showextrema=True.')
# Call function
kwargs.update({'showmeans': False, 'showmedians': False, 'showextrema': False})
obj = result = method(self, *args, linewidth=linewidth, **kwargs)
if not args:
return result
# Modify body settings
if isinstance(obj, (list, tuple)):
obj = obj[0]
artists = obj['bodies']
if not isinstance(fillalpha, list):
fillalpha = [fillalpha] * len(artists)
if not isinstance(fillcolor, list):
fillcolor = [fillcolor] * len(artists)
if not isinstance(color, list):
color = [color] * len(artists)
for i, artist in enumerate(artists):
artist.set_linewidths(linewidth)
if fillalpha[i] is not None:
artist.set_alpha(fillalpha[i])
if fillcolor[i] is not None:
artist.set_facecolor(fillcolor[i])
if color[i] is not None:
artist.set_edgecolor(color[i])
return result
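# A minimal usage sketch for the wrapper above (hypothetical names: assumes
# ``fig, ax = pplt.subplots()`` and a 2D array ``data`` with one violin per column):
#
#     ax.violinplot(data, fillcolor='steelblue', fillalpha=0.7,
#                   edgecolor='black', lw=0.8)
#
# Extrema whiskers are suppressed and means/medians are delegated to the
# ``indicate_error`` machinery, matching the behavior coded above.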
def _get_transform(self, transform):
"""
Translates user input transform. Also used in an axes method.
"""
try:
from cartopy.crs import CRS
except ModuleNotFoundError:
CRS = None
cartopy = getattr(self, 'name', None) == 'proplot_cartopy'
if (
isinstance(transform, mtransforms.Transform)
or CRS and isinstance(transform, CRS)
):
return transform
elif transform == 'figure':
return self.figure.transFigure
elif transform == 'axes':
return self.transAxes
elif transform == 'data':
return PlateCarree() if cartopy else self.transData
elif cartopy and transform == 'map':
return self.transData
else:
raise ValueError(f'Unknown transform {transform!r}.')
def _update_text(self, props):
"""
Monkey patch that adds pseudo "border" and "bbox" properties to text objects without
wrapping the entire class. Overrides update to facilitate updating inset titles.
"""
props = props.copy() # shallow copy
# Update border
border = props.pop('border', None)
bordercolor = props.pop('bordercolor', 'w')
borderinvert = props.pop('borderinvert', False)
borderwidth = props.pop('borderwidth', 2)
if border:
facecolor, bgcolor = self.get_color(), bordercolor
if borderinvert:
facecolor, bgcolor = bgcolor, facecolor
kwargs = {
'linewidth': borderwidth,
'foreground': bgcolor,
'joinstyle': 'miter',
}
self.update({
'color': facecolor,
'path_effects': [mpatheffects.Stroke(**kwargs), mpatheffects.Normal()],
})
elif border is False:
self.update({
'path_effects': None,
})
# Update bounding box
# NOTE: We use '_title_pad' and '_title_above' for both titles and a-b-c labels
# because we always want to keep them aligned.
# NOTE: For some reason using pad / 10 results in perfect alignment. Matplotlib
# docs are vague about bounding box units, maybe they are tens of points?
bbox = props.pop('bbox', None)
bboxcolor = props.pop('bboxcolor', 'w')
bboxstyle = props.pop('bboxstyle', 'round')
bboxalpha = props.pop('bboxalpha', 0.5)
bboxpad = _not_none(props.pop('bboxpad', None), self.axes._title_pad / 10)
if isinstance(bbox, dict): # *native* matplotlib usage
props['bbox'] = bbox
elif bbox:
self.set_bbox({
'edgecolor': 'black',
'facecolor': bboxcolor,
'boxstyle': bboxstyle,
'alpha': bboxalpha,
'pad': bboxpad,
})
elif bbox is False:
self.set_bbox(None) # disables the bbox
return type(self).update(self, props)
def text_extras(
self, x=0, y=0, s='', transform='data', *,
family=None, fontfamily=None, fontname=None, fontsize=None, size=None,
border=False, bordercolor='w', borderwidth=2, borderinvert=False,
bbox=False, bboxcolor='w', bboxstyle='round', bboxalpha=0.5, bboxpad=None, **kwargs
):
"""
Support specifying the coordinate `transform` with a string name and
drawing white borders and bounding boxes around the text.
Important
---------
This function wraps {methods}
Parameters
----------
x, y : float
The *x* and *y* coordinates for the text.
s : str
The text string.
transform \
: {{'data', 'axes', 'figure'}} or `~matplotlib.transforms.Transform`, optional
The transform used to interpret `x` and `y`. Can be a
`~matplotlib.transforms.Transform` object or a string corresponding to
`~matplotlib.axes.Axes.transData`, `~matplotlib.axes.Axes.transAxes`,
or `~matplotlib.figure.Figure.transFigure` transforms. Default is
``'data'``, i.e. the text is positioned in data coordinates.
fontsize, size : float or str, optional
The font size. If float, units are inches. If string, units are
interpreted by `~proplot.utils.units`.
fontname, fontfamily, family : str, optional
The font name (e.g. ``'Fira Math'``) or font family name (e.g. ``'serif'``).
Matplotlib falls back to the system default if not found.
fontweight, weight, fontstyle, style, fontvariant, variant : str, optional
Additional font properties. See `~matplotlib.text.Text` for details.
border : bool, optional
Whether to draw border around text.
borderwidth : float, optional
The width of the text border. Default is ``2`` points.
bordercolor : color-spec, optional
The color of the text border. Default is ``'w'``.
borderinvert : bool, optional
If ``True``, the text and border colors are swapped.
bbox : bool, optional
Whether to draw a bounding box around text.
bboxcolor : color-spec, optional
The color of the text bounding box. Default is ``'w'``.
bboxstyle : boxstyle, optional
The style of the bounding box. Default is ``'round'``.
bboxalpha : float, optional
The alpha for the bounding box. Default is ``0.5``.
bboxpad : float, optional
The padding for the bounding box. Default is :rc:`title.bboxpad`.
Other parameters
----------------
**kwargs
Passed to `~matplotlib.axes.Axes.text`.
See also
--------
matplotlib.axes.Axes.text
"""
# Parse input args
# NOTE: Previously issued warning if fontname did not match any of names
# in ttflist but this would result in warning for e.g. family='sans-serif'.
# Matplotlib font API makes it very difficult to inject warning in
# correct place. Simpler to just let matplotlib fall back to the system default.
# NOTE: Do not emit warning if user supplied conflicting properties
# because matplotlib has like 100 conflicting text properties for which
# it doesn't emit warnings. Prefer not to fix all of them.
method = kwargs.pop('_method')
fontsize = _not_none(fontsize, size)
fontfamily = _not_none(fontname, fontfamily, family)
if fontsize is not None:
if fontsize in mfonts.font_scalings:
fontsize = rc._scale_font(fontsize)
else:
fontsize = units(fontsize, 'pt')
kwargs['fontsize'] = fontsize
if fontfamily is not None:
kwargs['fontfamily'] = fontfamily
if not transform:
transform = self.transData
else:
transform = _get_transform(self, transform)
# Apply monkey patch to text object
# TODO: Why only support this here, and not in arbitrary places throughout
# rest of matplotlib API? Units engine needs better implementation.
obj = method(self, x, y, s, transform=transform, **kwargs)
obj.update = _update_text.__get__(obj)
obj.update({
'border': border,
'bordercolor': bordercolor,
'borderinvert': borderinvert,
'borderwidth': borderwidth,
'bbox': bbox,
'bboxcolor': bboxcolor,
'bboxstyle': bboxstyle,
'bboxalpha': bboxalpha,
'bboxpad': bboxpad,
})
return obj
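# A minimal usage sketch for the wrapper above (hypothetical names: assumes an
# existing axes ``ax``):
#
#     ax.text(0.1, 0.9, 'annotation', transform='axes',
#             border=True, bordercolor='w', borderwidth=2,
#             bbox=True, bboxcolor='w', bboxalpha=0.5)
#
# The string ``transform='axes'`` is resolved by ``_get_transform`` to
# ``ax.transAxes``; ``'data'`` and ``'figure'`` are handled the same way.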
def _iter_objs_labels(objs):
"""
Retrieve the (object, label) pairs for objects with actual labels
from nested lists and tuples of objects.
"""
# Account for (1) multiple columns of data, (2) functions that return
# multiple values (e.g. hist() returns (bins, values, patches)), and
# (3) matplotlib.Collection list subclasses.
label = _get_label(objs)
if label:
yield (objs, label)
elif isinstance(objs, list):
for obj in objs:
yield from _iter_objs_labels(obj)
def _update_cycle(self, cycle, scatter=False, **kwargs):
"""
Try to update the `~cycler.Cycler` without resetting it if it has not changed.
Also return keys that should be explicitly iterated over for commands that
otherwise don't use the property cycler (currently just scatter).
"""
# Get the original property cycle
# NOTE: Matplotlib saves itertools.cycle(cycler), not the original
# cycler object, so we must build up the keys again.
# NOTE: Axes cycle has no getter, only set_prop_cycle, which sets a
# prop_cycler attribute on the hidden _get_lines and _get_patches_for_fill
# objects. This is the only way to query current axes cycler! Should not
# wrap set_prop_cycle because would get messy and fragile.
# NOTE: The _get_lines cycler is an *itertools cycler*. Has no length, so
# we must cycle over it with next(). We try calling next() the same number
# of times as the length of input cycle. If the input cycle *is* in fact
# the same, this does not reset the color position; it simply cycles us back to the start.
i = 0
by_key = {}
cycle_orig = self._get_lines.prop_cycler
for i in range(len(cycle)): # use the cycler object length as a guess
prop = next(cycle_orig)
for key, value in prop.items():
if key not in by_key:
by_key[key] = set()
if isinstance(value, (list, np.ndarray)):
value = tuple(value)
by_key[key].add(value)
# Reset property cycler if it differs
reset = set(by_key) != set(cycle.by_key())
if not reset: # test individual entries
for key, value in cycle.by_key().items():
if by_key[key] != set(value):
reset = True
break
if reset:
self.set_prop_cycle(cycle) # updates both _get_lines and _get_patches_for_fill
# Pseudo-expansion of matplotlib's property cycling for scatter(). Return dict
# of cycle keys and translated scatter() keywords for those not specified by user
# NOTE: This is similar to _process_plot_var_args._getdefaults but want to rely
# minimally on private API.
# NOTE: By default matplotlib uses the property cycler in _get_patches_for_fill
# for scatter() plots. It also only inherits color from that cycler. We instead
# use _get_lines with scatter() to help overarching goal of unifying plot() and
# scatter(). Now shading/bars loop over one cycle, plot/scatter along another.
apply_manually = {} # which keys to apply from property cycler
if scatter:
parser = self._get_lines # the _process_plot_var_args instance
prop_keys = set(parser._prop_keys) - {'color', 'linestyle', 'dashes'}
for prop, key in (
('markersize', 's'),
('linewidth', 'linewidths'),
('markeredgewidth', 'linewidths'),
('markeredgecolor', 'edgecolors'),
('alpha', 'alpha'),
('marker', 'marker'),
):
value = kwargs.get(key, None)  # an apply_cycle argument
if prop in prop_keys and value is None: # if key in cycler and prop unset
apply_manually[prop] = key
return apply_manually # set indicating additional keys we cycle through
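# Standalone illustration of the comparison performed above (not part of the
# module; plain cycler API): the keys and values rebuilt via next() are compared
# against ``Cycler.by_key()``, e.g.
#
#     from cycler import cycler
#     cycler(color=['r', 'g', 'b']).by_key()   # {'color': ['r', 'g', 'b']}
#
# so an unchanged cycle leaves the current color position untouched, while a
# different one triggers ``set_prop_cycle`` and restarts the cycle.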
def apply_cycle(
self, *args,
cycle=None, cycle_kw=None,
label=None, labels=None, values=None,
legend=None, legend_kw=None,
colorbar=None, colorbar_kw=None,
**kwargs
):
"""
Support on-the-fly creation and application of property cycles, and support
on-the-fly legends and colorbars.
Important
---------
This function wraps {methods}
Parameters
----------
cycle : cycle-spec, optional
The cycle specifier, passed to the `~proplot.constructor.Cycle`
constructor. If the returned list of colors is unchanged from the
current axes color cycler, the axes cycle will **not** be reset to the
first position.
cycle_kw : dict-like, optional
Passed to `~proplot.constructor.Cycle`.
label : float or str, optional
The legend label to be used for this plotted element.
labels, values : list of float or list of str, optional
Used with 2D input arrays. The legend labels or colorbar coordinates for
each column in the array. Can be numeric or string, and must match
the number of columns in the 2D array.
legend : bool, int, or str, optional
If not ``None``, this is a location specifying where to draw an *inset*
or *panel* legend from the resulting handle(s). If ``True``, the
default location is used. Valid locations are described in
`~proplot.axes.Axes.legend`.
legend_kw : dict-like, optional
Ignored if `legend` is ``None``. Extra keyword args for the call
to `~proplot.axes.Axes.legend`.
colorbar : bool, int, or str, optional
If not ``None``, this is a location specifying where to draw an *inset*
or *panel* colorbar from the resulting handle(s). If ``True``, the
default location is used. Valid locations are described in
`~proplot.axes.Axes.colorbar`.
colorbar_kw : dict-like, optional
Ignored if `colorbar` is ``None``. Extra keyword args for the call
to `~proplot.axes.Axes.colorbar`.
Other parameters
----------------
*args, **kwargs
Passed to the matplotlib plotting method.
See also
--------
standardize_1d
indicate_error
proplot.constructor.Cycle
proplot.constructor.Colors
"""
# Parse input arguments
# NOTE: Requires standardize_1d wrapper before reaching this.
method = kwargs.pop('_method')
errobjs = kwargs.pop('_errobjs', None)
name = method.__name__
plot = name in ('plot',)
scatter = name in ('scatter',)
fill = name in ('fill_between', 'fill_betweenx')
bar = name in ('bar', 'barh')
lines = name in ('vlines', 'hlines')
box = name in ('boxplot', 'violinplot')
violin = name in ('violinplot',)
hist = name in ('hist',)
pie = name in ('pie',)
cycle_kw = cycle_kw or {}
legend_kw = legend_kw or {}
colorbar_kw = colorbar_kw or {}
labels = _not_none(label=label, values=values, labels=labels)
# Special cases
# TODO: Support stacking vlines/hlines because it would be really easy
x, y, *args = args
stack = False
if lines or fill or bar:
stack = kwargs.pop('stack', False)
elif 'stack' in kwargs:
raise TypeError(f"{name}() got unexpected keyword argument 'stack'.")
if fill or lines:
ncols = max(1 if iy.ndim == 1 else iy.shape[1] for iy in (y, args[0]))
else:
ncols = 1 if pie or box or y.ndim == 1 else y.shape[1]
# Update the property cycler
apply_manually = {}
if cycle is not None or cycle_kw:
if y.ndim > 1 and y.shape[1] > 1: # default samples count
cycle_kw.setdefault('N', y.shape[1])
cycle_args = () if cycle is None else (cycle,)
cycle = constructor.Cycle(*cycle_args, **cycle_kw)
apply_manually = _update_cycle(self, cycle, scatter=scatter, **kwargs)
# Handle legend labels
if pie or box:
# Functions handle multiple labels on their own
# NOTE: Using boxplot() without this will overwrite labels previously
# set along the x-axis by _auto_format_1d.
if not violin and labels is not None:
kwargs['labels'] = _to_ndarray(labels)
else:
# Check and standardize labels
# NOTE: Must convert to ndarray or can get singleton DataArrays
if not np.iterable(labels) or isinstance(labels, str):
labels = [labels] * ncols
if len(labels) != ncols:
raise ValueError(f'Array has {ncols} columns but got {len(labels)} labels.')
if labels is not None:
labels = [str(_not_none(label, '')) for label in _to_ndarray(labels)]
else:
labels = [None] * ncols
# Plot successive columns
objs = []
for i in range(ncols):
# Property cycling for scatter plots
# NOTE: See comments in _update_cycle
ikwargs = kwargs.copy()
if apply_manually:
props = next(self._get_lines.prop_cycler)
for prop, key in apply_manually.items():
ikwargs[key] = props[prop]
# The x coordinates for bar plots
ix, iy, iargs = x, y, args.copy()
offset = 0
if bar and not stack:
offset = iargs[0] * (i - 0.5 * (ncols - 1)) # 3rd positional arg is 'width'
ix = x + offset
# The y coordinates for stacked plots
# NOTE: If stack=True then we always *ignore* second argument passed
# to area or lines. Warning should be issued by 'extras' wrappers.
if stack and ncols > 1:
if bar:
iargs[1] = iy[:, :i].sum(axis=1) # the new 'bottom'
else:
iy = iargs[0] # for vlines, hlines, area, areax
ys = (iy if iy.ndim == 1 else iy[:, :j].sum(axis=1) for j in (i, i + 1))
iy, iargs[0] = ys
# The y coordinates and labels
# NOTE: Support 1D x, 2D y1, and 2D y2 input
if not pie and not box: # only ever have one y value
iy, *iargs = (
arg if not isinstance(arg, np.ndarray) or arg.ndim == 1
else arg[:, i] for arg in (iy, *iargs)
)
ikwargs['label'] = labels[i] or None
# Call function for relevant column
# NOTE: Should have already applied 'x' coordinates with keywords
# or as labels by this point for funcs where we omit them. Also note
# hist() does not pass kwargs to bar() so need this funky state context.
with _state_context(self, _bar_absolute_width=True, _no_sticky_edges=True):
if pie or box or hist:
obj = method(self, iy, *iargs, **ikwargs)
else:
obj = method(self, ix, iy, *iargs, **ikwargs)
if isinstance(obj, (list, tuple)) and len(obj) == 1:
obj = obj[0]
objs.append(obj)
# Add colorbar
# NOTE: Colorbar will get the labels from the artists. Don't need to extract
# them because can't have multiple-artist entries like for legend()
if colorbar:
self.colorbar(objs, loc=colorbar, queue=True, **colorbar_kw)
# Add legend
# NOTE: Put error bounds objects *before* line objects in the tuple
# so that line gets drawn on top of bounds.
# NOTE: If error objects have separate label, allocate separate legend entry.
# If they do not, try to combine with current legend entry.
if legend:
if not isinstance(errobjs, (list, tuple)):
errobjs = (errobjs,)
eobjs = [obj for obj in errobjs if obj and not _get_label(obj)]
hobjs = [(*eobjs[::-1], *objs)] if eobjs else objs.copy()
hobjs.extend(obj for obj in errobjs if obj and _get_label(obj))
try:
hobjs, labels = list(zip(*_iter_objs_labels(hobjs)))
except ValueError:
hobjs = labels = ()
self.legend(hobjs, labels, loc=legend, queue=True, **legend_kw)
# Return
# WARNING: Make sure plot always returns tuple of objects, and bar always
# returns singleton unless we have bulk drawn bar plots! Other matplotlib
# methods call these internally and expect a certain output format!
if plot:
return tuple(objs) # always return tuple of objects
else:
return objs[0] if len(objs) == 1 else tuple(objs)
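# A minimal usage sketch of the cycle/legend machinery above (hypothetical
# names: assumes ``fig, ax = pplt.subplots()``, a 1D ``x`` and a 2D ``ys`` with
# three columns):
#
#     ax.plot(x, ys, cycle='viridis', labels=['a', 'b', 'c'], legend='b')
#
# Each column is drawn in successive cycle colors and the handles are queued
# for a legend along the bottom of the axes.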
def _adjust_inbounds(self, x, y, z, *, centers=False):
"""
Adjust the sample based on the axis limits.
"""
# Get masks
# TODO: Expand this to selecting x-limits giving scales y-limits, vice versa?
# NOTE: X and Y coordinates were sanitized by standardize_2d when passed here.
xmask = ymask = None
if any(_ is None or _.ndim not in (1, 2) for _ in (x, y, z)):
return z
if centers and z.ndim == 2:
x, y = _enforce_centers(x, y, z)
if not self.get_autoscalex_on():
xlim = self.get_xlim()
xmask = (x >= xlim[0]) & (x <= xlim[1])
if not self.get_autoscaley_on():
ylim = self.get_ylim()
ymask = (y >= ylim[0]) & (y <= ylim[1])
# Subsample
if xmask is not None and ymask is not None:
z = z[np.ix_(ymask, xmask)] if z.ndim == 2 and xmask.ndim == 1 else z[ymask & xmask] # noqa: E501
elif xmask is not None:
z = z[:, xmask] if z.ndim == 2 and xmask.ndim == 1 else z[xmask]
elif ymask is not None:
z = z[ymask, :] if z.ndim == 2 and ymask.ndim == 1 else z[ymask]
return z
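# Standalone illustration of the subsampling used above (plain numpy): for a 2D
# ``z`` with 1D boolean masks, ``np.ix_`` builds an open mesh so that
#
#     z = np.arange(12).reshape(3, 4)
#     z[np.ix_([True, False, True], [False, True, True, False])]
#
# selects the 2x2 block at the intersection of the kept rows and columns.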
def _auto_levels_locator(
self, *args, N=None, norm=None, norm_kw=None, extend='neither',
vmin=None, vmax=None, locator=None, locator_kw=None,
symmetric=False, positive=False, negative=False, nozero=False,
inbounds=None, centers=False, counts=False,
):
"""
Automatically generate level locations based on the input data, the
input locator, and the input normalizer.
Parameters
----------
*args
The x, y, z, data.
N : int, optional
The (approximate) number of levels to create.
norm, norm_kw
Passed to `~proplot.constructor.Norm`. Used to determine suitable
level locations if `locator` is not passed.
extend : str, optional
The extend setting.
vmin, vmax : float, optional
The data limits.
locator, locator_kw
Passed to `~proplot.constructor.Locator`. Used to determine suitable
level locations.
symmetric, positive, negative : bool, optional
Whether the automatic levels should be symmetric, should be all positive
with a minimum at zero, or should be all negative with a maximum at zero.
nozero : bool, optional
Whether zero should be excluded from the levels.
inbounds : bool, optional
Whether to filter to in-bounds data.
centers : bool, optional
Whether to convert coordinates to 'centers'.
counts : bool, optional
Whether to guesstimate histogram counts rather than use data.
Returns
-------
levels : ndarray
The levels.
locator : ndarray or `matplotlib.ticker.Locator`
The locator used for colorbar tick locations.
"""
# Input args
norm_kw = norm_kw or {}
locator_kw = locator_kw or {}
inbounds = _not_none(inbounds, rc['image.inbounds'])
N = _not_none(N, rc['image.levels'])
if np.iterable(N):
# Included so we can use this to apply positive, negative, nozero
levels = tick_locator = N
else:
# Get default locator based on input norm
norm = constructor.Norm(norm or 'linear', **norm_kw)
if positive and negative:
raise ValueError('Incompatible options: positive=True and negative=True.')
if locator is not None:
level_locator = tick_locator = constructor.Locator(locator, **locator_kw)
elif isinstance(norm, mcolors.LogNorm):
level_locator = tick_locator = mticker.LogLocator(**locator_kw)
elif isinstance(norm, mcolors.SymLogNorm):
locator_kw.setdefault('base', _getattr_flexible(norm, 'base', 10))
locator_kw.setdefault('linthresh', _getattr_flexible(norm, 'linthresh', 1))
level_locator = tick_locator = mticker.SymmetricalLogLocator(**locator_kw)
else:
nbins = N * 2 if positive or negative else N
locator_kw.setdefault('symmetric', symmetric or positive or negative)
level_locator = mticker.MaxNLocator(nbins, min_n_ticks=1, **locator_kw)
tick_locator = None
# Get level locations
# NOTE: Critical to use _to_arraylike here because some commands
# are unstandardized.
# NOTE: Try to get reasonable *count* levels for hexbin/hist2d, but in
# general have no way to select nice ones a priori which is why discrete
# is disabled by default.
automin = vmin is None
automax = vmax is None
if automin or automax:
# Get sample data
x = y = None
if len(args) < 3:
zs = map(_to_arraylike, args)
else:
x, y, *zs = map(_to_arraylike, args)
vmins, vmaxs = [], []
for z in zs:
# Restrict to in-bounds data
# Use catch-all exception because it really isn't mission-critical.
if z.ndim > 2:
continue # 3D imshow plots will ignore the cmap we give it
if counts:
z = np.array([0, z.size]) // 10
elif inbounds:
# WARNING: Experimental, seems robust but this is not
# mission-critical so keep this try-except clause for now.
try:
z = _adjust_inbounds(self, x, y, z, centers=centers)
except Exception:
warnings._warn_proplot(
'Failed to adjust bounds for automatic colormap normalization.' # noqa: E501
)
# Mask invalid data
z = ma.masked_invalid(z, copy=False)
if automin:
vmin = float(z.min())
if automax:
vmax = float(z.max())
if vmin == vmax or ma.is_masked(vmin) or ma.is_masked(vmax):
vmin, vmax = 0, 1
vmins.append(vmin)
vmaxs.append(vmax)
if vmins:
vmin, vmax = min(vmins), max(vmaxs)
else:
vmin, vmax = 0, 1 # simple default
try:
levels = level_locator.tick_values(vmin, vmax)
except RuntimeError: # too-many-ticks error
levels = np.linspace(vmin, vmax, N) # TODO: _autolev used N+1
# Trim excess levels the locator may have supplied
# NOTE: This part is mostly copied from matplotlib _autolev
if not locator_kw.get('symmetric', None):
i0, i1 = 0, len(levels) # defaults
under, = np.where(levels < vmin)
if len(under):
i0 = under[-1]
if not automin or extend in ('min', 'both'):
i0 += 1 # permit out-of-bounds data
over, = np.where(levels > vmax)
if len(over):
i1 = over[0] + 1
if not automax or extend in ('max', 'both'):
i1 -= 1 # permit out-of-bounds data
if i1 - i0 < 3:
i0, i1 = 0, len(levels) # revert
levels = levels[i0:i1]
# Compare the no. of levels we *got* (levels) to what we *wanted* (N)
# If we wanted more than 2 times the result, then add nn - 1 extra
# levels in-between the returned levels *in normalized space*.
# Example: A LogNorm gives too few levels, so we select extra levels
# here, but use the locator for determining tick locations.
nn = N // len(levels)
if nn >= 2:
olevels = norm(levels)
nlevels = []
for i in range(len(levels) - 1):
l1, l2 = olevels[i], olevels[i + 1]
nlevels.extend(np.linspace(l1, l2, nn + 1)[:-1])
nlevels.append(olevels[-1])
levels = norm.inverse(nlevels)
# Filter the remaining contours
if nozero and 0 in levels:
levels = levels[levels != 0]
if positive:
levels = levels[levels >= 0]
if negative:
levels = levels[levels <= 0]
# Use auto-generated levels for ticks if still None
return levels, _not_none(tick_locator, levels)
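# Standalone illustration of the default level selection above (plain
# matplotlib): MaxNLocator picks "nice" boundaries spanning the data range, e.g.
#
#     import matplotlib.ticker as mticker
#     mticker.MaxNLocator(10, min_n_ticks=1).tick_values(0.0, 7.3)
#     # -> roughly array([0., 1., 2., 3., 4., 5., 6., 7., 8.])
#
# which is then trimmed to the data limits and optionally refined in
# normalized space as coded above.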
def _build_discrete_norm(
self, *args, levels=None, values=None, cmap=None, norm=None, norm_kw=None,
extend='neither', vmin=None, vmax=None, minlength=2, **kwargs,
):
"""
Build a `~proplot.colors.DiscreteNorm` or `~proplot.colors.BoundaryNorm`
from the input arguments. This automatically calculates "nice" level
boundaries if they were not provided.
Parameters
----------
*args
The data.
cmap : `matplotlib.colors.Colormap`, optional
The colormap. Passed to `DiscreteNorm`.
norm, norm_kw
Passed to `~proplot.constructor.Norm` and then to `DiscreteNorm`.
extend : str, optional
The extend setting.
levels, N, values : ndarray, optional
The explicit boundaries.
vmin, vmax : float, optional
The minimum and maximum values for the normalizer.
minlength : int, optional
The minimum length for level lists.
**kwargs
Passed to `_auto_levels_locator`.
Returns
-------
norm : `matplotlib.colors.Normalize`
The normalizer.
ticks : `numpy.ndarray` or `matplotlib.ticker.Locator`
The axis locator or the tick location candidates.
"""
# Parse flexible keyword args
norm_kw = norm_kw or {}
levels = _not_none(
levels=levels,
norm_kw_levels=norm_kw.pop('levels', None),
default=rc['image.levels']
)
vmin = _not_none(vmin=vmin, norm_kw_vmin=norm_kw.pop('vmin', None))
vmax = _not_none(vmax=vmax, norm_kw_vmax=norm_kw.pop('vmax', None))
if norm == 'segments': # TODO: remove
norm = 'segmented'
# NOTE: Matplotlib colorbar algorithm *cannot* handle descending levels
# so this function reverses them and adds special attribute to the
# normalizer. Then colorbar_extras reads this attribute and flips the
# axis and the colormap direction.
# Check input levels and values
for key, val in (('levels', levels), ('values', values)):
if not np.iterable(val):
continue
if len(val) < minlength or len(val) >= 2 and any(
np.sign(np.diff(val)) != np.sign(val[1] - val[0])
):
raise ValueError(
f'{key!r} must be monotonically increasing or decreasing '
f'and at least length {minlength}, got {val}.'
)
# Get level edges from level centers
locator = None
if isinstance(values, Integral):
levels = values + 1
elif np.iterable(values) and len(values) == 1:
levels = [values[0] - 1, values[0] + 1] # weird but why not
elif np.iterable(values) and len(values) > 1:
# Try to generate levels such that a LinearSegmentedNorm will
# place values ticks at the center of each colorbar level.
# utils.edges works only for evenly spaced values arrays.
# We solve for: (x1 + x2)/2 = y --> x2 = 2*y - x1
# with arbitrary starting point x1. We also start the algorithm
# on the end with *smaller* differences.
if norm is None or norm == 'segmented':
reverse = abs(values[-1] - values[-2]) < abs(values[1] - values[0])
if reverse:
values = values[::-1]
levels = [values[0] - (values[1] - values[0]) / 2]
for val in values:
levels.append(2 * val - levels[-1])
if reverse:
levels = levels[::-1]
if any(np.sign(np.diff(levels)) != np.sign(levels[1] - levels[0])):
levels = edges(values) # backup plan, weird tick locations
# Generate levels by finding in-between points in the
# normalized numeric space, e.g. LogNorm space.
else:
inorm = constructor.Norm(norm, **norm_kw)
levels = inorm.inverse(edges(inorm(values)))
elif values is not None:
raise ValueError(
f'Unexpected input values={values!r}. '
'Must be integer or list of numbers.'
)
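# Worked example of the recurrence above (not from the source): for
# values = [1, 2, 4] the differences grow to the right, so no reversal is
# needed and the loop yields levels = [0.5, 1.5, 2.5, 5.5]; each value sits at
# the midpoint of its level interval ((0.5 + 1.5)/2 = 1, (1.5 + 2.5)/2 = 2,
# (2.5 + 5.5)/2 = 4).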
# Get default normalizer
# Only use LinearSegmentedNorm if necessary, because it is slow
descending = False
if np.iterable(levels):
    # [source truncated here; completion target: np.iterable(levels), API: numpy.iterable]
#! /usr/bin/python
## Display array antenna locations on EARTH grid using astropy coordinates
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Copied library functions
def shoot(lon, lat, azimuth, maxdist=None):
"""Shooter Function
Original javascript on http://williams.best.vwh.net/gccalc.htm
Translated to python by <NAME>
"""
import numpy as np
glat1 = lat * np.pi / 180.
glon1 = lon * np.pi / 180.
s = maxdist / 1.852
faz = azimuth * np.pi / 180.
EPS= 0.00000000005
if ((np.abs(np.cos(glat1))<EPS) and not (np.abs(np.sin(faz))<EPS)):
alert("Only N-S courses are meaningful, starting at a pole!")
a=6378.13/1.852
f=1/298.257223563
r = 1 - f
tu = r * np.tan(glat1)
sf = np.sin(faz)
cf = np.cos(faz)
# [source truncated here; completion target: np.cos(faz), API: numpy.cos]
from collections import OrderedDict
import copy
import getpass
import itertools
import numpy as np
from scipy import signal
import time
LOCAL_MODE = getpass.getuser() == 'tom'
CONFIG = {
'halite_config_setting_divisor': 1.0,
'collect_smoothed_multiplier': 0.0,
'collect_actual_multiplier': 5.0,
'collect_less_halite_ships_multiplier_base': 0.55,
'collect_base_nearest_distance_exponent': 0.2,
'return_base_multiplier': 8.0,
'return_base_less_halite_ships_multiplier_base': 0.85,
'early_game_return_base_additional_multiplier': 0.1,
'early_game_return_boost_step': 50,
'establish_base_smoothed_multiplier': 0.0,
'establish_first_base_smoothed_multiplier_correction': 2.0,
'establish_base_dm_exponent': 1.1,
'first_base_no_4_way_camping_spot_bonus': 300*0,
'start_camp_if_not_winning': 0,
'max_camper_ship_budget': 2*1,
'relative_step_start_camping': 0.15,
'establish_base_deposit_multiplier': 1.0,
'establish_base_less_halite_ships_multiplier_base': 1.0,
'max_attackers_per_base': 3*1,
'attack_base_multiplier': 300.0,
'attack_base_less_halite_ships_multiplier_base': 0.9,
'attack_base_halite_sum_multiplier': 2.0,
'attack_base_run_opponent_multiplier': 1.0,
'attack_base_catch_opponent_multiplier': 1.0,
'collect_run_opponent_multiplier': 10.0,
'return_base_run_opponent_multiplier': 2.5,
'establish_base_run_opponent_multiplier': 2.5,
'collect_catch_opponent_multiplier': 1.0,
'return_base_catch_opponent_multiplier': 1.0,
'establish_base_catch_opponent_multiplier': 0.5,
'two_step_avoid_boxed_opponent_multiplier_base': 0.7,
'n_step_avoid_boxed_opponent_multiplier_base': 0.45,
'min_consecutive_chase_extrapolate': 6,
'chase_return_base_exponential_bonus': 2.0,
'ignore_catch_prob': 0.3,
'max_initial_ships': 60,
'max_final_ships': 60,
'max_standard_ships_decided_end_pack_hunting': 2,
'nearby_ship_halite_spawn_constant': 3.0,
'nearby_halite_spawn_constant': 5.0,
'remaining_budget_spawn_constant': 0.2,
'spawn_score_threshold': 75.0,
'boxed_in_halite_convert_divisor': 1.0,
'n_step_avoid_min_die_prob_cutoff': 0.05,
'n_step_avoid_window_size': 7,
'influence_map_base_weight': 2.0,
'influence_map_min_ship_weight': 0.0,
'influence_weights_additional_multiplier': 2.0,
'influence_weights_exponent': 8.0,
'escape_influence_prob_divisor': 3.0,
'rescue_ships_in_trouble': 1,
'target_strategic_base_distance': 8.0,
'target_strategic_num_bases_ship_divisor': 9,
'target_strategic_triangle_weight': 20.0, # initially: 20
'target_strategic_independent_base_distance_multiplier': 8.0, # initially 8.0
'target_strategic_influence_desirability_multiplier': 1.0, # initially: 1.0
'target_strategic_potential_divisor': 15.0, # initially: 15.0
'max_spawn_relative_step_divisor': 12.0,
'no_spawn_near_base_ship_limit': 100,
'avoid_cycles': 1,
'max_risk_n_step_risky': 0.5,
'max_steps_n_step_risky': 70,
'log_near_base_distance': 2,
'max_recent_considered_relevant_zero_move_count': 120,
'near_base_2_step_risky_min_count': 50,
'relative_stand_still_collect_boost': 1.5,
'initial_collect_boost_away_from_base': 2.0,
'start_hunting_season_relative_step': 0.1875,
'end_hunting_season_relative_step': 0.75,
'early_hunting_season_less_collect_relative_step': 0.375,
'max_standard_ships_early_hunting_season': 2,
'late_hunting_season_more_collect_relative_step': 0.5,
'late_hunting_season_collect_max_n_step_risk': 0.2,
'after_hunting_season_collect_max_n_step_risk': 0.5,
'late_hunting_season_standard_min_fraction': 0.7,
'max_standard_ships_late_hunting_season': 15,
'collect_on_safe_return_relative_step': 0.075,
'min_halite_to_stop_early_hunt': 15000.0,
'early_best_opponent_relative_step': 0.5,
'surrounding_ships_cycle_extrapolate_step_count': 5,
'surrounding_ships_extended_cycle_extrapolate_step_count': 7,
}
NORTH = "NORTH"
SOUTH = "SOUTH"
EAST = "EAST"
WEST = "WEST"
CONVERT = "CONVERT"
SPAWN = "SPAWN"
NOT_NONE_DIRECTIONS = [NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS = [None, NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS_TO_ID = {None: 0, NORTH: 1, SOUTH: 2, EAST: 3, WEST: 4}
RELATIVE_DIR_MAPPING = {None: (0, 0), NORTH: (-1, 0), SOUTH: (1, 0),
EAST: (0, 1), WEST: (0, -1)}
RELATIVE_DIR_TO_DIRECTION_MAPPING = {
v: k for k, v in RELATIVE_DIR_MAPPING.items()}
OPPOSITE_MAPPING = {None: None, NORTH: SOUTH, SOUTH: NORTH, EAST: WEST,
WEST: EAST}
RELATIVE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)]
RELATIVE_NOT_NONE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
MOVE_GATHER_OPTIONS = [(-1, 0, False), (1, 0, False), (0, -1, False),
(0, 1, False), (0, 0, True)]
TWO_STEP_THREAT_DIRECTIONS = {
(-2, 0): [(-1, 0)],
(-1, -1): [(-1, 0), (0, -1)],
(-1, 0): [(-1, 0), (0, 0)],
(-1, 1): [(-1, 0), (0, 1)],
(0, -2): [(0, -1)],
(0, -1): [(0, -1), (0, 0)],
(0, 1): [(0, 1), (0, 0)],
(0, 2): [(0, 1)],
(1, -1): [(1, 0), (0, -1)],
(1, 0): [(1, 0), (0, 0)],
(1, 1): [(1, 0),(0, 1)],
(2, 0): [(1, 0)],
}
GAUSSIAN_2D_KERNELS = {}
for dim in range(3, 20, 2):
# Modified from https://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_image_blur.html
center_distance = np.floor(np.abs(np.arange(dim) - (dim-1)/2))
horiz_distance = np.tile(center_distance, [dim, 1])
vert_distance = np.tile(np.expand_dims(center_distance, 1), [1, dim])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(dim/4))
kernel[manh_distance > dim/2] = 0
GAUSSIAN_2D_KERNELS[dim] = kernel
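# A plausible application of the kernels built above (an assumption, not taken
# from this file): smoothing a toroidal halite map with wrap-around boundaries
# using the scipy import at the top of the file, e.g.
#
#     smoothed = signal.convolve2d(halite_map, GAUSSIAN_2D_KERNELS[9],
#                                  mode='same', boundary='wrap')
#
# where ``halite_map`` is a hypothetical 2D array of per-cell halite values.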
DISTANCES = {}
DISTANCE_MASKS = {}
HALF_PLANES_CATCH = {}
HALF_PLANES_RUN = {}
ROW_COL_DISTANCE_MASKS = {}
ROW_COL_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS = {}
BOX_DIR_MAX_DISTANCE = 4
BOX_DIRECTION_MASKS = {}
ROW_MASK = {}
COLUMN_MASK = {}
DISTANCE_MASK_DIM = 21
half_distance_mask_dim = int(DISTANCE_MASK_DIM/2)
for row in range(DISTANCE_MASK_DIM):
row_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=bool)
row_mask[row] = 1
col_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=bool)
col_mask[:, row] = 1
ROW_MASK[row] = row_mask
COLUMN_MASK[row] = col_mask
for col in range(DISTANCE_MASK_DIM):
horiz_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - col),
np.abs(np.arange(DISTANCE_MASK_DIM) - col - DISTANCE_MASK_DIM))
horiz_distance = np.minimum(
horiz_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - col + DISTANCE_MASK_DIM))
vert_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - row),
np.abs(np.arange(DISTANCE_MASK_DIM) - row - DISTANCE_MASK_DIM))
vert_distance = np.minimum(
vert_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - row + DISTANCE_MASK_DIM))
horiz_distance = np.tile(horiz_distance, [DISTANCE_MASK_DIM, 1])
vert_distance = np.tile(np.expand_dims(vert_distance, 1),
[1, DISTANCE_MASK_DIM])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(DISTANCE_MASK_DIM/4))
DISTANCE_MASKS[(row, col)] = kernel
DISTANCES[(row, col)] = manh_distance
catch_distance_masks = {}
run_distance_masks = {}
for d in MOVE_DIRECTIONS:
if d is None:
catch_rows = np.array([]).astype(int)
catch_cols = np.array([]).astype(int)
if d == NORTH:
catch_rows = np.mod(row - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == SOUTH:
catch_rows = np.mod(row + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == WEST:
catch_cols = np.mod(col - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == EAST:
catch_cols = np.mod(col + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
catch_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
                      dtype=bool)
catch_mask[catch_rows[:, None], catch_cols] = 1
run_mask = np.copy(catch_mask)
run_mask[row, col] = 1
catch_distance_masks[d] = catch_mask
run_distance_masks[d] = run_mask
if d is not None:
box_dir_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
                        dtype=bool)
box_dir_mask[box_dir_rows[:, None], box_dir_cols] = 1
if d in [NORTH, SOUTH]:
box_dir_mask &= (horiz_distance <= vert_distance)
else:
box_dir_mask &= (horiz_distance >= vert_distance)
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS[(row, col, d)] = box_dir_mask
HALF_PLANES_CATCH[(row, col)] = catch_distance_masks
HALF_PLANES_RUN[(row, col)] = run_distance_masks
for d in range(1, DISTANCE_MASK_DIM):
ROW_COL_DISTANCE_MASKS[(row, col, d)] = manh_distance == d
for d in range(half_distance_mask_dim):
ROW_COL_MAX_DISTANCE_MASKS[(row, col, d)] = manh_distance <= d
ROW_COL_BOX_MAX_DISTANCE_MASKS[(row, col, d)] = np.logical_and(
horiz_distance <= d, vert_distance <= d)
for dist in range(2, half_distance_mask_dim+1):
dist_mask_dim = dist*2+1
row_pos = np.tile(np.expand_dims(np.arange(dist_mask_dim), 1),
[1, dist_mask_dim])
col_pos = np.tile(np.arange(dist_mask_dim), [dist_mask_dim, 1])
for direction in NOT_NONE_DIRECTIONS:
if direction == NORTH:
box_mask = (row_pos < dist) & (np.abs(col_pos-dist)
# [source truncated here; completion target: np.abs(col_pos-dist), API: numpy.abs]
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# test_concrete_models.py: Checks that built-in instances of Model work properly.
##
# © 2017, <NAME> (<EMAIL>) and
# <NAME> (<EMAIL>).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
## FEATURES ###################################################################
from __future__ import absolute_import
from __future__ import division # Ensures that a/b is always a float.
from future.utils import with_metaclass
## IMPORTS ####################################################################
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import numpy.lib.recfunctions as rfn
from qinfer.tests.base_test import (
DerandomizedTestCase,
ConcreteDifferentiableModelTest,
ConcreteModelTest,
ConcreteSimulatableTest,
MockDirectView,
MockModel
)
import abc
from qinfer import (
SimplePrecessionModel, SimpleInversionModel, UnknownT2Model,
CoinModel, NoisyCoinModel, NDieModel,
RandomizedBenchmarkingModel,
PoisonedModel, BinomialModel, MultinomialModel,
MLEModel, RandomWalkModel, GaussianRandomWalkModel,
ProductDistribution,
NormalDistribution,
BetaDistribution, UniformDistribution,
PostselectedDistribution,
ConstrainedSumDistribution, DirichletDistribution,
DirectViewParallelizedModel,
GaussianHyperparameterizedModel
)
from qinfer.ale import ALEApproximateModel
from qinfer.tomography import TomographyModel, DiffusiveTomographyModel, pauli_basis, GinibreDistribution
from qinfer.utils import check_qutip_version, to_simplex, from_simplex
import unittest
# We skip this module entirely under Python 3.3, since there are a lot of
# spurious known failures that still need to be debugged.
import sys
if sys.version_info.major == 3 and sys.version_info.minor <= 3:
raise unittest.SkipTest("Skipping known failures on 3.3.")
## SIMPLE TEST MODELS #########################################################
class TestSimplePrecessionModel(ConcreteDifferentiableModelTest, DerandomizedTestCase):
"""
Tests SimplePrecessionModel.
"""
def instantiate_model(self):
return SimplePrecessionModel()
def instantiate_prior(self):
return UniformDistribution(np.array([[10,12]]))
def instantiate_expparams(self):
return np.arange(10,20).astype(self.model.expparams_dtype)
class TestUnknownT2Model(ConcreteModelTest, DerandomizedTestCase):
"""
Tests UnknownT2Model.
"""
def instantiate_model(self):
return UnknownT2Model()
def instantiate_prior(self):
return UniformDistribution(np.array([[1,8],[1,5]]))
def instantiate_expparams(self):
return np.linspace(0,5,10, dtype=[('t','float')])
class TestSimpleInversionModel(ConcreteDifferentiableModelTest, DerandomizedTestCase):
"""
Tests SimpleInversionModel.
"""
def instantiate_model(self):
return SimpleInversionModel()
def instantiate_prior(self):
return UniformDistribution(np.array([[5,8]]))
def instantiate_expparams(self):
ws = np.linspace(0,0.5,10, dtype=[('w_','float')])
ts = np.linspace(0,5,10, dtype=[('t','float')])
return rfn.merge_arrays([ts, ws])
class TestCoinModel(ConcreteDifferentiableModelTest, DerandomizedTestCase):
"""
Tests CoinModel.
"""
def instantiate_model(self):
return CoinModel()
def instantiate_prior(self):
return BetaDistribution(mean=0.5, var=0.1)
def instantiate_expparams(self):
# only the length of this array matters since CoinModel has no expparams.
return np.ones((10,),dtype=int)
class TestNoisyCoinModel(ConcreteModelTest, DerandomizedTestCase):
"""
Tests NoisyCoinModel.
"""
def instantiate_model(self):
return NoisyCoinModel()
def instantiate_prior(self):
return BetaDistribution(mean=0.5, var=0.1)
def instantiate_expparams(self):
alphas = (0.1 * np.ones((10,))).astype([('alpha','float')])
betas = np.linspace(0,0.5,10, dtype=[('beta','float')])
return rfn.merge_arrays([alphas,betas])
class TestNDieModel(ConcreteModelTest, DerandomizedTestCase):
"""
Tests NDieModel.
"""
def instantiate_model(self):
return NDieModel(n=6)
def instantiate_prior(self):
unif = UniformDistribution(np.array([[0,1],[0,1],[0,1],[0,1],[0,1],[0,1]]))
return ConstrainedSumDistribution(unif, desired_total=1)
def instantiate_expparams(self):
return np.arange(10).astype(self.model.expparams_dtype)
## TOMOGRAPHY MODELS ##########################################################
@unittest.skipIf(not check_qutip_version('3.2'), 'This test requires qutip 3.2 or higher to run.')
class TestTomographyModel(ConcreteModelTest, DerandomizedTestCase):
"""
Tests TomographyModel.
"""
def instantiate_model(self):
basis = pauli_basis(nq=2)
return TomographyModel(basis=basis)
def instantiate_prior(self):
basis = pauli_basis(nq=2)
return GinibreDistribution(basis)
def instantiate_expparams(self):
# 10 different random measurements, each measurement
# is an operator expressed in the 2-qubit pauli basis.
eps = np.random.rand(10, 2 ** 4)
# now we need to convert to fancy data type by putting
# the second index into the 'meas' structure
eps = eps.view(dtype=self.model.expparams_dtype).squeeze(-1)
return eps
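# Standalone illustration of the dtype-view trick above (plain numpy): viewing
# an (N, k) float array as a structured dtype with a length-k field packs each
# row into one record, e.g.
#
#     eps = np.random.rand(10, 4).view(dtype=[('meas', float, (4,))]).squeeze(-1)
#     eps.shape          # (10,)
#     eps['meas'].shape  # (10, 4)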
## RB MODELS ##################################################################
class TestRBModel(ConcreteDifferentiableModelTest, DerandomizedTestCase):
"""
Tests RandomizedBenchmarkingModel without interleaving.
"""
def instantiate_model(self):
return RandomizedBenchmarkingModel(interleaved=False)
def instantiate_prior(self):
return PostselectedDistribution(
UniformDistribution(np.array([[0,1],[0,1],[0,1]])),
self.model
)
def instantiate_expparams(self):
ms = np.arange(10).astype(self.model.expparams_dtype)
return ms
class TestIRBModel(ConcreteDifferentiableModelTest, DerandomizedTestCase):
"""
Tests RandomizedBenchmarkingModel with interleaving.
"""
def instantiate_model(self):
return RandomizedBenchmarkingModel(interleaved=True)
def instantiate_prior(self):
return PostselectedDistribution(
UniformDistribution(np.array([[0,1],[0,1],[0,1],[0,1]])),
self.model
)
def instantiate_expparams(self):
# sequential sequences
ms = np.arange(10).astype([('m','uint')])
isref = np.random.rand(10).round().astype([('reference',bool)])
return rfn.merge_arrays([ms, isref])
## DERIVED MODELS #############################################################
# not technically a derived model, but should be.
class TestALEApproximateModel(ConcreteModelTest, DerandomizedTestCase):
"""
Tests ALEApproximateModel with SimplePrecessionModel as the underlying model
(underlying model has 1 scalar expparams).
"""
def instantiate_model(self):
return ALEApproximateModel(SimplePrecessionModel())
def instantiate_prior(self):
return UniformDistribution(np.array([[5,8]]))
def instantiate_expparams(self):
ts = np.linspace(0,5,10, dtype=self.model.expparams_dtype)
return ts
class TestBinomialModel(ConcreteModelTest, DerandomizedTestCase):
"""
Tests BinomialModel with CoinModel as the underlying model
(underlying model has no expparams).
"""
def instantiate_model(self):
return BinomialModel(CoinModel())
def instantiate_prior(self):
return BetaDistribution(mean=0.5, var=0.1)
def instantiate_expparams(self):
return np.arange(100, 120).astype(self.model.expparams_dtype)
class TestBinomialModel1(ConcreteModelTest, DerandomizedTestCase):
"""
Tests BinomialModel with SimplePrecessionModel as the underlying model
(underlying model has 1 scalar expparams).
"""
def instantiate_model(self):
return BinomialModel(SimplePrecessionModel())
def instantiate_prior(self):
return UniformDistribution(np.array([[5,8]]))
def instantiate_expparams(self):
# the scalar expparam is given name 'x' by BinomialModel
ts = np.linspace(0,5,10, dtype=[('x','float')])
nmeas = np.arange(10,20).astype([('n_meas','int')])
return rfn.merge_arrays([ts,nmeas])
class TestBinomialModel2(ConcreteModelTest, DerandomizedTestCase):
"""
Tests BinomialModel with NoisyCoinModel as the underlying model
(underlying model has 2 expparams).
"""
def instantiate_model(self):
return BinomialModel(NoisyCoinModel())
def instantiate_prior(self):
return BetaDistribution(mean=0.5, var=0.1)
def instantiate_expparams(self):
alphas = (0.1 * np.ones((10,))).astype([('alpha','float')])
betas = np.linspace(0,0.5,10, dtype=[('beta','float')])
nmeas = np.arange(10,20).astype([('n_meas','int')])
return rfn.merge_arrays([alphas,betas,nmeas])
class TestMultinomialModel(ConcreteModelTest, DerandomizedTestCase):
"""
Tests MultinomialModel with NDieModel as the underlying model
(underlying model has no expparams).
"""
def instantiate_model(self):
return MultinomialModel(NDieModel(n=6))
def instantiate_prior(self):
return DirichletDistribution([1,2,3,10,1,3])
def instantiate_expparams(self):
return np.arange(10).astype(self.model.expparams_dtype)
class TestPoisonedModelALE(ConcreteModelTest, DerandomizedTestCase):
"""
Tests PoisonedModel with SimplePrecessionModel as the underlying model
in ALE mode.
"""
def instantiate_model(self):
return PoisonedModel(
SimplePrecessionModel(),
tol = 1e-4
)
def instantiate_prior(self):
return UniformDistribution(np.array([[5,8]]))
def instantiate_expparams(self):
return np.arange(10,20).astype(self.model.expparams_dtype)
class TestPoisonedModelMLE(ConcreteModelTest, DerandomizedTestCase):
"""
Tests PoisonedModel with SimplePrecessionModel as the underlying model
in MLE mode.
"""
def instantiate_model(self):
return PoisonedModel(
SimplePrecessionModel(),
n_samples = 10,
hedge = 0.01
)
def instantiate_prior(self):
return UniformDistribution(np.array([[5,8]]))
def instantiate_expparams(self):
return np.arange(10,20)
# [source truncated here; completion target: np.arange(10,20), API: numpy.arange]
# extract_data.py
import numpy as np
import matplotlib.pyplot as plt
import astropy.constants as ac
import astropy.units as au
import pyathena as pa
from pyathena.classic import cc_arr
from ..load_sim import LoadSim
class ExtractData:
@LoadSim.Decorators.check_pickle
def read_VFF_Peters17(self, num, savdir=None, force_override=False):
r = dict()
ds = self.load_vtk(num, load_method='pyathena_classic')
x1d, y1d, z1d = cc_arr(ds.domain)
z, _, _ = np.meshgrid(z1d, y1d, x1d, indexing='ij')
idx_z = np.abs(z) < 100.0
tot = idx_z.sum()
T = ds.read_all_data('temperature')
xn = ds.read_all_data('xn')
idx_c = (T[idx_z] <= 300.0)
idx_wi = ((T[idx_z] > 300.0) & (T[idx_z] <= 8.0e3) & (xn[idx_z] < 0.1))
idx_wn = ((T[idx_z] > 300.0) & (T[idx_z] <= 8.0e3) & (xn[idx_z] > 0.1))
idx_whn = ((T[idx_z] > 8000.0) & (T[idx_z] < 5.0e5) & (xn[idx_z] > 0.1))
idx_whi = ((T[idx_z] > 8000.0) & (T[idx_z] < 5.0e5) & (xn[idx_z] < 0.1))
idx_h = (T[idx_z] > 5e5)
r['time'] = ds.domain['time']
r['f_c'] = idx_c.sum()/tot
r['f_wi'] = idx_wi.sum()/tot
r['f_wn'] = idx_wn.sum()/tot
r['f_whi'] = idx_whi.sum()/tot
r['f_whn'] = idx_whn.sum()/tot
r['f_h'] = idx_h.sum()/tot
return r
@LoadSim.Decorators.check_pickle
def read_EM_pdf(self, num, savdir=None, force_override=False):
ds = self.load_vtk(num)
nH = ds.get_field(field='density')
xn = ds.get_field(field='specific_scalar[0]')
nesq = ((1.0 - xn)*nH)**2
z2 = 200.0
bins = np.linspace(-8, 5, 100)
dz = ds.domain['dx'][0]
id0 = 0
id1 = ds.domain['Nx'][2] // 2
# Calculate EM integrated from z = 200pc
id2 = id1 + int(z2/dz)
EM0 = nesq[id0:,:,:].sum(axis=0)*dz
EM1 = nesq[id1:,:,:].sum(axis=0)*dz
EM2 = nesq[id2:,:,:].sum(axis=0)*dz
h0, b0, _ = plt.hist(np.log10(EM0.flatten()), bins=bins, histtype='step', color='C0');
h1, b1, _ = plt.hist(np.log10(EM1.flatten()), bins=bins, histtype='step', color='C1');
h2, b2, _ = plt.hist(np.log10(EM2.flatten()), bins=bins, histtype='step', color='C2');
return dict(EM0=EM0, EM1=EM1, EM2=EM2, bins=bins, h0=h0, h1=h1, h2=h2)
@LoadSim.Decorators.check_pickle
def read_phot_dust_U_pdf(self, num, z0=200.0,
ifreq_ion=0, savdir=None, force_override=False):
s = self
sigmapi = s.par['radps']['sigma_ph[0]']
sigmad = s.par['radps']['kappa_dust[0]']*s.u.density.value
c = ac.c.cgs.value
# mean energy of ionizing photons
hnu = s.par['radps']['hnu[{0:1d}]'.format(ifreq_ion)]*((1.0*au.eV).cgs.value)
ds = s.load_vtk(num=num, load_method='pyathena_classic')
#print(ds.domain)
bins_nH = np.linspace(-5, 4, 61)
bins_U = np.linspace(-6, 1, 61)
bins_z = np.linspace(ds.domain['left_edge'][2], ds.domain['right_edge'][2], ds.domain['Nx'][2]//16 + 1)
#print(bins_nH, bins_U, bins_z)
nH = ds.read_all_data('density').flatten()
Erad0 = ds.read_all_data('Erad0').flatten() # cgs unit
xn = ds.read_all_data('xn').flatten()
T = ds.read_all_data('temperature').flatten()
ne = nH*(1.0 - xn)
nHI = nH*xn
Erad0ph = Erad0/hnu # photon number density
U = Erad0ph/nH # ionization parameter
x1d, y1d, z1d = pa.classic.cc_arr(ds.domain)
z, _, _ = np.meshgrid(z1d, y1d, x1d, indexing='ij')
# Warm phase indices
w = ((T > 5050.0) & (T < 2.0e4) & (xn < 0.1))
dvol = ds.domain['dx'].prod()*ac.pc.cgs.value**3
zw = z.flatten()[w]
Uw = U[w]
nesqw = (ne**2)[w]
nHw = nH[w]
# Tw = T[w]
# Local photoionization/dust absorption rate in a cell
ph_rate = nHI[w]*c*sigmapi*Erad0ph[w]*dvol
di_rate = nH[w]*c*sigmad*Erad0ph[w]*dvol
# print('phrate, dirate',ph_rate.sum(),di_rate.sum())
q = di_rate/(ph_rate + di_rate)
qma = np.ma.masked_invalid(q)
ma = qma.mask
wlz = np.abs(zw) < z0
bins = np.linspace(-5, 3, 80)
# nH_warm PDF weighted by ph_rate or di_rate (all, at |z| < 200pc, above 200 pc)
hdi, bedi, _ = plt.hist(np.log10(nHw[~ma])
# [source truncated here; completion target: np.log10(nHw[~ma]), API: numpy.log10]
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import (
BertTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
default_data_collator,
is_tf_available,
is_torch_available,
set_seed,
)
from transformers.testing_utils import require_tf, require_torch
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_torch
class DataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt")
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8)))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# Features can already be tensors
features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
# Labels can already be tensors
features = [{"label": torch.tensor(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)]
batch = data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.long)
features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)]
batch = data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.float)
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10]))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8]))
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2], "labels": [0, 1, 2]},
{"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]},
]
data_collator = DataCollatorForTokenClassification(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, torch.Size([2, 6]))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(tokenizer, padding="max_length", max_length=10)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10]))
self.assertEqual(batch["labels"].shape, torch.Size([2, 10]))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8]))
self.assertEqual(batch["labels"].shape, torch.Size([2, 8]))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, torch.Size([2, 6]))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
with self.assertRaises(ValueError):
# Expect error due to padding token missing
data_collator(pad_features)
set_seed(42) # For reproducibility
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="pt")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer)
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 5)))
self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,)))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 8)))
self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,)))
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{
"input_ids": torch.tensor([0, 1, 2, 3, 4]),
"token_type_ids": torch.tensor([0, 1, 2, 3, 4]),
"sentence_order_label": i,
}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 5)))
self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,)))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 8)))
self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,)))
@require_tf
class TFDataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt")
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].numpy().tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].numpy().tolist(), ([[0, 1, 2]] * 8))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
# Features can already be tensors
features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].numpy().tolist(), (list(range(8))))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 10])
# Labels can already be tensors
features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["labels"].numpy().tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 10])
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)]
batch = data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].dtype, tf.int64)
features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)]
batch = data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].dtype, tf.float32)
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6])
self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, [2, 8])
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2], "labels": [0, 1, 2]},
{"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]},
]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6])
self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape.as_list(), [2, 6])
self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(
tokenizer, padding="max_length", max_length=10, return_tensors="tf"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["labels"].shape.as_list(), [2, 8])
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6])
self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape.as_list(), [2, 6])
self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-1] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
data_collator = DataCollatorForLanguageModeling(
tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="tf"
)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf")
with self.assertRaises(ValueError):
# Expect error due to padding token missing
data_collator(pad_features)
set_seed(42) # For reproducibility
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
batch = data_collator(pad_features, return_tensors="tf")
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
batch = data_collator(pad_features, return_tensors="tf")
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["labels"].shape.as_list(), [2, 5])
self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2])
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["labels"].shape.as_list(), [2, 8])
self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2])
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{
"input_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]),
"token_type_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]),
"sentence_order_label": i,
}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["labels"].shape.as_list(), [2, 5])
self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2])
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["labels"].shape.as_list(), [2, 8])
self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2])
class NumpyDataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt")
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 6))
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), [[0, 1, 2]] * 8)
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 6))
# Features can already be tensors
features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 10))
# Labels can already be tensors
features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["labels"].tolist(), (list(range(8))))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 10))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)]
batch = data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.int64)
features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)]
batch = data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.float32)
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, (8, 6))
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, (8, 6))
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2], "labels": [0, 1, 2]},
{"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]},
]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(
tokenizer, padding="max_length", max_length=10, return_tensors="np"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
batch = data_collator(pad_features, return_tensors="np")
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
data_collator = DataCollatorForLanguageModeling(
tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="np"
)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
batch = data_collator(pad_features, return_tensors="np")
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np")
with self.assertRaises(ValueError):
# Expect error due to padding token missing
data_collator(pad_features)
set_seed(42) # For reproducibility
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["perm_mask"].shape, (2, 10, 10))
self.assertEqual(batch["target_mapping"].shape, (2, 10, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["perm_mask"].shape, (2, 10, 10))
self.assertEqual(batch["target_mapping"].shape, (2, 10, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
example = [
|
np.random.randint(0, 5, [5])
|
numpy.random.randint
|
import numpy as np
_nonZeroEps = 1.0e-3
def _check_non_zero(array):
norm = np.linalg.norm(array)
assert abs(norm) > _nonZeroEps
return norm
def normalized(array):
norm = _check_non_zero(array)
return array / norm
def unit_x():
return np.array((1, 0, 0), np.float32)
def unit_y():
return np.array((0, 1, 0), np.float32)
def unit_z():
return np.array((0, 0, 1), np.float32)
def scale(*args):
assert len(args) == 3
result = np.zeros((4, 4), np.float32)
result[0, 0] = args[0]
result[1, 1] = args[1]
result[2, 2] = args[2]
result[3, 3] = 1.0
return result
def translate(*args):
assert len(args) == 3
result = np.identity(4, dtype=np.float32)
result[0, 3] = args[0]
result[1, 3] = args[1]
result[2, 3] = args[2]
return result
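# Illustrative sketch (not part of the original module): a quick sanity check for the
# scale/translate helpers above, composed as 4x4 homogeneous transforms. The point and
# factors below are arbitrary values chosen for demonstration.
def _example_transform():
    point = np.array((1.0, 2.0, 3.0, 1.0), np.float32)
    m = translate(1, 0, 0) @ scale(2, 2, 2)  # scale first, then translate
    return m @ point  # expected: (3., 4., 6., 1.)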
def _cross_product_matrix(axis):
return np.asarray(
(
(0, -axis[2], axis[1]),
(axis[2], 0, -axis[0]),
(-axis[1], axis[0], 0)
),
np.float32
)
def rotate(axis, angle, degree=False):
assert axis.size == 3
axis = normalized(axis)
if degree:
angle = np.deg2rad(angle)
mat3 = np.cos(angle) *
|
np.identity(3, dtype=np.float32)
|
numpy.identity
|
#!/usr/bin/env python3
import numpy
import pdb
import scipy.spatial
import shapely.geometry
import time
import yaml
import geodesy.conversions
import navigation.obstacle_space
import navigation.obstacle_primitives
import matplotlib.pyplot as plt
import matplotlib.collections
if __name__ == '__main__':
# use the obstacle file in radians or in degrees
use_radians = False
if use_radians:
obstacles_file = 'minneapolis.yaml'
else:
obstacles_file = 'minneapolis_deg.yaml'
with open(obstacles_file, 'r') as yfile:
obstacle_data = yaml.safe_load(yfile)
ref_pt = numpy.array(obstacle_data['lla_ref'], ndmin=2)
obstacles = obstacle_data['obstacles']
if use_radians:
ospace = navigation.obstacle_space.PrismaticObstacleSpace(
obstacles, ref_pt, is_radians=True)
else:
ospace = navigation.obstacle_space.PrismaticObstacleSpace(
obstacles, ref_pt, is_radians=False)
patches = []
for o in ospace._obstacles:
p = numpy.array(o._shape.exterior)
plt.plot(p[:,0], p[:,1])
x = []
y = []
z = []
for i in range(p.shape[0] - 1):
x += [p[i,0], p[i+1,0], p[i+1,0] + 0.01, p[i,0] + 0.01, p[i,0]]
y += [p[i,1], p[i+1,1], p[i+1,1], p[i,1], p[i,1]]
z += [o._zt, o._zt, 0.0, 0.0, o._zt]
x += p[:,0].tolist()
y += p[:,1].tolist()
z += (
|
numpy.zeros(p[:,0].shape)
|
numpy.zeros
|
import numpy as np
from yt.testing import (
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises,
fake_random_ds,
)
from yt.utilities.lib.misc_utilities import (
obtain_position_vector,
obtain_relative_velocity_vector,
)
_fields = ("density", "velocity_x", "velocity_y", "velocity_z")
# TODO: error compact/spread bits for incorrect size
# TODO: test msdb for [0,0], [1,1], [2,2] etc.
def test_spread_bits():
from yt.utilities.lib.geometry_utils import spread_bits
li = [
(
np.uint64(0b111111111111111111111),
np.uint64(0b1001001001001001001001001001001001001001001001001001001001001),
)
]
for i, ans in li:
out = spread_bits(i)
assert_equal(out, ans)
def test_compact_bits():
from yt.utilities.lib.geometry_utils import compact_bits
li = [
(
np.uint64(0b111111111111111111111),
np.uint64(0b1001001001001001001001001001001001001001001001001001001001001),
)
]
for ans, i in li:
out = compact_bits(i)
assert_equal(out, ans)
def test_spread_and_compact_bits():
from yt.utilities.lib.geometry_utils import compact_bits, spread_bits
li = [np.uint64(0b111111111111111111111)]
for ans in li:
mi = spread_bits(ans)
out = compact_bits(mi)
assert_equal(out, ans)
def test_lsz():
from yt.utilities.lib.geometry_utils import lsz
li = [
(
np.uint64(0b1001001001001001001001001001001001001001001001001001001001001),
3 * 21,
3,
0,
),
(
np.uint64(0b1001001001001001001001001001001001001001001001001001001001000),
3 * 0,
3,
0,
),
(
np.uint64(0b1001001001001001001001001001001001001001001001001001001000001),
3 * 1,
3,
0,
),
(
np.uint64(0b1001001001001001001001001001001001001001001001001001000001001),
3 * 2,
3,
0,
),
(
np.uint64(0b10010010010010010010010010010010010010010010010010010010010010),
3 * 0,
3,
0,
),
(
np.uint64(
0b100100100100100100100100100100100100100100100100100100100100100
),
3 * 0,
3,
0,
),
(np.uint64(0b100), 0, 1, 0),
(np.uint64(0b100), 1, 1, 1),
(np.uint64(0b100), 3, 1, 2),
(np.uint64(0b100), 3, 1, 3),
]
for i, ans, stride, start in li:
out = lsz(i, stride=stride, start=start)
assert_equal(out, ans)
def test_lsb():
from yt.utilities.lib.geometry_utils import lsb
li = [
(
np.uint64(0b1001001001001001001001001001001001001001001001001001001001001),
3 * 0,
),
(
np.uint64(0b1001001001001001001001001001001001001001001001001001001001000),
3 * 1,
),
(
np.uint64(0b1001001001001001001001001001001001001001001001001001001000000),
3 * 2,
),
(
np.uint64(0b1001001001001001001001001001001001001001001001001001000000000),
3 * 3,
),
(
np.uint64(0b10010010010010010010010010010010010010010010010010010010010010),
3 * 21,
),
(
np.uint64(
0b100100100100100100100100100100100100100100100100100100100100100
),
3 * 21,
),
]
for i, ans in li:
out = lsb(i, stride=3)
assert_equal(out, ans)
def test_bitwise_addition():
from yt.utilities.lib.geometry_utils import bitwise_addition
# TODO: Handle negative & periodic boundaries
lz = [
(0, 1),
# (0,-1),
(1, 1),
(1, 2),
(1, 4),
(1, -1),
(2, 1),
(2, 2),
(2, -1),
(2, -2),
(3, 1),
(3, 5),
(3, -1),
]
for i, a in lz:
i = np.uint64(i)
a = np.int64(a)
out = bitwise_addition(i, a, stride=1, start=0)
assert_equal(out, i + a)
# def test_add_to_morton_coord():
# from yt.utilities.lib.geometry_utils import add_to_morton_coord
def test_get_morton_indices():
from yt.utilities.lib.geometry_utils import (
get_morton_indices,
get_morton_indices_unravel,
)
INDEX_MAX_64 = np.uint64(2097151)
li = np.arange(6, dtype=np.uint64).reshape((2, 3))
mi_ans = np.array([10, 229], dtype=np.uint64)
mi_out = get_morton_indices(li)
mi_out2 = get_morton_indices_unravel(li[:, 0], li[:, 1], li[:, 2])
assert_array_equal(mi_out, mi_ans)
assert_array_equal(mi_out2, mi_ans)
li[0, :] = INDEX_MAX_64 * np.ones(3, dtype=np.uint64)
assert_raises(ValueError, get_morton_indices, li)
assert_raises(ValueError, get_morton_indices_unravel, li[:, 0], li[:, 1], li[:, 2])
def test_get_morton_points():
from yt.utilities.lib.geometry_utils import get_morton_points
mi = np.array([10, 229], dtype=np.uint64)
li_ans = np.arange(6, dtype=np.uint64).reshape((2, 3))
li_out = get_morton_points(mi)
assert_array_equal(li_out, li_ans)
def test_compare_morton():
# TODO: Add error messages to assertions
from yt.utilities.lib.geometry_utils import compare_morton
# Diagonal
p = np.array([0.0, 0.0, 0.0], dtype=np.float64)
q = np.array([1.0, 1.0, 1.0], dtype=np.float64)
assert_equal(compare_morton(p, q), 1)
assert_equal(compare_morton(q, p), 0)
assert_equal(compare_morton(p, p), 0)
# 1-1 vs 0-1
p =
|
np.array([1.0, 1.0, 0.0], dtype=np.float64)
|
numpy.array
|
import os
import json
import netCDF4
import logging
import datetime
import numpy as np
import seawater
from scipy import interpolate
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.patches as patches
from matplotlib.path import Path
from mpl_toolkits.mplot3d import Axes3D
from geopy.distance import vincenty
import cmocean
import scipy.io as sio
import geojson
import warnings
import matplotlib.cbook
from scipy.interpolate import splprep, splev
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
def prepare_map(coordinates, res='i', proj='merc'):
"""Return a fig, m and ax objects
given a set of coordinates defining a bounding box
:param coordinates: list of coordinates (lonmin, lonmax, latmin, latmax)
:param res: resolution in the projection ; 'i' by default (intermediate)
:return: fig
:type fig: Figure object
:return m
:type m: Basemap object
:return ax
:type ax: AxesSubplot object
"""
m = Basemap(projection=proj,
llcrnrlon=coordinates[0], llcrnrlat=coordinates[2],
urcrnrlon=coordinates[1], urcrnrlat=coordinates[3],
lat_ts=0.5 * (coordinates[2] + coordinates[3]), resolution=res)
fig = plt.figure()
ax = plt.subplot(111)
m.ax = ax
return fig, m, ax
def create_rect_patch(coordinates, m, **kwargs):
"""
Create a rectangular patch to add on the map
:param coordinates:
:param m: Basemap object
:return: patch
"""
xr1, yr1 = m(coordinates[0], coordinates[2])
xr2, yr2 = m(coordinates[0], coordinates[3])
xr3, yr3 = m(coordinates[1], coordinates[3])
xr4, yr4 = m(coordinates[1], coordinates[2])
verts = [(xr1, yr1), (xr2, yr2), (xr3, yr3), (xr4, yr4), (xr1, yr1), ]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY, ]
path = Path(verts, codes)
patch = patches.PathPatch(path, **kwargs)
return patch
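# Hedged usage sketch (assumes Basemap is installed; the coordinates below are
# placeholder values, not taken from the rest of this module): build a map for a
# bounding box and overlay a rectangular patch on it.
def _example_map_with_patch():
    coordinates = (-1.0, 1.0, 38.0, 39.5)  # lonmin, lonmax, latmin, latmax
    fig, m, ax = prepare_map(coordinates, res='i', proj='merc')
    patch = create_rect_patch((-0.5, 0.5, 38.5, 39.0), m,
                              facecolor='none', edgecolor='k')
    ax.add_patch(patch)
    return fig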
def configure_logging(logfile="./alborexFig.log"):
"""
Prepare the logging messages and file
"""
logger = logging.getLogger("alborex_logger")
logger.setLevel(logging.DEBUG)
# Format for our loglines
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# Setup console logging
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Setup file logging as well
fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def add_map_grid(m, coordinates, dlon, dlat, **kwargs):
"""Add x and y ticks (no line plotted for better visibility)
"""
m.drawparallels(np.arange(round(coordinates[2]), coordinates[3], dlat), labels=[1, 0, 0, 0], **kwargs)
m.drawmeridians(np.arange(round(coordinates[0]), coordinates[1], dlon), labels=[0, 0, 0, 1], **kwargs)
def load_lonloat_ctdleg(datafile):
"""Return coordinates from the file containing the information
on the different CTD legs
"""
lon, lat = [], []
with open(datafile) as f:
line = f.readline().rsplit()
while line:
# print(line)
lon.append(float(line[2]))
lat.append(float(line[3]))
line = f.readline().rsplit()
return lon, lat
def read_lonlat_coast(filename, valex=999):
"""
Return the coordinates of the contours
as a list of lists (one list per contour)
"""
with open(filename) as f:
lonall, latall = [], []
lon, lat = [], []
line = f.readline().rsplit()
while line:
if float(line[0]) == valex:
lonall.append(lon)
latall.append(lat)
lon, lat = [], []
else:
lon.append(float(line[0]))
lat.append(float(line[1]))
line = f.readline().rsplit()
return lonall, latall
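# Illustrative sketch of how read_lonlat_coast is meant to be used; the file name is an
# assumption. The expected layout (inferred from the parser above) is one "lon lat" pair
# per line, with a line whose first value equals `valex` closing each contour.
def _example_plot_coastline(coastfile="coastline.dat"):
    lonall, latall = read_lonlat_coast(coastfile, valex=999)
    for lon, lat in zip(lonall, latall):
        plt.plot(lon, lat, "k-", linewidth=0.5)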
class Front(object):
def __init__(self, lon=None, lat=None):
self.lon = lon
self.lat = lat
def get_from_file(self, filename):
"""
Read the coordinates from a text file (lon, lat)
:param filename: file name
:type filename: str
"""
self.lon = []
self.lat = []
if os.path.exists(filename):
with open(filename, "r") as df:
for lines in df.readlines():
self.lon.append(float(lines.rstrip().split(',')[0]))
self.lat.append(float(lines.rstrip().split(',')[1]))
def smooth(self, n=4, s=0.01, nest=4):
"""
Apply a smoothing spline to the front coordinates
:param n: refinement factor (the spline is evaluated on n * npoints points)
:param s: smoothness parameter
:param nest: estimate of number of knots needed (-1 = maximal); currently ignored, splprep is called with nest=-1
:return:
"""
npoints = len(self.lon)
if npoints > 0:
if npoints == len(self.lat):
t = np.linspace(0, 1, npoints)
t2 = np.linspace(0, 1, n * npoints)
# find the knot points
tckp, u = interpolate.splprep([t, self.lon, self.lat], s=s, nest=-1)
# evaluate spline, including interpolated points
xnew, self.lon, self.lat = interpolate.splev(t2, tckp)
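# Hedged usage sketch for the Front class above (the file name and smoothing
# parameters are assumptions for illustration only).
def _example_front(filename="front_coordinates.txt"):
    front = Front()
    front.get_from_file(filename)   # expects "lon,lat" pairs, one per line
    front.smooth(n=4, s=0.01)
    return front.lon, front.lat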
class Drifter(object):
def __init__(self, lon=None, lat=None, time=None, temperature=None,
qclon=None, qclat=None):
self.lon = lon
self.lat = lat
self.time = time
self.temperature = temperature
self.qclon = qclon
self.qclat = qclat
self.timeunits = None
self.dates = None
self.velocity = None
self.distance2front = None
def get_from_netcdf(self, datafile):
"""
Read the coordinates and the temperature from existing data file
"""
with netCDF4.Dataset(datafile, 'r') as nc:
self.lon = nc.get_variables_by_attributes(standard_name='longitude')[0][:]
self.lat = nc.get_variables_by_attributes(standard_name='latitude')[0][:]
self.time = nc.get_variables_by_attributes(standard_name='time')[0][:]
self.timeunits = nc.get_variables_by_attributes(standard_name='time')[0].units
self.dates = netCDF4.num2date(self.time, self.timeunits)
try:
self.qclat = nc.get_variables_by_attributes(standard_name='latitude status_flag')[0][:]
except IndexError:
self.qclat = None
try:
self.qclon = nc.get_variables_by_attributes(standard_name='longitude status_flag')[0][:]
except IndexError:
self.qclon = None
try:
self.temperature = nc.get_variables_by_attributes(standard_name='sea_water_temperature')[0][:]
except IndexError:
self.temperature = None
def apply_qc_latlon(self, QC=[1]):
"""
Discard the measurements whose position
does not have the indicated quality flag
"""
if (self.qclon is not None) and (self.qclat is not None):
badlon = [qc not in QC for qc in self.qclon]
badlat = [qc not in QC for qc in self.qclat]
badposition = np.logical_or(np.array(badlon), np.array(badlat))
self.lon = np.ma.masked_where(badposition, self.lon)
self.lat = np.ma.masked_where(badposition, self.lat)
def mask_temp(self, tmin, tmax):
if self.temperature is not None:
self.temperature = np.ma.masked_outside(self.temperature,
tmin,
tmax,
copy=True)
def select_dates(self, finaldate, initialdate=None):
"""
Mask the time outside the selected period
finaldate and initialdate are `datetime` objects,
for example: finaldate=datetime.datetime(2017, 5, 3, 18, 30, 0)
"""
if initialdate is not None:
self.lon = np.ma.masked_where(np.logical_or(self.dates > finaldate,
self.dates < initialdate),
self.lon)
self.lat = np.ma.masked_where(np.logical_or(self.dates > finaldate,
self.dates < initialdate),
self.lat)
else:
self.lon = np.ma.masked_where(self.dates > finaldate, self.lon)
self.lat = np.ma.masked_where(self.dates > finaldate, self.lat)
def scatter_plot(self, m, **kwargs):
scat = m.scatter(self.lon, self.lat, c=self.temperature, latlon=True, **kwargs)
return scat
def point_plot(self, m, **kwargs):
m.plot(self.lon.compressed(), self.lat.compressed(), latlon=True, **kwargs)
def add_initial_position(self, m, **kwargs):
m.plot(self.lon[0], self.lat[0], latlon=True, linewidth=0, **kwargs)
def compute_velocity(self, velmax=5.):
"""
Compute the velocity using the Vincenty distance
The values above velmax are masked
"""
distancevec = np.zeros(len(self.lon)-1)
timevec = self.time[1:] - self.time[:-1]
for ii in range(0, len(self.lon)-1):
distancevec[ii] = vincenty((self.lat[ii+1], self.lon[ii+1]),
(self.lat[ii], self.lon[ii])).m
self.velocity = distancevec / timevec
self.velocity = np.ma.masked_greater(self.velocity, velmax, copy=True)
def get_distance_front(self, frontlon, frontlat):
"""
For each position of the drifter, compute the distance to the front,
specified by 2 arrays of longitudes and latitudes
**Note:**
Brute force approach but could also approximate the front by a parabola
and use the formula to get the distance.
"""
npoints = len(frontlon)
distance2front = np.zeros(len(self.lon))
jj = 0
for lond, latd in zip(self.lon, self.lat):
dd = np.zeros(npoints)
ii = 0
for lonf, latf in zip(frontlon, frontlat):
dd[ii] = vincenty((latf, lonf), (latd, lond)).m  # geopy expects (lat, lon) pairs
ii += 1
distance2front[jj] = np.min(dd)
jj += 1
self.distance2front = distance2front
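# Hedged usage sketch for the Drifter class (file name and thresholds are assumptions):
# read a drifter netCDF file, keep only positions flagged as good, mask implausible
# temperatures and derive velocities from consecutive fixes.
def _example_drifter(datafile="drifter.nc"):
    drifter = Drifter()
    drifter.get_from_netcdf(datafile)
    drifter.apply_qc_latlon(QC=[1])
    drifter.mask_temp(tmin=10., tmax=30.)
    drifter.compute_velocity(velmax=5.)
    return drifter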
class Thermosal(object):
"""
Thermosalinograph (temperature and salinity measured by the
ship near the surface)
"""
def __init__(self, lon=None, lat=None, time=None,
temperature=None, salinity=None, qclon=None, qclat=None,
qctemp=None, qcsal=None):
self.lon = lon
self.lat = lat
self.time = time
self.temperature = temperature
self.salinity = salinity
def get_from_netcdf(self, datafile):
"""
Read the coordinates and the field values from a netCDF file
"""
with netCDF4.Dataset(datafile, 'r') as nc:
self.lon = nc.get_variables_by_attributes(standard_name='longitude')[0][:]
self.lat = nc.get_variables_by_attributes(standard_name='latitude')[0][:]
self.time = nc.get_variables_by_attributes(standard_name='time')[0][:]
timeunits = nc.get_variables_by_attributes(standard_name='time')[0].units
self.dates = netCDF4.num2date(self.time, timeunits)
self.salinity = nc.get_variables_by_attributes(standard_name='sea_water_salinity')[0][:]
self.temperature = nc.get_variables_by_attributes(standard_name='sea_water_temperature')[0][:]
class CTD():
def __init__(self, lon=None, lat=None, time=None, depth=None, pressure=None,
temperature=None, salinity=None, qclon=None, qclat=None,
qctemp=None, qcsal=None, chloro=None, oxygen=None):
self.lon = lon
self.lat = lat
self.time = time
self.depth = depth
self.pressure = pressure
self.temperature = temperature
self.salinity = salinity
self.qclon = qclon
self.qclat = qclat
self.qctemp = qctemp
self.qcsal = qcsal
self.timeunits = None
self.dates = None
self.chloro = chloro
self.oxygen = oxygen
def get_from_netcdf(self, datafile):
"""
Read the coordinates and the temperature from existing data file
"""
with netCDF4.Dataset(datafile, 'r') as nc:
try:
self.pressure = nc.get_variables_by_attributes(standard_name='sea_water_pressure')[0][:]
except IndexError:
self.pressure = None
self.lon = nc.get_variables_by_attributes(standard_name='longitude')[0][:]
self.lat = nc.get_variables_by_attributes(standard_name='latitude')[0][:]
self.depth = nc.get_variables_by_attributes(standard_name='depth')[0][:]
self.time = nc.get_variables_by_attributes(standard_name='time')[0][:]
self.timeunits = nc.get_variables_by_attributes(standard_name='time')[0].units
self.dates = netCDF4.num2date(self.time, self.timeunits)
try:
self.oxygen = nc.get_variables_by_attributes(long_name='oxygen concentration')[0][:]
except IndexError:
self.oxygen = None
try:
self.chloro = nc.variables["CHLO"][:]
except KeyError:
self.chloro = None
try:
self.qclat = nc.get_variables_by_attributes(standard_name='latitude status_flag')[0][:]
except IndexError:
self.qclat = None
try:
self.qclon = nc.get_variables_by_attributes(standard_name='longitude status_flag')[0][:]
except IndexError:
self.qclon = None
# Get salinity
try:
salinityvar = nc.get_variables_by_attributes(standard_name='sea_water_practical_salinity')[0]
salinityqcvar = salinityvar.ancillary_variables
self.salinity = salinityvar[:]
self.qcsal = nc.variables[salinityqcvar][:]
except IndexError:
try:
salinityvar = nc.get_variables_by_attributes(standard_name='sea_water_salinity')[0]
self.salinity = salinityvar[:]
salinityqcvar = salinityvar.ancillary_variables
try:
self.qcsal = nc.variables[salinityqcvar][:]
except KeyError:
self.qcsal = None
except AttributeError:
self.qcsal = None
# Get (potential) temperature and convert if necessary
try:
tempvar = nc.get_variables_by_attributes(standard_name='sea_water_temperature')[0]
self.temperature = tempvar[:]
except IndexError:
try:
tempvar = nc.get_variables_by_attributes(standard_name='sea_water_potential_temperature')[0]
potentialtemp = tempvar[:]
self.temperature = seawater.temp(self.salinity, potentialtemp, self.pressure)
except IndexError:
self.temperature = None
self.qctemp = None
try:
tempqcvar = tempvar.ancillary_variables
try:
self.qctemp = nc.variables[tempqcvar][:]
except KeyError:
self.qctemp = None
except AttributeError:
self.qctemp = None
class Glider(CTD):
def remove_masked_coords(self):
"""
Remove the masked coordinates (lon, lat, time, dates)
"""
coordmask = np.logical_not(self.lon.mask)
self.time = self.time.compress(coordmask)
self.dates = self.dates.compress(coordmask)
self.lon = self.lon.compressed()
self.lat = self.lat.compressed()
def get_coords(self, datafile):
"""
Load the coordinates from a glider file
:param datafile: name of the glider netCDF file
:return: lon: longitude
:return: lat: latitude
:return: depth: depth
:return: time: time
"""
with netCDF4.Dataset(datafile, 'r') as nc:
self.lon = nc.variables['longitude'][:]
self.lat = nc.variables['latitude'][:]
self.depth = nc.variables['depth'][:]
self.time = nc.variables['time'][:]
def get_day_indices(self, ndays=1):
"""
Get the time indices corresponding to the start of days,
separated by "ndays"
"""
day_indices = []
date_list = []
# Convert the time to dates
datestart, dateend = self.dates[0], self.dates[-1]
date = datetime.datetime(datestart.year, datestart.month, datestart.day,
0, 0, 0)
while date <= dateend:
# Increment initial date
date += datetime.timedelta(days=ndays)
date_list.append(date)
# Get corresponding index
index = np.argmin(abs(self.time - netCDF4.date2num(date, self.timeunits)))
day_indices.append(index)
return day_indices, date_list
def scatter_plot(self, ax, **kwargs):
"""
Add the measurements to a 3D scatter plot
"""
scat3D = ax.scatter(self.lon, self.lat, -self.depth, **kwargs)
return scat3D
def get_temperature_all(self, datafile):
"""
Read the temperatures
"""
with netCDF4.Dataset(datafile, 'r') as nc:
self.temp_ori = nc.variables["temperature"][:]
self.temp_corr = nc.variables["temperature_corrected_thermal"][:]
self.temp_oxy = nc.variables["temperature_oxygen"][:]
def to_json(self, filename, varname, NN=100):
"""
Create a geoJSON file containing the glider coordinates as a LineString object
:param filename: name of the JSON file
:param varname: name of the variable in the JSON file
:param NN: value used for the data subsampling
"""
# Remove masked values and apply sub-sampling
# (otherwise too many points)
lon = np.ma.compressed(self.lon)[::NN]
lat = np.ma.compressed(self.lat)[::NN]
# Create list of tuples
gliderlist = [(llon, llat) for llon, llat in zip(lon, lat)]
# Create LineString object
gliderGeoJson = geojson.LineString(gliderlist)
# Write in new file
with open(filename, 'w') as f:
f.write("var {0} = ".format(varname))
geojson.dump(gliderGeoJson, f)
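# Hedged usage sketch for the Glider class (file and variable names are assumptions);
# it presumes the coordinates come back as masked arrays, as remove_masked_coords expects.
def _example_glider(datafile="glider.nc"):
    glider = Glider()
    glider.get_from_netcdf(datafile)
    glider.remove_masked_coords()
    glider.to_json("glider_track.js", "gliderTrack", NN=100)
    return glider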
class Profiler(CTD):
"""
Stores Argo profiler data
"""
def select_dates(self, finaldate, initialdate=None):
"""
Mask the time outside the selected period
finaldate and initialdate are `datetime` objects,
for example: finaldate=datetime.datetime(2017, 5, 3, 18, 30, 0)
"""
if initialdate is not None:
dates2mask = np.logical_or(self.dates > finaldate,
self.dates < initialdate)
else:
dates2mask = self.dates > finaldate
ndepth = self.depth.shape[1]
dates2mask2D = np.tile(dates2mask, (ndepth, 1)).transpose()  # np.tile instead of np.matlib.repmat (numpy.matlib is not imported)
self.lon = np.ma.masked_where(dates2mask, self.lon)
self.lat = np.ma.masked_where(dates2mask, self.lat)
self.dates = np.ma.masked_where(dates2mask, self.dates)
self.depth = np.ma.masked_where(dates2mask2D, self.depth)
self.temperature = np.ma.masked_where(dates2mask2D, self.temperature)
self.salinity = np.ma.masked_where(dates2mask2D, self.salinity)
def read_profile_from_mat(datafile):
"""
Read the profile stored in a mat file
Return
the coordinates lon, lat and time (scalars)
the depth (array)
the temperature and salinity (arrays)
"""
if os.path.exists(datafile):
data_argo = sio.loadmat(datafile)
lon = data_argo["lon"][0][0]
lat = data_argo["lat"][0][0]
time = data_argo["time"]
temperature = np.array([t[0] for t in data_argo["temp"]])
salinity = np.array([s[0] for s in data_argo["saly"]])
pressure = np.array([p[0] for p in data_argo["pres"]])
else:
lon, lat, pressure, time, temperature, salinity = \
None, None, None, None, None, None
return lon, lat, pressure, time, temperature, salinity
def read_profiles_from_list(filelist):
"""
Read all the profiles from a list of files
Return
arrays for lon, lat and time
"""
nfiles = len(filelist)
if nfiles > 0:
# Allocate arrays
# lon, lat and time are fixed for each profile, so we have
# 1D arrays
lon_array = np.empty(nfiles)
lat_array = np.empty(nfiles)
time_array = np.empty(nfiles)
# For the other variables, we use arrays of arrays (one per profile)
# We start with empty lists that will be turned into lists of lists
temp_list = []
salt_list = []
pressure_list = []
for idata, datafile in enumerate(filelist):
# Read the data from the file
lon, lat, pressure, time, temperature, salinity = Profiler.read_profile_from_mat(datafile)
# Fill the arrays
lon_array[idata] = lon
lat_array[idata] = lat
time_array[idata] = time
temp_list.append(temperature)
salt_list.append(salinity)
pressure_list.append(pressure)
temp_array = np.array(temp_list)
salt_array = np.array(salt_list)
pressure_array = np.array(pressure_list)
return lon_array, lat_array, time_array, pressure_array, temp_array, salt_array
def arrays_to_netcdf(ncfile, lon, lat, t, p, T, S):
"""
Write the arrays into a single netCDF file `ncfile`
with a structure similar to SOCIB files
Inputs:
lon, lat, time, pressure, T and S are numpy ndarrays
(arrays of arrays), one array per profile
"""
with netCDF4.Dataset(ncfile, "w", format="NETCDF4") as nc:
# Size the depth dimension with the longest profile
ndepth = max(len(profile) for profile in p)
# Dimensions
time = nc.createDimension("time", None) # unlimited
depth = nc.createDimension("depth", ndepth)
# Variables and attributes
time = nc.createVariable("time", "f8",("time",), fill_value=np.nan)
time.standard_name = "time"
time.units = "days since 01-01-01 00:00:00"
time.axis = "T"
time.calendar = "gregorian"
DEPTH = nc.createVariable("DEPTH", "f8",("time", "depth"))
DEPTH.ancillary_variables = "QC_DEPTH"
DEPTH.axis = "Z"
DEPTH.long_name = "Depth coordinate"
DEPTH.positive = "down"
DEPTH.reference_datum = "geographical coordinates, WGS84 projection"
DEPTH.standard_name = "depth"
DEPTH.units = "m"
LON = nc.createVariable("LON", "f4",("time",))
LON.standard_name = "longitude"
LON.long_name = "Longitude"
LON.units = "degrees_east"
LON.ancillary_variables = "QC_LON"
LON.axis = "X"
LON.valid_min = -180.
LON.valid_max = 180.
LON.reference_datum = "geographical coordinates, WGS84 projection"
LAT = nc.createVariable("LAT", "f4",("time",))
LAT.standard_name = "latitude"
LAT.long_name = "Latitude"
LAT.units = "degrees_north"
LAT.ancillary_variables = "QC_LAT"
LAT.axis = "Y"
LAT.valid_min = -90.
LAT.valid_max = 90.
LAT.reference_datum = "geographical coordinates, WGS84 projection"
WTR_PRE = nc.createVariable("WTR_PRE", "f8",("time", "depth"))
WTR_PRE.ancillary_variables = "QC_WTR_PRE"
WTR_PRE.coordinates = "time LAT LON DEPTH"
WTR_PRE.long_name = "Sea water pressure"
WTR_PRE.observation_type = "measured"
WTR_PRE.original_units = "dbar"
WTR_PRE.precision = "0.1"
WTR_PRE.resolution = "0.1"
WTR_PRE.standard_name = "sea_water_pressure"
WTR_PRE.units = "dbar"
WTR_TEM = nc.createVariable("WTR_TEM", "f8",("time", "depth"))
WTR_TEM.ancillary_variables = "QC_WTR_TEM"
WTR_TEM.coordinates = "time LAT LON DEPTH"
WTR_TEM.long_name = "Sea water temperature"
WTR_TEM.observation_type = "measured"
WTR_TEM.original_units = "C"
WTR_TEM.precision = "0.001"
WTR_TEM.resolution = "0.001"
WTR_TEM.standard_name = "sea_water_temperature"
WTR_TEM.units = "C"
SALT = nc.createVariable("SALT", "f8",("time", "depth"))
SALT.ancillary_variables = "QC_SALT"
SALT.coordinates = "time LAT LON DEPTH"
SALT.long_name = "Sea water salinity"
SALT.observation_type = "derived"
SALT.original_units = "psu"
SALT.precision = "0.001"
SALT.resolution = "0.001"
SALT.standard_name = "sea_water_salinity"
SALT.units = "psu"
# Add values to the variables
LON[:] = lon
LAT[:] = lat
# Remove 365 days because of reference year
time[:] = t - 365
for i, Pprofile in enumerate(p):
npoints = len(Pprofile)
if npoints > 0:
WTR_PRE[i,:npoints] = Pprofile
# Convert pressure to depth
depth = seawater.dpth(Pprofile, lat[i])
DEPTH[i,:npoints] = depth
for i, Tprofile in enumerate(T):
npoints = len(Tprofile)
WTR_TEM[i,:npoints] = Tprofile
for i, Sprofile in enumerate(S):
npoints = len(Sprofile)
SALT[i,:npoints] = Sprofile
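# Illustrative sketch, not part of the original module: chain the two helpers
# above to turn a set of Argo .mat profiles into a single netCDF file.
# The directory, glob pattern and output file name are assumptions.
def export_argo_profiles_example(matdir="./argo_profiles", ncfile="argo_profiles.nc"):
    """
    Read every .mat profile found in `matdir` and write them to `ncfile`
    (hypothetical paths, shown only to document the intended call order)
    """
    import glob
    filelist = sorted(glob.glob(matdir + "/*.mat"))
    lon, lat, t, p, T, S = Profiler.read_profiles_from_list(filelist)
    Profiler.arrays_to_netcdf(ncfile, lon, lat, t, p, T, S)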
class Ship(Drifter):
def apply_qc(self, qflag=1):
"""
Mask the coordinates with a quality flag different from the specified value
1 = good data
"""
badcoords = np.logical_or(self.qclon != qflag, self.qclat != qflag)
self.lon = np.ma.masked_where(badcoords, self.lon)
self.lat = np.ma.masked_where(badcoords, self.lat)
def plot_track(self, m, **kwargs):
m.plot(self.lon, self.lat, latlon=True, **kwargs)
class SST(object):
"""
Sea surface temperature field
"""
def __init__(self, lon=None, lat=None, field=None, qflag=None,
year=None, dayofyear=None):
self.lon = lon
self.lat = lat
self.field = field
self.qflag = qflag
self.timeunits = year
self.year = year
self.dayofyear = dayofyear
def read_from_oceancolorL2(self, filename):
"""
Load the SST from netCDF L2 file obtained from
https://oceancolor.gsfc.nasa.gov
:param filename: name of the netCDF file
:return: lon, lat, field, qflag, year, dayofyear
"""
if os.path.exists(filename):
with netCDF4.Dataset(filename) as nc:
# Read platform
sat = nc.platform
# Read time information
# Assume all the measurements made the same day (and same year)
self.year = nc.groups['scan_line_attributes'].variables['year'][0]
self.dayofyear = nc.groups['scan_line_attributes'].variables['day'][0]
# Read coordinates
self.lon = nc.groups['navigation_data'].variables['longitude'][:]
self.lat = nc.groups['navigation_data'].variables['latitude'][:]
# Read geophysical variables
try:
self.field = nc.groups['geophysical_data'].variables['sst'][:]
self.qflag = nc.groups['geophysical_data'].variables['qual_sst'][:]
except KeyError:
self.field = nc.groups['geophysical_data'].variables['sst4'][:]
self.qflag = nc.groups['geophysical_data'].variables['qual_sst4'][:]
def apply_qc(self, qf=1):
"""
Mask the SST values whose quality flag is greater than or equal to `qf`
(the default keeps only the best-quality pixels, flag 0)
"""
self.field = np.ma.masked_where(self.qflag >= qf, self.field)
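# Illustrative sketch, not part of the original module: typical call order for
# the SST class above. The granule file name is an assumption.
def load_sst_example(filename="A2017121183500.L2_LAC_SST.nc"):
    """
    Read an ocean colour L2 granule and keep only the best-quality pixels
    (hypothetical file name, for illustration only)
    """
    sst = SST()
    sst.read_from_oceancolorL2(filename)
    sst.apply_qc(qf=1)
    return sst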
class Adcp(object):
"""
Stores ADCP transects
"""
def __init__(self, lon=None, lat=None, depth=None,
u=None, v=None, qclon=None, qclat=None,
qcu=None, qcv=None,
time=None, dates=None):
self.lon = lon
self.lat = lat
self.depth = depth
self.u = u
self.v = v
self.qclon = qclon
self.qclat = qclat
self.qcu = qcu
self.qcv = qcv
self.time = time
self.dates = dates
def get_from_netcdf(self, filename):
"""
Read the coordinates and the velocity components
from the netCDF file
"""
with netCDF4.Dataset(filename) as nc:
self.lon = nc.get_variables_by_attributes(standard_name='longitude')[0][:]
self.lat = nc.get_variables_by_attributes(standard_name='latitude')[0][:]
self.depth = nc.get_variables_by_attributes(standard_name='depth')[0][:]
self.time = nc.get_variables_by_attributes(standard_name='time')[0][:]
self.timeunits = nc.get_variables_by_attributes(standard_name='time')[0].units
self.dates = netCDF4.num2date(self.time, self.timeunits)
self.qclat = nc.get_variables_by_attributes(standard_name='latitude status_flag')[0][:]
self.qclon = nc.get_variables_by_attributes(standard_name='longitude status_flag')[0][:]
# Velocity components
uvar = nc.get_variables_by_attributes(standard_name='eastward_sea_water_velocity')[0]
vvar = nc.get_variables_by_attributes(standard_name='northward_sea_water_velocity')[0]
self.u = uvar[:]
self.v = vvar[:]
# Quality flags for velocity
uqcvar = uvar.ancillary_variables
vqcvar = vvar.ancillary_variables
self.qcu = nc.variables[uqcvar][:]
self.qcv = nc.variables[vqcvar][:]
def get_from_matfile(self, filename):
"""
Read the coordinates (lon, lat, depth) and
the velocity components from the .mat files
"""
# Read the mat file
dataadcp = sio.loadmat(filename)
self.lon = dataadcp["AnFLonDeg"]
self.lat = dataadcp["AnFLatDeg"]
self.u = dataadcp["SerEmmpersec"]
self.v = dataadcp["SerNmmpersec"]
ndepth = self.u.shape[1]
depthmin = 16.
deltadepth = 8.
depthmax = depthmin + (ndepth - 1) * deltadepth
self.depth = np.linspace(depthmin, depthmax, int(ndepth))
def apply_qc(self, qf=1):
"""
Mask the velocity values which don't match the mentioned quality flag
"""
self.u = np.ma.masked_where(self.qcu != 1, self.u)
self.v = np.ma.masked_where(self.qcv != 1, self.v)
def get_norm(self):
"""
Compute the norm of the velocity vectors
"""
self.velnorm = np.sqrt(self.u * self.u + self.v * self.v)
def get_time_index(self, datemin=None, datemax=None):
"""
Return an array of indices corresponding to the dates between
datemin and datemax
"""
if datemin is not None:
if datemax is not None:
gooddates = np.where(np.logical_and(self.dates >= datemin, self.dates <= datemax))[0]
else:
gooddates = np.where(self.dates >= datemin)[0]
else:
if datemax is not None:
gooddates = np.where(self.dates <= datemax)[0]
else:
gooddates = np.where(self.dates)[0]
return gooddates
def plot_adcp_quiver(self, m, depthindex=0, depth=None, datemin=None, datemax=None):
"""
Plot velocity field with arrows on a map
"""
gooddates = self.get_time_index(datemin, datemax)
m.plot(self.lon[gooddates], self.lat[gooddates], "k--", lw=.2, latlon=True)
llon, llat = m(self.lon[gooddates], self.lat[gooddates])
qv = plt.quiver(llon, llat,
self.u[gooddates, depthindex] / self.velnorm[gooddates, depthindex],
self.v[gooddates, depthindex] / self.velnorm[gooddates, depthindex],
self.velnorm[gooddates, depthindex], headwidth=0, scale=25, cmap=cmocean.cm.speed)
cb = plt.colorbar(qv, shrink=0.8, extend="max")
cb.set_label("$\|v\|$\n(m/s)", rotation=0, ha="left", fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=12)
plt.clim(0, 1.)
if depth:
plt.title("Depth: {} m".format(depth), fontsize=20)
def add_rectangle(self, N1, N2, m, dlon=0.02, dlat=0.02, label=None):
"""
Draw a rectangle around the transect
N1 and N2 are the indices of the extreme points
of the considered section
"""
lonmin = self.lon[N1:N2].min() - dlon
lonmax = self.lon[N1:N2].max() + dlon
latmin = self.lat[N1:N2].min() - dlat
latmax = self.lat[N1:N2].max() + dlat
lonrec = [lonmin, lonmax, lonmax, lonmin, lonmin]
latrec = [latmin, latmin, latmax, latmax, latmin]
m.plot(lonrec, latrec, "k-.", lw=1, latlon=True)
# Add a label on top of the rectangle
if label is not None:
lontext = 0.5 * (lonmin + lonmax)
lattext = latmax
xt, yt = m(lontext, lattext)
plt.text(xt, yt, label, fontsize=16, ha="center", va="bottom")
@staticmethod
def make_velocity_section(lat, depth, u, frontlat=None, title=None, xlabel=None):
"""
Create a meridional section of zonal velocity
Inputs:
lat: 1-D array of latitudes
depth: 1-D array of depths
u: 2-D array of velocities
"""
plt.pcolormesh(lat, depth, u, cmap=cmocean.cm.speed, vmin=0, vmax=1.)
# Front position
if frontlat is not None:
plt.vlines(frontlat, 0, 400, colors='k', linestyles='--', linewidth=.5)
if xlabel is not None:
plt.xlabel(xlabel, fontsize=14)
plt.ylabel("Depth\n(m)", rotation=0, ha="right", fontsize=14)
cb = plt.colorbar(extend="max")
cb.set_label("u\n(m/s)", rotation=0, ha="left", fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=12)
if title is not None:
plt.title(title, fontsize=20)
xticks =
|
np.arange(36.5, 37.5, 0.1)
|
numpy.arange
|
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from numpy.linalg import norm
import os
from random import normalvariate
from math import sqrt
FOLDER = "./Dataset/"
FILES = os.listdir(FOLDER)
TEST_DIR = "./Testset/"
def load_images_train_and_test(TEST):
test=np.asarray(Image.open(TEST)).flatten()
train=[]
for name in FILES:
train.append(np.asarray(Image.open(FOLDER + name)).flatten())
train= np.array(train)
return test,train
def normalize(test,train):
"""
TODO : Normalize test and train and return them properly
Hint : to calculate the mean properly, use the two-argument (axis) version of numpy's mean method (https://www.javatpoint.com/numpy-mean)
Hint : normalize test with train mean
"""
arr = np.mean(train, axis=0)
normalized_test = test - arr
normalized_train = np.empty((0,train.shape[1]))
for i in range(train.shape[0]):
normalized_train = np.vstack([normalized_train, (np.array(train[i, :]) - arr)])
return normalized_test,normalized_train
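# Small illustration of the hint above (not part of the assignment): calling
# np.mean with axis=0 averages over the image axis, i.e. one mean per pixel:
# np.mean(np.array([[1., 3.], [3., 5.]]), axis=0) -> array([2., 4.])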
def svd_function(images):
"""
TODO : implement SVD (use np.linalg.svd) and return u,s,v
Additional(Emtiazi) todo : implement svd without using np.linalg.svd
"""
# Use the power-iteration based implementation defined in the SVD part below;
# np.linalg.svd(images, full_matrices=False) would be an equivalent shortcut.
return svd(images)
def project_and_calculate_weights(img,u):
"""
TODO : calculate element wise multiplication of img and u . (you can use numpy methods)
"""
return np.multiply(img, u)
def predict(test,train):
"""
TODO : Find the most similar face to test among train set by calculating errors and finding the face that has minimum error
return : index of the data that has minimum error in train dataset
Hint : error(i) = norm(train[:,i] - test) (you can use np.linalg.norm)
"""
min_error = 1_000_000_000
min_index = 0
for i in range(train.shape[1]):
error = np.linalg.norm(train[:,i] - test)
if error < min_error:
min_index = i
min_error = error
return min_index
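# Tiny illustration (synthetic data, not part of the assignment): each column of
# `train` is one flattened face, so predict returns the index of the closest column.
# train = np.array([[0., 10.], [0., 10.]]); test = np.array([1., 1.])
# predict(test, train) -> 0  (column 0 is much closer to test than column 1)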
def plot_face(tested,predicted):
"""
TODO : Plot tested image and predicted image . It would be great if you show them next to each other
with subplot and figures that you learned in matplotlib video in the channel.
But you are allowed to show them one by one
"""
f, plt_arr = plt.subplots(1, 2 ,figsize=(7, 3))
f.suptitle('Result Plots')
plt_arr[0].imshow(tested, cmap = "gray")
plt_arr[0].set_title('tested')
plt_arr[1].imshow(predicted, cmap = "gray")
plt_arr[1].set_title('predicted')
###################################### SVD part ######################################
def randomUnitVector(n):
unnormalized = [normalvariate(0, 1) for _ in range(n)]
theNorm = sqrt(sum(x * x for x in unnormalized))
return [x / theNorm for x in unnormalized]
def svd_1d(A):
''' The one-dimensional SVD
We use the power iteration method to calculate the SVD:
this algorithm produces the greatest (in absolute value) eigenvalue of A,
so with its help we can find eigenvalues and eigenvectors
(the eigenvectors will be orthogonal) one by one and then construct the SVD from them.
In the power iteration method we start with v_0, which may be a random vector.
At every iteration this vector is updated using the following rule:
v_k+1 = Bv_k / ||Bv_k||
We continue until the result has converged.
The power method has a few assumptions:
- v_0 has a nonzero component in the direction of an eigenvector associated with the dominant eigenvalue
(i.e. v_0 is NOT orthogonal to that eigenvector).
Initializing v_0 randomly minimizes the possibility that this assumption is not fulfilled.
- matrix A has a dominant eigenvalue whose magnitude is strictly greater than that of the other eigenvalues.
These assumptions guarantee that the algorithm converges to a reasonable result.
Once v_i has converged, the method has found the dominant singular value/eigenvector and returns that eigenvector.
'''
n, m = A.shape
# v_0 = x = random unit vector
x = randomUnitVector(min(n,m))
currentV = x
#v_1 = ?
lastV = None
# calculate B according to the shape of A so that we work with the smaller matrix
if n > m:
B = np.dot(A.T, A)
else:
B = np.dot(A, A.T)
# v_k+1 = Bv_k / ||Bv_k||
iterations = 0
epsilon=1e-10
while True:
iterations += 1
lastV = currentV
currentV = np.dot(B, lastV)
currentV = currentV / norm(currentV)
# continue until result has converged (updates are less than threshold).
# if two normal vector become same then inner product of them will be 1
if abs(np.dot(currentV, lastV)) > 1 - epsilon:
return currentV
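# Hedged sanity check (not part of the original assignment): the vector returned
# by svd_1d should match the dominant right singular vector from np.linalg.svd
# up to sign. The example matrix is arbitrary.
def _check_svd_1d():
    A = np.array([[3.0, 1.0], [1.0, 2.0], [0.0, 1.0]])
    v = svd_1d(A)                    # dominant eigenvector of A.T @ A
    _, _, vt = np.linalg.svd(A)      # vt[0] is the dominant right singular vector
    assert abs(abs(np.dot(v, vt[0])) - 1.0) < 1e-4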
def svd(A):
'''
Compute the singular value decomposition of a matrix A
using the power method. A is the input matrix, and k
is the number of singular values you wish to compute.
If k is None, this computes the full-rank decomposition.
'''
A = np.array(A, dtype=float)
n, m = A.shape
# save (singular value, u, v) as each element
svdSoFar = []
k = min(n, m)
for i in range(k):
matrixFor1D = A.copy()
# remove all previous eigen values and vectors (dominant one) from matrix
# so the next dominant eigen value and vector won't be repetitive
# A_next = A-(singular_value)(u)(v.T)
for singularValue, u, v in svdSoFar[:i]:
matrixFor1D -= singularValue * np.outer(u, v)
# 1. find v_i which is the next eigen vector for B = A.T @ A
# 2. find sigma_i = ||Av_i|| (reason is in the next line)
# ||Av_i||^2 = (Av_i).T @ (Av_i) = (v_i).T @ A.T @ A @ v_i = (v_i).T @ (lambda_i * v_i) == v_i is orthonormal ==> lambda_i = sigma_i ^ 2
# 3. find u_i = 1/sigma_i * Av_i
if n > m:
# 1
v = svd_1d(matrixFor1D)
u_unnormalized = np.dot(A, v)
# 2
sigma = norm(u_unnormalized) # next singular value
# 3
u = u_unnormalized / sigma
else:
u = svd_1d(matrixFor1D) # next singular vector
v_unnormalized =
|
np.dot(A.T, u)
|
numpy.dot
|
import numpy as np
from SEAL.SplineFunction import SplineFunction
from SEAL.SplineSpace import SplineSpace
from SEAL.lib import knot_averages
def variation_diminishing_spline_approximation(f, p, t):
"""
Given a callable function f defined on the knot vector of S,
finds the variation diminishing spline approximation (VDSA) to f
in the spline space S.
:param f: callable function defined on knot vector
:param p: spline degree
:param t: p + 1 regular knot vector with t1 = a, t_n+1 = b
:return: the variation diminishing spline approximation to f
"""
vdsa_coefficients = [f(tau) for tau in knot_averages(t, p)]
return SplineFunction(p, t, vdsa_coefficients)
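# Hedged usage sketch (the knot vector and target function are assumptions, not
# taken from the SEAL documentation): a quadratic VDSA of sin on [0, 3] built
# from a p + 1 regular knot vector.
def vdsa_example():
    import math
    p = 2
    t = np.array([0, 0, 0, 1, 2, 3, 3, 3], dtype=float)  # 3-regular knots on [0, 3]
    return variation_diminishing_spline_approximation(math.sin, p, t)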
def least_squares_spline_approximation(parameter_values, data_values, spline_space, weights=None):
"""
Given a set of m data points (x_i, y_i), and a SplineSpace S,
compute the weighted least squares spline approximation to the data.
:type spline_space: SplineSpace
:param parameter_values: np.ndarray, shape (m, 2)
:param data_values: np.ndarray, shape (m, 2)
:param spline_space: SplineSpace object
:param weights: Optional. np.ndarray, shape (m, 1),
:return: SplineFunction, the least squares spline approximation
"""
m = len(parameter_values)
n = spline_space.n
basis = spline_space.basis
if weights is None:
weights = np.ones(m)
# TODO: Make sure this is sufficient
if isinstance(data_values, (list, tuple)) or data_values.ndim == 1:
dim = 1
data_values =
|
np.reshape(data_values, (m, 1))
|
numpy.reshape
|
import os
import h5py
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.transform import resize
################# Load Data ########################
print('loading data...')
datafilename = '10800_K_Press_Sat.npz'
dirName = '/p/lustre2/tang39/SMART/data_generator/saturation_data_generator/'
filepath = dirName + datafilename
data = np.load(filepath, allow_pickle=True)
kHydro = data['kHydro']
saturation = data['Saturation']
poro_scale_factor = data['poro_scale_factor']
print('shape of k: ', kHydro.shape)
print('shape of saturation: ', saturation.shape)
print('shape of poro_scale_factor:', poro_scale_factor.shape)
################# Data Preprocess ########################
nr, nt, nx, ny = saturation.shape
train_nr = 1000
val_nr = 400
test_nr = 400
maplength = 200
cropstart = 6
time = np.arange(1, 11, 1)/10
time = np.repeat(time[None, :], nr, axis=0)
print('time shape: ', time.shape)
porosity = (kHydro / (1e-15) / 0.0009)**(1 / 4.0001) / 100 * \
poro_scale_factor[:, np.newaxis, np.newaxis]
gas_volume = saturation[:, 4, :, :] * porosity
well_1 = np.sum(gas_volume[:, 0:105, 0:105], axis=(1, 2))
well_2 = np.sum(gas_volume[:, 0:105, 105:], axis=(1, 2))
well_3 = np.sum(gas_volume[:, 105:, 0:105], axis=(1, 2))
well_4 = np.sum(gas_volume[:, 105:, 105:], axis=(1, 2))
well_sum = well_1 + well_2 + well_3 + well_4
del porosity, gas_volume
ratio_map = np.zeros(saturation.shape, dtype=np.float32)
ratio_map[:, :, 71, 71] = well_1[:, None]/well_sum[:, None]/poro_scale_factor[:, None] * time
ratio_map[:, :, 71, 141] = well_2[:, None]/well_sum[:, None]/poro_scale_factor[:, None] * time
ratio_map[:, :, 141, 71] = well_3[:, None]/well_sum[:, None]/poro_scale_factor[:, None] * time
ratio_map[:, :, 141, 141] = well_4[:, None]/well_sum[:, None]/poro_scale_factor[:, None] * time
ratio_map_scale = ratio_map[:, :, cropstart:cropstart+maplength, cropstart:cropstart+maplength]
ratio_map_scale =
|
np.reshape(ratio_map_scale, (-1, maplength, maplength))
|
numpy.reshape
|
r"""
This module contains specific inner product matrices for the different bases in
the Legendre family.
A naming convention is used for the first three capital letters for all matrices.
The first letter refers to type of matrix.
- Mass matrices start with `B`
- One derivative start with `C`
- Stiffness - One derivative for test and trial - start with `A`
- Biharmonic - Two derivatives for test and trial - start with `S`
The next two letters refer to the test and trialfunctions, respectively
- Dirichlet: `D`
- Neumann: `N`
- Legendre: `L`
- Biharmonic: `B`
As such, there are 4 mass matrices, BDDmat, BNNmat, BLLmat and BBBmat,
corresponding to the four bases above.
A matrix may consist of different types of test and trialfunctions as long as
they are all in the Legendre family. A mass matrix using Dirichlet test and
Neumann trial is named BDNmat.
All matrices in this module may be looked up using the 'mat' dictionary,
which takes test and trialfunctions along with the number of derivatives
to be applied to each. As such the mass matrix BDDmat may be looked up
as
>>> import numpy as np
>>> from shenfun.legendre.matrices import mat
>>> from shenfun.legendre.bases import ShenDirichlet as SD
>>> B = mat[((SD, 0), (SD, 0))]
and an instance of the matrix can be created as
>>> B0 = SD(10)
>>> BM = B((B0, 0), (B0, 0))
>>> d = {-2: np.array([-0.4, -0.28571429, -0.22222222, -0.18181818, -0.15384615, -0.13333333]),
... 0: np.array([2.4, 0.95238095, 0.62222222, 0.46753247, 0.37606838, 0.31515152, 0.27149321, 0.23859649]),
... 2: np.array([-0.4, -0.28571429, -0.22222222, -0.18181818, -0.15384615, -0.13333333])}
>>> [np.all(abs(BM[k]-v) < 1e-7) for k, v in d.items()]
[True, True, True]
However, this way of creating matrices is not recommended. It is far
more elegant to use the TrialFunction/TestFunction interface, and to
generate the matrix as an inner product:
>>> from shenfun import TrialFunction, TestFunction, inner
>>> u = TrialFunction(B0)
>>> v = TestFunction(B0)
>>> BM = inner(u, v)
>>> [np.all(abs(BM[k]-v) < 1e-7) for k, v in d.items()]
[True, True, True]
To see that this is in fact the BDDmat:
>>> print(BM.__class__)
<class 'shenfun.legendre.matrices.BDDmat'>
"""
from __future__ import division
#__all__ = ['mat']
import functools
import numpy as np
import sympy as sp
from shenfun.matrixbase import SpectralMatrix
from shenfun.la import TDMA as neumann_TDMA
from shenfun.optimization import cython
from .la import TDMA
from . import bases
# Short names for instances of bases
LB = bases.Orthogonal
SD = bases.ShenDirichlet
SB = bases.ShenBiharmonic
SN = bases.ShenNeumann
SU = bases.UpperDirichlet
DN = bases.DirichletNeumann
CD = bases.BCDirichlet
CB = bases.BCBiharmonic
BF = bases.BeamFixedFree
x = sp.symbols('x', real=True)
xp = sp.symbols('x', real=True, positive=True)
#pylint: disable=unused-variable, redefined-builtin, bad-continuation
class BLLmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (L_j, L_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`L_k` is the Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], LB)
assert isinstance(trial[0], LB)
N = test[0].N
k = np.arange(N, dtype=np.float)
d = {0: 2./(2.*k+1)}
if test[0].quad == 'GL':
d[0][-1] = 2./(N-1)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
def solve(self, b, u=None, axis=0):
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
sl = [np.newaxis]*u.ndim
sl[axis] = s
sl = tuple(sl)
ss = self.trialfunction[0].sl[s]
d = (1./self.scale)/self[0]
u[ss] = b[ss]*d[sl]
return u
class BDDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
N = test[0].N
k = np.arange(N-2, dtype=np.float)
d = {-2: -2./(2*k[2:] + 1),
0: 2./(2.*k+1) + 2./(2*k+5)}
if test[0].quad == 'GL':
d[0][-1] = 2./(2*(N-3)+1) + 2./(N-1)
if test[0].is_scaled():
d[0] /= (4*k+6)
d[-2] /= (np.sqrt(4*k[2:]+6)*np.sqrt(4*k[:-2]+6))
d[2] = d[-2]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self.solve = TDMA(self)
class BNNmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Neumann basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SN)
assert isinstance(trial[0], SN)
N = test[0].N
k = np.arange(N-2, dtype=np.float)
alpha = k*(k+1)/(k+2)/(k+3)
d0 = 2./(2*k+1)
d = {0: d0 + alpha**2*2./(2*(k+2)+1),
2: -d0[2:]*alpha[:-2]}
if test[0].quad == 'GL':
d[0][-1] = d0[-1] + alpha[-1]**2*2./(N-1)
d[-2] = d[2]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
#self.solve = neumann_TDMA(self)
class BBBmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the Shen Legendre Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
from shenfun.la import PDMA
assert isinstance(test[0], SB)
assert isinstance(trial[0], SB)
N = test[0].N
k = np.arange(N, dtype=np.float)
gk = (2*k+3)/(2*k+7)
hk = -(1+gk)
ek = 2./(2*k+1)
if test[0].quad == 'GL':
ek[-1] = 2./(N-1)
d = {0: ek[:-4] + hk[:-4]**2*ek[2:-2] + gk[:-4]**2*ek[4:],
2: hk[:-6]*ek[2:-4] + gk[:-6]*hk[2:-4]*ek[4:-2],
4: gk[:-8]*ek[4:-4]}
d[-2] = d[2]
d[-4] = d[4]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self.solve = PDMA(self)
class BBFBFmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the BeamFixedFree Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], BF)
assert isinstance(trial[0], BF)
N = test[0].N
k = np.arange(N-4, dtype=np.float)
f1 = lambda k: 4*(2*k+3)/((k+3)**2)
f2 = lambda k: -(2*(k-1)*(k+1)*(k+6)*(2*k+5)/((k+3)**2*(k+4)*(2*k+7)))
f3 = lambda k: -4*(k+1)**2*(2*k+3)/((k+3)**2*(k+4)**2)
f4 = lambda k: (((k+1)/(k+3))*((k+2)/(k+4)))**2*(2*k+3)/(2*k+7)
d = {0: 2/(2*k+1)+f1(k)**2*2/(2*k+3)+f2(k)**2*2/(2*k+5)+f3(k)**2*2/(2*k+7)+f4(k)**2*2/(2*k+9),
1: (f1(k)*2/(2*k+3)+f1(k+1)*f2(k)*2/(2*k+5)+f2(k+1)*f3(k)*2/(2*k+7)+f3(k+1)*f4(k)*2/(2*k+9))[:-1],
2: (f2(k)*2/(2*k+5)+f1(k+2)*f3(k)*2/(2*k+7)+f2(k+2)*f4(k)*2/(2*k+9))[:-2],
3: (f3(k)*2/(2*k+7)+f1(k+3)*f4(k)*2/(2*k+9))[:-3],
4: (f4(k)*2/(2*k+9))[:-4]
}
d[-1] = d[1].copy()
d[-2] = d[2].copy()
d[-3] = d[3].copy()
d[-4] = d[4].copy()
if test[0].quad == 'GL':
k = N-5
d[0][-1] = 2/(2*k+1)+f1(k)**2*2/(2*k+3)+f2(k)**2*2/(2*k+5)+f3(k)**2*2/(2*k+7)+f4(k)**2*2/(N-1)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BDLmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (L_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], LB)
N = test[0].N
k = np.arange(N, dtype=np.float)
sc = np.ones(N)
if test[0].is_scaled():
sc = 1. / np.sqrt(4*k+6)
d = {2: -2./(2*k[2:] + 1)*sc[:-2],
0: 2./(2.*k[:-2]+1)*sc[:-2]}
if test[0].quad == 'GL':
d[2][-1] = -2./(N-1)*sc[N-3]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BLDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, L_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N
and :math:`\psi_j` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], LB)
assert isinstance(trial[0], SD)
N = test[0].N
k = np.arange(N, dtype=np.float)
sc = np.ones(N)
if trial[0].is_scaled():
sc = 1. / np.sqrt(4*k+6)
d = {-2: -2./(2*k[2:] + 1)*sc[:-2],
0: 2./(2.*k[:-2]+1)*sc[:-2]}
if test[0].quad == 'GL':
d[-2][-1] = -2./(N-1)*sc[-3]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BDNDNmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is a mixed Legendre Dirichlet/Neumann basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], DN)
assert isinstance(trial[0], DN)
N = test[0].N
k = np.arange(N-2, dtype=np.float)
km = k[:-1]
kp = k[:-2]
d = {0: 2/(2*k+1) + 2*((2*k+3)/(k+2))/(k+2)**3 + 2*((k+1)/(k+2))**4/(2*k+5),
1: (2/(km+2)**2 - 2*((km+1)/(km+2))**2/(km+3)**2),
2: -2*((kp+1)/(kp+2))**2/(2*kp+5)
}
d[-1] = d[1].copy()
d[-2] = d[2].copy()
if test[0].quad == 'GL':
k = N-3
d[0][-1] = 2/(2*k+1) + 2*((2*k+3)/(k+2))/(k+2)**3 + 2*((k+1)/(k+2))**4/(N-1)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class ADDmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
N = test[0].N
k = np.arange(N-2, dtype=np.float)
if not test[0].is_scaled():
d = {0: 4*k+6}
else:
d = {0: 1}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 2
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
if not self.trialfunction[0].is_scaled():
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = 1./self[0]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
else:
ss = [slice(None)]*b.ndim
ss[axis] = s
ss = tuple(ss)
u[ss] = b[ss]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
return u
class ANNmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Neumann basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SN)
assert isinstance(trial[0], SN)
N = test[0].N
k = np.arange(N-2, dtype=np.float)
alpha = k*(k+1)/(k+2)/(k+3)
d0 = 2./(2*k+1)
d = {0: d0*alpha*(k+0.5)*((k+2)*(k+3)-k*(k+1))}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 2
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = np.ones(self.shape[0])
d[1:] = 1./self[0][1:]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
u[0] = self.testfunction[0].mean/(2/self.testfunction[0].domain_factor())
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
return u
class ABBmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the Shen Legendre Biharmonic basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SB)
assert isinstance(trial[0], SB)
N = test[0].N
k = np.arange(N-4, dtype=np.float)
gk = (2*k+3)/(2*k+7)
d = {0: 2*(2*k+3)*(1+gk),
2: -2*(2*k[:-2]+3)}
d[-2] = d[2]
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ADNDNmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the mixed Legendre Dirichlet/Neumann basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], DN)
assert isinstance(trial[0], DN)
N = test[0].N
k = np.arange(N-2, dtype=np.float)
d = {0: ((k+1)/(k+2))**2*((k+2)*(k+3)- k*(k+1))}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 2
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = 1./self[0]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
return u
class SBFBFmat(SpectralMatrix):
r"""Biharmonic matrix for inner product
.. math::
S_{kj} = (\psi''_j, \psi''_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the BeamFixedFree basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], BF)
assert isinstance(trial[0], BF)
N = test[0].N
k = np.arange(N-4, dtype=np.float)
f4 = (((k+1)/(k+3))*((k+2)/(k+4)))**2*(2*k+3)/(2*k+7)
d = {0: f4*(k+2.5)*((k+4)*(k+5)-(k+2)*(k+3))*((k+2)*(k+3)-k*(k+1))}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 4
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = 1./self[0]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
return u
class GLLmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
B_{kj} = (L_j'', L_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`L_k` is the Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], LB)
assert isinstance(trial[0], LB)
N = test[0].N
k = np.arange(N, dtype=np.float)
d = {}
for j in range(2, N, 2):
jj = j if trial[1] else -j
d[jj] = (k[:-j]+0.5)*(k[j:]*(k[j:]+1) - k[:-j]*(k[:-j]+1))*2./(2*k[:-j]+1)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self._matvec_methods += ['cython']
def matvec(self, v, c, format='cython', axis=0):
c.fill(0)
trial = self.trialfunction[1]
if format == 'cython' and v.ndim == 3 and trial:
cython.Matvec.GLL_matvec3D_ptr(v, c, axis)
self.scale_array(c)
elif format == 'cython' and v.ndim == 2 and trial:
cython.Matvec.GLL_matvec2D_ptr(v, c, axis)
self.scale_array(c)
elif format == 'cython' and v.ndim == 1 and trial:
cython.Matvec.GLL_matvec(v, c)
self.scale_array(c)
else:
c = super(GLLmat, self).matvec(v, c, format=format, axis=axis)
return c
class SBBmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi''_j, \psi''_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the Shen Legendre Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SB)
assert isinstance(trial[0], SB)
N = test[0].N
k = np.arange(N-4, dtype=np.float)
d = {0: 2*(2*k+3)**2*(2*k+5)}
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CLLmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi'_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`\psi_k` is the orthogonal Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], LB)
assert isinstance(trial[0], LB)
N = test[0].N
d = {}
for i in range(1, N, 2):
d[i] = 2
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self._matvec_methods += ['cython', 'self']
def matvec(self, v, c, format='self', axis=0):
c.fill(0)
if format == 'self':
if axis > 0:
c = np.moveaxis(c, axis, 0)
v = np.moveaxis(v, axis, 0)
ve = v[-2:0:-2].cumsum(axis=0)
vo = v[-1:0:-2].cumsum(axis=0)
c[-3::-2] = ve*2
c[-2::-2] = vo*2
if axis > 0:
c = np.moveaxis(c, 0, axis)
v = np.moveaxis(v, 0, axis)
self.scale_array(c)
elif format == 'cython' and v.ndim == 3:
cython.Matvec.CLL_matvec3D_ptr(v, c, axis)
self.scale_array(c)
elif format == 'cython' and v.ndim == 2:
cython.Matvec.CLL_matvec2D_ptr(v, c, axis)
self.scale_array(c)
elif format == 'cython' and v.ndim == 1:
cython.Matvec.CLL_matvec(v, c)
self.scale_array(c)
else:
c = super(CLLmat, self).matvec(v, c, format=format, axis=axis)
return c
class CLLmatT(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi'_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`\psi_k` is the orthogonal Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], LB)
assert isinstance(trial[0], LB)
N = test[0].N
d = {}
for i in range(-1, -N, -2):
d[i] = 2
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CLDmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi'_j, L_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], LB)
assert isinstance(trial[0], SD)
N = test[0].N
d = {-1: -2}
if trial[0].is_scaled():
k = np.arange(N-2, dtype=np.float)
d[-1] = -2. / np.sqrt(4*k+6)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CDLmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (L_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], LB)
N = test[0].N
d = {1: -2}
if test[0].is_scaled():
k = np.arange(N-2, dtype=np.float)
d[1] = -2. / np.sqrt(4*k+6)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CDDmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi'_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
N = test[0].N
d = {-1: -2, 1: 2}
if trial[0].is_scaled():
k = np.arange(N-2, dtype=np.float)
d[-1] = -2. / np.sqrt(4*k[:-1]+6)
d[1] = 2. / np.sqrt(4*k[:-1]+6)
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ADDrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j'(x) \psi_k'(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: 4*k+6, 1: 2*k[:-1]+4, -1: 2*k[:-1]+4}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ADD2rp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k''(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: -(4*k+6), 1: -(2*k[:-1]+6), -1: -(2*k[:-1]+2)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ADD2Trp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j''(x) \psi_k(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: -(4*k+6), -1: -(2*k[:-1]+6), 1: -(2*k[:-1]+2)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class AUUrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j'(x) \psi_k'(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SU)
assert isinstance(trial[0], SU)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
d = {0: 2*k+2}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class AUUrp1smat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j'(x) \psi_k'(x) (1+x)**2 dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SU)
assert isinstance(trial[0], SU)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
#d = {0: 4*k**2*(k+1)/(2*k+1)+4*(k+1)**2*(k+2)/(2*k+3)-4*k*(k+1),
# 1: 2*(k[:-1]+1)*(k[:-1]+2)-4*(k[:-1]+1)**2*(k[:-1]+2)/(2*k[:-1]+3)}
d = {0: 2*(k+1)**2*(1/(2*k+1)+1/(2*k+3)),
1: 2*k[1:]*(k[1:]+1)/(2*k[1:]+1)}
d[-1] = d[1].copy()
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class GUUrp1smat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k''(x) (1+x)**2 dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SU)
assert isinstance(trial[0], SU)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
d = {0: -2*(k+1)*((k-1)/(2*k+1) + (k+3)/(2*k+3)),
1: -2*(k[1:]+1)*(k[1:]+2)/(2*k[1:]+1),
-1: -2*k[:-1]*(k[:-1]+1)/(2*k[:-1]+3)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BUUrp1smat(SpectralMatrix):
r"""Matrix for inner product
.. math::
B_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) (1+x)**2 dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SU)
assert isinstance(trial[0], SU)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
#a00 = 2/(2*k+1)
#a11 = 2/(2*k+3)
#a22 = 2/(2*k+5)
#c00 = ((k+1)**2/(2*k+1)/(2*k+3) + k**2/(2*k+1)/(2*k-1))*a00
#c11 = ((k+2)**2/(2*k+3)/(2*k+5) + (k+1)**2/(2*k+3)/(2*k+1))*a11
#c02 = (k+2)*(k+1)/(2*k+5)/(2*k+3)*a00
#c13 = ((k+3)*(k+2)/(2*k+7)/(2*k+5))*a11
#b01 = (k+1)/(2*k+3)*a00
#b12 = (k+2)/(2*k+5)*a11
#d = {0: a00+c00-4*b01+a11+c11,
# 1: (2*b01-c02-a11-c11+2*b12)[:-1],
# -1: (2*b01-c02-a11-c11+2*b12)[:-1],
# 2: (c02-2*b12+c13)[:-2],
# -2: (c02-2*b12+c13)[:-2],
# 3: -c13[:-3].copy(),
# -3: -c13[:-3].copy()}
d = {0: (k/(2*k+1))**2*(2/(2*k-1) + 2/(2*k+3)) + ((k+2)/(2*k+3))**2 * (2/(2*k+1)+2/(2*k+5)),
1: 2*k[1:]*(k[1:]+1)/(2*k[1:]+1)**2*(1/(2*k[1:]-1)+1/(2*k[1:]+3)) - 2*(k[1:]+2)*(k[1:]-1)/(2*k[1:]+3)/(2*k[1:]+1)/(2*k[1:]-1),
2: -2*k[2:]*(k[2:]-2)/(2*k[2:]+1)/(2*k[2:]-1)/(2*k[2:]-3)-2*k[2:]*(k[2:]+2)/(2*k[2:]+3)/(2*k[2:]+1)/(2*k[2:]-1),
3: -2*k[3:]*(k[3:]-1)/(2*k[3:]+1)/(2*k[3:]-1)/(2*k[3:]-3)}
d[-1] = d[1].copy()
d[-2] = d[2].copy()
d[-3] = d[3].copy()
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class CUUrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k'(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SU)
assert isinstance(trial[0], SU)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
d = {0: -2*(k+1)/(2*k+1)+2*(k+1)/(2*k+3),
1: 2*(k[1:]+1)/(2*k[1:]+1),
-1: -2*(k[:-1]+1)/(2*k[:-1]+3)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BUUmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-1 \text{ and } k = 0, 1, ..., N-1
and :math:`\psi_k` is the Legendre UpperDirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SU)
assert isinstance(trial[0], SU)
N = test[0].N
k = np.arange(N-1, dtype=np.float)
d = {-1: -2./(2*k[1:] + 1),
0: 2./(2.*k+1) + 2./(2*k+3)}
if test[0].quad == 'GL':
d[0][-1] = 2./(2*(N-2)+1) + 2./(N-1)
d[1] = d[-1]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BUUrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
B_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SU)
assert isinstance(trial[0], SU)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
#d = {0: 2*k+2}
d = {0: 4*(k+1)/(2*k+1)/(2*k+3),
1: 4/(2*k[:-1]+1)/(2*k[:-1]+3)/(2*k[:-1]+5),
2: -2*(k[:-2]+2)/(2*k[:-2]+3)/(2*k[:-2]+5)}
d[-1] = d[1]
d[-2] = d[2]
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BDD1orp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) 1/(1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: 2*(2*k+3)/(k+1)/(k+2), 1: -2/(k[:-1]+2), -1: -2/(k[:-1]+2)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BDDrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: 2/(2*k+1)+2/(2*k+5),
1: 2/(2*k[:-1]+1)/(2*k[:-1]+5) + 2*(k[:-1]+3)/(2*k[:-1]+5)/(2*k[:-1]+7),
2: -2/(2*k[:-2]+5),
3: -2*(k[:-3]+3)/(2*k[:-3]+5)/(2*k[:-3]+7)}
d[-1] = d[1]
d[-2] = d[2]
d[-3] = d[3]
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BCDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \phi_k)_w
where
.. math::
j = 0, 1 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_j` is the Dirichlet boundary basis and
:math:`\phi_k` is the Shen Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], CD)
N = test[0].N
k = np.arange(N-2, dtype=np.float)
if not test[0].is_scaled():
d = {0: np.array([1, 1./3.]),
1: np.array([1.0]),
-1: np.array([-1./3., 0])}
else:
d = {0: np.array([1./np.sqrt(6.), 1./3./np.sqrt(10.)]),
1: np.array([1./np.sqrt(6.)]),
-1: np.array([-1./3./np.sqrt(10.), 0])}
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BCBmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \phi_k)_w
where
.. math::
j = 0, 1, 2, 3 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_j` is the Biharmonic boundary basis and
:math:`\phi_k` is the Shen Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SB)
assert isinstance(trial[0], CB)
N = test[0].N
k =
|
np.arange(N-4, dtype=np.float)
|
numpy.arange
|
import math
from typing import TypeVar, Generic
import numpy as np
import pyquaternion
NumPy3DArray = TypeVar("NumPy 3D array")
NumPy4DArray = TypeVar("NumPy 4D array")
NumPy3x3Matrix = TypeVar("NumPy 3x3 matrix")
DateTime = TypeVar("datetime object")
class Maths:
TWOPI = 2*math.pi
HALFPI = 0.5*math.pi
@staticmethod
def normalize_vect(v: NumPy3DArray) -> NumPy3DArray:
"""
Returns the normalized vector (Euclidian norm)
:param v: NumPy 3D vector
:return: normalized NumPy 3D vector
"""
norm = np.linalg.norm(v)
return v/norm if norm != 0 else v
@staticmethod
def angle_vects(v: NumPy3DArray, w: NumPy3DArray) -> float:
"""
Returns the angle, in radians, between the two NumPy 3D vectors given
:param v: NumPy 3D vector
:param w: NumPy 3D vector
:return: Angle, in radians, between v and w
"""
if np.all(v != w):
d = w.dot(v)
vnorm = np.linalg.norm(v)
wnorm =
|
np.linalg.norm(w)
|
numpy.linalg.norm
|
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
def to_four_points(rectangle):
center_x, center_y, w, h, angle = rectangle
half_w = w / 2
half_h = h / 2
v1 = [half_w * np.cos(angle), half_w * np.sin(angle)]
v2 = [-half_h * np.sin(angle), half_h * np.cos(angle)]
p0 = np.asarray((center_x, center_y))
p1 = p0 - v1 - v2
p2 = p0 + v1 - v2
p3 = p0 + v1 + v2
p4 = p0 - v1 + v2
new_row = [
|
np.round(p1)
|
numpy.round
|
#!/usr/bin/env python
"""
Single module to hold the high-level API
"""
import numpy as np
from .cy_point_in_polygon import points_in_poly, points_in_polys, signed_area
def polygon_inside(polygon_verts, trial_points):
'''
Return a Boolean array the size of the trial point array True if point is inside
INPUTS
------
polygon_verts: Mx2 array
trial_points: Nx2 array
RETURNS
-------
inside_points: Boolean array (len(N))
True if the trial point is inside the polygon
'''
polygon_verts =
|
np.asarray(polygon_verts, dtype=np.float)
|
numpy.asarray
|
import os
import psutil
import sys
import time
# using threads on a ryzen 1900x is faster by a factor of 3
use_threading = True
force_recompute = True
if use_threading:
# 8 processors -> 4 workers with 2 threads
os.environ["OMP_NUM_THREADS"] = "2"
os.environ["MKL_NUM_THREADS"] = "2"
os.environ["NUMEXPR_NUM_THREADS"] = "2"
phys_cpus = psutil.cpu_count(logical=False)
#num_procs = int(phys_cpus) # set 1 worker for every cpu -> reduce OMP threads to 1!
num_procs = 4
import math
import numpy as np
import numpy.ma as ma
from flowbias.datasets import FlowOnlyNpDataset
from flowbias.evaluations.log_transforms import log_index_fwd, log_index_reverse
from flowbias.utils.meta_infrastructure import get_available_datasets
from flowbias.utils.localstorage import LocalStorage
from multiprocessing import Pool
pi = np.pi
twopi = 2 * np.pi
assert (len(sys.argv) == 2)
dataset_name = sys.argv[1]
#dataset_name = "kitti2015Valid" # "flyingChairsFull"
#dataset_name = "@/data/dataB/temp/predictedFlows/pwcWOX1_on_CTSK_flyingChairsValid"
if dataset_name[0] != "@":
datasets = get_available_datasets(force_mode="test", restrict_to=[dataset_name])
else:
flow_dataset = FlowOnlyNpDataset({}, dataset_name[1:])
dataset_name = os.path.basename(dataset_name[1:])
datasets = {
dataset_name: flow_dataset
}
assert(len(datasets) == 1)
field_extend = 1500
field_size = (2 * field_extend) + 1
rstat_bins = 1500
logstat_bins = 3000
ahisto_bins = int(twopi * 100)
def compute_matrices(id_range):
id_a = id_range[0]
id_b = id_range[1]
dataset = datasets[dataset_name]
field = np.zeros((field_size, field_size), np.int)
log_field = np.zeros((field_size, field_size), np.int)
rstat = np.zeros(rstat_bins, np.int)
logstat = np.zeros(logstat_bins, np.int)
ahisto = np.zeros(ahisto_bins, np.int) # angle histogram, hundreds of degree
for i in range(id_a, id_b):
sample = dataset[i]
flow = np.transpose(sample["target1"].cpu().detach().numpy(), (1, 2, 0))
if "input_valid" in sample:
mask = sample["input_valid"].cpu().detach().numpy().astype(np.bool).squeeze()
flow = flow[mask]
else:
flow = flow.reshape(-1, 2)
xx = flow[:, 0]
yy = flow[:, 1]
r = np.sqrt(xx ** 2 + yy ** 2) # radius
a = np.arctan2(yy, xx) # angle [-pi, +pi]
has_flow_selector = r > 1e-10
num_zero_flow = r.size - np.count_nonzero(has_flow_selector)
# write absolute stats (rstat)
rstat_part, _ = np.histogram(r, rstat_bins, (0, rstat_bins))
rstat += rstat_part
# write angle histogram
an = ((a[has_flow_selector] + twopi) * 100).astype(np.int) % int(100 * twopi) # to range [0, 2PI] * 100
ahisto_part, _ = np.histogram(an, ahisto_bins, (0, ahisto_bins))
ahisto += ahisto_part
# log_stat histogram
log_r = log_index_fwd(r[has_flow_selector])
log_stat_part, _ = np.histogram(log_r, logstat_bins, (0, logstat_bins))
logstat += log_stat_part
logstat[0] += num_zero_flow
# absolute flow vector histogram
field_part, _, _ = np.histogram2d(
xx, yy,
[field_size, field_size],
[[-field_extend, field_extend], [-field_extend, field_extend]])
field += field_part.astype(np.int)
# log flow vector histogram
selected_a = a[has_flow_selector]
log_x = np.cos(selected_a) * log_r
log_y =
|
np.sin(selected_a)
|
numpy.sin
|
import pandas as pd
import numpy as np
import pytest
from .conftest import DATA_DIR, assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature, tools
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_cell_from_module(sapm_default):
default = temperature.sapm_cell_from_module(50, 900,
sapm_default['deltaT'])
assert_allclose(default, 50 + 900 / 1000 * sapm_default['deltaT'])
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = pd.Series([0., 23.06066166, 5.], index=times)
expected_module = pd.Series([0., 21.56066166, 5.], index=times)
assert_series_equal(expected_cell, cell_temps)
assert_series_equal(expected_module, module_temps)
def test_pvsyst_cell_default():
result = temperature.pvsyst_cell(900, 20, 5)
assert_allclose(result, 45.137, 0.001)
def test_pvsyst_cell_kwargs():
result = temperature.pvsyst_cell(900, 20, wind_speed=5.0, u_c=23.5,
u_v=6.25, module_efficiency=0.1)
assert_allclose(result, 33.315, 0.001)
def test_pvsyst_cell_ndarray():
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
result = temperature.pvsyst_cell(irrads, temps, wind_speed=winds)
expected = np.array([0.0, 23.96551, 5.0])
assert_allclose(expected, result, 3)
def test_pvsyst_cell_series():
times = pd.date_range(start="2015-01-01", end="2015-01-02", freq="12H")
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
result = temperature.pvsyst_cell(irrads, temps, wind_speed=winds)
expected = pd.Series([0.0, 23.96551, 5.0], index=times)
assert_series_equal(expected, result)
def test_pvsyst_cell_eta_m_deprecated():
with pytest.warns(pvlibDeprecationWarning):
result = temperature.pvsyst_cell(900, 20, wind_speed=5.0, u_c=23.5,
u_v=6.25, eta_m=0.1)
|
assert_allclose(result, 33.315, 0.001)
|
numpy.testing.assert_allclose
|
import numpy as np
from .ReadASCIIFile import ReadASCIIFile
def ReadASCIIData(fname,Header=True,SkipLines=0,dtype=None,SplitChar=None,
Missing=None,FillValFloat=np.nan,FillValInt=9999999,RemoveChar=None):
'''
This will attempt to read a formatted ASCII file into a
numpy.recarray object.
Inputs:
fname: name and path of file to read, or a list of strings to be
treated as a file.
Header: Tells the routine to use the first line (after skipping)
to get the column names
SkipLines: Tells the routine to skip the first few lines before
reading the data
dtype: If None then an attempt will be made to automatically
determine the dtype of each column, otherwise set to a list
of tuples containing the dtype and column names e.g.
[('a','float32'),('b','int32')]
SplitChar: By default the character separating the fields is
space or tab, set this variable to a string with the
substring which splits the values in one row of data (e.g.
SplitChar=',' for a .csv file, typically)
RemoveChar: None by default. If this is set to a string, each
character in this string will be removed from the text prior
to processing.
Returns:
numpy.recarray object
'''
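#Illustrative usage (hypothetical file name and column, not from this module):
#  data = ReadASCIIData('table.csv', Header=True, SplitChar=',')
#  data['colname']    # columns of the returned recarray are accessed by name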
intset = '0,1,2,3,4,5,6,7,8,9'.split(',')
floatset = '0,1,2,3,4,5,6,7,8,9,.,-,e,+'.split(',')
#read the files into an array of strings
if isinstance(fname,np.ndarray):
lines = fname
elif isinstance(fname,list):
lines = np.array(fname)
else:
lines = ReadASCIIFile(fname)
#skip any lines that may not have any data
if SkipLines > 0:
lines = lines[SkipLines:]
#get header if it exists
if Header:
head = lines[0]
lines = lines[1:]
#strip away some characters
if isinstance(RemoveChar,str):
chars = [l for l in RemoveChar]
for i in range(0,lines.size):
for c in chars:
lines[i] = lines[i].replace(c,'')
#get data dimensions (lines and columns)
nl = np.size(lines)
nc = np.size(lines[0].split(SplitChar))
#split data into columns
tmp = np.zeros(nl,dtype='object')
ncol =
|
np.zeros(nl,dtype='int32')
|
numpy.zeros
|
#!/usr/bin/env python
""" Where you at? """
import sys,os
import logging
from collections import OrderedDict as odict
from datetime import datetime,timedelta,tzinfo
import dateutil.parser
import mpl_toolkits.basemap as basemap
from matplotlib.patches import Ellipse, Circle
import matplotlib.patheffects as patheffects
from _tkinter import TclError
import numpy as np
import pylab as plt
import ephem
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "2.1.3"
MAXREF=5000 # Maximum number of refreshes
DECAM=1.1 # DECam radius (deg)
# Accurate DECam marker size depends on figsize and DPI
# This is a mess...
FIGSIZE=(10.5,8.5)
SCALE=np.sqrt((8.0*6.0)/(FIGSIZE[0]*FIGSIZE[1]))
DPI=80;
FILTERS = ['u','g','r','i','z','Y','VR']
BANDS = FILTERS + ['all']
COLORS = odict([
('none','black'),
('u','blue'),
('g','green'),
('r','red'),
('i','gold'),
('z','magenta'),
('Y','black'),
('VR','gray'),
])
# Allowed map projections
PROJ = odict([
('ortho' , dict(projection='ortho',celestial=True)),
('moll' , dict(projection='moll',celestial=True)),
('mol' , dict(projection='moll',celestial=True)),
('ait' , dict(projection='hammer',celestial=True)),
('mbt' , dict(projection='mbtfpq',celestial=True)),
('mbtfpq' , dict(projection='mbtfpq',celestial=True)),
('mcbryde', dict(projection='mbtfpq',celestial=True)),
])
# Derived from telra,teldec of 10000 exposures
SN = odict([
('E1',(7.874, -43.010)),
('E2',(9.500, -43.999)),
('X1',(34.476, -4.931)),
('X2',(35.664,-6.413)),
('X3',(36.449, -4.601)),
('S1',(42.818, 0.000)),
('S2',(41.193, -0.991)),
('C1',(54.274, -27.113)),
('C2',(54.274, -29.090)),
('C3',(52.647, -28.101)),
])
SN_LABELS = odict([
('SN-E',(8,-41)),
('SN-X',(35,-12)),
('SN-S',(45,1)),
('SN-C',(55,-35)),
])
# The allowed footprint outlines
FOOTPRINTS = ['none','des','des-sn','smash','maglites','bliss','decals','delve']
# CTIO location taken from:
#http://www.ctio.noao.edu/noao/content/Coordinates-Observatories-Cerro-Tololo-and-Cerro-Pachon
#http://arxiv.org/pdf/1210.1616v3.pdf
#(-30h 10m 10.73s, -70h 48m 23.52s, 2213m)
TEL_LON = -70.80653
TEL_LAT = -30.169647
TEL_HEIGHT = 2213
# Create the observatory object
CTIO = ephem.Observer()
CTIO.lon,CTIO.lat = str(TEL_LON),str(TEL_LAT)
CTIO.elevation = TEL_HEIGHT
def get_datadir():
""" Path to data directory. """
return os.path.join(os.path.dirname(os.path.realpath(__file__)),'data')
def setdefaults(kwargs,defaults):
""" set dictionary with defaults. """
for k,v in defaults.items():
kwargs.setdefault(k,v)
return kwargs
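# Illustrative use (hypothetical values): setdefaults({'color': 'r'}, {'color': 'b', 'lw': 2})
# fills in only the missing keys and returns {'color': 'r', 'lw': 2}.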
def gal2cel(glon, glat):
"""
Converts Galactic (deg) to Celestial J2000 (deg) coordinates
"""
glat = np.radians(glat)
sin_glat = np.sin(glat)
cos_glat = np.cos(glat)
glon = np.radians(glon)
ra_gp = np.radians(192.85948)
de_gp = np.radians(27.12825)
lcp = np.radians(122.932)
sin_lcp_glon = np.sin(lcp - glon)
cos_lcp_glon = np.cos(lcp - glon)
sin_d = (
|
np.sin(de_gp)
|
numpy.sin
|
import abc
import dataclasses
import math
from typing import List, Tuple, Union
import numpy as np
import pygame
CoordType = Union[Tuple[float, float], List[float], np.ndarray]
ArrayLike = Union[List[CoordType], Tuple[CoordType]]
def make_rect(width: float, height: float, outline: bool, dashed: bool = False):
rad_h = height / 2
rad_w = width / 2
points = [
(-rad_w, rad_h),
(rad_w, rad_h),
(rad_w, -rad_h),
(-rad_w, -rad_h),
]
poly = Poly(points, outline)
if dashed:
poly.dashed = True
return poly
def make_circle(radius, res, outline):
points = []
for i in range(res):
ang = 2 * math.pi * i / res
points.append((math.cos(ang) * radius, math.sin(ang) * radius))
return Poly(points, outline)
def make_square(side_length, outline):
return make_rect(side_length, side_length, outline)
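# Illustrative use (assumes the Poly class provided by this module):
#   make_circle(1.0, 16, outline=True)   # 16-gon approximation of a unit circle
#   make_square(2.0, outline=False)      # filled 2 x 2 axis-aligned square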
@dataclasses.dataclass
class Transform:
matrix: np.ndarray
@classmethod
def from_matrix(cls, matrix: np.ndarray):
tr = cls()
tr.matrix = matrix
return tr
@staticmethod
def create_translation_matrix(translation=(0, 0)):
return np.asarray(
[
[1.0, 0.0, translation[0]],
[0.0, 1.0, translation[1]],
[0.0, 0.0, 1.0],
]
)
@staticmethod
def create_rotation_matrix(rotation):
cos = math.cos(rotation)
sin = math.sin(rotation)
return np.asarray([[cos, -sin, 0.0], [sin, cos, 0.0], [0.0, 0.0, 1.0]])
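# These factories build 3x3 homogeneous 2-D transforms, so they compose via matrix
# multiplication; e.g. a rotation by pi/2 maps the point (1, 0, 1)^T to (0, 1, 1)^T.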
@staticmethod
def create_scaling_matrix(scale):
return
|
np.asarray([[scale[0], 0.0, 0.0], [0.0, scale[1], 0.0], [0.0, 0.0, 1.0]])
|
numpy.asarray
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME> @ UvA
"""
import warnings
import numpy as np
from typing import List, Optional, Callable, NamedTuple
from sklearn.base import is_classifier, is_regressor
from sklearn.base import RegressorMixin, BaseEstimator, ClassifierMixin
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier, OutputCodeClassifier
from sklearn.neighbors import KDTree
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from scipy.stats import mode
############################
warnings.formatwarning = lambda msg, *args, **kwargs: \
f'\nWARNING: \n'+' '.join(str(msg).split())+'\n'
def _LESSwarn(msg, flag=True):
if (flag):
warnings.warn(msg)
############################
############################
# Supporting classes
class SklearnEstimator:
'''
Dummy base class
'''
def fit(self, X: np.array, y: np.array):
'''
Dummy fit function
'''
raise NotImplementedError('Needs to implement fit(X, y)')
def predict(self, X0: np.array):
'''
Dummy predict function
'''
raise NotImplementedError('Needs to implement predict(X0)')
class LocalModel(NamedTuple):
'''
Auxiliary class to hold the local estimators
'''
estimator: SklearnEstimator
center: np.array
class Replication(NamedTuple):
'''
Auxiliary class to hold the replications
'''
sc_object: StandardScaler
global_estimator: SklearnEstimator
local_estimators: List[LocalModel]
############################
############################
def rbf(data, center, coeff=0.01):
'''
RBF kernel - L2 norm
This is used as the default distance function in LESS
'''
return np.exp(-coeff * np.linalg.norm(np.array(data - center, dtype=float), ord=2, axis=1))
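# Illustrative shape check (assumed usage, not from the source): for `data` of shape
# (n_samples, n_features) and a 1-D `center`, rbf returns an (n_samples,) array of
# weights in (0, 1]; a point equal to the center gives exp(0) = 1, and far points decay towards 0.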
############################
class _LESS(BaseEstimator, SklearnEstimator):
'''
The base class for LESSRegressor and LESSClassifier
'''
def __init__(self):
# List to store the replications
self._replications: Optional[List[Replication]] = None
# Scaling object used for normalization (StandardScaler)
self._scobject = None
# Flag to check whether LESS is fitted
self._isfitted = False
def _set_local_attributes(self):
'''
Storing the local variables and checking the given parameters
'''
if self.local_estimator is None:
raise ValueError('LESS does not work without a local estimator.')
if is_classifier(self.local_estimator):
_LESSwarn('''
LESS might work with local classifiers.
However, we recommend using regressors as the local estimators.
''', self.warnings)
if (type(self) == LESSRegressor and is_classifier(self.global_estimator)):
_LESSwarn('''
LESSRegressor might work with a global classifier.
However, we recommend using a regressor as the global estimator.
''', self.warnings)
if (type(self) == LESSClassifier and is_regressor(self.global_estimator)):
_LESSwarn('''
LESSClassifier might work with a global regressor.
However, we recommend using a classifier as the global estimator.
''', self.warnings)
if self.val_size is not None:
if(self.val_size <= 0.0 or self.val_size >= 1.0):
raise ValueError('Parameter val_size should be in the interval (0, 1).')
if self.frac is not None:
if(self.frac <= 0.0 or self.frac > 1.0):
raise ValueError('Parameter frac should be in the interval (0, 1].')
if self.n_replications < 1:
raise ValueError('The number of replications should be greater than or equal to one.')
if self.cluster_method is not None:
if self.frac is not None \
or self.n_neighbors is not None \
or self.n_subsets is not None:
_LESSwarn('''
Parameter cluster_method overrides parameters frac, n_neighbors and n_subsets. \
Proceeding with clustering...
''', self.warnings)
self.frac = None
self.n_neighbors = None
# Different numbers of subsets may be generated by the clustering method
self.n_subsets = []
if 'n_clusters' in self.cluster_method().get_params().keys():
if self.cluster_method().get_params()['n_clusters'] == 1:
_LESSwarn('''
There is only one cluster, so the
global estimator is set to none.
''', self.warnings)
self.global_estimator = None
self.d_normalize = True
# If there is also no validation step, then there is
# no randomness. So, no need for replications.
if (self.val_size is None):
_LESSwarn('''
Since validation set is not used, there is no randomness.
Thus, the number of replications is set to one.
''', self.warnings)
self.n_replications = 1
elif (self.frac is None and
self.n_neighbors is None and
self.n_subsets is None):
self.frac = 0.05
def _check_input(self, len_X: int):
'''
Checks whether the input is valid (len_X is the length of input data)
'''
if self.cluster_method is None:
if self.frac is not None:
self.n_neighbors = int(np.ceil(self.frac * len_X))
self.n_subsets = int(len_X/self.n_neighbors)
if self.n_subsets is None:
self.n_subsets = int(len_X/self.n_neighbors)
if self.n_neighbors is None:
self.n_neighbors = int(len_X/self.n_subsets)
if self.n_neighbors > len_X:
_LESSwarn('''
The number of neighbors is larger than the
number of samples. Setting number of subsets to one.
''', self.warnings)
self.n_neighbors = len_X
self.n_subsets = 1
if self.n_subsets > len_X:
_LESSwarn('''
The number of subsets is larger than the
number of samples. Setting number of neighbors to one.
''', self.warnings)
self.n_neighbors = 1
self.n_subsets = len_X
if self.n_subsets == 1:
_LESSwarn('''
There is only one subset, so the
global estimator is set to none.
''', self.warnings)
self.global_estimator = None
self.d_normalize = True
# If there is also no validation step, then there is
# no randomness. So, no need for replications.
if (self.val_size is None):
_LESSwarn('''
Since validation set is not used, there is no randomness.
Thus, the number of replications is set to one.
''', self.warnings)
self.n_replications = 1
def _fitnoval(self, X: np.array, y: np.array):
'''
Fit function: All data is used with the global estimator (no validation)
Tree method is used (no clustering)
'''
len_X: int = len(X)
# Check the validity of the input
self._check_input(len_X)
# A nearest neighbor tree is grown for querying
tree = self.tree_method(X, self.n_subsets)
self._replications = []
for _ in range(self.n_replications):
# Select n_subsets many samples to construct the local sample sets
sample_indices = self._rng.choice(len_X, size=self.n_subsets)
# Construct the local sample sets
_, neighbor_indices_list = tree.query(X[sample_indices], k=self.n_neighbors)
local_models: List[LocalModel] = []
dists = np.zeros((len_X, self.n_subsets))
predicts = np.zeros((len_X, self.n_subsets))
for neighbor_i, neighbor_indices in enumerate(neighbor_indices_list):
Xneighbors, yneighbors = X[neighbor_indices], y[neighbor_indices]
# Centroid is used as the center of the local sample set
local_center = np.mean(Xneighbors, axis=0)
if 'random_state' in self.local_estimator().get_params().keys():
local_model = self.local_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Xneighbors, yneighbors)
else:
local_model = self.local_estimator().fit(Xneighbors, yneighbors)
local_models.append(LocalModel(estimator=local_model, center=local_center))
predicts[:, neighbor_i] = local_model.predict(X)
if self.distance_function is None:
dists[:, neighbor_i] = rbf(X, local_center, \
coeff=1.0/np.power(self.n_subsets, 2.0))
else:
dists[:, neighbor_i] = self.distance_function(X, local_center)
# Normalize the distances from samples to the local subsets
if self.d_normalize:
denom = np.sum(dists, axis=1)
denom[denom < 1.0e-8] = 1.0e-8
dists = (dists.T/denom).T
Z = dists * predicts
scobject = StandardScaler()
if (self.scaling):
Z = scobject.fit_transform(Z)
if self.global_estimator is not None:
if 'random_state' in self.global_estimator().get_params().keys():
global_model = self.global_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Z, y)
else:
global_model = self.global_estimator().fit(Z, y)
else:
global_model = None
self._replications.append(Replication(sc_object=scobject,
global_estimator=global_model,
local_estimators=local_models))
return self
def _fitval(self, X: np.array, y: np.array):
'''
Fit function: (val_size x data) is used for the global estimator (validation)
Tree method is used (no clustering)
'''
self._replications = []
for i in range(self.n_replications):
# Split for global estimation
X_train, X_val, y_train, y_val = train_test_split(X, y,
test_size=self.val_size,
random_state=self._rng.integers(np.iinfo(np.int16).max))
len_X_val: int = len(X_val)
len_X_train: int = len(X_train)
# Check the validity of the input
if i == 0:
self._check_input(len_X_train)
# A nearest neighbor tree is grown for querying
tree = self.tree_method(X_train, self.n_subsets)
# Select n_subsets many samples to construct the local sample sets
sample_indices = self._rng.choice(len_X_train, size=self.n_subsets)
# Construct the local sample sets
_, neighbor_indices_list = tree.query(X_train[sample_indices], k=self.n_neighbors)
local_models: List[LocalModel] = []
dists = np.zeros((len_X_val, self.n_subsets))
predicts = np.zeros((len_X_val, self.n_subsets))
for neighbor_i, neighbor_indices in enumerate(neighbor_indices_list):
Xneighbors, yneighbors = X_train[neighbor_indices], y_train[neighbor_indices]
# Centroid is used as the center of the local sample set
local_center = np.mean(Xneighbors, axis=0)
if 'random_state' in self.local_estimator().get_params().keys():
local_model = self.local_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Xneighbors, yneighbors)
else:
local_model = self.local_estimator().fit(Xneighbors, yneighbors)
local_models.append(LocalModel(estimator=local_model, center=local_center))
predicts[:, neighbor_i] = local_model.predict(X_val)
if self.distance_function is None:
dists[:, neighbor_i] = rbf(X_val, local_center, \
coeff=1.0/np.power(self.n_subsets, 2.0))
else:
dists[:, neighbor_i] = self.distance_function(X_val, local_center)
# Normalize the distances from samples to the local subsets
if self.d_normalize:
denom = np.sum(dists, axis=1)
denom[denom < 1.0e-8] = 1.0e-8
dists = (dists.T/denom).T
Z = dists * predicts
scobject = StandardScaler()
if (self.scaling):
Z = scobject.fit_transform(Z)
if self.global_estimator is not None:
if 'random_state' in self.global_estimator().get_params().keys():
global_model = self.global_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Z, y_val)
else:
global_model = self.global_estimator().fit(Z, y_val)
else:
global_model = None
self._replications.append(Replication(sc_object=scobject,
global_estimator=global_model,
local_estimators=local_models))
return self
def _fitnovalc(self, X: np.array, y: np.array):
'''
Fit function: All data is used for the global estimator (no validation)
Clustering is used (no tree method)
'''
len_X: int = len(X)
# Check the validity of the input
self._check_input(len_X)
if 'random_state' not in self.cluster_method().get_params().keys():
_LESSwarn('''
Clustering method is not random, so there is
no need for replications unless a validation set is used.
The number of replications is set to one.
''', self.warnings)
self.n_replications = 1
if self.n_replications == 1:
cluster_fit = self.cluster_method().fit(X)
self._replications = []
for i in range(self.n_replications):
if self.n_replications > 1:
cluster_fit = self.cluster_method().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(X)
# Some clustering methods may find fewer clusters
# than the requested 'n_clusters'
self.n_subsets.append(len(np.unique(cluster_fit.labels_)))
n_subsets = self.n_subsets[i]
local_models: List[LocalModel] = []
dists = np.zeros((len_X, n_subsets))
predicts = np.zeros((len_X, n_subsets))
if hasattr(cluster_fit, 'cluster_centers_'):
use_cluster_centers = True
else:
use_cluster_centers = False
for cluster_indx, cluster in enumerate(np.unique(cluster_fit.labels_)):
neighbors = cluster_fit.labels_ == cluster
Xneighbors, yneighbors = X[neighbors], y[neighbors]
# Centroid is used as the center of the local sample set
if use_cluster_centers:
local_center = cluster_fit.cluster_centers_[cluster_indx]
else:
local_center = np.mean(Xneighbors, axis=0)
if 'random_state' in self.local_estimator().get_params().keys():
local_model = self.local_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Xneighbors, yneighbors)
else:
local_model = self.local_estimator().fit(Xneighbors, yneighbors)
local_models.append(LocalModel(estimator=local_model, center=local_center))
predicts[:, cluster_indx] = local_model.predict(X)
if self.distance_function is None:
dists[:, cluster_indx] = rbf(X, local_center, \
coeff=1.0/np.power(n_subsets, 2.0))
else:
dists[:, cluster_indx] = self.distance_function(X, local_center)
# Normalize the distances from samples to the local subsets
if self.d_normalize:
denom = np.sum(dists, axis=1)
denom[denom < 1.0e-8] = 1.0e-8
dists = (dists.T/denom).T
Z = dists * predicts
scobject = StandardScaler()
if (self.scaling):
Z = scobject.fit_transform(Z)
if self.global_estimator is not None:
if 'random_state' in self.global_estimator().get_params().keys():
global_model = self.global_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Z, y)
else:
global_model = self.global_estimator().fit(Z, y)
else:
global_model = None
self._replications.append(Replication(sc_object=scobject,
global_estimator=global_model,
local_estimators=local_models))
return self
def _fitvalc(self, X: np.array, y: np.array):
'''
Fit function: (val_size x data) is used for the global estimator (validation)
Clustering is used (no tree method)
'''
self._replications = []
for i in range(self.n_replications):
# Split for global estimation
X_train, X_val, y_train, y_val = train_test_split(X, y,
test_size=self.val_size,
random_state=self._rng.integers(np.iinfo(np.int16).max))
len_X_val: int = len(X_val)
len_X_train: int = len(X_train)
# Check the validity of the input
if i == 0:
self._check_input(len_X_train)
if 'random_state' in self.cluster_method().get_params().keys():
cluster_fit = self.cluster_method().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(X_train)
else:
cluster_fit = self.cluster_method().fit(X_train)
if i == 0:
if hasattr(cluster_fit, 'cluster_centers_'):
use_cluster_centers = True
else:
use_cluster_centers = False
# Some clustering methods may find fewer clusters
# than the requested 'n_clusters'
self.n_subsets.append(len(np.unique(cluster_fit.labels_)))
n_subsets = self.n_subsets[i]
local_models: List[LocalModel] = []
dists = np.zeros((len_X_val, n_subsets))
predicts = np.zeros((len_X_val, n_subsets))
for cluster_indx, cluster in enumerate(np.unique(cluster_fit.labels_)):
neighbors = cluster_fit.labels_ == cluster
Xneighbors, yneighbors = X_train[neighbors], y_train[neighbors]
# Centroid is used as the center of the local sample set
if use_cluster_centers:
local_center = cluster_fit.cluster_centers_[cluster_indx]
else:
local_center = np.mean(Xneighbors, axis=0)
if 'random_state' in self.local_estimator().get_params().keys():
local_model = self.local_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Xneighbors, yneighbors)
else:
local_model = self.local_estimator().fit(Xneighbors, yneighbors)
local_models.append(LocalModel(estimator=local_model, center=local_center))
predicts[:, cluster_indx] = local_model.predict(X_val)
if self.distance_function is None:
dists[:, cluster_indx] = rbf(X_val, local_center, \
coeff=1.0/np.power(n_subsets, 2.0))
else:
dists[:, cluster_indx] = self.distance_function(X_val, local_center)
# Normalize the distances from samples to the local subsets
if self.d_normalize:
denom = np.sum(dists, axis=1)
denom[denom < 1.0e-8] = 1.0e-8
dists = (dists.T/denom).T
Z = dists * predicts
scobject = StandardScaler()
if (self.scaling):
Z = scobject.fit_transform(Z)
if self.global_estimator is not None:
if 'random_state' in self.global_estimator().get_params().keys():
global_model = self.global_estimator().\
set_params(random_state=self._rng.integers(np.iinfo(np.int16).max)).\
fit(Z, y_val)
else:
global_model = self.global_estimator().fit(Z, y_val)
else:
global_model = None
self._replications.append(Replication(sc_object=scobject,
global_estimator=global_model,
local_estimators=local_models))
return self
def get_n_subsets(self):
'''
Auxiliary function returning the number of subsets
'''
return self.n_subsets
def get_n_neighbors(self):
'''
Auxiliary function returning the number of neighbors
'''
return self.n_neighbors
def get_frac(self):
'''
Auxiliary function returning the percentage of samples used to set the number of neighbors
'''
return self.frac
def get_n_replications(self):
'''
Auxiliary function returning the number of replications
'''
return self.n_replications
def get_d_normalize(self):
'''
Auxiliary function returning the flag for normalization
'''
return self.d_normalize
def get_scaling(self):
'''
Auxiliary function returning the flag for scaling
'''
return self.scaling
def get_val_size(self):
'''
Auxiliary function returning the validation set size
'''
return self.val_size
def get_random_state(self):
'''
Auxiliary function returning the random seed
'''
return self.random_state
class LESSClassifier(_LESS, ClassifierMixin):
'''
Classifier for Learning with Subset Selection (LESS)
This is a wrapper that calls the multiclass strategies, like one-vs-rest,
by using an auxiliary binary classifier for LESS (_LESSBC)
Parameters
----------
frac: fraction of total samples used for the number of neighbors (default is 0.05)
n_neighbors : number of neighbors (default is None)
n_subsets : number of subsets (default is None)
n_replications : number of replications (default is 20)
d_normalize : distance normalization (default is True)
val_size: percentage of samples used for validation (default is None - no validation)
random_state: initialization of the random seed (default is None)
tree_method : method used for constructing the nearest neighbor tree,
e.g., sklearn.neighbors.KDTree (default) or sklearn.neighbors.BallTree
cluster_method : method used for clustering the subsets,
e.g., sklearn.cluster.KMeans, sklearn.cluster.SpectralClustering (default is None)
local_estimator : estimator for the local models (default is LinearRegression)
global_estimator : estimator for the global model (default is DecisionTreeClassifier)
distance_function : distance function evaluating the distance from a subset to a sample,
e.g., df(subset, sample) which returns a vector of distances
(default is RBF(subset, sample, 1.0/n_subsets^2))
scaling: flag to normalize the input data (default is True)
warnings : flag to turn on (True) or off (False) the warnings (default is True)
multiclass : available strategies are 'ovr' (one-vs-rest, default),
'ovo' (one-vs-one), 'occ' (output-code-classifier)
Recommendation
--------------
Default implementation of LESS uses Euclidean distances with radial basis function.
Therefore, it is a good idea to scale the input data before fitting. This can be done by
setting the parameter 'scaling' to True (the default value) or preprocessing the data
as follows:
>>> from sklearn.preprocessing import StandardScaler
>>> SC = StandardScaler()
>>> X_train = SC.fit_transform(X_train)
>>> X_test = SC.transform(X_test)
'''
def __init__(self, frac=None, n_neighbors=None, n_subsets=None,
n_replications=20, d_normalize=True, val_size=None, random_state=None,
tree_method=lambda data, n_subsets: KDTree(data, n_subsets),
cluster_method=None,
local_estimator=lambda: LinearRegression(),
global_estimator=lambda: DecisionTreeClassifier(),
distance_function: Callable[[np.array, np.array], np.array]=None,
scaling=True, warnings=True, multiclass='ovr'):
self.local_estimator = local_estimator
self.global_estimator = global_estimator
self.tree_method = tree_method
self.cluster_method = cluster_method
self.distance_function = distance_function
self.frac = frac
self.n_neighbors = n_neighbors
self.n_subsets = n_subsets
self.n_replications = n_replications
self.d_normalize = d_normalize
self.val_size = val_size
self.random_state = random_state
self._bclassifier = None
self._strategy = None
self.scaling = scaling
self.warnings = warnings
self.multiclass = multiclass
class _LESSBC(_LESS):
'''
Auxiliary binary classifier for Learning with Subset Selection (LESS)
'''
def __init__(self, frac=None, n_neighbors=None, n_subsets=None,
n_replications=20, d_normalize=True, val_size=None, random_state=None,
tree_method=lambda data, n_subsets: KDTree(data, n_subsets),
cluster_method=None,
local_estimator=lambda: LinearRegression(),
global_estimator=lambda: DecisionTreeClassifier(),
distance_function: Callable[[np.array, np.array], np.array]=None,
scaling=True, warnings=True):
self.local_estimator = local_estimator
self.global_estimator = global_estimator
self.tree_method = tree_method
self.cluster_method = cluster_method
self.distance_function = distance_function
self.frac = frac
self.n_neighbors = n_neighbors
self.n_subsets = n_subsets
self.n_replications = n_replications
self.d_normalize = d_normalize
self.val_size = val_size
self.random_state = random_state
self._rng = np.random.default_rng(self.random_state)
self.scaling = scaling
self.warnings = warnings
def fit(self, X: np.array, y: np.array):
'''
Dummy fit function that calls the proper method according to
validation and clustering parameters
Options are:
- Default fitting (no validation set, no clustering)
- Fitting with validation set (no clustering)
- Fitting with clustering (no validation set)
- Fitting with validation set and clustering
'''
# Check that X and y have correct shape
X, y = check_X_y(X, y)
# Original labels
self._yorg = np.unique(y)
if len(self._yorg) != 2:
raise ValueError('LESSBinaryClassifier works only with two labels. \
Please try LESSClassifier.')
# Convert to binary labels
ymin1 = y == self._yorg[0]
ypls1 = y == self._yorg[1]
y[ymin1] = -1
y[ypls1] = 1
self._set_local_attributes()
if self.val_size is not None:
# Validation set is used for
# global estimation
if self.cluster_method is None:
self._fitval(X, y)
else:
self._fitvalc(X, y)
else:
# Validation set is not used for
# global estimation
if self.cluster_method is None:
self._fitnoval(X, y)
else:
self._fitnovalc(X, y)
# Convert to original labels
ymin1 = y == -1
ypls1 = y == 1
y[ymin1] = self._yorg[0]
y[ypls1] = self._yorg[1]
self._isfitted = True
return self
def predict(self, X0: np.array):
'''
Predictions are evaluated for the test samples in X0
'''
check_is_fitted(self, attributes='_isfitted')
# Input validation
X0 = check_array(X0)
len_X0: int = len(X0)
yhat = np.zeros((len_X0, self.n_replications))
for i in range(self.n_replications):
# Get the fitted global and local estimators
global_model = self._replications[i].global_estimator
local_models = self._replications[i].local_estimators
if self.cluster_method is None:
n_subsets = self.n_subsets
else:
n_subsets = self.n_subsets[i]
predicts = np.zeros((len_X0, n_subsets))
dists = np.zeros((len_X0, n_subsets))
for j in range(n_subsets):
local_center = local_models[j].center
local_model = local_models[j].estimator
predicts[:, j] = local_model.predict(X0)
if self.distance_function is None:
dists[:, j] = rbf(X0, local_center, \
coeff=1.0/np.power(n_subsets, 2.0))
else:
dists[:, j] = self.distance_function(X0, local_center)
# Normalize the distances from samples to the local subsets
if self.d_normalize:
denom = np.sum(dists, axis=1)
denom[denom < 1.0e-8] = 1.0e-8
dists = (dists.T/denom).T
Z0 = dists * predicts
if self.scaling:
Z0 = self._replications[i].sc_object.transform(Z0)
if global_model is not None:
yhat[:, i] = global_model.predict(Z0)
else:
rowsums = np.sum(Z0, axis=1)
yhat[rowsums < 0, i] = -1
yhat[rowsums >= 0, i] = 1
yhat = mode(yhat.astype(int), axis=1).mode.reshape(1, -1)[0]
# Convert to original labels
ymin1 = yhat == -1
ypls1 = yhat == 1
yhat[ymin1] = self._yorg[0]
yhat[ypls1] = self._yorg[1]
return yhat
def predict_proba(self, X0: np.array):
'''
Prediction probabilities are evaluated for the test samples in X0
'''
check_is_fitted(self, attributes='_isfitted')
# Input validation
X0 = check_array(X0)
len_X0: int = len(X0)
yhat = np.zeros((len_X0, self.n_replications), dtype=int)
predprobs = np.zeros((len_X0, 2), dtype=np.float16)
for i in range(self.n_replications):
# Get the fitted global and local estimators
global_model = self._replications[i].global_estimator
local_models = self._replications[i].local_estimators
if self.cluster_method is None:
n_subsets = self.n_subsets
else:
n_subsets = self.n_subsets[i]
predicts = np.zeros((len_X0, n_subsets))
dists = np.zeros((len_X0, n_subsets))
for j in range(n_subsets):
local_center = local_models[j].center
local_model = local_models[j].estimator
predicts[:, j] = local_model.predict(X0)
if self.distance_function is None:
dists[:, j] = rbf(X0, local_center, \
coeff=1.0/np.power(n_subsets, 2.0))
else:
dists[:, j] = self.distance_function(X0, local_center)
# Normalize the distances from samples to the local subsets
if self.d_normalize:
denom = np.sum(dists, axis=1)
denom[denom < 1.0e-8] = 1.0e-8
dists = (dists.T/denom).T
Z0 = dists * predicts
if self.scaling:
Z0 = self._replications[i].sc_object.transform(Z0)
if global_model is not None:
yhat[:, i] = global_model.predict(Z0)
# Convert to 0-1
yhat[:, i] = (yhat[:, i] + 1)/2
else:
rowsums = np.sum(Z0, axis=1)
yhat[rowsums < 0, i] = 0
yhat[rowsums >= 0, i] = 1
md, cnt = mode(yhat, axis=1)
yhat = md.reshape(1, -1)[0]
cnt = cnt.reshape(1, -1)[0]
yhat0 = yhat==0
yhat1 = yhat==1
predprobs[yhat0, 0] = cnt[yhat0]
predprobs[yhat0, 1] = self.n_replications - cnt[yhat0]
predprobs[yhat1, 1] = cnt[yhat1]
predprobs[yhat1, 0] = self.n_replications - cnt[yhat1]
predprobs /= self.n_replications
return predprobs
self._bclassifier = _LESSBC(frac=self.frac, n_neighbors=self.n_neighbors,
n_subsets=self.n_subsets,
n_replications=self.n_replications,
d_normalize=self.d_normalize,
val_size=self.val_size,
random_state=self.random_state,
tree_method=self.tree_method,
cluster_method=self.cluster_method,
local_estimator=self.local_estimator,
global_estimator=self.global_estimator,
distance_function=self.distance_function,
scaling=self.scaling,
warnings=self.warnings)
def fit(self, X: np.array, y: np.array):
'''
Dummy fit function that calls the fit method of the multiclass
strategy 'one-vs-rest'
'''
if self.scaling:
self._scobject = StandardScaler()
X = self._scobject.fit_transform(X)
n_classes = len(np.unique(y))
self._set_strategy(n_classes)
self._strategy.fit(X, y)
self._update_params(self._strategy.estimators_[0], n_classes)
self._isfitted = True
return self
def predict(self, X0: np.array):
'''
Dummy predict function that calls the predict method of the multiclass
strategy 'one-vs-rest'
'''
if self.scaling:
X0 = self._scobject.transform(X0)
return self._strategy.predict(X0)
def _set_strategy(self, n_classes):
'''
Auxiliary function to set the selected the strategy
'''
if n_classes == 2:
self._strategy = OneVsRestClassifier(self._bclassifier)
elif self.multiclass == 'ovr':
self._strategy = OneVsRestClassifier(self._bclassifier)
elif self.multiclass == 'ovo':
self._strategy = OneVsOneClassifier(self._bclassifier)
elif self.multiclass == 'occ':
self._strategy = OutputCodeClassifier(self._bclassifier)
else:
self._strategy = OneVsRestClassifier(self._bclassifier)
_LESSwarn('''
LESSClassifier works only with one of the following options:
(1) 'ovr' : OneVsRestClassifier (default),
(2) 'ovo' : OneVsOneClassifier,
(3) 'occ' : OutputCodeClassifier,
(see sklearn.multiclass for details).
Switching to 'ovr' ...
''', self.warnings)
def _update_params(self, firstestimator, n_classes):
'''
Parameters of the wrapper class are updated, since the functions
_set_local_attributes and _check_input may alter the following parameters
'''
self.global_estimator = firstestimator.global_estimator
self.frac = firstestimator.get_frac()
self.n_neighbors = firstestimator.get_n_neighbors()
self.n_subsets = firstestimator.get_n_subsets()
self.n_replications = firstestimator.get_n_replications()
self.d_normalize = firstestimator.get_d_normalize()
# Replications are stored only if it is a binary classification problem
# Otherwise, there are multiple binary classifiers, and hence, multiple replications
if n_classes == 2:
self._replications = firstestimator._replications
class LESSRegressor(_LESS, RegressorMixin):
'''
Regressor for Learning with Subset Selection (LESS)
Parameters
----------
frac: fraction of total samples used for the number of neighbors (default is 0.05)
n_neighbors : number of neighbors (default is None)
n_subsets : number of subsets (default is None)
n_replications : number of replications (default is 20)
d_normalize : distance normalization (default is True)
val_size: percentage of samples used for validation (default is None - no validation)
random_state: initialization of the random seed (default is None)
tree_method : method used for constructing the nearest neighbor tree,
e.g., sklearn.neighbors.KDTree (default) or sklearn.neighbors.BallTree
cluster_method : method used for clustering the subsets,
e.g., sklearn.cluster.KMeans, sklearn.cluster.SpectralClustering (default is None)
local_estimator : estimator for the local models (default is LinearRegression)
global_estimator : estimator for the global model (default is DecisionTreeRegressor)
distance_function : distance function evaluating the distance from a subset to a sample,
e.g., df(subset, sample) which returns a vector of distances
(default is RBF(subset, sample, 1.0/n_subsets^2))
scaling: flag to normalize the input data (default is True)
warnings : flag to turn on (True) or off (False) the warnings (default is True)
Recommendation
--------------
Default implementation of LESS uses Euclidean distances with radial basis function.
Therefore, it is a good idea to scale the input data before fitting. This can be done by
setting the parameter 'scaling' to True (the default value) or preprocessing the data
as follows:
>>> from sklearn.preprocessing import StandardScaler
>>> SC = StandardScaler()
>>> X_train = SC.fit_transform(X_train)
>>> X_test = SC.transform(X_test)
'''
def __init__(self, frac=None, n_neighbors=None, n_subsets=None,
n_replications=20, d_normalize=True, val_size=None, random_state=None,
tree_method=lambda data, n_subsets: KDTree(data, n_subsets),
cluster_method=None,
local_estimator=lambda: LinearRegression(),
global_estimator=lambda: DecisionTreeRegressor(),
distance_function: Callable[[np.array, np.array], np.array]=None,
scaling=True, warnings=True):
self.local_estimator = local_estimator
self.global_estimator = global_estimator
self.tree_method = tree_method
self.cluster_method = cluster_method
self.distance_function = distance_function
self.frac = frac
self.n_neighbors = n_neighbors
self.n_subsets = n_subsets
self.n_replications = n_replications
self.d_normalize = d_normalize
self.val_size = val_size
self.random_state = random_state
self._rng = np.random.default_rng(self.random_state)
self.scaling = scaling
self.warnings = warnings
def fit(self, X: np.array, y: np.array):
'''
Dummy fit function that calls the proper method according to
validation and clustering parameters
Options are:
- Default fitting (no validation set, no clustering)
- Fitting with validation set (no clustering)
- Fitting with clustering (no validation set)
- Fitting with validation set and clustering
'''
# Check that X and y have correct shape
X, y = check_X_y(X, y)
self._set_local_attributes()
if self.scaling:
self._scobject = StandardScaler()
X = self._scobject.fit_transform(X)
if self.val_size is not None:
# Validation set is used for
# global estimation
if self.cluster_method is None:
self._fitval(X, y)
else:
self._fitvalc(X, y)
else:
# Validation set is not used for
# global estimation
if self.cluster_method is None:
self._fitnoval(X, y)
else:
self._fitnovalc(X, y)
self._isfitted = True
return self
def predict(self, X0: np.array):
'''
Predictions are evaluated for the test samples in X0
'''
check_is_fitted(self, attributes='_isfitted')
# Input validation
X0 = check_array(X0)
if self.scaling:
X0 = self._scobject.transform(X0)
len_X0: int = len(X0)
yhat =
|
np.zeros(len_X0)
|
numpy.zeros
|
import numpy as np
import scipy as sp
from sklearn import svm, discriminant_analysis, dummy
from sklearn.linear_model import LogisticRegression, Perceptron
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree._tree import Tree
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, _gb_losses
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB, ComplementNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import LabelBinarizer
from th_sklearn_json import regression
from th_sklearn_json import csr
import json
def serialize_logistic_regression(model):
serialized_model = {
'meta': 'lr',
'classes_': model.classes_.tolist(),
'coef_': model.coef_.tolist(),
'intercept_': model.intercept_.tolist(),
'n_iter_': model.n_iter_.tolist(),
'params': model.get_params()
}
return serialized_model
def deserialize_logistic_regression(model_dict):
model = LogisticRegression(**model_dict['params'])
model.classes_ = np.array(model_dict['classes_'])
model.coef_ = np.array(model_dict['coef_'])
model.intercept_ = np.array(model_dict['intercept_'])
model.n_iter_ = np.array(model_dict['n_iter_'])
return model
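# Illustrative round-trip (sketch; assumes `clf` is an already-fitted LogisticRegression):
#   payload = json.dumps(serialize_logistic_regression(clf))
#   clf2 = deserialize_logistic_regression(json.loads(payload))
#   clf2.predict(X)  # expected to match clf.predict(X)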
def serialize_bernoulli_nb(model):
serialized_model = {
'meta': 'bernoulli-nb',
'classes_': model.classes_.tolist(),
'class_count_': model.class_count_.tolist(),
'class_log_prior_': model.class_log_prior_.tolist(),
'feature_count_': model.feature_count_.tolist(),
'feature_log_prob_': model.feature_log_prob_.tolist(),
'params': model.get_params()
}
return serialized_model
def deserialize_bernoulli_nb(model_dict):
model = BernoulliNB(**model_dict['params'])
model.classes_ = np.array(model_dict['classes_'])
model.class_count_ = np.array(model_dict['class_count_'])
model.class_log_prior_ = np.array(model_dict['class_log_prior_'])
model.feature_count_= np.array(model_dict['feature_count_'])
model.feature_log_prob_ = np.array(model_dict['feature_log_prob_'])
return model
def serialize_gaussian_nb(model):
serialized_model = {
'meta': 'gaussian-nb',
'classes_': model.classes_.tolist(),
'class_count_': model.class_count_.tolist(),
'class_prior_': model.class_prior_.tolist(),
'theta_': model.theta_.tolist(),
'sigma_': model.sigma_.tolist(),
'epsilon_': model.epsilon_,
'params': model.get_params()
}
return serialized_model
def deserialize_gaussian_nb(model_dict):
model = GaussianNB(**model_dict['params'])
model.classes_ = np.array(model_dict['classes_'])
model.class_count_ = np.array(model_dict['class_count_'])
model.class_prior_ = np.array(model_dict['class_prior_'])
model.theta_ = np.array(model_dict['theta_'])
model.sigma_ = np.array(model_dict['sigma_'])
model.epsilon_ = model_dict['epsilon_']
return model
def serialize_multinomial_nb(model):
serialized_model = {
'meta': 'multinomial-nb',
'classes_': model.classes_.tolist(),
'class_count_': model.class_count_.tolist(),
'class_log_prior_': model.class_log_prior_.tolist(),
'feature_count_': model.feature_count_.tolist(),
'feature_log_prob_': model.feature_log_prob_.tolist(),
'params': model.get_params()
}
return serialized_model
def deserialize_multinomial_nb(model_dict):
model = MultinomialNB(**model_dict['params'])
model.classes_ = np.array(model_dict['classes_'])
model.class_count_ = np.array(model_dict['class_count_'])
model.class_log_prior_ = np.array(model_dict['class_log_prior_'])
model.feature_count_= np.array(model_dict['feature_count_'])
model.feature_log_prob_ = np.array(model_dict['feature_log_prob_'])
return model
def serialize_complement_nb(model):
serialized_model = {
'meta': 'complement-nb',
'classes_': model.classes_.tolist(),
'class_count_': model.class_count_.tolist(),
'class_log_prior_': model.class_log_prior_.tolist(),
'feature_count_': model.feature_count_.tolist(),
'feature_log_prob_': model.feature_log_prob_.tolist(),
'feature_all_': model.feature_all_.tolist(),
'params': model.get_params()
}
return serialized_model
def deserialize_complement_nb(model_dict):
model = ComplementNB(**model_dict['params'])
model.classes_ = np.array(model_dict['classes_'])
model.class_count_ = np.array(model_dict['class_count_'])
model.class_log_prior_ = np.array(model_dict['class_log_prior_'])
model.feature_count_= np.array(model_dict['feature_count_'])
model.feature_log_prob_ = np.array(model_dict['feature_log_prob_'])
model.feature_all_ = np.array(model_dict['feature_all_'])
return model
def serialize_lda(model):
serialized_model = {
'meta': 'lda',
'coef_': model.coef_.tolist(),
'intercept_': model.intercept_.tolist(),
'explained_variance_ratio_': model.explained_variance_ratio_.tolist(),
'means_': model.means_.tolist(),
'priors_': model.priors_.tolist(),
'scalings_': model.scalings_.tolist(),
'xbar_': model.xbar_.tolist(),
'classes_': model.classes_.tolist(),
'params': model.get_params()
}
if 'covariance_' in model.__dict__:
serialized_model['covariance_'] = model.covariance_.tolist()
return serialized_model
def deserialize_lda(model_dict):
model = discriminant_analysis.LinearDiscriminantAnalysis(**model_dict['params'])
model.coef_ = np.array(model_dict['coef_']).astype(np.float64)
model.intercept_ = np.array(model_dict['intercept_']).astype(np.float64)
model.explained_variance_ratio_ = np.array(model_dict['explained_variance_ratio_']).astype(np.float64)
model.means_ = np.array(model_dict['means_']).astype(np.float64)
model.priors_ = np.array(model_dict['priors_']).astype(np.float64)
model.scalings_ = np.array(model_dict['scalings_']).astype(np.float64)
model.xbar_ = np.array(model_dict['xbar_']).astype(np.float64)
model.classes_ = np.array(model_dict['classes_']).astype(np.int64)
return model
def serialize_qda(model):
serialized_model = {
'meta': 'qda',
'means_': model.means_.tolist(),
'priors_': model.priors_.tolist(),
'scalings_': [array.tolist() for array in model.scalings_],
'rotations_': [array.tolist() for array in model.rotations_],
'classes_': model.classes_.tolist(),
'params': model.get_params()
}
if 'covariance_' in model.__dict__:
serialized_model['covariance_'] = model.covariance_.tolist()
return serialized_model
def deserialize_qda(model_dict):
model = discriminant_analysis.QuadraticDiscriminantAnalysis(**model_dict['params'])
model.means_ = np.array(model_dict['means_']).astype(np.float64)
model.priors_ = np.array(model_dict['priors_']).astype(np.float64)
model.scalings_ = np.array(model_dict['scalings_']).astype(np.float64)
model.rotations_ = np.array(model_dict['rotations_']).astype(np.float64)
model.classes_ = np.array(model_dict['classes_']).astype(np.int64)
return model
def serialize_svm(model):
serialized_model = {
'meta': 'svm',
'class_weight_': model.class_weight_.tolist(),
'classes_': model.classes_.tolist(),
'support_': model.support_.tolist(),
'_n_support': model.n_support_.tolist(),
'intercept_': model.intercept_.tolist(),
'_probA': model.probA_.tolist(),
'_probB': model.probB_.tolist(),
'_intercept_': model._intercept_.tolist(),
'shape_fit_': model.shape_fit_,
'_gamma': model._gamma,
'_sparse':model._sparse,
'params': model.get_params()
}
if isinstance(model.support_vectors_, sp.sparse.csr_matrix):
serialized_model['support_vectors_'] = csr.serialize_csr_matrix(model.support_vectors_)
elif isinstance(model.support_vectors_, np.ndarray):
serialized_model['support_vectors_'] = model.support_vectors_.tolist()
if isinstance(model.dual_coef_, sp.sparse.csr_matrix):
serialized_model['dual_coef_'] = csr.serialize_csr_matrix(model.dual_coef_)
elif isinstance(model.dual_coef_, np.ndarray):
serialized_model['dual_coef_'] = model.dual_coef_.tolist()
if isinstance(model._dual_coef_, sp.sparse.csr_matrix):
serialized_model['_dual_coef_'] = csr.serialize_csr_matrix(model._dual_coef_)
elif isinstance(model._dual_coef_, np.ndarray):
serialized_model['_dual_coef_'] = model._dual_coef_.tolist()
return serialized_model
def deserialize_svm(model_dict):
model = svm.SVC(**model_dict['params'])
model.shape_fit_ = model_dict['shape_fit_']
model._gamma = model_dict['_gamma']
model.class_weight_ = np.array(model_dict['class_weight_']).astype(np.float64)
model.classes_ = np.array(model_dict['classes_'])
model.support_ = np.array(model_dict['support_']).astype(np.int32)
model._n_support = np.array(model_dict['_n_support']).astype(np.int32)
model.intercept_ =
|
np.array(model_dict['intercept_'])
|
numpy.array
|
"""
Multi-object Panoptic Tracking evaluation.
Code written by Motional and the Robot Learning Lab, University of Freiburg.
"""
from typing import Dict, List, Tuple
import numpy as np
from nuscenes.eval.panoptic.panoptic_seg_evaluator import PanopticEval
class PanopticTrackingEval(PanopticEval):
""" Panoptic tracking evaluator"""
def __init__(self,
n_classes: int,
min_stuff_cls_id: int,
ignore: List[int] = None,
offset: int = 2 ** 32,
min_points: int = 30,
iou_thr: float = 0.5):
"""
:param n_classes: Number of classes.
:param min_stuff_cls_id: Minimum stuff class index, 11 for Panoptic nuScenes challenge classes.
:param ignore: List of ignored class index.
:param offset: Largest instance number in a frame.
:param min_points: minimal number of points to consider instances in GT.
:param iou_thr: IoU threshold to consider as a true positive. Note "iou_thr > 0.5" is required for Panoptic
Quality metric and its variants.
"""
super().__init__(n_classes=n_classes, ignore=ignore, offset=offset, min_points=min_points)
self.iou_thr = iou_thr
assert self.iou_thr >= 0.5, f'IoU threshold must be >= 0.5, but {self.iou_thr} is given.'
self.min_stuff_cls_id = min_stuff_cls_id
# IoU stuff.
self.px_iou_conf_matrix = np.zeros((self.n_classes, self.n_classes), dtype=np.int64)
# Panoptic stuff.
self.pan_ids = np.zeros(self.n_classes, dtype=np.int64)
self.pan_soft_ids = np.zeros(self.n_classes, dtype=np.double)
self.pan_tp = np.zeros(self.n_classes, dtype=np.int64)
self.pan_iou = np.zeros(self.n_classes, dtype=np.double)
self.pan_fp = np.zeros(self.n_classes, dtype=np.int64)
self.pan_fn = np.zeros(self.n_classes, dtype=np.int64)
# Tracking stuff.
self.sequences = []
self.preds = {}
self.gts = {}
self.intersects = {}
self.intersects_ovr = {}
# PAT Tracking stuff.
self.instance_preds = {}
self.instance_gts = {}
# Per-class association quality stuff.
self.pan_aq = np.zeros(self.n_classes, dtype=np.double)
self.pan_aq_ovr = 0.0
@staticmethod
def update_dict_stat(stat_dict: Dict[int, int], unique_ids: np.ndarray, unique_cnts: np.ndarray) -> None:
"""
Update stats dict with new combo of ids and counts.
:param stat_dict: {class_id: counts}, a dict of stats for the counts of each class.
:param unique_ids: <np.int64, <k,>>, an array of class IDs.
:param unique_cnts: <np.int64, <k,>>, an array of counts for corresponding class IDs.
"""
for uniqueid, counts in zip(unique_ids, unique_cnts):
if uniqueid in stat_dict:
stat_dict[uniqueid] += counts
else:
stat_dict[uniqueid] = counts
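# Illustrative example (hypothetical values): starting from stat_dict = {1: 5},
# update_dict_stat(stat_dict, np.array([1, 2]), np.array([3, 7])) leaves stat_dict == {1: 8, 2: 7}.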
def get_panoptic_track_stats(self,
x_inst_in_cl: np.ndarray,
y_inst_in_cl: np.ndarray,
x_inst_row: np.ndarray = None,
scene: str = None,
cl: int = None)\
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, Dict[int, int], Dict[int, int], np.ndarray]:
"""
Calculate class-specific panoptic tracking stats given predicted instances and target instances.
:param x_inst_in_cl: <np.int64: num_points>, instance IDs of each point for predicted instances.
:param y_inst_in_cl: <np.int64: num_points>, instance IDs of each point for target instances.
:param x_inst_row: <np.int64: num_points>, class-agnostic instance IDs of each point for predicted instances.
:param scene: str, name of scene.
:param cl: int, semantic class id.
:return: A tuple of MOPT stats:
{
counts_pred, # <np.int64, num_instances>, point counts of each predicted instance.
counts_gt, # <np.int64, num_instances>, point counts of each ground truth instance.
gt_labels, # <np.int64, num_instances>, instance ID of each ground truth instance.
pred_labels, # <np.int64, num_instances>, instance ID of each predicted instance.
id2idx_gt, # {instance ID: array index}, instance ID to array index mapping for ground truth instances.
id2idx_pred, # {instance ID: array index}, instance ID to array index mapping for predicted instances.
ious, # <np.float32, num_instances>, IoU scores between prediction and ground truth instance pair.
}
"""
# Generate the areas for each unique instance in prediction.
unique_pred, counts_pred = np.unique(x_inst_in_cl[x_inst_in_cl > 0], return_counts=True)
id2idx_pred = {inst_id: idx for idx, inst_id in enumerate(unique_pred)}
# Generate the areas for each unique instance in ground truth.
unique_gt, counts_gt = np.unique(y_inst_in_cl[y_inst_in_cl > 0], return_counts=True)
id2idx_gt = {inst_id: idx for idx, inst_id in enumerate(unique_gt)}
# Generate intersection using offset.
valid_combos = np.logical_and(x_inst_in_cl > 0, y_inst_in_cl > 0)
offset_combo = x_inst_in_cl[valid_combos] + self.offset * y_inst_in_cl[valid_combos]
unique_combo, counts_combo = np.unique(offset_combo, return_counts=True)
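# Because instance IDs are smaller than self.offset, each (pred, gt) pair maps to the unique
# key pred + offset * gt; the pair is recovered later with integer division and modulo.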
# Per-class accumulated stats.
if scene is not None and cl < self.min_stuff_cls_id:
cl_preds = self.preds[scene]
cl_gts = self.gts[scene][cl]
cl_intersects = self.intersects[scene][cl]
self.update_dict_stat(cl_gts,
unique_gt[counts_gt > self.min_points],
counts_gt[counts_gt > self.min_points])
self.update_dict_stat(cl_preds,
unique_pred[counts_pred > self.min_points],
counts_pred[counts_pred > self.min_points])
valid_combos_min_point = np.zeros_like(y_inst_in_cl)  # instances which have more than self.min_points points
for valid_id in unique_gt[counts_gt > self.min_points]:
valid_combos_min_point = np.logical_or(valid_combos_min_point, y_inst_in_cl == valid_id)
y_inst_in_cl = y_inst_in_cl * valid_combos_min_point
valid_combos_ = np.logical_and(x_inst_row > 0, y_inst_in_cl > 0)
offset_combo_ = x_inst_row[valid_combos_] + self.offset * y_inst_in_cl[valid_combos_]
unique_combo_, counts_combo_ = np.unique(offset_combo_, return_counts=True)
self.update_dict_stat(cl_intersects, unique_combo_, counts_combo_)
# Computation for PAT score
# Computes the unique gt instances and their point counts (> self.min_points)
unique_gt_, counts_gt_ = np.unique(y_inst_in_cl[y_inst_in_cl > 0], return_counts=True)
id2idx_gt_ = {inst_id: idx for idx, inst_id in enumerate(unique_gt_)}
# Computes the unique pred instances (class-agnostic) and their point counts
unique_pred_, counts_pred_ = np.unique(x_inst_row[x_inst_row > 0], return_counts=True)
id2idx_pred_ = {inst_id: idx for idx, inst_id in enumerate(unique_pred_)}
# Actually unique_combo_ = pred_labels_ + self.offset * gt_labels_
gt_labels_ = unique_combo_ // self.offset
pred_labels_ = unique_combo_ % self.offset
gt_areas_ = np.array([counts_gt_[id2idx_gt_[g_id]] for g_id in gt_labels_])
pred_areas_ = np.array([counts_pred_[id2idx_pred_[p_id]] for p_id in pred_labels_])
# Here counts_combo_ : TP (point-level)
intersections_ = counts_combo_
# Here gt_areas_ : TP + FN, pred_areas_ : TP + FP (point-level)
# Overall unions_ : TP + FP + FN (point-level)
unions_ = gt_areas_ + pred_areas_ - intersections_
# IoU : TP / (TP + FP + FN)
ious_agnostic = intersections_.astype(np.float32) / unions_.astype(np.float32)
# tp_indexes_agnostic : TP (instance-level, IoU > 0.5)
tp_indexes_agnostic = ious_agnostic > 0.5
matched_gt_ = np.array([False] * len(id2idx_gt_))
matched_gt_[[id2idx_gt_[g_id] for g_id in gt_labels_[tp_indexes_agnostic]]] = True
# Stores matched tracks (the corresponding class-agnostic predicted instance) for the unique gt instances:
for idx, value in enumerate(tp_indexes_agnostic):
if value:
g_label = gt_labels_[idx]
p_label = pred_labels_[idx]
if g_label not in self.instance_gts[scene][cl]:
self.instance_gts[scene][cl][g_label] = [p_label,]
else:
self.instance_gts[scene][cl][g_label].append(p_label)
# Stores unmatched tracks for the unique gt instances: assigns 1 for no match
for g_label in unique_gt_:
if not matched_gt_[id2idx_gt_[g_label]]:
if g_label not in self.instance_gts[scene][cl]:
self.instance_gts[scene][cl][g_label] = [1,]
else:
self.instance_gts[scene][cl][g_label].append(1)
# Generate an intersection map, count the intersections with over 0.5 IoU as TP.
gt_labels = unique_combo // self.offset
pred_labels = unique_combo % self.offset
gt_areas = np.array([counts_gt[id2idx_gt[g_id]] for g_id in gt_labels])
pred_areas = np.array([counts_pred[id2idx_pred[p_id]] for p_id in pred_labels])
intersections = counts_combo
unions = gt_areas + pred_areas - intersections
ious = intersections.astype(np.float32) / unions.astype(np.float32)
return counts_pred, counts_gt, gt_labels, pred_labels, id2idx_gt, id2idx_pred, ious
def add_batch_panoptic(self,
scene: str,
x_sem_row: List[np.ndarray],
x_inst_row: List[np.ndarray],
y_sem_row: List[np.ndarray],
y_inst_row: List[np.ndarray]) -> None:
"""
Add panoptic tracking metrics for one frame/batch.
:param scene: str, name of scene.
:param x_sem_row: [None, <np.int64: num_points>], predicted semantics.
:param x_inst_row: [None, <np.uint64: num_points>], predicted instances.
:param y_sem_row: [None, <np.int64: num_points>], target semantics.
:param y_inst_row: [None, <np.uint64: num_points>], target instances.
"""
if scene not in self.sequences:
self.sequences.append(scene)
self.preds[scene] = {}
self.gts[scene] = [{} for _ in range(self.n_classes)]
self.intersects[scene] = [{} for _ in range(self.n_classes)]
self.intersects_ovr[scene] = [{} for _ in range(self.n_classes)]
self.instance_preds[scene] = {}
self.instance_gts[scene] = [{} for _ in range(self.n_classes)]
# Make sure instance IDs are non-zero; otherwise they will be ignored. Note that in Panoptic nuScenes,
# instance IDs already start from 1, so the following 2 lines are not strictly necessary, but we keep them
# to stay consistent with the 3rd-party PanopticEval class in panoptic_seg_evaluator.py. This means the
# actual instance IDs will start from 2 during metrics evaluation.
x_inst_row[1] = x_inst_row[1] + 1
y_inst_row[1] = y_inst_row[1] + 1
# Only interested in points that are outside the void area (not in excluded classes).
for cl in self.ignore:
# Current Frame.
gt_not_in_excl_mask = y_sem_row[1] != cl # make a mask for class cl.
# Remove all other points.
x_sem_row[1] = x_sem_row[1][gt_not_in_excl_mask]
y_sem_row[1] = y_sem_row[1][gt_not_in_excl_mask]
x_inst_row[1] = x_inst_row[1][gt_not_in_excl_mask]
y_inst_row[1] = y_inst_row[1][gt_not_in_excl_mask]
# Previous Frame.
if x_sem_row[0] is not None: # First frame.
gt_not_in_excl_mask = y_sem_row[0] != cl
# Remove all other points.
x_sem_row[0] = x_sem_row[0][gt_not_in_excl_mask]
y_sem_row[0] = y_sem_row[0][gt_not_in_excl_mask]
x_inst_row[0] = x_inst_row[0][gt_not_in_excl_mask]
y_inst_row[0] = y_inst_row[0][gt_not_in_excl_mask]
# Accumulate class-agnostic predictions
unique_pred_, counts_pred_ = np.unique(x_inst_row[1][x_inst_row[1] > 0], return_counts=True)
for p_id in unique_pred_[counts_pred_ > self.min_points]:
if p_id not in self.instance_preds[scene]:
self.instance_preds[scene][p_id] = 1
else:
self.instance_preds[scene][p_id] += 1
# First step is to count intersections > 0.5 IoU for each class (except the ignored ones).
for cl in self.include:
# Previous Frame.
inst_prev, gt_labels_prev, tp_indexes_prev = None, None, None
if x_sem_row[0] is not None:  # Only if a previous frame exists (skipped for the first frame).
x_inst_in_cl_mask = x_sem_row[0] == cl
y_inst_in_cl_mask = y_sem_row[0] == cl
# Get instance points in class (makes outside stuff 0).
x_inst_in_cl = x_inst_row[0] * x_inst_in_cl_mask.astype(np.int64)
y_inst_in_cl = y_inst_row[0] * y_inst_in_cl_mask.astype(np.int64)
_, _, gt_labels_prev, inst_prev, _, _, ious = self.get_panoptic_track_stats(x_inst_in_cl, y_inst_in_cl)
tp_indexes_prev = ious > self.iou_thr
# Current Frame: get a class mask.
x_inst_in_cl_mask = x_sem_row[1] == cl
y_inst_in_cl_mask = y_sem_row[1] == cl
# Get instance points in class (makes outside stuff 0).
x_inst_in_cl = x_inst_row[1] * x_inst_in_cl_mask.astype(np.int64)
y_inst_in_cl = y_inst_row[1] * y_inst_in_cl_mask.astype(np.int64)
counts_pred, counts_gt, gt_labels, pred_labels, id2idx_gt, id2idx_pred, ious =\
self.get_panoptic_track_stats(x_inst_in_cl, y_inst_in_cl, x_inst_row[1], scene, cl)
inst_cur = pred_labels
tp_indexes = ious > 0.5
self.pan_tp[cl] +=
|
np.sum(tp_indexes)
|
numpy.sum
|
'''
Functions for training the generator and assessing data: training the generator and discriminator,
generating synthetic data, and computing the similarity metrics.
Author: <NAME>
'''
import numpy as np
from keras.utils import to_categorical
from sklearn.metrics.pairwise import cosine_similarity
from keras import backend as K
#FUNCTION FOR COMPUTING EUCLIDEAN DISTANCE (SFD) LOSS WITHIN KERAS OPTIMIZATION FUNCTION
def euc_dist_loss(y_true, y_pred):
return K.sqrt(K.sum(K.square(y_true - y_pred), axis=-1))
#FUNCTION FOR COMPUTING AVERAGE STATISTICAL FEATURE DISTANCE (SFD) DURING TRAINING
def compute_SFD(real_features, synthetic_features):
distance_vector = np.sqrt(np.sum(np.square(real_features - synthetic_features), axis=1))
SFD = np.mean(distance_vector)
return SFD
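#ILLUSTRATIVE EXAMPLE (NOT PART OF THE ORIGINAL MODULE): SANITY CHECK FOR compute_SFD ON MADE-UP FEATURES
def _example_compute_SFD():
    real_features = np.array([[0.0, 0.0], [1.0, 1.0]])
    synthetic_features = np.array([[3.0, 4.0], [1.0, 1.0]])
    # Row-wise Euclidean distances are [5.0, 0.0], so the mean SFD is 2.5.
    return compute_SFD(real_features, synthetic_features)  # -> 2.5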
#FUNCTION FOR GENERATING RANDOM INPUT BY SAMPLING FROM NORMAL DISTRIBUTION (INPUT VARIES AT EACH TIMESTEP)
def generate_input_noise(batch_size, latent_dim, time_steps):
return np.reshape(np.array(np.random.normal(0, 1, latent_dim * time_steps * batch_size)),(batch_size, time_steps, latent_dim))
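#ILLUSTRATIVE SHAPE CHECK (NOT PART OF THE ORIGINAL MODULE): NOISE HAS SHAPE (batch_size, time_steps, latent_dim)
def _example_generate_input_noise():
    noise = generate_input_noise(batch_size=4, latent_dim=8, time_steps=100)
    assert noise.shape == (4, 100, 8)
    return noise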
#FUNCTION FOR GENERATING A SYNTHETIC DATA SET
def generate_synthetic_data(size, generator, latent_dim, time_steps):
noise = generate_input_noise(size, latent_dim, time_steps)
synthetic_data = generator.predict(noise)
return synthetic_data
#FUNCTION FOR TRAINING GENERATOR FROM BOTH DISCRIMINATOR AND CLASSIFIER OUTPUT
def train_G(batch_size, X, class_label, actual_features, num_labels, model, latent_dim):
noise = generate_input_noise(batch_size, latent_dim, X.shape[1])
real_synthetic_labels = np.ones([batch_size, 1]) #labels related to whether data is real or synthetic
class_labels = to_categorical([class_label]*batch_size, num_classes=num_labels) #labels related to the class of the data
loss = model.train_on_batch(noise, [real_synthetic_labels,class_labels, actual_features])
return loss
#FUNCTION FOR TRAINING DISCRIMINATOR (FROM GENERATOR INPUT)
def train_D(batch_size, X, generator, discriminator_model, latent_dim):
#GENERATE SYNTHETIC DATA
noise = generate_input_noise(batch_size, latent_dim, X.shape[1])
synthetic_data = generator.predict(noise)
#SELECT A RANDOM BATCH OF REAL DATA
indices_toKeep = np.random.choice(X.shape[0], batch_size, replace=False)
real_data = X[indices_toKeep]
#MAKE FULL INPUT AND LABELS FOR FEEDING INTO NETWORK
full_input = np.concatenate((real_data, synthetic_data))
real_synthetic_label = np.ones([2 * batch_size, 1])
real_synthetic_label[batch_size:, :] = 0
#TRAIN D AND RETURN LOSS
loss = discriminator_model.train_on_batch(full_input, real_synthetic_label)
return loss
#FUNCTION TO COMPUTE MEAN RTS AND STS SIMILARITY WHICH IS USED TO HELP MONITOR GENERATOR TRAINING
def compute_similarity_metrics(X_synthetic, X_real, batch_size, real_synthetic_ratio, synthetic_synthetic_ratio=10, real_real_ratio=10):
#NECESSARY FEATURES REGARDING DATA SHAPE
num_segs = len(X_real)
seq_length = X_real.shape[1]
num_channels = X_real.shape[2]
synth_num_segs = len(X_synthetic)
synth_seq_length = X_synthetic.shape[1]
synth_num_channels = X_synthetic.shape[2]
RTS_sims = []
STS_sims = []
RTR_sims = []
#RESHAPE DATA INTO 2 DIMENSIONS
X_real = X_real.reshape(num_segs,seq_length*num_channels)
X_synthetic = X_synthetic.reshape(synth_num_segs,synth_seq_length*synth_num_channels)
#FOR EACH FAKE SEGMENT, CALCULATE ITS SIMILARITY TO A USER DEFINED NUMBER OF REAL SEGMENTS
for i in range(batch_size):
indices_toCompare = np.random.choice(num_segs,real_synthetic_ratio,replace=False)
for j in indices_toCompare:
sim = cosine_similarity(X_real[j].reshape(1,-1), X_synthetic[i].reshape(1,-1))
sim = sim[0,0]
RTS_sims += [sim]
#ALSO COMPUTE SIMILARITY BETWEEN USER DEFINED NUMBER OF SYNTHETIC SEGMENTS TO TEST FOR GENERATOR COLLAPSE
chosen_synthetic_idx = np.random.choice(len(X_synthetic), 1) #index of one random fake sample
chosen_synthetic_value = X_synthetic[chosen_synthetic_idx]
X_synthetic = np.delete(X_synthetic, chosen_synthetic_idx, axis=0) #remove this sample by index so we dont compare it to itself
synthetic_toCompare = X_synthetic[np.random.choice(len(X_synthetic), synthetic_synthetic_ratio)]
for other_synthetic in synthetic_toCompare:
sim2 = cosine_similarity(chosen_synthetic_value, other_synthetic.reshape(1,-1))
sim2 = sim2[0,0]
STS_sims += [sim2]
#COMPUTE SIMILARITY BETWEEN USER DEFINED NUMBER OF REAL SEGMENTS (RTR)
chosen_real_idx = np.random.choice(len(X_real), 1) #index of one random real sample
chosen_real_value = X_real[chosen_real_idx]
X_real = np.delete(X_real, chosen_real_idx, axis=0) #remove this sample by index so we dont compare it to itself
real_toCompare = X_real[np.random.choice(len(X_real), real_real_ratio)]
for other_real in real_toCompare:
sim3 = cosine_similarity(chosen_real_value, other_real.reshape(1,-1))
sim3 = sim3[0,0]
RTR_sims += [sim3]
RTS_sims = np.array(RTS_sims)
STS_sims = np.array(STS_sims)
RTR_sims = np.array(RTR_sims)
mean_RTS_sim = np.mean(RTS_sims)
mean_STS_sim = np.mean(STS_sims)
mean_RTR_sim =
|
np.mean(RTR_sims)
|
numpy.mean
|
import os
import gym
import matlab.engine
import numpy as np
def d2r(num):
return num * np.pi / 180.0
def r2d(num):
return num * 180 / np.pi
def map_to(num, a, b):
""" Map linearly num on the [-1, 1] range to the [a, b] range"""
return ((num + 1) / 2) * (b - a) + a
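# Illustrative check (not part of the original module): map_to sends -1 to a, +1 to b and 0 to the midpoint.
def _example_map_to():
    assert map_to(-1, 10.0, 20.0) == 10.0
    assert map_to(1, 10.0, 20.0) == 20.0
    assert map_to(0, 10.0, 20.0) == 15.0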
class Citation(gym.Env):
"""Custom Environment that follows gym interface"""
metadata = {'render.modes': ['graph']}
def __init__(self, time_vector: np.ndarray = np.arange(0, 30, 0.01), task=None):
super(Citation, self).__init__()
self.time = time_vector
self.dt = self.time[1] - self.time[0]
self.A_matrix, self.B_matrix = self.get_eom()
# Integration step from EOM using Euler Integration
self.euler = lambda x, u: x + (self.A_matrix.dot(x) + self.B_matrix.dot(u)) * self.dt
if task is None:
task = self.get_task_default()
self.ref_signal = task[0]
self.track_indices = task[1]
self.obs_indices = task[2]
self.observation_space = gym.spaces.Box(-3000, 3000, shape=(4,), dtype=np.float64)
self.action_space = gym.spaces.Box(-1., 1., shape=(3,), dtype=np.float64)
self.state = None
self.scale_s = None
self.state_history = None
self.action_history = None
self.error = None
self.step_count = None
def step(self, action: np.ndarray):
self.state = self.euler(self.state, self.scale_a(action))
self.error = d2r(self.ref_signal[:, self.step_count]) - self.state[self.track_indices]
if 5 in self.track_indices: # for sideslip angle
self.error[self.track_indices.index(5)] *= 50
self.state_history[:, self.step_count] = np.multiply(self.state, self.scale_s)
self.action_history[:, self.step_count] = self.scale_a(action, to='fig')
self.step_count += 1
done = bool(self.step_count >= self.time.shape[0])
return self.get_obs(), self.get_reward(), done, {}
def reset(self):
self.state = np.zeros(12)
self.scale_s = np.ones(self.state.shape)
self.scale_s[[0, 1, 2, 4, 5, 6, 7, 8]] = 180 / np.pi
self.state_history = np.zeros((self.state.shape[0], self.time.shape[0]))
self.action_history = np.zeros((self.action_space.shape[0], self.time.shape[0]))
self.error = np.zeros(len(self.track_indices))
self.step_count = 0
return np.zeros(self.observation_space.shape)
def get_reward(self):
sum_error = r2d(self.error.sum() / 30)
return -abs(max(min(sum_error, 1), -1))
def get_task_default(self):
ref_pbody = np.hstack([5 * np.sin(self.time[:int(self.time.shape[0] / 3)] * 2 * np.pi * 0.2),
5 * np.sin(self.time[:int(self.time.shape[0] / 3)] * 3.5 * np.pi * 0.2),
- 5 * np.ones(int(2.5 * self.time.shape[0] / self.time[-1].round())),
5 * np.ones(int(2.5 * self.time.shape[0] / self.time[-1].round())),
np.zeros(int(5 * self.time.shape[0] / self.time[-1].round())),
])
ref_qbody = np.hstack([5 * np.sin(self.time[:int(self.time.shape[0] / 3)] * 2 * np.pi * 0.2),
5 * np.sin(self.time[:int(self.time.shape[0] / 3)] * 3.5 * np.pi * 0.2),
- 5 * np.ones(int(2.5 * self.time.shape[0] / self.time[-1].round())),
5 * np.ones(int(2.5 * self.time.shape[0] / self.time[-1].round())),
np.zeros(int(5 * self.time.shape[0] / self.time[-1].round())),
])
ref_beta = np.zeros(int(self.time.shape[0]))
return np.vstack([ref_pbody, ref_qbody, ref_beta]), [0, 1, 5], [0, 1, 5, 2]
def get_obs(self):
return
|
np.hstack([self.error, self.state[2]])
|
numpy.hstack
|
import numpy as np
from numpy import ma
import pandas as pd
import xarray as xr
import cv2 as cv
from scipy import ndimage as ndi
class Flow:
"""
Class to perform semi-lagrangian operations using optical flow
"""
def __init__(self, dataset, smoothing_passes=1, flow_kwargs={}):
self.get_flow(dataset, smoothing_passes, flow_kwargs)
def get_flow(self, data, smoothing_passes, flow_kwargs):
self.shape = data.shape
self.flow_for = np.full(self.shape+(2,), np.nan, dtype=np.float32)
self.flow_back = np.full(self.shape+(2,), np.nan, dtype=np.float32)
for i in range(self.shape[0]-1):
print(i, end='\r')
a, b = data[i].compute().data, data[i+1].compute().data
self.flow_for[i] = self.cv_flow(a, b, **flow_kwargs)
self.flow_back[i+1] = self.cv_flow(b, a, **flow_kwargs)
if smoothing_passes > 0:
for j in range(smoothing_passes):
self._smooth_flow_step(i)
self.flow_back[0] = -self.flow_for[0]
self.flow_for[-1] = -self.flow_back[-1]
def to_8bit(self, array, vmin=None, vmax=None):
"""
Converts an array to an 8-bit range between 0 and 255
"""
if vmin is None:
vmin = np.nanmin(array)
if vmax is None:
vmax = np.nanmax(array)
array_out = (array-vmin) * 255 / (vmax-vmin)
return array_out.astype('uint8')
def cv_flow(self, a, b, pyr_scale=0.5, levels=5, winsize=16, iterations=3,
poly_n=5, poly_sigma=1.1, flags=cv.OPTFLOW_FARNEBACK_GAUSSIAN):
"""
Wrapper function for cv.calcOpticalFlowFarneback
"""
flow = cv.calcOpticalFlowFarneback(self.to_8bit(a), self.to_8bit(b), None,
pyr_scale, levels, winsize, iterations,
poly_n, poly_sigma, flags)
return flow
def _warp_flow_step(self, img, step, method='linear', direction='forward', offset=[0,0]):
if img.shape != self.shape[1:]:
raise ValueError("Image shape does not match flow shape")
out_img = np.full_like(img, np.nan)
if method == 'linear':
method = cv.INTER_LINEAR
elif method =='nearest':
method = cv.INTER_NEAREST
else:
raise ValueError("method must be either 'linear' or 'nearest'")
h, w = self.shape[1:]
if direction=='forward':
return cv.remap(img,
(self.flow_for[step]
+ np.stack(np.meshgrid(np.arange(w), np.arange(h)), -1)
+ np.asarray(offset)).astype(np.float32),
None, method, out_img, cv.BORDER_TRANSPARENT)
elif direction=='backward':
return cv.remap(img,
(self.flow_back[step]
+ np.stack(np.meshgrid(np.arange(w),
|
np.arange(h)
|
numpy.arange
|
# Python 3.7
from __future__ import annotations
from kivy.logger import Logger
import astropy.coordinates as ac
import astropy.units as u
import astropy.time
import sgp4.api
import numpy as np
import re
import datetime
import typing
import os
# Use minimal sized source for `get_sunposition`
# High accuracy not required
ac.solar_system_ephemeris.set('de432s')
class Satellite :
"""
A Satellite object holds the position, orbit, time/epoch
and many other details for a satellite being tracked by the app.
The main analytical data is still stored in the `sgp4.wrapper.Satrec`
object (`self.satrec`), with computations performed by SGP4 library.
That is initialised from a TLE element set loaded beforehand
into `self.TLEs` from a file, using the staticmethod `load`.
Most methods here return coordinates/values in the format used
by OpenGL and the shaders/3D coordinate system of the app.
Units : All time arguments to methods are in python `datetime.datetime`.
Times are also always in UTC *only* (+00:00).
Angles in degrees (+ve North or East for latitude/longitude).
"""
TLEs = {}
WARN_AGE = datetime.timedelta(days=21)
@staticmethod
def load(source:typing.Union[str, bytes, os.PathLike]):
with open(source, 'r') as f:
text = f.read()
pat = re.compile(r"(.{1,69})\n(1[\w\s\.\-\+]{68})\n(2[\w\s\.\-\+]{68})\n")
for match in re.finditer(pat, text):
n, l1, l2 = match.groups()
n = n.strip()
if n in Satellite.TLEs :
Satellite.TLEs[n].append((l1.strip(), l2.strip()))
else :
Satellite.TLEs[n] = [(l1.strip(), l2.strip())]
duplicates, d = [], {}
for s in Satellite.TLEs :
if isinstance(Satellite.TLEs[s], list):
if len(Satellite.TLEs[s])==1:
Satellite.TLEs[s] = Satellite.TLEs[s][0]
else :
for i, si in enumerate(Satellite.TLEs[s], 1):
d[s+f" [{i}]"] = si
duplicates.append(s)
for s in duplicates :
Satellite.TLEs.pop(s)
Satellite.TLEs = {**Satellite.TLEs, **d}
def __init__(self, name:str, when:datetime.datetime=None, warn:bool=False):
self.name = name
self._warn = True # Warn about old/inaccurate TLEs on instantiation
self.satrec = sgp4.api.Satrec.twoline2rv(*self.TLEs[name])
y = 2000 if self.satrec.epochyr < 57 else 1900
self.tle_epoch = datetime.datetime(y+self.satrec.epochyr, 1, 1) + \
datetime.timedelta(days=self.satrec.epochdays)
self.norad_catalog_num = self.TLEs[name][0][2:7]
self.period = datetime.timedelta(minutes = 2 * np.pi / self.satrec.nm)
self.obstime = when or datetime.datetime.now(datetime.timezone.utc)
self.orbitpath = self.get_orbit().flatten()
self.pos = tuple(self.orbitpath[:3])
self.framerot = self.get_earthrotation()
self._warn = warn # User's choice for further updates
@property
def obstime(self):
return self._obstime
@obstime.setter
def obstime(self, val:datetime.datetime):
if not isinstance(val, datetime.datetime):
raise ValueError("obstime must be a python datetime object")
if hasattr(self, 'tle_epoch') and self._warn and \
abs(val.replace(tzinfo=None) - self.tle_epoch) > self.WARN_AGE :
Logger.warning(f"Satellite: TLE age of '{self.name}' has "+\
f"exceeded {self.WARN_AGE.total_seconds()/86400} days")
self._obstime = val
def get_orbit(self, when:datetime.datetime=None, detail:int=360,
from_jdates:tuple[typing.Sequence, typing.Sequence] = None,
opengl_format:bool=True, ) -> np.ndarray :
if not from_jdates:
t = when or self.obstime
jd,fr=sgp4.api.jday(*t.timetuple()[:6])
T = self.period.total_seconds() / 86400.0
frs, jds = np.modf(np.linspace(jd+fr+0.5, jd+fr+0.5+T, detail))
jds -= 0.5
else :
jds, frs = from_jdates
e, p, v = self.satrec.sgp4_array(np.array(jds), np.array(frs))
if e.any():
for i in np.where(e)[0]:
Logger.error(f"SGP4 : Computational error - {sgp4.api.SGP4_ERRORS[e[i]]}")
if opengl_format:
# axis orientations differ
p = np.stack([p[:,1],p[:,2],p[:,0]]).T / self.satrec.radiusearthkm
return p
def get_earthrotation(self, when:datetime.datetime=None) -> float:
# How much to rotate 3D globe texture by (texcoord 0.0 == -180° longitude)
t = when or self.obstime
TEMEframerotation = ac.TEME(
(6400*u.km, 0*u.m, 0*u.m), obstime=t).transform_to(
ac.ITRS(obstime=t)
)
angle = TEMEframerotation.earth_location.geodetic.lon.value - 90.0
return angle
def get_sunposition(self, when:datetime.datetime=None, scale_factor:float=100,
) -> tuple[float,float,float]:
t = astropy.time.Time(when or self.obstime)
pos = ac.get_sun(t).transform_to(ac.TEME(obstime=t))
posf = (pos.cartesian.xyz.value * scale_factor).astype(np.float32)
# Location in OpenGL orientation; distance/scale/unit doesn't matter
# Explicitly convert to regular python `float` (important !) for GLSL shader
return (float(posf[1]), float(posf[2]), float(posf[0]))
def get_cartesianposition(self, lat:float, long:float, alt_rf:float=0.05,
when:datetime.datetime=None, opengl_format:bool=True, ) -> tuple[float,float,float]:
t = when or self._obstime
Re = self.satrec.radiusearthkm
g = ac.ITRS(long*u.deg, lat*u.deg, alt_rf*Re*u.km, obstime=t,
representation_type='wgs84geodetic')
teme = g.transform_to(ac.TEME(obstime=t)).cartesian.xyz.to(u.km).value
if opengl_format:
return tuple(np.array((teme[1],teme[2],teme[0])) / Re)
else :
return tuple(teme)
def geolocation(self, when:datetime.datetime=None) -> tuple[float,float,float]:
t = when or self.obstime
Re = self.satrec.radiusearthkm
xyz = (self.pos[2]*Re, self.pos[0]*Re, self.pos[1]*Re,)
geol = ac.TEME(xyz * u.km, obstime=t).transform_to(ac.ITRS(obstime=t)
).earth_location.geodetic
return (geol.lat.value, geol.lon.value, geol.height.value)
def update(self, when:datetime.datetime=None, set_new:bool=True) -> tuple[float,float,float]:
t = when or self.obstime
if set_new and when:
self.obstime = when
jd,fr = sgp4.api.jday(*t.timetuple()[:6])
e, p, v = self.satrec.sgp4(jd, fr)
if e:
err = sgp4.api.SGP4_ERRORS[e]
Logger.error(f"SGP4 : Computational error for {self.name} at {str(t)} - {err}")
Re = self.satrec.radiusearthkm
pos = (p[1]/Re, p[2]/Re, p[0]/Re)
if set_new :
self.pos = pos
return pos
def direction_in_sky(self, lat:float, long:float, alt:float = 0,
when:datetime.datetime=None) -> tuple[float,float,float]:
t = when or self.obstime
pos = self.update(when, False) if when else self.pos
el = ac.EarthLocation.from_geodetic(long, lat, alt)
Re = self.satrec.radiusearthkm
xyz = (pos[2]*Re, pos[0]*Re, pos[1]*Re,)
aa = ac.TEME(xyz * u.km, obstime=t).transform_to(
ac.AltAz(location=el, obstime=t)
)
return (aa.alt.value, aa.az.value, aa.distance.value)
def next_transits_from(self, lat:float, long:float, alt:float=0,
horizonangle:float=5, start:datetime.datetime=None, maxdays:float=14,
localtime:bool=False) -> np.ndarray[np.datetime64]:
el = ac.EarthLocation.from_geodetic(long, lat, alt)
Re = self.satrec.radiusearthkm
t = start or self.obstime.replace(tzinfo=None)
detail = 90
dtimes = astropy.time.Time(
np.arange(t, t+datetime.timedelta(days=maxdays), self.period/detail)
)
o1 = self.get_orbit(opengl_format=False, from_jdates=(dtimes.jd1, dtimes.jd2))
# Eliminate values where it definitely won't pass over based on latitude
# Then exact computation performed for the remaining
geol = ac.TEME(o1.T * u.km, obstime=dtimes).transform_to(
ac.ITRS(obstime=dtimes)).earth_location
approx_fov = np.degrees(np.arccos(Re / (geol.height.value + Re)))
subset = np.where(np.abs(geol.lat.value - lat) <= approx_fov)[0]
if len(subset):
selection = ac.TEME(o1[subset].T * u.km, obstime=dtimes[subset])
aa = selection.transform_to(ac.AltAz(location=el, obstime=dtimes[subset]))
x = dtimes[subset][
|
np.where(aa.alt.value > horizonangle)
|
numpy.where
|
#!/usr/bin/env python
# Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: <NAME> <<EMAIL>>
# Authors: <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import logging
import math
import numpy
import openravepy
import scipy.misc
import scipy.optimize
import threading
import time
import warnings
logger = logging.getLogger(__name__)
def create_sensor(env, args, anonymous=True):
sensor = openravepy.RaveCreateSensor(env, args)
if sensor is None:
raise Exception("Creating '%s' sensor failed." % args.split()[0])
env.Add(sensor, anonymous)
return sensor
def CreatePlannerParametersString(options, params=None,
remove_postprocessing=True):
""" Creates an OpenRAVE parameter XML string.
OpenRAVE planners have an InitPlan function that either take an instance of
the PlannerParameters() struct or the serialized XML version of this
struct. Unfortunately, it is not possible to override several default
options in the Python API. This function takes a seed PlannerParameters
struct and a dictionary of key-value pairs to override. It returns XML that
can be passed directly to InitPlan.
@param options: dictionary of key-value pairs
@type options: {str: str}
@param params: input struct (defaults to the defaults in OpenRAVE)
@type params: openravepy.Planner.PlannerParameters
@return planner parameters string XML
@rtype str
"""
import lxml.etree
import openravepy
from copy import deepcopy
options = deepcopy(options)
if remove_postprocessing:
options['_postprocessing'] = None
if params is None:
params = openravepy.Planner.PlannerParameters()
params_xml = lxml.etree.fromstring(params.__repr__().split('"""')[1])
for key, value in options.iteritems():
element = params_xml.find(key)
# Remove a value if "value" is None.
if value is None:
if element is not None:
params_xml.remove(element)
# Add (or overwrite) an existing value.
else:
if element is None:
element = lxml.etree.SubElement(params_xml, key)
element.text = str(value)
if remove_postprocessing:
params_xml.append(
lxml.etree.fromstring("""
<_postprocessing planner="">
<_nmaxiterations>20</_nmaxiterations>
<_postprocessing planner="parabolicsmoother">
<_nmaxiterations>100</_nmaxiterations>
</_postprocessing>
</_postprocessing>
""")
)
return lxml.etree.tostring(params_xml)
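# Hedged usage sketch (not part of the original module): '_nmaxiterations' is just one example of a
# PlannerParameters field to override; a working OpenRAVE + lxml installation is assumed.
def _example_CreatePlannerParametersString():
    params_xml = CreatePlannerParametersString({'_nmaxiterations': '40'})
    # The returned XML string can be passed directly to planner.InitPlan(robot, params_xml).
    return params_xml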
def HasGroup(cspec, group_name):
try:
cspec.GetGroupFromName(group_name)
return True
except openravepy.openrave_exception:
return False
def HasAffineDOFs(cspec):
return (HasGroup(cspec, 'affine_transform') or
HasGroup(cspec, 'affine_velocities') or
HasGroup(cspec, 'affine_accelerations'))
def HasJointDOFs(cspec):
return (HasGroup(cspec, 'joint_values') or
HasGroup(cspec, 'joint_velocities') or
HasGroup(cspec, 'joint_accelerations') or
HasGroup(cspec, 'joint_torques'))
def GetTrajectoryIndices(traj):
try:
cspec = traj.GetConfigurationSpecification()
joint_values_group = cspec.GetGroupFromName('joint_values')
return numpy.array([int(index) for index in
joint_values_group.name.split()[2:]])
except openravepy.openrave_exception:
return list()
def WaitForControllers(controllers, timeout=None, rate=20):
running_controllers = set(controllers)
start_time = time.time()
timestep = 1.0 / rate
while running_controllers:
# Check for a timeout.
now_time = time.time()
if timeout is not None and now_time - start_time > timeout:
return False
# Check if the trajectory is done.
done_controllers = set()
for controller in running_controllers:
if controller.IsDone():
done_controllers.add(controller)
running_controllers -= done_controllers
time.sleep(timestep)
return True
def SetCameraFromXML(viewer, xml):
if isinstance(viewer, openravepy.Environment):
viewer = viewer.GetViewer()
import lxml.etree
from StringIO import StringIO
padded_xml = '<bogus>{0:s}</bogus>'.format(xml)
camera_xml = lxml.etree.parse(StringIO(padded_xml))
translation_raw = camera_xml.find('//camtrans').text
axis_raw = camera_xml.find('//camrotationaxis').text
focal_raw = camera_xml.find('//camfocal').text
translation = numpy.loadtxt(StringIO(translation_raw))
axis_angle = numpy.loadtxt(StringIO(axis_raw))
axis_angle = axis_angle[3] * axis_angle[0:3] * (numpy.pi / 180)
focal = float(focal_raw)
transform = openravepy.matrixFromAxisAngle(axis_angle)
transform[0:3, 3] = translation
viewer.SetCamera(transform, focal)
def TakeSnapshot(viewer, path=None, show_figures=True,
width=1920, height=1080, fx=640, fy=640):
if isinstance(viewer, openravepy.Environment):
viewer = viewer.GetViewer()
viewer.SendCommand('SetFiguresInCamera {0:d}'.format(show_figures))
image = viewer.GetCameraImage(width, height, viewer.GetCameraTransform(),
[fx, fy, width / 2, height / 2])
if path is not None:
scipy.misc.imsave(path, image)
return image
def ComputeAinv(N, dof):
dt = 1.0 / (N - 1)
K = numpy.mat(numpy.zeros((N - 1, N - 1)))
for i in range(1, N - 1):
K[i, i] = 1 / dt
K[i, i - 1] = -1 / dt
K[0, 0] = 1 / dt
A = K.transpose() * K
invA_small = numpy.linalg.inv(A)
# Tensorize.
invA = numpy.mat(numpy.zeros([(N) * dof, (N) * dof]))
for i in range(1, N):
for j in range(1, N):
for k in range(dof):
invA[i * dof + k, j * dof + k] = invA_small[i - 1, j - 1]
return invA
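# Illustrative shape check (not part of the original module): the tensorized inverse returned by
# ComputeAinv is a square (N * dof) x (N * dof) matrix.
def _example_ComputeAinv():
    invA = ComputeAinv(N=5, dof=2)
    assert invA.shape == (10, 10)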
def NormalizeVector(vec):
"""
Normalize a vector.
This is faster than doing: vec/numpy.linalg.norm(vec)
@param numpy.array vec: A 1-dimensional vector.
@returns numpy.array result: A vector of the same size, where the
L2 norm of the elements equals 1.
"""
numpy.seterr(divide='ignore', invalid='ignore')
magnitude = numpy.sqrt(vec.dot(vec))
vec2 = (vec / magnitude)
return numpy.nan_to_num(vec2) # convert NaN to zero
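# Illustrative check (not part of the original module): [3, 4] normalizes to [0.6, 0.8] and the
# zero vector maps to zeros rather than NaN.
def _example_NormalizeVector():
    assert numpy.allclose(NormalizeVector(numpy.array([3.0, 4.0])), [0.6, 0.8])
    assert numpy.allclose(NormalizeVector(numpy.zeros(3)), numpy.zeros(3))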
def MatrixToTraj(traj_matrix, cs, dof, robot):
env = robot.GetEnv()
traj = openravepy.RaveCreateTrajectory(env, '')
traj.Init(cs)
for i in range(numpy.size(traj_matrix) / dof):
tp = traj_matrix[range(i * dof, i * dof + dof)]
tp = numpy.array(tp.transpose())[0]
traj.Insert(i, tp)
openravepy.planningutils.RetimeActiveDOFTrajectory(
traj, robot, False, 0.2, 0.2, "LinearTrajectoryRetimer", "")
return traj
def TrajToMatrix(traj, dof):
traj_matrix = numpy.zeros(dof * (traj.GetNumWaypoints()))
traj_matrix = numpy.mat(traj_matrix).transpose()
for i in range(traj.GetNumWaypoints()):
d = traj.GetWaypoint(i)[range(dof)]
d = numpy.mat(d).transpose()
traj_matrix[range(i * dof, (i + 1) * dof)] = d
return traj_matrix
def AdaptTrajectory(traj, new_start, new_goal, robot):
"""
Adapt an existing trajectory to move between a new start and goal.
The trajectory's configuration specification must contain exactly one group
called "joint_values". Note that this does NOT collision check the warped
trajectory.
@param traj input trajectory
@param new_start new starting configuration
@param new_goal new goal configuration
@param robot
@return adapted trajectory
"""
# TODO: check joint limits
# TODO: support arbitrary trajectory types
# TODO: collision check the warped trajectory
# TODO: this should not require a robot as a parameter
cs = traj.GetConfigurationSpecification()
dof = cs.GetDOF()
start = traj.GetWaypoint(0)
start = start[range(dof)]
goal = traj.GetWaypoint(traj.GetNumWaypoints() - 1)
goal = goal[range(dof)]
traj_matrix = TrajToMatrix(traj, dof)
# Translate trajectory to match start point.
diff_start = new_start - start
diff_start = numpy.mat(diff_start).transpose()
translated_traj = numpy.zeros(dof * (traj.GetNumWaypoints()))
translated_traj = numpy.mat(translated_traj).transpose()
for i in range(traj.GetNumWaypoints()):
translated_traj[range((i - 1) * dof, i * dof)] = \
traj_matrix[range((i - 1) * dof, i * dof)] - diff_start
# Apply correction to reach goal point.
new_traj_matrix = translated_traj
N = traj.GetNumWaypoints()
goal_translated = new_traj_matrix[range((N - 1) * dof, (N) * dof)]
Ainv = ComputeAinv(N, dof)
goal_diff = numpy.mat(new_goal).transpose() - goal_translated
traj_diff = numpy.zeros(dof * (N))
traj_diff = numpy.mat(traj_diff).transpose()
traj_diff[range((N - 1) * dof, (N) * dof)] = goal_diff
new_traj_matrix += Ainv * traj_diff / Ainv[N * dof - 1, N * dof - 1]
new_traj = MatrixToTraj(new_traj_matrix, cs, dof, robot)
return new_traj
def CopyTrajectory(traj, env=None):
"""
Create a new copy of a trajectory using its Clone() operator.
@param traj input trajectory
@param env optional environment used to initialize a trajectory
@return copy of the trajectory
"""
copy_traj = openravepy.RaveCreateTrajectory(env or traj.GetEnv(),
traj.GetXMLId())
copy_traj.Clone(traj, 0)
copy_traj.SetDescription(traj.GetDescription())
return copy_traj
def GetTrajectoryTags(traj):
"""
Read key/value pairs from a trajectory.
The metadata can be set by SetTrajectoryTags; see that function for
details. If no metadata is set, this function returns an empty dictionary.
@param traj input trajectory
@return dictionary of string key/value pairs
"""
import json
description = traj.GetDescription()
if description == '':
return dict()
else:
try:
return json.loads(description)
except ValueError as e:
logger.warning('Failed reading tags from trajectory: %s',
e.message)
return dict()
def SetTrajectoryTags(traj, tags, append=False):
"""
Tag a trajectory with a dictionary of key/value pairs.
If append = True, then the dictionary of tags is added to any existing tags
on the trajectory. Otherwise, all existing tags will be replaced. This
metadata can be accessed by GetTrajectoryTags. Currently, the metadata is
stored as JSON in the trajectory's description.
@param traj input trajectory
@param append if true, retain existing tags on the trajectory
"""
import json
if append:
all_tags = GetTrajectoryTags(traj)
all_tags.update(tags)
else:
all_tags = tags
traj.SetDescription(json.dumps(all_tags))
def SimplifyTrajectory(traj, robot):
"""
Re-interpolate trajectory as minimal set of linear segments.
This function attempts to extract linear segments from the given
trajectory by iteratively adding extrema waypoints to a set of
linear segments until all of the original trajectory waypoints are
within the robot's joint resolutions of the interpolated segments.
Currently, only untimed trajectories are supported!
@param robot the robot that should be used for the interpolation
@param traj input trajectory that will be simplified
@returns output trajectory of timed linear segments
"""
from scipy import interpolate
if traj.GetDuration() != 0.0:
raise ValueError("Cannot handle timed trajectories yet!")
if traj.GetNumWaypoints() < 2:
return traj
cspec = traj.GetConfigurationSpecification()
dofs = robot.GetActiveDOFIndices()
idxs = range(traj.GetNumWaypoints())
joints = [robot.GetJointFromDOFIndex(d) for d in dofs]
times = numpy.array(
idxs if not traj.GetDuration() else
numpy.cumsum([cspec.ExtractDeltaTime(traj.GetWaypoint(i),
robot, dofs) for i in idxs]))
values = numpy.array(
[cspec.ExtractJointValues(traj.GetWaypoint(i), robot, dofs)
for i in idxs])
resolutions = numpy.array([j.GetResolution(0) for j in joints])
# Start with an extrema set of the first to the last waypoint.
mask = numpy.zeros(times.shape, dtype=bool)
mask[[0, -1]] = True
for _ in idxs:
# Create new interpolation set from current extrema.
f = interpolate.interp1d(times[mask], values[mask, :],
axis=0, kind='linear')
errors = numpy.abs(f(times) - values)
# TODO: Can this be a single call?
# Find the extrema in the remaining waypoints.
max_err_idx = numpy.argmax(errors, axis=0)
max_err_vals = numpy.max(errors, axis=0)
# Add any extrema that deviated more than joint resolution.
max_err_idx = max_err_idx[max_err_vals > resolutions]
mask[max_err_idx] = True
# If none deviated more than joint resolution, the set is complete.
if len(max_err_idx) == 0:
break
# Return a new reduced trajectory.
import openravepy
reduced_traj = openravepy.RaveCreateTrajectory(traj.GetEnv(),
traj.GetXMLId())
reduced_traj.Init(cspec)
for (new_idx, old_idx) in enumerate(mask.nonzero()[0]):
reduced_traj.Insert(new_idx, traj.GetWaypoint(old_idx))
return reduced_traj
class Recorder(object):
MPEG = 13
def __init__(self, env, filename, width=1920, height=1080, codec=MPEG):
self.env = env
self.filename = filename
self.width = width
self.height = height
self.codec = codec
self.module = openravepy.RaveCreateModule(env, 'ViewerRecorder')
env.Add(self.module)
def __enter__(self):
self.start()
def __exit__(self, type, value, traceback):
self.stop()
def start(self):
cmd = ('Start {width:d} {height:d} 30 '
'codec {codec:d} timing realtime filename {filename:s}\n'
'viewer {viewer:s}'
.format(width=self.width, height=self.height,
codec=self.codec, filename=self.filename,
viewer=self.env.GetViewer().GetName()))
self.module.SendCommand(cmd)
def stop(self):
self.module.SendCommand('Stop')
class AlignmentToken(object):
def __init__(self, env, child_frame, extents,
pose=None, period=0.05, parent_frame='world'):
self.child_frame = child_frame
self.parent_frame = parent_frame
self.period = period
with env:
self.body = openravepy.RaveCreateKinBody(env, '')
aabbs = numpy.concatenate(([0., 0., 0.], extents)).reshape((1, 6))
self.body.InitFromBoxes(aabbs, True)
self.body.SetName('frame:' + child_frame)
if pose is not None:
self.body.SetTransform(pose)
env.Add(self.body, True)
import tf
self.broadcaster = tf.TransformBroadcaster()
self.update()
def update(self):
import rospy
with self.body.GetEnv():
or_pose = self.body.GetTransform()
or_quaternion = openravepy.quatFromRotationMatrix(or_pose)
position = tuple(or_pose[0:3, 3])
orientation = (or_quaternion[1], or_quaternion[2],
or_quaternion[3], or_quaternion[0])
self.broadcaster.sendTransform(position, orientation, rospy.Time.now(),
self.child_frame, self.parent_frame)
self.timer = threading.Timer(self.period, self.update)
self.timer.daemon = True
self.timer.start()
def destroy(self):
self.body.GetEnv().Remove(self.body)
self.body = None
class Timer(object):
def __init__(self, message=None):
self.message = message
self.start = 0
def __enter__(self):
if self.message is not None:
logging.info('%s started execution.', self.message)
self.start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
if self.message is not None:
logging.info('%s executed in %.5f seconds.',
self.message, self.get_duration())
def stop(self):
self.end = time.time()
def get_duration(self):
return self.end - self.start
class Watchdog(object):
"""
Calls specified function after duration, unless reset/stopped beforehand
@param timeout_duration how long to wait before calling handler
@param handler function to call after timeout_duration
@param args for handler
@param kwargs for handler
"""
def __init__(self, timeout_duration, handler, args=(), kwargs={}):
self.timeout_duration = timeout_duration
self.handler = handler
self.handler_args = args
self.handler_kwargs = kwargs
self.thread_checking_time = threading.Thread(
target=self._check_timer_loop)
self.timer_thread_lock = threading.Lock()
self.start_time = time.time()
self.canceled = False
self.thread_checking_time.start()
def reset(self):
"""
Resets the timer.
Causes the handler function to be called after the next timeout
duration is reached. Also restarts the timer thread if it has exited.
"""
with self.timer_thread_lock:
if self.canceled or not self.thread_checking_time.is_alive():
self.thread_checking_time = threading.Thread(
target=self._check_timer_loop)
self.thread_checking_time.start()
self.start_time = time.time()
self.canceled = False
def stop(self):
"""
Stop the watchdog, so it will not call handler
"""
with self.timer_thread_lock:
self.canceled = True
def _check_timer_loop(self):
"""
Internal function for timer thread to loop
If elapsed time has passed, calls the handler function
Exits if the watchdog was canceled or the handler was called
"""
while True:
with self.timer_thread_lock:
if self.canceled:
break
elapsed_time = time.time() - self.start_time
if elapsed_time > self.timeout_duration:
self.handler(*self.handler_args, **self.handler_kwargs)
with self.timer_thread_lock:
self.canceled = True
break
else:
time.sleep(self.timeout_duration - elapsed_time)
def quadraticPlusJointLimitObjective(dq, J, dx, q, q_min, q_max,
delta_joint_penalty=5e-1,
lambda_dqdist=0.01,
*args):
"""
Quadratic and joint-limit-avoidance objective for SciPy's optimization.
@param dq joint velocity
@param J Jacobian
@param dx desired twist
@param q current joint values
@param q_min lower joint limit
@param q_max upper joint limit
@param delta_joint_penalty distance from a joint limit at which the penalty applies
@param lambda_dqdist weighting for the joint limit penalty
"""
# Compute quadratic distance part.
objective, gradient = quadraticObjective(dq, J, dx)
# Add penalty for joint limit avoidance.
qdiff_lower = delta_joint_penalty - (q - q_min)
qdiff_upper = delta_joint_penalty - (q_max - q)
dq_target = [diff_lower if diff_lower > 0. else
(-diff_upper if diff_upper > 0. else 0.)
for diff_lower, diff_upper in zip(qdiff_lower, qdiff_upper)]
objective += lambda_dqdist * 0.5 * sum(numpy.square(dq - dq_target))
gradient += lambda_dqdist * (dq - dq_target)
return objective, gradient
def quadraticObjective(dq, J, dx, *args):
"""
Quadratic objective function for SciPy's optimization.
@param dq joint velocity
@param J Jacobian
@param dx desired twist
@return objective the objective function
@return gradient the analytical gradient of the objective
"""
error = (numpy.dot(J, dq) - dx)
objective = 0.5 * numpy.dot(numpy.transpose(error), error)
gradient = numpy.dot(numpy.transpose(J), error)
return objective, gradient
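# Worked example (not part of the original module): with J = I, dq = [1, 2] and dx = [0, 0] the error
# is [1, 2], so the objective is 0.5 * (1 + 4) = 2.5 and the gradient is J^T error = [1, 2].
def _example_quadraticObjective():
    J = numpy.eye(2)
    objective, gradient = quadraticObjective(numpy.array([1.0, 2.0]), J, numpy.array([0.0, 0.0]))
    assert abs(objective - 2.5) < 1e-12
    assert numpy.allclose(gradient, [1.0, 2.0])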
def ComputeJointVelocityFromTwist(
robot, twist, objective=quadraticObjective, dq_init=None,
joint_limit_tolerance=3e-2, joint_velocity_limits=None):
"""
Computes the optimal joint velocity given a twist by formulating
the problem as a quadratic optimization with box constraints and
using SciPy's L-BFGS-B solver.
@params robot the robot
@params twist the desired twist in se(3)
with float('NaN') for dimensions we don't care about
@params objective optional objective function to optimize
defaults to quadraticObjective
@params dq_init optional initial guess for optimal joint velocity
defaults to robot.GetActiveDOFVelocities()
@params joint_velocity_limits override the robot's joint velocity limit;
defaults to robot.GetActiveDOFMaxVel()
@params joint_limit_tolerance if less than this distance to a joint
limit, velocity is bounded in that direction to 0
@return dq_opt optimal joint velocity
@return twist_opt actual achieved twist
can be different from desired twist due to constraints
"""
manip = robot.GetActiveManipulator()
robot.SetActiveDOFs(manip.GetArmIndices())
if joint_velocity_limits is None:
joint_velocity_limits = robot.GetActiveDOFMaxVel()
elif isinstance(joint_velocity_limits, float):
joint_velocity_limits = numpy.array(
[numpy.PINF] * robot.GetActiveDOF())
if len(joint_velocity_limits) != robot.GetActiveDOF():
raise ValueError(
'Joint velocity limits has incorrect length:'
' Expected {:d}, got {:d}.'.format(
robot.GetActiveDOF(), len(joint_velocity_limits)))
elif (joint_velocity_limits <= 0.).any():
raise ValueError('One or more joint velocity limit is not positive.')
jacobian_spatial = manip.CalculateJacobian()
jacobian_angular = manip.CalculateAngularVelocityJacobian()
jacobian = numpy.vstack((jacobian_spatial, jacobian_angular))
rows = [i for i, x in enumerate(twist) if math.isnan(x) is False]
twist_active = twist[rows]
jacobian_active = jacobian[rows, :]
bounds = numpy.column_stack(
(-joint_velocity_limits, joint_velocity_limits))
# Check for joint limits
q_curr = robot.GetActiveDOFValues()
q_min, q_max = robot.GetActiveDOFLimits()
dq_bounds = [
(0., max) if q_curr[i] <= q_min[i] + joint_limit_tolerance else
(min, 0.) if q_curr[i] >= q_max[i] - joint_limit_tolerance else
(min, max) for i, (min, max) in enumerate(bounds)
]
if dq_init is None:
dq_init = robot.GetActiveDOFVelocities()
opt = scipy.optimize.fmin_l_bfgs_b(
objective, dq_init, fprime=None,
args=(jacobian_active, twist_active, q_curr, q_min, q_max),
bounds=dq_bounds, approx_grad=False
)
dq_opt = opt[0]
twist_opt = numpy.dot(jacobian, dq_opt)
return dq_opt, twist_opt
def GeodesicTwist(t1, t2):
"""
Computes the twist in global coordinates that corresponds
to the gradient of the geodesic distance between two transforms.
@param t1 current transform
@param t2 goal transform
@return twist in se(3)
"""
trel = numpy.dot(numpy.linalg.inv(t1), t2)
trans = numpy.dot(t1[0:3, 0:3], trel[0:3, 3])
omega = numpy.dot(t1[0:3, 0:3],
openravepy.axisAngleFromRotationMatrix(
trel[0:3, 0:3]))
return numpy.hstack((trans, omega))
def GeodesicError(t1, t2):
"""
Computes the error in global coordinates between two transforms.
@param t1 current transform
@param t2 goal transform
@return a 4-vector of [dx, dy, dz, solid angle]
"""
trel = numpy.dot(numpy.linalg.inv(t1), t2)
trans = numpy.dot(t1[0:3, 0:3], trel[0:3, 3])
omega = openravepy.axisAngleFromRotationMatrix(trel[0:3, 0:3])
angle = numpy.linalg.norm(omega)
return numpy.hstack((trans, angle))
def AngleBetweenQuaternions(quat1, quat2):
"""
Compute the angle between two quaternions.
Returns an angle in the range [0, pi].
"""
theta = numpy.arccos(2.0 * (quat1.dot(quat2))**2 - 1.0)
return theta
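# Illustrative check (not part of the original module): the identity quaternion and a quaternion for a
# 90 degree rotation about z (both in OpenRAVE's [w, x, y, z] order) are separated by pi/2.
def _example_AngleBetweenQuaternions():
    q_identity = numpy.array([1.0, 0.0, 0.0, 0.0])
    q_z90 = numpy.array([numpy.cos(numpy.pi / 4), 0.0, 0.0, numpy.sin(numpy.pi / 4)])
    assert abs(AngleBetweenQuaternions(q_identity, q_z90) - numpy.pi / 2) < 1e-9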
def AngleBetweenRotations(rot1, rot2):
"""
Compute the angle between two 3x3 rotation matrices.
Returns an angle in the range [0, pi].
"""
quat1 = openravepy.quatFromRotationMatrix(rot1)
quat2 = openravepy.quatFromRotationMatrix(rot2)
return AngleBetweenQuaternions(quat1, quat2)
def GeodesicDistance(t1, t2, r=1.0):
"""
Computes the geodesic distance between two transforms
@param t1 current transform
@param t2 goal transform
@param r in units of meters/radians converts radians to meters
"""
error = GeodesicError(t1, t2)
error[3] = r * error[3]
return numpy.linalg.norm(error)
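# Illustrative check (not part of the original module; requires openravepy, imported above): with equal
# rotations and a pure translation of [3, 0, 4], the angular error is zero and the distance is 5.
def _example_GeodesicDistance():
    t1 = numpy.eye(4)
    t2 = numpy.eye(4)
    t2[0:3, 3] = [3.0, 0.0, 4.0]
    assert abs(GeodesicDistance(t1, t2) - 5.0) < 1e-9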
def GetGeodesicDistanceBetweenTransforms(T0, T1, r=1.0):
"""
Wrapper, to match GetGeodesicDistanceBetweenQuaternions()
Calculate the geodesic distance between two transforms, being
gd = norm( relative translation + r * axis-angle error )
@param t1 current transform
@param t2 goal transform
@param r in units of meters/radians converts radians to meters
"""
return GeodesicDistance(T0, T1, r)
def GetEuclideanDistanceBetweenPoints(p0, p1):
"""
Calculate the Euclidean distance (L2 norm) between two vectors.
"""
sum = 0.0
for i in xrange(len(p0)):
sum = sum + (p0[i] - p1[i]) * (p0[i] - p1[i])
return numpy.sqrt(sum)
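# Illustrative check (not part of the original module): the 3-4-5 right triangle.
def _example_GetEuclideanDistanceBetweenPoints():
    d = GetEuclideanDistanceBetweenPoints([0.0, 0.0, 0.0], [3.0, 4.0, 0.0])
    assert abs(d - 5.0) < 1e-12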
def GetEuclideanDistanceBetweenTransforms(T0, T1):
"""
Calculate the Euclidean distance between the translational
component of two 4x4 transforms.
(also called L2 or Pythagorean distance)
"""
p0 = T0[0:3, 3] # Get the x,y,z translation from the 4x4 matrix
p1 = T1[0:3, 3]
return GetEuclideanDistanceBetweenPoints(p0, p1)
def GetMinDistanceBetweenTransformAndWorkspaceTraj(T, traj, dt=0.01):
"""
Find the location on a workspace trajectory which is closest
to the specified transform.
@param numpy.matrix T: A 4x4 transformation matrix.
@param openravepy.Trajectory traj: A timed workspace trajectory.
@param float dt: Resolution at which to sample along the trajectory.
@return (float,float) (min_dist, t_loc, T_loc) The minimum distance,
the time value along the timed
trajectory, and the transform.
"""
if not IsTimedTrajectory(traj):
raise ValueError("Trajectory must have timing information.")
if not IsTrajectoryTypeIkParameterizationTransform6D(traj):
raise ValueError("Trajectory is not a workspace trajectory, it "
"must have configuration specification of "
"openravepy.IkParameterizationType.Transform6D")
def _GetError(t):
T_curr = openravepy.matrixFromPose(traj.Sample(t)[0:7])
error = GetEuclideanDistanceBetweenTransforms(T, T_curr)
return error
min_dist = numpy.inf
t_loc = 0.0
T_loc = None
# Iterate over the trajectory
t = 0.0
duration = traj.GetDuration()
while t < duration:
error = _GetError(t)
if error < min_dist:
min_dist = error
t_loc = t
t = t + dt
# Also check the end-point
error = _GetError(duration)
if error < min_dist:
min_dist = error
t_loc = t
T_loc = openravepy.matrixFromPose(traj.Sample(t_loc)[0:7])
return (min_dist, t_loc, T_loc)
def FindCatkinResource(package, relative_path):
"""
Find a Catkin resource in the share directory or
the package source directory. Raises IOError
if resource is not found.
@param relative_path Path relative to share or package source directory
@param package The package to search in
@return Absolute path to resource
"""
from catkin.find_in_workspaces import find_in_workspaces
paths = find_in_workspaces(project=package, search_dirs=['share'],
path=relative_path, first_match_only=True)
if paths and len(paths) == 1:
return paths[0]
else:
raise IOError('Loading resource "{:s}" failed.'.format(
relative_path))
def IsAtTrajectoryWaypoint(robot, trajectory, waypoint_idx):
"""
Check if robot is at a particular waypoint in a trajectory.
This function examines the current DOF values of the specified
robot and compares these values to the first waypoint of the
specified trajectory. If the DOF values specified in the trajectory
differ by less than the DOF resolution of the specified joint/axis
then it will return True. Otherwise, it returns False.
NOTE: This is used in ExecuteTrajectory(),
IsAtTrajectoryStart(), and
IsAtTrajectoryEnd()
@param robot: The robot whose active DOFs will be checked.
@param trajectory: The trajectory containing the waypoint
to be checked.
@returns: True The robot is at the desired position.
False One or more joints differ by DOF resolution.
"""
if trajectory.GetNumWaypoints() == 0:
raise ValueError('Trajectory has 0 waypoints!')
cspec = trajectory.GetConfigurationSpecification()
needs_base = HasAffineDOFs(cspec)
needs_joints = HasJointDOFs(cspec)
if needs_base and needs_joints:
raise ValueError('Trajectories with affine and joint DOFs are '
'not supported')
if trajectory.GetEnv() != robot.GetEnv():
raise ValueError('The environment attached to the trajectory '
'does not match the environment attached to '
'the robot in IsAtTrajectoryStart().')
if needs_base:
rtf = robot.GetTransform()
doft = (openravepy.DOFAffine.X |
openravepy.DOFAffine.Y |
openravepy.DOFAffine.RotationAxis)
curr_pose = openravepy.RaveGetAffineDOFValuesFromTransform(rtf, doft)
start_transform = numpy.eye(4)
waypoint = trajectory.GetWaypoint(0)
start_t = cspec.ExtractTransform(start_transform, waypoint, robot)
traj_start = openravepy.RaveGetAffineDOFValuesFromTransform(
start_t, doft)
# Compare translation distance
trans_delta_value = abs(curr_pose[:2] - traj_start[:2])
trans_resolution = robot.GetAffineTranslationResolution()[:2]
if trans_delta_value[0] > trans_resolution[0] or \
trans_delta_value[1] > trans_resolution[1]:
return False
# Compare rotation distance
rot_delta_value = abs(wrap_to_interval(curr_pose[2] - traj_start[2]))
rot_res = robot.GetAffineRotationAxisResolution()[2] # Rot about z?
if rot_delta_value > rot_res:
return False
else:
# Get joint indices used in the trajectory,
# and the joint positions at this waypoint
waypoint = trajectory.GetWaypoint(waypoint_idx)
dof_indices, _ = cspec.ExtractUsedIndices(robot)
goal_config = cspec.ExtractJointValues(waypoint, robot, dof_indices)
# Return false if any joint deviates too much
return IsAtConfiguration(robot, goal_config, dof_indices)
return True
def IsAtTrajectoryStart(robot, trajectory):
"""
Check if robot is at the configuration specified by
the FIRST waypoint in a trajectory.
"""
waypoint_idx = 0
return IsAtTrajectoryWaypoint(robot, trajectory, waypoint_idx)
def IsAtTrajectoryEnd(robot, trajectory):
"""
Check if robot is at the configuration specified by
the LAST waypoint in a trajectory.
"""
waypoint_idx = trajectory.GetNumWaypoints() - 1
return IsAtTrajectoryWaypoint(robot, trajectory, waypoint_idx)
def IsAtConfiguration(robot, goal_config, dof_indices=None):
"""
Check if robot's joints have reached a desired configuration.
If the DOF indices are not specified, the robot's active DOF
will be used.
@param robot The robot object.
@param goal_config The desired configuration, an array of joint
positions.
@param dof_indices The joint index numbers.
@return boolean Returns True if joints are at goal position,
within DOF resolution.
"""
# If DOF indices not specified, use the active DOF by default
if dof_indices is None:
dof_indices = robot.GetActiveDOFIndices()
# Get current position of joints
with robot.GetEnv():
joint_values = robot.GetDOFValues(dof_indices)
dof_resolutions = robot.GetDOFResolutions(dof_indices)
# If any joint is not at the goal position, return False
for i in xrange(0, len(goal_config)):
# Get the axis index for this joint, which is 0
# for revolute joints or 0-2 for spherical joints.
joint = robot.GetJointFromDOFIndex(dof_indices[i])
axis_idx = dof_indices[i] - joint.GetDOFIndex()
# Use OpenRAVE method to check the configuration
# difference value1-value2 for axis i,
# taking into account joint limits and wrapping
# of continuous joints.
delta_value = abs(joint.SubtractValue(joint_values[i], goal_config[i],
axis_idx))
if delta_value > dof_resolutions[i]:
return False
# If all joints match the goal, return True
return True
def IsTimedTrajectory(trajectory):
"""
Returns True if the trajectory is timed.
This function checks whether a trajectory has a valid `deltatime` group,
indicating that it is a timed trajectory.
@param trajectory: an OpenRAVE trajectory
@returns: True if the trajectory is timed, False otherwise
"""
cspec = trajectory.GetConfigurationSpecification()
empty_waypoint = numpy.zeros(cspec.GetDOF())
return cspec.ExtractDeltaTime(empty_waypoint) is not None
def IsJointSpaceTrajectory(traj):
"""
Check if trajectory is a joint space trajectory.
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
cspec = traj.GetConfigurationSpecification()
if cspec.GetGroupFromName("joint_values"):
return True
except openravepy.openrave_exception:
pass
return False
def IsWorkspaceTrajectory(traj):
"""
Check if trajectory is a workspace trajectory.
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
return IsTrajectoryTypeIkParameterizationTransform6D(traj)
def IsTrajectoryTypeIkParameterization(traj):
"""
Check if trajectory has a configuration specification
of type IkParameterization:
Transform6d
Rotation3D
Translation3D
Direction3D
Ray4D
Lookat3D
TranslationDirection5D
TranslationXY2D
TranslationXYOrientation3D
TranslationLocalGlobal6D
TranslationXAxisAngle4D
TranslationYAxisAngle4D
TranslationZAxisAngle4D
TranslationXAxisAngleZNorm4D
TranslationYAxisAngleXNorm4D
TranslationZAxisAngleYNorm4D
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
cspec = traj.GetConfigurationSpecification()
if cspec.GetGroupFromName("ikparam_values"):
return True
except openravepy.openrave_exception:
pass
return False
def IsTrajectoryTypeIkParameterizationTransform6D(traj):
"""
Check if trajectory has a configuration specification
of type IkParameterization.Transform6D
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
IKP_type = openravepy.IkParameterizationType.Transform6D
# The IKP type must be passed as a number
group_name = "ikparam_values {0}".format(int(IKP_type))
if traj.GetConfigurationSpecification().GetGroupFromName(group_name):
return True
except openravepy.openrave_exception:
pass
return False
def IsTrajectoryTypeIkParameterizationTranslationDirection5D(traj):
"""
Check if trajectory has a configuration specification
of type IkParameterization.TranslationDirection5D
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
IKP_type = openravepy.IkParameterizationType.TranslationDirection5D
group_name = "ikparam_values {0}".format(int(IKP_type))
if traj.GetConfigurationSpecification().GetGroupFromName(group_name):
return True
except openravepy.openrave_exception:
pass
return False
def ComputeEnabledAABB(kinbody):
"""
Returns the AABB of the enabled links of a KinBody.
@param kinbody: an OpenRAVE KinBody
@returns: AABB of the enabled links of the KinBody
"""
from numpy import NINF, PINF
from openravepy import AABB
min_corner = numpy.array([PINF] * 3)
max_corner = numpy.array([NINF] * 3)
for link in kinbody.GetLinks():
if link.IsEnabled():
link_aabb = link.ComputeAABB()
center = link_aabb.pos()
half_extents = link_aabb.extents()
min_corner = numpy.minimum(center - half_extents, min_corner)
max_corner = numpy.maximum(center + half_extents, max_corner)
center = (min_corner + max_corner) / 2.
half_extents = (max_corner - min_corner) / 2.
return AABB(center, half_extents)
def UntimeTrajectory(trajectory, env=None):
"""
Returns an untimed copy of the provided trajectory.
This function strips the DeltaTime group from a timed trajectory to create
an untimed trajectory.
@param trajectory: an OpenRAVE trajectory
@returns: an untimed copy of the provided trajectory.
"""
cspec = trajectory.GetConfigurationSpecification()
cspec.RemoveGroups('deltatime', True)
waypoints = trajectory.GetWaypoints(0, trajectory.GetNumWaypoints(), cspec)
path = openravepy.RaveCreateTrajectory(env or trajectory.GetEnv(),
trajectory.GetXMLId())
path.Init(cspec)
path.Insert(0, waypoints)
return path
def ComputeUnitTiming(robot, traj, env=None):
"""
Compute the unit velocity timing of a path or trajectory.
@param robot: robot whose DOFs should be considered
@param traj: path or trajectory
@param env: environment to create the output trajectory in; defaults to the
same environment as the input trajectory
@returns: trajectory with unit velocity timing
"""
from openravepy import RaveCreateTrajectory
if env is None:
env = traj.GetEnv()
old_cspec = traj.GetConfigurationSpecification()
dof_indices, _ = old_cspec.ExtractUsedIndices(robot)
with robot.CreateRobotStateSaver():
robot.SetActiveDOFs(dof_indices)
new_cspec = robot.GetActiveConfigurationSpecification('linear')
new_cspec.AddDeltaTimeGroup()
new_traj = RaveCreateTrajectory(env, '')
new_traj.Init(new_cspec)
dof_values_prev = None
for i in range(traj.GetNumWaypoints()):
old_waypoint = traj.GetWaypoint(i)
dof_values = old_cspec.ExtractJointValues(
old_waypoint, robot, dof_indices)
if i == 0:
deltatime = 0.
else:
deltatime =
|
numpy.linalg.norm(dof_values - dof_values_prev)
|
numpy.linalg.norm
|
import numpy as np
import numba
@numba.jit(numba.types.Array(dtype=numba.c16, ndim=2, layout="C")(
numba.types.Array(dtype=numba.c16, ndim=2, layout="C"),
numba.types.Array(dtype=numba.c16, ndim=1, layout="C"),
numba.types.Array(dtype=numba.c16, ndim=1, layout="C"),
numba.types.float64,
numba.types.int64),
forceobj=True,
cache=True,
fastmath=True)
def Three_step_SGD_inspector(A_matrix,
f_vector,
u0_vector,
eps=10e-7,
n_iter=10000):
# Iteration components
N = A_matrix.shape[0]
u_vector = np.zeros((N, 1), dtype=complex)  # Iterates
r = np.zeros((N, 1), dtype=complex)  # Residuals of the iterations
g = np.zeros((N, 1), dtype=complex)  # Gradient vector
d = np.zeros((N, 1), dtype=complex)  #
deltaR = np.zeros((N, 1), dtype=complex)  # Difference between successive residuals
deltaU = np.zeros((N, 1), dtype=complex)  # Difference between successive iterates
alpha = np.zeros((1, ), dtype=complex)  # Iteration parameter
beta = np.zeros((1, ), dtype=complex)  # Iteration parameter
gamma = np.zeros((1, ), dtype=complex)  # Iteration parameter
A_star = np.transpose(np.conj(A_matrix))
r[:, 0] = A_matrix @ u0_vector - f_vector
g[:, 0] = A_star @ r[:, 0]
A_g0 = A_matrix @ g[:, 0]
beta[0] = (g[:, 0] @ np.conj(g[:, 0])) / (A_g0 @ np.conj(A_g0))
u_vector[:, 0] = u0_vector
u_vector = np.concatenate((u_vector, (u_vector[:, 0] - beta[0] * g[:, 0]).reshape((N, 1))), 1)
for k in range(1, n_iter):
new_r = (A_matrix @ u_vector[:, k]) - f_vector
r = np.concatenate((r, new_r.reshape((N, 1))), 1)
new_deltaR = r[:, k] - r[:, k - 1]
deltaR = np.concatenate((deltaR, new_deltaR.reshape((N, 1))), 1)
new_g = (A_star @ r[:, k])
g = np.concatenate((g, new_g.reshape((N, 1))), 1)
new_deltaU = u_vector[:, k] - u_vector[:, k - 1]
deltaU = np.concatenate((deltaU, new_deltaU.reshape((N, 1))), 1)
new_d = r[:, k] - (r[:, k] @ np.conj(deltaU[:, k])) / \
(deltaU[:, k] @ np.conj(deltaU[:, k])) * deltaU[:, k] - \
(r[:, k] @ np.conj(g[:, k])) / (g[:, k] @ np.conj(g[:, k])) * g[:, k]
d = np.concatenate((d, new_d.reshape((N, 1))), 1)
a1 = A_matrix @ g[:, k]
a2 = A_matrix @ d[:, k]
l_11 = deltaR[:, k] @ np.conj(deltaR[:, k])
l_12 = g[:, k] @
|
np.conj(g[:, k])
|
numpy.conj
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import json
import numpy as np
import datetime
import shutil
import py_gamma as pg
import glob
DEVEL=False
def myargsparse():
import argparse
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
thisprog=os.path.basename(sys.argv[0])
############################################################################
# define some strings for parsing and help
############################################################################
epilog=\
"""**********************************************************************************************************
\r* GAMMA S1 InSAR processor, v1.0, 2020-12-14, oc *
\r* Earth Big Data LLC AWS Cloud integration, 2020-01-13,jmk *
\r* Create json file summarizing files to create burst segments                                            *
\r* *
\r* Input and options: *
\r* 1) List of single burst SLCs (associated .par and .tops_par files assumed to exist) *
\r* 2) Output directory *
\r* *
\r* Output: *
\r* JSON file *
\r* Tabfiles *
\r* *
\r***********************************************************************************************************
\nEXAMPLES:
\n{thisprog} -z $PATH/slclist* -o /home/user/ -s 849.17 851.86
\n{thisprog} -p 54 -o s3://ebd-scratch/jpl_coherence/step2
""".format(thisprog=thisprog)
help_slclist=\
'''List of S1 burst SLCs (locally available)'''
help_outdir=\
'''Output directory '''
help_path='Sentinel-1 acquisition path'
help_indir='Root path to where Sentinel-1 relative orbits (paths) are stored.'
p = argparse.ArgumentParser(description=epilog,prog=thisprog,formatter_class=CustomFormatter)
p.add_argument("-i","--indir",required=False,help=help_indir,action='store',default='s3://ebd-scratch/jpl_coherence/step11')
p.add_argument("-o","--outdir",required=False,help=help_outdir,action='store',default='s3://ebd-scratch/jpl_coherence/step12')
p.add_argument("-p","--path",required=True,help=help_path,action='store',default=None)
p.add_argument("-z","--slclist",required=False,help=help_slclist,action='store',default=None,nargs='*')
p.add_argument("-profile","--profile",required=False,help="AWS profile with s3 access",action='store',default='default')
p.add_argument("-v","--verbose",required=False,help="Verbose output",action='store_true',default=False)
args=p.parse_args()
if not args.slclist and not args.path:
p.print_usage()
print('Need one of --path or --slclist')
sys.exit(1)
return args
#########################################################################
# Function to remove paths in tabfiles
def tabfile_remove_path(tabfilein,tabfileout):
tab = pg.read_tab(tabfilein, as_list = True, dtype = str, transpose = False)
tabout=tab.copy()
r=len(tab)
c=sum(1 for x in tab if isinstance(x, list))
if c>0:
for ri in range(r):
for ci in range(c):
l=tab[ri][ci]
tabout[ri][ci]=l.rsplit('/')[-1]
else:
for ri in range(r):
l=tab[ri]
tabout[ri]=l.rsplit('/')[-1]
pg.write_tab(tabout, tabfileout)
#########################################################################
# Function to add paths in tabfiles
def tabfile_add_path(tabfilein,tabfileout,fpath):
tab = pg.read_tab(tabfilein, as_list = True, dtype = str, transpose = False)
tabout=tab.copy()
fpath=fpath.rstrip('/')
r=len(tab)
c=sum(1 for x in tab if isinstance(x, list))
if c>0:
for ri in range(r):
for ci in range(c):
l=tab[ri][ci]
tabout[ri][ci]=fpath + '/' + l
else:
for ri in range(r):
l=tab[ri]
tabout[ri]=fpath + '/' + l
pg.write_tab(tabout, tabfileout)
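# Hypothetical usage of the two tabfile helpers above (filenames are illustrative):
#   tabfile_remove_path('20191031.SLC_tab', '20191031.SLC_tab.local')                # strip directories
#   tabfile_add_path('20191031.SLC_tab.local', '20191031.SLC_tab.shm', '/dev/shm')   # re-root entries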
#########################################################################
# Function to copy files listed in first tabfile with output filenames
# according to the second tabfile
def copytab(tabfilein,tabfileout):
tab1 = pg.read_tab(tabfilein, as_list = True, dtype = str, transpose = False)
tab2 = pg.read_tab(tabfileout, as_list = True, dtype = str, transpose = False)
r=len(tab1)
c=sum(1 for x in tab1 if isinstance(x, list))
if c>0:
for ri in range(r):
for ci in range(1,c):
l1=tab1[ri][ci]
l2=tab2[ri][ci]
shutil.copy(l1,l2)
else:
for ri in range(r):
l1=tab1[ri]
l2=tab2[ri]
shutil.copy(l1,l2)
def get_slclist(args):
indir = args.indir.rstrip(os.sep)+os.sep+str(args.path)
files = glob.glob(indir+'/*slc')
files = [x for x in files if x.endswith('.slc')]
return files
#########################################################################
def S1_segment(args):
# Start time
start = time.time()
tmpdir='/dev/shm'
##########################################################
# Define Input/Output filenames/processing parameters #
##########################################################
if args.slclist:
slclist = args.slclist # List of S1 zipfiles
else:
slclist = get_slclist(args)
outdir = args.outdir # Output directory
outdir=outdir.rstrip('/')
outdir=os.path.join(outdir,args.path) # Include path in outdir
if os.path.isdir(outdir) == False and not args.outdir.startswith('s3://'):
os.mkdir(outdir)
relorb=np.zeros(len(slclist), dtype=np.int64)
swath=np.zeros(len(slclist), dtype=np.int8)
pol=np.zeros(len(slclist), dtype=np.int8)
burstid=np.zeros(len(slclist))
acqdate=np.zeros(len(slclist), dtype=list)
polvec=['vv','vh','hv','hh']
# Obtain information from filename, e.g., 144_iw2_vh_2362.3802030_20191031.slc
for i,f in enumerate(slclist):
f=f.rstrip()
filename=f.rpartition('/')[-1]
relorb[i] = np.int32(filename.split('_')[0])
swath[i] = int(filename.split('_')[1][2])
if filename.split('_')[2] == 'vv':
pol[i] = 0
elif filename.split('_')[2] == 'vh':
pol[i] = 1
elif filename.split('_')[2] == 'hv':
pol[i] = 2
elif filename.split('_')[2] == 'hh':
pol[i] = 3
burstid[i] = float(filename.split('_')[3])/100.
acqdate[i] = filename.split('_')[4].split('.')[0]
# Unique values
relorbs=np.unique(relorb)
try:
relorbs=int(relorbs)
except Exception as e:
raise RuntimeError(e)
swaths=np.unique(swath)
pols=np.unique(pol)
# Per swath burstid offsets
boffset=np.zeros(len(swaths))
for sw in swaths:
burstid_swath=burstid[swath==sw]
boffset[sw-1]=np.median(burstid_swath-np.floor(burstid_swath))
for i in range(1,3):
if boffset[i]<boffset[i-1]: boffset[i]+=1
# max burstid per swath
burstidmax=np.floor(
|
np.max(burstid)
|
numpy.max
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 09:05:48 2017
@author: r.dewinter
"""
from JCS_LHSOptimizer import JCS_LHSOptimizer
from transformLHS import transformLHS
from simplexGauss import simplexGauss
from simplexKriging import simplexKriging
from predictorEGO import predictorEGO
from paretofrontFeasible import paretofrontFeasible
from optimizeSMSEGOcriterion import optimizeSMSEGOcriterion
from hypervolume import hypervolume
from findAllLocalOptimaNew3 import findAllLocalOptimaNew
from visualiseParetoFront import visualiseParetoFront
from RbfInter import trainCubicRBF
from RbfInter import adjustMargins
from functools import partial
import numpy as np
from scipy.special import ndtri
import os
import json
import copy
import time
import glob
import multiprocessing
def createKrigingModel(arguments):
parameters, objectives, evall, i = arguments
modell = simplexGauss(parameters[:evall,:], objectives[:evall,i])[0]
temp = predictorEGO(parameters[:evall,:], modell)[0]
modell = simplexGauss(parameters[:evall,:],temp,[1])[0]
return modell
def createExpKrigingModel(arguments):
parameters, objectives, evall, i = arguments
modell = simplexKriging(parameters[:evall,:], objectives[:evall,i])[0]
temp = predictorEGO(parameters[:evall,:], modell)[0]
modell = simplexKriging(parameters[:evall,:], temp, [1])[0]
return modell
def CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval=None, maxEval=None, smooth=None, runNo=0, epsilonInit=0.01, epsilonMax=0.02):
"""
based on:
1 Designing Ships using Constrained Multi-Objective Efficient Global Optimization
<NAME>, <NAME>, <NAME> and <NAME>
In the Fourth international conference of machinelearning optimization and data science (2018)
2 S-Metric Selection based Efficient Global Optimization (SMS-EGO) for
multi-objective optimization problems
Ponweiser, W.; <NAME>.; <NAME>.; <NAME>.: Multiobjective
Optimization on a Limited Amount of Evaluations Using Model-Assisted
S-Metric Selection. In: Proc. 10th Int'l Conf. Parallel Problem Solving
from Nature (PPSN X), 13.-17. September, Dortmund, <NAME>.; Jansen,
T.; <NAME>.; <NAME>.; <NAME>. (Eds.). No. 5199 in Lecture Notes
in Computer Science, Springer, Berlin, 2008, pp. 784-794.
ISBN 978-3-540-87699-1. doi: 10.1007/978-3-540-87700-4_78
3 Self-adjusting parameter control for surrogate-assisted constrained
optimization under limited budgets
<NAME>, <NAME>, <NAME>, <NAME>
ELSEVIER Applied Soft Computing 61 (2017) 377-393
4 <NAME>.; <NAME>.; <NAME>.; <NAME>.: On Expected-
Improvement Criteria for Model-Based Multi-Objective Optimization.
In: Proc. 11th Int'l. Conf. Parallel Problem Solving From Nature
(PPSN XI) - Part I, 11..-15. September, <NAME>, <NAME>.;
<NAME>.; <NAME>.; <NAME>. (Eds.). No. 6238 in Lecture Notes
in Computer Science, Springer, Berlin, 2010, pp. 718-727.
ISBN 978-3-642-15843-8. doi: 10.1007/978-3-642-15844-5_72
<NAME>.; <NAME>.; <NAME>.: Design and analysis of
'noisy' computer experiments. In: AIAA Journal, 44 (2006) 10,
pp. 2331-2339. doi: 10.2514/1.20068
call: CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints)
Input arguments
problemCall: function handle to the objective function (required)
rngMin: lower bound of the design space (dim)-np array (required)
rngMax: upper bound of the design space (dim)-np array (required)
ref: the maximum objective values interested in (required)
nconstraints: the number of constraints returned by the problemCall (required)
Optional input arguments:
initEval: number of initial evaluations, default=11*number of variables-1,
maxEval: maximum number of evaluations, default=40*number of variables,
smooth: smoothing function, 1=smoothing with exponential kernel, 2=gaussian kernel,
runNo: run number, controls the seed,
epsilonInit: the "allowed" constraint violation (since we are not 100%
confident in the constraint model), default=0.01,
epsilonMax: the maximum "allowed" constraint violation, default=0.02
"""
if problemCall is None or rngMin is None or rngMax is None or ref is None or nconstraints is None:
raise ValueError('SMSEGO requires at least five arguments (problemCall, rngMin, rngMax, ref, nconstraints)')
if smooth is None:
smooth = 2
nVar = len(rngMin)
if maxEval is None:
maxEval = 40*nVar
if initEval is None:
initEval = 11*nVar-1 #recommended, but has to be larger than n+1
EPS = np.array([epsilonInit]*nconstraints)
Cfeas = 0
Cinfeas = 0
print('Calculate initial sampling')
functionName = str(problemCall).split(' ')[1]
outdir = 'results/'+str(functionName)+'/'
if os.path.isdir(outdir) and glob.glob(outdir+'*_finalPF.csv'):
par_old, con_old, obj_old = include_previous_pareto(initEval, outdir, runNo)
paretoSize = len(par_old)
initEvalLHS = initEval - paretoSize
else:
paretoSize = 0
initEvalLHS = max(initEval, 2*nVar+1) #11*nVar recommended, but has to be larger than 2*nVar+1
np.random.seed(runNo)
if initEvalLHS < 5:
initEvalLHS = max(11*nVar-1 - paretoSize, 4)
bestLHS, _, _ = JCS_LHSOptimizer(initEvalLHS, nVar, 10000)
bestLHS = transformLHS(bestLHS, rngMin, rngMax)
print("evaluate initial sampling")
nObj = len(ref)
temp = np.zeros((initEvalLHS, nObj))
temp2 = np.zeros((initEvalLHS, nconstraints))
for i in range(initEvalLHS):
temp[i,:], temp2[i,:] = problemCall(bestLHS[i,:])
if paretoSize == 0:
parameters = np.empty((maxEval,nVar))
objectives = np.empty((maxEval, nObj))
constraints = np.empty((maxEval, nconstraints))
else:
parameters = np.append(par_old, np.empty((maxEval,nVar)), axis=0)
objectives = np.append(obj_old, np.empty((maxEval, nObj)), axis=0)
constraints = np.append(con_old, np.empty((maxEval, nconstraints)), axis=0)
parameters[paretoSize:,:] = np.nan
objectives[paretoSize:,:] = np.nan
constraints[paretoSize:,:] = np.nan
parameters[paretoSize:paretoSize+initEvalLHS,:] = bestLHS
objectives[paretoSize:paretoSize+initEvalLHS,:] = temp
constraints[paretoSize:paretoSize+initEvalLHS,:] = temp2
evall = initEvalLHS + paretoSize
maxEval = maxEval + paretoSize
hypervolumeProgress = np.empty((maxEval,2))
hypervolumeProgress[:] = np.nan
Z = -1
paretoOptimal = np.array([False]*(maxEval))
for i in range(evall):
paretoOptimal = np.array([False]*(maxEval))
paretoOptimal[:i] = paretofrontFeasible(objectives[:i,:],constraints[:i,:])
paretoFront = objectives[paretoOptimal]
hypervolumeProgress[i] = [hypervolume(paretoFront, ref),Z]
model = [ [] for i in range(nObj)]
if not os.path.isdir(outdir):
os.makedirs(outdir)
outputFileParameters = str(outdir)+'par_run'+str(runNo)+'.csv'
outputFileObjectives = str(outdir)+'obj_run'+str(runNo)+'.csv'
outputFileConstraints = str(outdir)+'con_run'+str(runNo)+'.csv'
np.savetxt(outputFileParameters, parameters[:evall], delimiter=',')
np.savetxt(outputFileObjectives, objectives[:evall], delimiter=',')
np.savetxt(outputFileConstraints, constraints[:evall], delimiter=',')
paretoOptimal[:evall] = paretofrontFeasible(objectives[:evall,:], constraints[:evall,:])
paretoFront = objectives[paretoOptimal,:]
paretoSet = parameters[paretoOptimal]
paretoConstraints = constraints[paretoOptimal,:]
visualiseParetoFront(paretoFront,save=False)
print(paretoFront)
print(paretoConstraints)
start = time.time()
while evall < maxEval:
iterationTime = time.time()
print('Compute model for each objective')
s=time.time()
model = [ [] for i in range(nObj)]
if smooth == 0:
raise ValueError("no smoothing, to be implemented")
elif smooth==1:
#smoothing using power exponential kernel with nugget
pool = multiprocessing.Pool(processes=nObj)
processs = [(copy.deepcopy(parameters), copy.deepcopy(objectives), evall, obji) for obji in range(nObj)]
model = pool.map(createExpKrigingModel, processs)
elif smooth==2:
#smoothing using gaussian kernel with nugget
pool = multiprocessing.Pool(processes=nObj)
processs = [(copy.deepcopy(parameters), copy.deepcopy(objectives), evall, obji) for obji in range(nObj)]
model = pool.map(createKrigingModel, processs)
print("Time to compute surrogate models ",time.time()-s)
print('Optimize infill criterion')
currentHV = hypervolume(paretoFront, ref)
hypervolumeProgress[evall] = [currentHV,Z]
nPF = sum(paretoOptimal)
if nPF < 2:
eps = np.zeros((1,nObj))
else:
maxima = np.array([max(col) for col in paretoFront.T])
minima = np.array([min(col) for col in paretoFront.T])
spread = maxima-minima
c = 1-(1/np.power(2,nObj))
eps = spread/(nPF+c*(maxEval-evall))
gain = -ndtri(0.5*(0.5**(1/float(nObj))))
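# eps shrinks the additive epsilon of the S-metric criterion as the front fills in,
# and gain is the lower-confidence-bound multiplier used by the SMS-EGO infill
# criterion (cf. refs. 2 and 4 in the docstring).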
criterion = partial(optimizeSMSEGOcriterion, model=copy.deepcopy(model),
ref=ref, paretoFront=paretoFront,
currentHV=currentHV, epsilon=
|
np.ndarray.flatten(eps)
|
numpy.ndarray.flatten
|
# Based on https://raw.githubusercontent.com/JiawangBian/SC-SfMLearner-Release/master/kitti_eval/kitti_odometry.py
import seaborn as sn
from glob import glob
from pathlib import Path
import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg') # No x-server (backend must be selected before pyplot is imported)
from matplotlib import pyplot as plt
import copy
# sns.set(style="whitegrid", rc={"font.size":8, "axes.titlesize":8, "axes.labelsize":5})
sn.set(style="whitegrid", font_scale=1.5)
sn.set_palette("bright", n_colors=4, color_codes=True)
class EvalOdom():
"""Evaluate odometry result
Usage example:
vo_eval = EvalOdom()
vo_eval.eval(gt_pose, pred_pose, result_pose_txt_dir)
"""
def __init__(self, isPartial=False, fps=5):
"""Instantiate an odometry evaluation class.
Args:
isPartial (bool, optional): Robotcar has partial sequences which have shorter trajectory length.
The errors are calculated wrt. distances. Defaults to False.
fps (int, optional): FPS of the ground-truth. Radar GT is 5 FPS. Defaults to 5.
"""
# partial sequences in the robotcar dataset are around 3km and the regular sequences are 9km
if isPartial:
self.lengths = [500, 1000, 1500, 2000, 2500, 3000]
else:
self.lengths = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000]
self.num_lengths = len(self.lengths)
self.step_size = fps # FPS
def trajectory_distances(self, poses):
"""Compute distance for each pose w.r.t frame-0
Args:
poses (dict): {idx: 4x4 array}
Returns:
dist (float list): distance of each pose w.r.t frame-0
"""
dist = [0]
sort_frame_idx = sorted(poses.keys())
for i in range(len(sort_frame_idx)-1):
cur_frame_idx = sort_frame_idx[i]
next_frame_idx = sort_frame_idx[i+1]
P1 = poses[cur_frame_idx]
P2 = poses[next_frame_idx]
dx = P1[0, 3] - P2[0, 3]
dy = P1[1, 3] - P2[1, 3]
dz = P1[2, 3] - P2[2, 3]
dist.append(dist[i]+np.sqrt(dx**2+dy**2+dz**2))
return dist
def rotation_error(self, pose_error):
"""Compute rotation error
Args:
pose_error (4x4 array): relative pose error
Returns:
rot_error (float): rotation error
"""
a = pose_error[0, 0]
b = pose_error[1, 1]
c = pose_error[2, 2]
d = 0.5*(a+b+c-1.0)
rot_error = np.arccos(max(min(d, 1.0), -1.0))
return rot_error
def translation_error(self, pose_error):
"""Compute translation error
Args:
pose_error (4x4 array): relative pose error
Returns:
trans_error (float): translation error
"""
dx = pose_error[0, 3]
dy = pose_error[1, 3]
dz = pose_error[2, 3]
trans_error = np.sqrt(dx**2+dy**2+dz**2)
return trans_error
def last_frame_from_segment_length(self, dist, first_frame, length):
"""Find frame (index) that away from the first_frame with
the required distance
Args:
dist (float list): distance of each pose w.r.t frame-0
first_frame (int): start-frame index
length (float): required distance
Returns:
i (int) / -1: end-frame index. if not found return -1
"""
for i in range(first_frame, len(dist), 1):
if dist[i] > (dist[first_frame] + length):
return i
return -1
def calc_sequence_errors(self, poses_gt, poses_result):
"""calculate sequence error
Args:
poses_gt (dict): {idx: 4x4 array}, ground truth poses
poses_result (dict): {idx: 4x4 array}, predicted poses
Returns:
err (list list): [first_frame, rotation error, translation error, length, speed]
- first_frame: first frame index
- rotation error: rotation error per length
- translation error: translation error per length
- length: evaluation trajectory length
- speed: car speed (#FIXME: 10FPS is assumed)
"""
err = []
dist = self.trajectory_distances(poses_gt)
for first_frame in range(0, len(poses_gt), self.step_size):
for i in range(self.num_lengths):
len_ = self.lengths[i]
last_frame = self.last_frame_from_segment_length(
dist, first_frame, len_
)
# Continue if sequence not long enough
if last_frame == -1 or \
not(last_frame in poses_result.keys()) or \
not(first_frame in poses_result.keys()):
continue
# compute rotational and translational errors
pose_delta_gt = np.dot(
np.linalg.inv(poses_gt[first_frame]),
poses_gt[last_frame]
)
pose_delta_result = np.dot(
np.linalg.inv(poses_result[first_frame]),
poses_result[last_frame]
)
pose_error = np.dot(
np.linalg.inv(pose_delta_result),
pose_delta_gt
)
r_err = self.rotation_error(pose_error)
t_err = self.translation_error(pose_error)
# compute speed
num_frames = last_frame - first_frame + 1.0
speed = len_/(0.1*num_frames)
err.append([first_frame, r_err/len_, t_err/len_, len_, speed])
return err
def save_sequence_errors(self, err, file_name):
"""Save sequence error
Args:
err (list list): error information
file_name (str): txt file for writing errors
"""
fp = open(file_name, 'w')
for i in err:
line_to_write = " ".join([str(j) for j in i])
fp.writelines(line_to_write+"\n")
fp.close()
def compute_overall_err(self, seq_err):
"""Compute average translation & rotation errors
Args:
seq_err (list list): [[r_err, t_err],[r_err, t_err],...]
- r_err (float): rotation error
- t_err (float): translation error
Returns:
ave_t_err (float): average translation error
ave_r_err (float): average rotation error
"""
t_err = 0
r_err = 0
seq_len = len(seq_err)
if seq_len > 0:
for item in seq_err:
r_err += item[1]
t_err += item[2]
ave_t_err = t_err / seq_len
ave_r_err = r_err / seq_len
return ave_t_err, ave_r_err
else:
return 0, 0
def plot_trajectory(self, poses_gt, poses_result, result_dir, plt_prefix):
"""Plot trajectory for both GT and prediction
Args:
poses_gt (dict): {idx: 4x4 array}; ground truth poses
poses_result (dict): {idx: 4x4 array}; predicted poses
seq (int): sequence index.
"""
plot_keys = ["Ground Truth", "Ours"]
fontsize_ = 20
poses_dict = {}
poses_dict["Ground Truth"] = poses_gt
poses_dict["Ours"] = poses_result
fig = plt.figure()
ax = plt.gca()
ax.set_aspect('equal')
for key in plot_keys:
pos_xz = []
frame_idx_list = sorted(poses_dict["Ours"].keys())
for frame_idx in frame_idx_list:
# pose = np.linalg.inv(poses_dict[key][frame_idx_list[0]]) @ poses_dict[key][frame_idx]
pose = poses_dict[key][frame_idx]
pos_xz.append([pose[0, 3], pose[1, 3]])
pos_xz = np.asarray(pos_xz)
plt.plot(pos_xz[:, 0], pos_xz[:, 1], label=key)
traj_txt = result_dir/(plt_prefix+key+'_trajectory.txt')
np.savetxt(traj_txt, pos_xz, delimiter=',')
plt.legend(prop={'size': fontsize_})
plt.xticks(fontsize=fontsize_)
plt.yticks(fontsize=fontsize_)
plt.xlabel('x (m)', fontsize=fontsize_)
plt.ylabel('y (m)', fontsize=fontsize_)
fig.set_size_inches(10, 10)
fig_pdf = result_dir/(plt_prefix+"trajectory.pdf")
fig_png = result_dir/(plt_prefix+"trajectory.png")
plt.savefig(fig_pdf, bbox_inches='tight', pad_inches=0)
plt.savefig(fig_png, bbox_inches='tight', pad_inches=0)
plt.close(fig)
# def plot_trajectory(self, pred, gt):
# gt_xyz = gt[:,:3,3]
# pred_xyz = pred[:,:3,3]
# fig, ax = plt.subplots(figsize=(8,8))
# sn.lineplot(x=pred_xyz[:,0], y=pred_xyz[:,1], sort=False, ax=ax, label='Ours')
# sn.lineplot(x=gt_xyz[:,0], y=gt_xyz[:,1], sort=False, ax=ax, label='Ground Truth')
# ax.set(xlabel='X (m)', ylabel='Y (m)')
# # Save fig
# plt.tight_layout()
# plt.savefig(str(Path(self.plot_path_dir)/'ro_pred_with_gt.pdf'), bbox_inches = 'tight', pad_inches = 0)
# plt.savefig(str(Path(self.plot_path_dir)/'ro_pred_with_gt.png'), bbox_inches = 'tight', pad_inches = 0)
# plt.close(fig)
def plot_error(self, avg_segment_errs, result_dir, plt_prefix):
"""Plot per-length error
Args:
avg_segment_errs (dict): {100:[avg_t_err, avg_r_err],...}
seq (int): sequence index.
"""
# Translation error
plot_y = []
plot_x = []
for len_ in self.lengths:
plot_x.append(len_)
if len(avg_segment_errs[len_]) > 0:
plot_y.append(avg_segment_errs[len_][0] * 100)
else:
plot_y.append(0)
fontsize_ = 10
fig = plt.figure()
plt.plot(plot_x, plot_y, "bs-", label="Translation Error")
plt.ylabel('Translation Error (%)', fontsize=fontsize_)
plt.xlabel('Sequence Length (m)', fontsize=fontsize_)
plt.legend(loc="upper right", prop={'size': fontsize_})
fig.set_size_inches(5, 5)
fig_pdf = result_dir/(plt_prefix+"trans_err.pdf")
plt.savefig(fig_pdf, bbox_inches='tight', pad_inches=0)
plt.close(fig)
# Rotation error
plot_y = []
plot_x = []
for len_ in self.lengths:
plot_x.append(len_)
if len(avg_segment_errs[len_]) > 0:
plot_y.append(avg_segment_errs[len_][1] / np.pi * 180 * 100)
else:
plot_y.append(0)
fontsize_ = 10
fig = plt.figure()
plt.plot(plot_x, plot_y, "bs-", label="Rotation Error")
plt.ylabel('Rotation Error (deg/100m)', fontsize=fontsize_)
plt.xlabel('Sequence Length (m)', fontsize=fontsize_)
plt.legend(loc="upper right", prop={'size': fontsize_})
fig.set_size_inches(5, 5)
fig_pdf = result_dir/(plt_prefix+"rot_err.pdf")
plt.savefig(fig_pdf, bbox_inches='tight', pad_inches=0)
plt.close(fig)
def compute_segment_error(self, seq_errs):
"""This function calculates average errors for different segment.
Args:
seq_errs (list list): list of errs; [first_frame, rotation error, translation error, length, speed]
- first_frame: first frame index
- rotation error: rotation error per length
- translation error: translation error per length
- length: evaluation trajectory length
- speed: car speed (#FIXME: 10FPS is assumed)
Returns:
avg_segment_errs (dict): {100:[avg_t_err, avg_r_err],...}
"""
segment_errs = {}
avg_segment_errs = {}
for len_ in self.lengths:
segment_errs[len_] = []
# Get errors
for err in seq_errs:
len_ = err[3]
t_err = err[2]
r_err = err[1]
segment_errs[len_].append([t_err, r_err])
# Compute average
for len_ in self.lengths:
if segment_errs[len_] != []:
avg_t_err = np.mean(np.asarray(segment_errs[len_])[:, 0])
avg_r_err = np.mean(np.asarray(segment_errs[len_])[:, 1])
avg_segment_errs[len_] = [avg_t_err, avg_r_err]
else:
avg_segment_errs[len_] = []
return avg_segment_errs
def compute_ATE(self, gt, pred):
"""Compute RMSE of ATE
Args:
gt (4x4 array dict): ground-truth poses
pred (4x4 array dict): predicted poses
"""
errors = []
idx_0 = list(pred.keys())[0]
gt_0 = gt[idx_0]
pred_0 = pred[idx_0]
for i in pred:
# cur_gt = np.linalg.inv(gt_0) @ gt[i]
cur_gt = gt[i]
gt_xyz = cur_gt[:3, 3]
# cur_pred = np.linalg.inv(pred_0) @ pred[i]
cur_pred = pred[i]
pred_xyz = cur_pred[:3, 3]
align_err = gt_xyz - pred_xyz
# print('i: ', i)
# print("gt: ", gt_xyz)
# print("pred: ", pred_xyz)
# input("debug")
errors.append(np.sqrt(np.sum(align_err ** 2)))
ate = np.sqrt(np.mean(np.asarray(errors) ** 2))
return ate
def compute_RPE(self, gt, pred):
"""Compute RPE
Args:
gt (4x4 array dict): ground-truth poses
pred (4x4 array dict): predicted poses
Returns:
rpe_trans
rpe_rot
"""
trans_errors = []
rot_errors = []
for i in list(pred.keys())[:-1]:
gt1 = gt[i]
gt2 = gt[i+1]
gt_rel = np.linalg.inv(gt1) @ gt2
pred1 = pred[i]
pred2 = pred[i+1]
pred_rel = np.linalg.inv(pred1) @ pred2
rel_err = np.linalg.inv(gt_rel) @ pred_rel
trans_errors.append(self.translation_error(rel_err))
rot_errors.append(self.rotation_error(rel_err))
# rpe_trans = np.sqrt(np.mean(np.asarray(trans_errors) ** 2))
# rpe_rot = np.sqrt(np.mean(np.asarray(rot_errors) ** 2))
rpe_trans = np.mean(
|
np.asarray(trans_errors)
|
numpy.asarray
|
#!/usr/bin/env python
# coding: utf-8
# In[27]:
import numpy as np
import matplotlib.pyplot as plt
from svg.path import parse_path
from svg.path.path import Line
from xml.dom import minidom
def line_splitter(start, end):
return (lambda t: (1-t)*start+t*end)
def cubic_bezier_converter(start, control1, control2, end):
original_data = np.array([start, control1, control2, end])
cubic_bezier_matrix = np.array([
[-1, 3, -3, 1],
[ 3, -6, 3, 0],
[-3, 3, 0, 0],
[ 1, 0, 0, 0]
])
return_data = cubic_bezier_matrix.dot(original_data)
return (lambda t: np.array([t**3, t**2, t, 1]).dot(return_data))
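# Quick sanity check of the converter above (hypothetical control points):
#   curve = cubic_bezier_converter(np.array([0., 0.]), np.array([0., 1.]),
#                                  np.array([1., 1.]), np.array([1., 0.]))
#   curve(0.0)  # -> array([0., 0.]) (start); curve(1.0) -> array([1., 0.]) (end)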
# Learned from
# https://stackoverflow.com/questions/36971363/how-to-interpolate-svg-path-into-a-pixel-coordinates-not-simply-raster-in-pyth
# In[4]:
doc = minidom.parse('B_sample.svg')
path_strings = [path.getAttribute('d') for path
in doc.getElementsByTagName('path')]
doc.unlink()
for path_string in path_strings:
path = parse_path(path_string)
for e in path:
if type(e).__name__ == 'Line':
x0 = e.start.real
y0 = e.start.imag
x1 = e.end.real
y1 = e.end.imag
print("(%.2f, %.2f) - (%.2f, %.2f)" % (x0, y0, x1, y1))
# In[59]:
block=0
n_dots=100
key=0
points_np=[]
path=parse_path(path_strings[block])
dat=path[key]
if type(path[key]).__name__=='CubicBezier':
start_np = np.array([dat.start.real, dat.start.imag])
control1_np = np.array([dat.control1.real, dat.control1.imag])
control2_np = np.array([dat.control2.real, dat.control2.imag])
end_np = np.array([dat.end.real, dat.end.imag])
converted_curve = cubic_bezier_converter(start_np, control1_np, control2_np, end_np)
#
diff_np=start_np-end_np
n_dots=int(np.round(np.linalg.norm(diff_np)))
#
points_np = np.array([converted_curve(t) for t in np.linspace(0, 1, n_dots)])
elif type(path[key]).__name__=='Line':
start_np = np.array([dat.start.real, dat.start.imag])
end_np = np.array([dat.end.real, dat.end.imag])
converted_line = line_splitter(start_np,end_np)
#
diff_np=start_np-end_np
n_dots=int(np.round(np.linalg.norm(diff_np)))
#
points_np=np.array([converted_line(t) for t in np.linspace(0, 1, n_dots)])
elif type(path[key]).__name__=='Move':
#
n_dots=1
#
start_np = np.array([dat.start.real, dat.start.imag])
end_np = np.array([dat.end.real, dat.end.imag])
points_np = np.array([start_np,end_np])
else:
points_np=np.array([])
# == plot the line==
## controls_np = np.array([start_np, control1_np, control2_np, end_np])
# curve segmentation
plt.plot(points_np[:, 0], points_np[:, 1], '.-')
# showing of control points
## plt.plot(controls_np[:,0], controls_np[:,1], 'o')
# line drawing
## plt.plot([start_np[0], control1_np[0]], [start_np[1], control1_np[1]], '-', lw=1)
## plt.plot([control2_np[0], end_np[0]], [control2_np[1], end_np[1]], '-', lw=1)
plt.show()
print(points_np)
# In[231]:
block=0
n_dots=100
key=0
points_np_all=[]
points_np_all=np.empty((len(path_strings)),dtype=object)
print(len(points_np_all))
#points_np_all[k]=np.array([])
for k in range(len(path_strings)):
#for path_string in path_strings:
path = parse_path(path_strings[k])
points_np_merge=np.empty((0,2), float)
#points_np_merge=np.empty(points_np_merge)
for dat in path:
#path=parse_path(path_strings[block])
#dat=path[key]
if type(dat).__name__=='CubicBezier':
start_np = np.array([dat.start.real, dat.start.imag])
control1_np = np.array([dat.control1.real, dat.control1.imag])
control2_np = np.array([dat.control2.real, dat.control2.imag])
end_np = np.array([dat.end.real, dat.end.imag])
converted_curve = cubic_bezier_converter(start_np, control1_np, control2_np, end_np)
#
diff_np=start_np-end_np
n_dots=int(np.round(np.linalg.norm(diff_np)))
#
points_np = np.array([converted_curve(t) for t in np.linspace(0, 1, n_dots)])
elif type(dat).__name__=='Line':
start_np = np.array([dat.start.real, dat.start.imag])
end_np =
|
np.array([dat.end.real, dat.end.imag])
|
numpy.array
|
import h5py
import numpy as np
np.set_printoptions(threshold=np.inf)
from shutil import copyfile
copyfile("dummy_lutnet.h5", "pretrained_bin.h5") # create pretrained.h5 using datastructure from dummy.h5
bl = h5py.File("baseline_pruned.h5", 'r')
#dummy = h5py.File("dummy.h5", 'r')
pretrained = h5py.File("pretrained_bin.h5", 'r+')
# conv layer 1
bl_w1 = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable_1:0"]
#bl_rand_map = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable_1:0"]
#pret_rand_map = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable:0"]
pret_w1[...] = np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# conv layer 2
bl_w1 = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
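# rand_map_0/1/2 below are random permutations over one tile of the LUT fan-in;
# the mask arithmetic that follows appears intended to re-enable, in the pruning
# mask, connections whose permuted counterpart was pruned (pruning_mask_recover
# maps the recovered entries back through the inverse permutation argsort(rand_map_0)).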
bl_w1_unroll = np.reshape(np.array(bl_w1), (-1,weight_shape[3]))
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.reshape(np.tile(np.reshape(rand_map_0,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
rand_map_1_expand = np.reshape(np.tile(np.reshape(rand_map_1,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
rand_map_2_expand = np.reshape(np.tile(np.reshape(rand_map_2,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
for i in range(weight_shape[0]*weight_shape[1]*weight_shape[2]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_0_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_1_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_2_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# conv layer 3
bl_w1 = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.reshape(np.array(bl_w1), (-1,weight_shape[3]))
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.reshape(np.tile(np.reshape(rand_map_0,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
rand_map_1_expand = np.reshape(np.tile(np.reshape(rand_map_1,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
rand_map_2_expand = np.reshape(np.tile(np.reshape(rand_map_2,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
for i in range(weight_shape[0]*weight_shape[1]*weight_shape[2]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_0_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_1_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_2_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# conv layer 4
bl_w1 = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.reshape(np.array(bl_w1), (-1,weight_shape[3]))
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0]*tile_shape[1]*tile_shape[2])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.reshape(np.tile(np.reshape(rand_map_0,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
rand_map_1_expand = np.reshape(np.tile(np.reshape(rand_map_1,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
rand_map_2_expand = np.reshape(np.tile(np.reshape(rand_map_2,[tile_shape[0],tile_shape[1],tile_shape[2]]),[weight_shape[0]//tile_shape[0],weight_shape[1]//tile_shape[1],weight_shape[2]//tile_shape[2]]), [-1])
for i in range(weight_shape[0]*weight_shape[1]*weight_shape[2]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_0_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_1_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[2]*(weight_shape[2]//tile_shape[2]-1)) * (rand_map_2_expand[i]//tile_shape[2]) + tile_shape[2]*(i%weight_shape[2]//tile_shape[2])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 =
|
np.reshape(bl_w1_rand_0, weight_shape)
|
numpy.reshape
|
import numpy as np
from macrel import AMP_features, AMP_predict
from macrel.main import data_file
from os import path
def test_predict():
fs = AMP_features.features('tests/peptides/expep.faa.gz')
fsp = AMP_predict.predict( data_file("models/AMP.pkl.gz"),
data_file("models/Hemo.pkl.gz"),
fs)
fsn = AMP_predict.predict( data_file("models/AMP.pkl.gz"),
data_file("models/Hemo.pkl.gz"),
fs, keep_negatives=True)
assert len(fsp) < len(fsn)
assert not np.all(fsn.is_AMP)
def test_predict_very_short():
fs = AMP_features.features(
path.join(path.dirname(__file__),
'data',
'very_short.faa'))
assert len(fs) == 2
fsn = AMP_predict.predict(data_file("models/AMP.pkl.gz"),
data_file("models/Hemo.pkl.gz"),
fs, keep_negatives=True)
assert not
|
np.any(fsn.is_AMP)
|
numpy.any
|
# from sampling_utils import *
import datetime
import pickle as pkl
import re
from collections import OrderedDict
import numpy as np
import pandas as pd
import patsy as pt
import pymc3 as pm
import scipy as sp
import theano
import theano.tensor as tt
# BUG: may throw an error for flat RVs
theano.config.compute_test_value = "off"
class SpatioTemporalFeature(object):
def __init__(self):
self._call_ = np.frompyfunc(self.call, 2, 1)
def __call__(self, times, locations):
_times = [pd.Timestamp(d) for d in times]
return self._call_(
np.asarray(_times).reshape((-1, 1)), np.asarray(locations).reshape((1, -1))
).astype(np.float32)
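# Note: __call__ broadcasts self.call over the outer product of `times` and
# `locations` via np.frompyfunc, yielding a (len(times), len(locations)) float32
# design-matrix block.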
class SpatioTemporalYearlyDemographicsFeature(SpatioTemporalFeature):
""" TODO:
* county data must be updated to include 2019/2020 demographic data
|> fix call
"""
def __init__(self, county_dict, group, scale=1.0):
self.dict = {
(year, county): val * scale
for county, values in county_dict.items()
for (g, year), val in values["demographics"].items()
if g == group
}
super().__init__()
def call(self, yearweekday, county):
# TODO: do this properly when data is available!
return self.dict.get((2018, county))
# return self.dict.get((yearweekday.year,county))
class SpatialEastWestFeature(SpatioTemporalFeature):
def __init__(self, county_dict):
self.dict = {
county: 1.0
if "east" in values["region"]
else (0.5 if "berlin" in values["region"] else 0.0)
for county, values in county_dict.items()
}
super().__init__()
def call(self, yearweekday, county):
return self.dict.get(county)
class TemporalFourierFeature(SpatioTemporalFeature):
def __init__(self, i, t0, scale):
self.t0 = t0
self.scale = scale
self.τ = (i // 2 + 1) * 2 * np.pi
self.fun = np.sin if (i % 2) == 0 else np.cos
super().__init__()
def call(self, t, x):
return self.fun((t - self.t0) / self.scale * self.τ)
class TemporalPeriodicPolynomialFeature(SpatioTemporalFeature):
def __init__(self, t0, period, order):
self.t0 = t0
self.period = period
self.order = order
super().__init__()
def call(self, t, x):
tdelta = (t - self.t0).days % self.period
return (tdelta / self.period) ** self.order
class TemporalSigmoidFeature(SpatioTemporalFeature):
def __init__(self, t0, scale):
self.t0 = t0
self.scale = scale
super().__init__()
def call(self, t, x):
t_delta = (t - self.t0) / self.scale
return sp.special.expit(t_delta.days + (t_delta.seconds / (3600 * 24)))
class TemporalPolynomialFeature(SpatioTemporalFeature):
def __init__(self, t0, tmax, order):
self.t0 = t0
self.order = order
self.scale = (tmax - t0).days
super().__init__()
def call(self, t, x):
t_delta = (t - self.t0).days / self.scale
return t_delta ** self.order
class ReportDelayPolynomialFeature(SpatioTemporalFeature):
def __init__(self, t0, t_max, order):
self.t0 = t0
self.order = order
self.scale = (t_max - t0).days
super().__init__()
def call(self, t, x):
_t = 0 if t <= self.t0 else (t - self.t0).days / self.scale
return _t ** self.order
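# Illustrative usage sketch (not part of the original module): evaluating one of the
# feature objects above on a small day/county grid via SpatioTemporalFeature.__call__.
# The county ids below are hypothetical placeholders.
if __name__ == "__main__":
    _t0 = pd.Timestamp("2020-01-28")
    _feature = TemporalPolynomialFeature(_t0, _t0 + pd.Timedelta(days=21), 2)
    _days = pd.date_range(_t0, periods=3)
    _counties = ["09162", "09184"]
    print(_feature(_days, _counties))  # float32 matrix of shape (len(_days), len(_counties))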
class IAEffectLoader(object):
generates_stats = False
def __init__(self, var, filenames, days, counties, predict_for=None):
self.vars = [var]
self.samples = []
i = 0
for filename in filenames:
try:
with open(filename, "rb") as f:
tmp = pkl.load(f)
except FileNotFoundError:
print("Warning: File {} not found!".format(filename))
pass
except Exception as e:
print(e)
else:
m = tmp["ia_effects"]
ds = list(tmp["predicted day"])
cs = list(tmp["predicted county"])
d_idx = np.array([ds.index(d) for d in days]).reshape((-1, 1))
print(i)
i = i + 1
print("Days")
print(days)
print("ds")
print(ds)
for d in days:
print(d)
print(ds.index(d))
c_idx = np.array([cs.index(c) for c in counties])
# Simulate linear IA effects if predicting the future
if predict_for is not None:
d1 = [ds.index(d) for d in days]
d2 = list(range(d1[-1], d1[-1] + len(predict_for)))
n_days_pred = len(d2)
# Repeat ia_effects for last day.
last = m[-1, :, :]
last = np.tile(last, (n_days_pred, 1, 1))
m = np.concatenate((m, last), axis=0)
# Update d_idx.
d_idx = np.array(d1 + d2).reshape(-1, 1)
self.samples.append(
np.moveaxis(m[d_idx, c_idx, :], -1, 0).reshape((m.shape[-1], -1)).T
)
def step(self, point):
new = point.copy()
# res = new[self.vars[0].name]
new_res = self.samples[np.random.choice(len(self.samples))]
new[self.vars[0].name] = new_res
# random choice; but block structure <-- this must have "design matrix" shape/content
return new
def stop_tuning(self, *args):
pass
@property
def vars_shape_dtype(self):
shape_dtypes = {}
for var in self.vars:
dtype = np.dtype(var.dtype)
shape = var.dshape
shape_dtypes[var.name] = (shape, dtype)
return shape_dtypes
class BaseModel(object):
"""
Model for disease prediction.
The model has 4 types of features (predictor variables):
* temporal (functions of time)
* spatial (functions of space, i.e. longitude, latitude)
* county_specific (functions of both time and county, e.g. demographic covariates)
* interaction effects (functions of distance in time and space relative to each datapoint)
"""
def __init__(
self,
trange,
counties,
ia_effect_filenames,
model=None,
num_ia=16,
include_ia=True,
include_report_delay=True,
report_delay_order=4,
include_demographics=True,
include_temporal=True,
trend_poly_order=4,
include_periodic=True,
periodic_poly_order=4,
orthogonalize=False,
):
self.county_info = counties
self.ia_effect_filenames = ia_effect_filenames
self.num_ia = num_ia if include_ia else 0
self.include_ia = include_ia
self.include_report_delay = include_report_delay
self.report_delay_order = report_delay_order
self.include_demographics = include_demographics
self.include_temporal = include_temporal
self.trend_poly_order = trend_poly_order
self.include_periodic = include_periodic
self.periodic_poly_order = periodic_poly_order
self.trange = trange  # trange[0] -> 28th of Jan; trange[1] -> last day
self.features = {
"temporal_trend": {
"temporal_polynomial_{}".format(i): TemporalPolynomialFeature(
trange[0], trange[1], i
)
for i in range(self.trend_poly_order + 1)
}
if self.include_temporal
else {},
"temporal_seasonal": {
"temporal_periodic_polynomial_{}".format(
i
): TemporalPeriodicPolynomialFeature(trange[0], 7, i)
for i in range(self.periodic_poly_order + 1)
}
if self.include_periodic
else {},
"spatiotemporal": {
"demographic_{}".format(group): SpatioTemporalYearlyDemographicsFeature(
self.county_info, group
)
for group in ["[0-5)", "[5-20)", "[20-65)"]
}
if self.include_demographics
else {},
"temporal_report_delay": {
"report_delay": ReportDelayPolynomialFeature(
trange[1] - pd.Timedelta(days=5), trange[1], self.report_delay_order
)
}
if self.include_report_delay
else {}, # what is going in here?
"exposure": {
"exposure": SpatioTemporalYearlyDemographicsFeature(
self.county_info, "total", 1.0 / 100000
)
},
}
def evaluate_features(self, days, counties):
all_features = {}
for group_name, features in self.features.items():
group_features = {}
for feature_name, feature in features.items():
feature_matrix = feature(days, counties)
group_features[feature_name] = pd.DataFrame(
feature_matrix[:, :], index=days, columns=counties
).stack()
all_features[group_name] = (
pd.DataFrame(
[], index=pd.MultiIndex.from_product([days, counties]), columns=[]
)
if len(group_features) == 0
else pd.DataFrame(group_features)
)
return all_features
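# Output sketch (illustration, not original code): each entry of the returned dict is a
# DataFrame indexed by the (day, county) product with one column per feature, e.g.
#   features["temporal_trend"].loc[(day, county), "temporal_polynomial_2"]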
def init_model(self, target):
days, counties = target.index, target.columns
# extract features
features = self.evaluate_features(days, counties)
Y_obs = target.stack().values.astype(np.float32)
T_S = features["temporal_seasonal"].values.astype(np.float32)
T_T = features["temporal_trend"].values.astype(np.float32)
T_D = features["temporal_report_delay"].values.astype(np.float32)
TS = features["spatiotemporal"].values.astype(np.float32)
log_exposure = np.log(features["exposure"].values.astype(np.float32).ravel())
# extract dimensions
num_obs = np.prod(target.shape)
num_t_s = T_S.shape[1]
num_t_t = T_T.shape[1]
num_t_d = T_D.shape[1]
num_ts = TS.shape[1]
num_counties = len(counties)
if self.include_ia:
with pm.Model() as self.model:
# interaction effects are generated externally -> flat prior
IA = pm.Flat(
"IA",
testval=np.ones((num_obs, self.num_ia)),
shape=(num_obs, self.num_ia),
)
# priors
# NOTE: Vary parameters over time -> W_ia dependent on time
# δ = 1/√α
δ = pm.HalfCauchy("δ", 10, testval=1.0)
α = pm.Deterministic("α", np.float32(1.0) / δ)
W_ia = pm.Normal(
"W_ia",
mu=0,
sd=10,
testval=np.zeros(self.num_ia),
shape=self.num_ia,
)
W_t_s = pm.Normal(
"W_t_s", mu=0, sd=10, testval=np.zeros(num_t_s), shape=num_t_s
)
W_t_t = pm.Normal(
"W_t_t",
mu=0,
sd=10,
testval=np.zeros((num_counties, num_t_t)),
shape=(num_counties, num_t_t),
)
W_t_d = pm.Normal(
"W_t_d", mu=0, sd=10, testval=np.zeros(num_t_d), shape=num_t_d
)
W_ts = pm.Normal(
"W_ts", mu=0, sd=10, testval=np.zeros(num_ts), shape=num_ts
)
self.param_names = ["δ", "W_ia", "W_t_s", "W_t_t", "W_t_d", "W_ts"]
self.params = [δ, W_ia, W_t_s, W_t_t, W_t_d, W_ts]
expanded_Wtt = tt.tile(
W_t_t.reshape(shape=(1, num_counties, -1)), reps=(21, 1, 1)
)
expanded_TT = np.reshape(T_T, newshape=(21, 412, 2))
result_TT = tt.flatten(tt.sum(expanded_TT * expanded_Wtt, axis=-1))
# calculate mean rates
μ = pm.Deterministic(
"μ",
tt.exp(
tt.dot(IA, W_ia)
+ tt.dot(T_S, W_t_s)
+ result_TT
+ tt.dot(T_D, W_t_d)
+ tt.dot(TS, W_ts)
+ log_exposure
),
)
# constrain to observations
pm.NegativeBinomial("Y", mu=μ, alpha=α, observed=Y_obs)
else:
# doesn't include IA
with pm.Model() as self.model:
# priors
# δ = 1/√α
δ = pm.HalfCauchy("δ", 10, testval=1.0)
α = pm.Deterministic("α", np.float32(1.0) / δ)
W_t_s = pm.Normal(
"W_t_s", mu=0, sd=10, testval=np.zeros(num_t_s), shape=num_t_s
)
W_t_t = pm.Normal(
"W_t_t",
mu=0,
sd=10,
testval=np.zeros((num_counties, num_t_t)),
shape=(num_counties, num_t_t),
)
W_t_d = pm.Normal(
"W_t_d", mu=0, sd=10, testval=np.zeros(num_t_d), shape=num_t_d
)
W_ts = pm.Normal(
"W_ts", mu=0, sd=10, testval=np.zeros(num_ts), shape=num_ts
)
self.param_names = ["δ", "W_t_s", "W_t_t", "W_t_d", "W_ts"]
self.params = [δ, W_t_s, W_t_t, W_t_d, W_ts]
expanded_Wtt = tt.tile(
W_t_t.reshape(shape=(1, num_counties, -1)), reps=(21, 1, 1)
)
expanded_TT = np.reshape(T_T, newshape=(21, 412, 2))
result_TT = tt.flatten(tt.sum(expanded_TT * expanded_Wtt, axis=-1))
# calculate mean rates
μ = pm.Deterministic(
"μ",
tt.exp(
tt.dot(T_S, W_t_s)
+ result_TT
+ tt.dot(T_D, W_t_d)
+ tt.dot(TS, W_ts)
+ log_exposure
),
)
# constrain to observations
pm.NegativeBinomial("Y", mu=μ, alpha=α, observed=Y_obs)
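# Model summary (sketch of what init_model assembles above, not additional code):
#   μ[d, c] = exp( IA·W_ia + T_S·W_t_s + Σ_k T_T[d, c, k] * W_t_t[c, k]
#                  + T_D·W_t_d + TS·W_ts + log(exposure[d, c]) )
#   Y[d, c] ~ NegativeBinomial(μ[d, c], α),  with α = 1 / δ
# where the IA term is dropped when include_ia is False.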
def sample_parameters(
self,
target,
n_init=100,
samples=1000,
chains=None,
cores=8,
init="advi",
target_accept=0.8,
max_treedepth=10,
**kwargs
):
"""
sample_parameters(target, n_init=100, samples=1000, chains=None, cores=8, init="advi", target_accept=0.8, max_treedepth=10, **kwargs)
Samples from the posterior parameter distribution, given a training dataset.
The basis functions are designed to be causal, i.e. only data points strictly
predating the predicted time points are used (this implies "one-step-ahead"-predictions).
"""
self.init_model(target)
if chains is None:
chains = max(2, cores)
if self.include_ia:
with self.model:
# run!
ia_effect_loader = IAEffectLoader(
self.model.IA,
self.ia_effect_filenames,
target.index,
target.columns,
)
nuts = pm.step_methods.NUTS(
vars=self.params,
target_accept=target_accept,
max_treedepth=max_treedepth,
)
steps = [ia_effect_loader, nuts]
trace = pm.sample(
samples,
steps,
chains=chains,
cores=cores,
compute_convergence_checks=False,
**kwargs
)
else:
with self.model:
# run!
nuts = pm.step_methods.NUTS(
vars=self.params,
target_accept=target_accept,
max_treedepth=max_treedepth,
)
trace = pm.sample(
samples,
nuts,
chains=chains,
cores=cores,
compute_convergence_checks=False,
**kwargs
)
return trace
def sample_predictions(
self,
target_days,
target_counties,
parameters,
prediction_days,
average_periodic_feature=False,
average_all=False,
init="auto",
):
all_days = pd.DatetimeIndex(
[d for d in target_days] + [d for d in prediction_days]
)
# extract features
features = self.evaluate_features(all_days, target_counties)
# num_counties = 412 #hardcoded; not needed?
T_S = features["temporal_seasonal"].values
T_T = features["temporal_trend"].values
T_D = features["temporal_report_delay"].values
TS = features["spatiotemporal"].values
log_exposure = np.log(features["exposure"].values.ravel())
if average_periodic_feature:
T_S = np.reshape(T_S, newshape=(-1, 412, 5))
mean = np.mean(T_S, axis=0, keepdims=True)
T_S = np.reshape(np.tile(mean, reps=(T_S.shape[0], 1, 1)), (-1, 5))
if average_all:
T_S = np.reshape(T_S, newshape=(31, 412, -1))
"""
An implementation of the REST API exposed by D-Wave Solver API (SAPI) servers.
This API lets you submit an Ising model and receive samples from a distribution over the model
as defined by the solver you have selected.
- The SAPI servers provide authentication, queuing, and scheduling services, and
provide a network interface to the solvers.
- A solver is a resource that can sample from a discrete quadratic model.
- This package implements the REST interface these servers provide.
An example using the client:
.. code-block:: python
:linenos:
import dwave_micro_client
import random
# Connect using explicit connection information
conn = dwave_micro_client.Connection('https://sapi-url', 'token-string')
# Load a solver by name
solver = conn.get_solver('test-solver')
# Build a random Ising model on +1, -1. Build it to exactly fit the graph the solver provides
linear = {index: random.choice([-1, 1]) for index in solver.nodes}
quad = {key: random.choice([-1, 1]) for key in solver.undirected_edges}
# Send the problem for sampling, include a solver specific parameter 'num_reads'
results = solver.sample_ising(linear, quad, num_reads=100)
# Print out the first sample
print(results.samples[0])
Rough workflow within the SAPI server:
1. Submitted problems enter an input queue. Each user has an input queue per solver.
2. Drawing from all input queues for a solver, problems are scheduled.
3. Results of the server are cached for retrieval by the client.
By default all sampling requests will be processed asynchronously. Reading results from
any future object is a blocking operation.
.. code-block:: python
:linenos:
# We can submit several sample requests without blocking
# (In this specific case we could accomplish the same thing by increasing 'num_reads')
futures = [solver.sample_ising(linear, quad, num_reads=100) for _ in range(10)]
# We can check if a set of samples are ready without blocking
print(futures[0].done())
# We can wait on a single future
futures[0].wait()
# Or we can wait on several futures
dwave_micro_client.Future.wait_multiple(futures)
"""
# TODOS:
# - More testing for sample_qubo
from __future__ import division, absolute_import
import json
import threading
import base64
import struct
import time
import sys
import os
import posixpath
import types
import logging
import requests
import collections
import datetime
import six
import six.moves.queue as queue
import six.moves
range = six.moves.range
# Get the logger using the recommended name
log = logging.getLogger(__name__)
# log.setLevel(logging.DEBUG)
# log.addHandler(logging.StreamHandler(sys.stdout))
# Use numpy if available for fast decoding
try:
import numpy as np
_numpy = True
except ImportError:
# If numpy isn't available we can do the encoding slower in native python
_numpy = False
class SolverFailureError(Exception):
"""An exception raised when there is a remote failure calling a solver."""
pass
class SolverAuthenticationError(Exception):
"""An exception raised when there is an authentication error."""
def __init__(self):
super(SolverAuthenticationError, self).__init__("Token not accepted for that action.")
class CanceledFutureError(Exception):
"""An exception raised when code tries to read from a canceled future."""
def __init__(self):
super(CanceledFutureError, self).__init__("An error occurred reading results from a canceled request")
class Connection:
"""
Connect to a SAPI server to expose the solvers that the server advertises.
Args:
url (str): URL of the SAPI server.
token (str): Authentication token from the SAPI server.
proxies (dict): Mapping from the connection scheme (http[s]) to the proxy server address.
permissive_ssl (boolean; false by default): Disables SSL verification.
"""
# The status flags that a problem can have
STATUS_IN_PROGRESS = 'IN_PROGRESS'
STATUS_PENDING = 'PENDING'
STATUS_COMPLETE = 'COMPLETED'
STATUS_FAILED = 'FAILED'
STATUS_CANCELLED = 'CANCELLED'
# Cases when multiple status flags qualify
ANY_STATUS_ONGOING = [STATUS_IN_PROGRESS, STATUS_PENDING]
ANY_STATUS_NO_RESULT = [STATUS_FAILED, STATUS_CANCELLED]
# Number of problems to include in a status query
_STATUS_QUERY_SIZE = 100
# Number of worker threads for each problem processing task
_SUBMISSION_THREAD_COUNT = 5
_CANCEL_THREAD_COUNT = 1
_POLL_THREAD_COUNT = 2
_LOAD_THREAD_COUNT = 5
def __init__(self, url=None, token=None, proxies=None, permissive_ssl=False):
"""To setup the connection a pipeline of queues/workers is costructed.
There are five interations with the server the connection manages:
1. Downloading solver information.
2. Submitting problem data.
3. Polling problem status.
4. Downloading problem results.
5. Canceling problems
Loading solver information is done syncronously. The other four tasks are
performed by asyncronous workers. For 2, 3, and 5 the workers gather
togeather tasks into in batches.
"""
# Use configuration from parameters passed, if parts are
# missing, try the configuration function
self.default_solver = None
if token is None:
url, token, proxies, self.default_solver = load_configuration(url)
log.debug("Creating a connection to SAPI server: %s", url)
self.base_url = url
self.token = token
# Create a :mod:`requests` session. `requests` will manage our url parsing, https, etc.
self.session = requests.Session()
self.session.headers.update({'X-Auth-Token': self.token})
self.session.proxies = proxies
if permissive_ssl:
self.session.verify = False
# Build the problem submission queue, start its workers
self._submission_queue = queue.Queue()
self._submission_workers = []
for _ in range(self._SUBMISSION_THREAD_COUNT):
worker = threading.Thread(target=self._do_submit_problems)
worker.daemon = True
worker.start()
# Build the cancel problem queue, start its workers
self._cancel_queue = queue.Queue()
self._cancel_workers = []
for _ in range(self._CANCEL_THREAD_COUNT):
worker = threading.Thread(target=self._do_cancel_problems)
worker.daemon = True
worker.start()
# Build the problem status polling queue, start its workers
self._poll_queue = queue.Queue()
self._poll_workers = []
for _ in range(self._POLL_THREAD_COUNT):
worker = threading.Thread(target=self._do_poll_problems)
worker.daemon = True
worker.start()
# Build the result loading queue, start its workers
self._load_queue = queue.Queue()
self._load_workers = []
for _ in range(self._LOAD_THREAD_COUNT):
worker = threading.Thread(target=self._do_load_results)
worker.daemon = True
worker.start()
# Prepare an empty set of solvers
self.solvers = {}
self._solvers_lock = threading.RLock()
self._all_solvers_ready = False
# Set the parameters for requests; disable SSL verification if needed
self._request_parameters = {}
if permissive_ssl:
self._request_parameters['verify'] = False
def close(self):
"""Perform a clean shutdown.
Wait for all the currently scheduled work to finish, kill the workers,
and close the connection pool. Assumes no one is submitting more work
while the connection is closing.
"""
# Finish all the work that requires the connection
log.debug("Joining submission queue")
self._submission_queue.join()
log.debug("Joining cancel queue")
self._cancel_queue.join()
log.debug("Joining poll queue")
self._poll_queue.join()
log.debug("Joining load queue")
self._load_queue.join()
# Kill off the worker threads (which should now be blocked on their empty queues)
[worker.kill() for worker in self._submission_workers]
[worker.kill() for worker in self._cancel_workers]
[worker.kill() for worker in self._poll_workers]
[worker.kill() for worker in self._load_workers]
# Close the connection pool
self.session.close()
def __enter__(self):
"""Let connections be used in with blocks."""
return self
def __exit__(self, *args):
"""At the end of a with block perform a clean shutdown of the connection."""
self.close()
return False
def solver_names(self):
"""List all the solvers this connection can provide, and load the data about the solvers.
To get all solver data: ``GET /solvers/remote/``
Returns:
list of str
"""
with self._solvers_lock:
if self._all_solvers_ready:
return self.solvers.keys()
log.debug("Requesting list of all solver data.")
response = self.session.get(posixpath.join(self.base_url, 'solvers/remote/'))
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
log.debug("Received list of all solver data.")
data = response.json()
for solver in data:
log.debug("Found solver: %s", solver['id'])
self.solvers[solver['id']] = Solver(self, solver)
self._all_solvers_ready = True
return self.solvers.keys()
def get_solver(self, name=None):
"""Load the configuration for a single solver.
To get specific solver data: ``GET /solvers/remote/{solver_name}/``
Args:
name (str): Id of the requested solver. None will return the default solver.
Returns:
:obj:`Solver`
"""
log.debug("Looking for solver: %s", name)
if name is None:
if self.default_solver is not None:
name = self.default_solver
else:
raise ValueError("No name or default name provided when loading solver.")
with self._solvers_lock:
if name not in self.solvers:
if self._all_solvers_ready:
raise KeyError(name)
response = self.session.get(posixpath.join(self.base_url, 'solvers/remote/{}/'.format(name)))
if response.status_code == 401:
raise SolverAuthenticationError()
if response.status_code == 404:
raise KeyError("No solver with the name {} was available".format(name))
response.raise_for_status()
data = json.loads(response.text)
self.solvers[data['id']] = Solver(self, data)
return self.solvers[name]
def _submit(self, body, future):
"""Enqueue a problem for submission to the server.
This method is thread safe.
"""
self._submission_queue.put(self._submit.Message(body, future))
_submit.Message = collections.namedtuple('Message', ['body', 'future'])
def _do_submit_problems(self):
"""Pull problems from the submission queue and submit them.
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Pull as many problems as we can, block on the first one,
# but once we have one problem, switch to non-blocking then
# submit without blocking again.
ready_problems = [self._submission_queue.get()]
while True:
try:
ready_problems.append(self._submission_queue.get_nowait())
except queue.Empty:
break
# Submit the problems
log.debug("submitting {} problems".format(len(ready_problems)))
body = '[' + ','.join(mess.body for mess in ready_problems) + ']'
try:
response = self.session.post(posixpath.join(self.base_url, 'problems/'), body)
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
log.debug("Finished submitting {} problems".format(len(ready_problems)))
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for mess in ready_problems:
mess.future._set_error(exception, sys.exc_info())
self._submission_queue.task_done()
continue
# Pass on the information
for submission, res in zip(ready_problems, message):
self._handle_problem_status(res, submission.future, False)
self._submission_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except BaseException as err:
log.exception(err)
def _handle_problem_status(self, message, future, in_poll):
"""Handle the results of a problem submission or results request.
This method checks the status of the problem and puts it in the correct queue.
Args:
message (dict): Update message from the SAPI server wrt. this problem.
future (`Future`): the future corresponding to the problem
in_poll (bool): Flag set to true if the problem is in the poll loop already.
Returns:
true if the problem has been processed out of the status poll loop
Note:
This method is always run inside of a daemon thread.
"""
try:
status = message['status']
log.debug("Status: %s %s", message['id'], status)
# The future may not have the ID set yet
with future._single_cancel_lock:
# This handles the case where cancel has been called on a future
# before that future received the problem id
if future._cancel_requested:
if not future._cancel_sent and status == self.STATUS_PENDING:
# The problem has been canceled but the status says it's still in the queue
# try to cancel it
self._cancel(message['id'], future)
# If a cancel request could meaningfully be sent it has been now
future._cancel_sent = True
# Set the id field in the future
future.id = message['id']
future.remote_status = status
if future.time_received is None and message.get('submitted_on') is not None:
# Assumes the server reports ISO-8601 timestamps; the exact format string is an assumption here.
future.time_received = datetime.datetime.strptime(message['submitted_on'], "%Y-%m-%dT%H:%M:%S.%fZ")
if future.time_solved is None and message.get('solved_on') is not None:
future.time_solved = datetime.datetime.strptime(message['solved_on'], "%Y-%m-%dT%H:%M:%S.%fZ")
if status == self.STATUS_COMPLETE:
# If the message is complete, forward it to the future object
if 'answer' in message:
future._set_message(message)
# If the problem is complete, but we don't have the result data
# put the problem in the queue for loading results.
else:
self._load(future)
elif status in self.ANY_STATUS_ONGOING:
# If the response is pending add it to the queue.
if not in_poll:
self._poll(future)
return False
elif status == self.STATUS_CANCELLED:
# If canceled return error
future._set_error(CanceledFutureError())
else:
# Return an error to the future object
future._set_error(SolverFailureError(message.get('error_message', 'An unknown error has occurred.')))
except Exception as error:
# If there were any unhandled errors we need to release the
# lock in the future, otherwise deadlock occurs.
future._set_error(error, sys.exc_info())
return True
def _cancel(self, id_, future):
"""Enqueue a problem to be canceled.
This method is thread safe.
"""
self._cancel_queue.put((id_, future))
def _do_cancel_problems(self):
"""Pull ids from the cancel queue and submit them.
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Pull as many problems as we can, block when none are available.
item_list = [self._cancel_queue.get()]
while True:
try:
item_list.append(self._cancel_queue.get_nowait())
except queue.Empty:
break
# Submit the problems, attach the ids as a json list in the
# body of the delete query.
try:
body = [item[0] for item in item_list]
self.session.delete(posixpath.join(self.base_url, 'problems/'), json=body)
except Exception as err:
for _, future in item_list:
if future is not None:
future._set_error(err, sys.exc_info())
# Mark all the ids as processed regardless of success or failure.
[self._cancel_queue.task_done() for _ in item_list]
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
log.exception(err)
def _poll(self, future):
"""Enqueue a problem to poll the server for status.
This method is threadsafe.
"""
self._poll_queue.put(future)
def _do_poll_problems(self):
"""Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread.
"""
try:
# Maintain an active group of queries
futures = {}
active_queries = set()
# Add a query to the active queries
def add(ftr):
if ftr.id not in futures and not ftr.done():
active_queries.add(ftr.id)
futures[ftr.id] = ftr
else:
self._poll_queue.task_done()
# Remove a query from the active set
def remove(id_):
del futures[id_]
active_queries.remove(id_)
self._poll_queue.task_done()
while True:
try:
# If we have no active queries, wait on the status queue
while len(active_queries) == 0:
add(self._poll_queue.get())
# Once there are any active queries, try to fill up the set and move on
while len(active_queries) < self._STATUS_QUERY_SIZE:
add(self._poll_queue.get_nowait())
except queue.Empty:
pass
# Build a query string with block of ids
log.debug("Query on futures: %s", ', '.join(active_queries))
query_string = 'problems/?id=' + ','.join(active_queries)
try:
response = self.session.get(posixpath.join(self.base_url, query_string))
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for id_ in list(active_queries):
futures[id_]._set_error(IOError(exception), sys.exc_info())
remove(id_)
continue
# If problems are removed from the polling by _handle_problem_status
# remove them from the active set
for single_message in message:
if self._handle_problem_status(single_message, futures[single_message['id']], True):
remove(single_message['id'])
# Remove the finished queries
for id_ in list(active_queries):
if futures[id_].done():
remove(id_)
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
log.exception(err)
def _load(self, future):
"""Enqueue a problem to download results from the server.
Args:
future: `Future` object corresponding to the query
This method is threadsafe.
"""
self._load_queue.put(future)
def _do_load_results(self):
"""Submit a query asking for the results for a particular problem.
To request the results of a problem: ``GET /problems/{problem_id}/``
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Select a problem
future = self._load_queue.get()
log.debug("Query for results: %s", future.id)
# Submit the query
query_string = 'problems/{}/'.format(future.id)
try:
response = self.session.get(posixpath.join(self.base_url, query_string))
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
future._set_error(IOError(exception), sys.exc_info())
continue
# Dispatch the results, mark the task complete
self._handle_problem_status(message, future, False)
self._load_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
log.error('Load result error: ' + str(err))
class Solver:
"""
A solver enables sampling from an Ising model.
Get solver objects by calling get_solver(name) on a connection object.
The solver has responsibility for:
- Encoding submitted problems
- Checking the submitted parameters
- Adding problems to the Connection's submission queue
Args:
connection (`Connection`): Connection through which the solver is accessed.
data: Data from the server describing this solver.
"""
# Special flag to notify the system a solver needs access to special hardware
_PARAMETER_ENABLE_HARDWARE = 'use_hardware'
def __init__(self, connection, data):
self.connection = connection
self.id = data['id']
self.data = data
#: When True the solution data will be returned as numpy matrices (default: False)
self.return_matrix = False
# The exact sequence of nodes/edges is used in encoding problems and must be preserved
self._encoding_qubits = data['properties']['qubits']
self._encoding_couplers = [tuple(edge) for edge in data['properties']['couplers']]
#: The nodes in this solver's graph: set(int)
self.nodes = self.variables = set(self._encoding_qubits)
#: The edges in this solver's graph, every edge will be present as (a, b) and (b, a): set(tuple(int, int))
self.edges = self.couplers = set(tuple(edge) for edge in self._encoding_couplers) | \
set((edge[1], edge[0]) for edge in self._encoding_couplers)
#: The edges in this solver's graph, each edge will only be represented once: set(tuple(int, int))
self.undirected_edges = {edge for edge in self.edges if edge[0] < edge[1]}
#: Properties of this solver the server presents: dict
self.properties = data['properties']
#: The set of extra parameters this solver will accept in sample_ising or sample_qubo: dict
self.parameters = self.properties['parameters']
# Create a set of default parameters for the queries
self._params = {}
# As a heuristic to guess if this is a hardware sampler check if
# the 'annealing_time_range' property is set.
if 'annealing_time_range' in data['properties']:
self._params[self._PARAMETER_ENABLE_HARDWARE] = True
def sample_ising(self, linear, quadratic, **params):
"""Draw samples from the provided Ising model.
To submit a problem: ``POST /problems/``
Args:
linear (list/dict): Linear terms of the model (h).
quadratic (dict of (int, int):float): Quadratic terms of the model (J).
**params: Parameters for the sampling method, specified per solver.
Returns:
:obj:`Future`
"""
# Our linear and quadratic objective terms are already separated in an
# ising model so we can just directly call `_sample`.
return self._sample('ising', linear, quadratic, params)
def sample_qubo(self, qubo, **params):
"""Draw samples from the provided QUBO.
To submit a problem: ``POST /problems/``
Args:
qubo (dict of (int, int):float): Terms of the model.
**params: Parameters for the sampling method, specified per solver.
Returns:
:obj:`Future`
"""
# In a QUBO the linear and quadratic terms in the objective are mixed into
# a matrix. For the sake of encoding, we will separate them before calling `_sample`
linear = {i1: v for (i1, i2), v in _uniform_iterator(qubo) if i1 == i2}
quadratic = {(i1, i2): v for (i1, i2), v in _uniform_iterator(qubo) if i1 != i2}
return self._sample('qubo', linear, quadratic, params)
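# Illustrative split (sketch, hypothetical values): for a QUBO such as
#   {(0, 0): -1.0, (0, 4): 0.5}
# the comprehensions above yield linear = {0: -1.0} and quadratic = {(0, 4): 0.5}.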
def _sample(self, type_, linear, quadratic, params, reuse_future=None):
"""Internal method for both sample_ising and sample_qubo.
Args:
linear (list/dict): Linear terms of the model.
quadratic (dict of (int, int):float): Quadratic terms of the model.
**params: Parameters for the sampling method, specified per solver.
Returns:
:obj: `Future`
"""
# Check the problem
if not self.check_problem(linear, quadratic):
raise ValueError("Problem graph incompatible with solver.")
# Mix the new parameters with the default parameters
combined_params = dict(self._params)
combined_params.update(params)
# Check the parameters before submitting
for key in combined_params:
if key not in self.parameters and key != self._PARAMETER_ENABLE_HARDWARE:
raise KeyError("{} is not a parameter of this solver.".format(key))
# Encode the problem, use the newer format
data = self._base64_format(self, linear, quadratic)
# data = self._text_format(solver, lin, quad)
body = json.dumps({
'solver': self.id,
'data': data,
'type': type_,
'params': params
})
# Construct where we will put the result when we finish, submit the query
if reuse_future is not None:
future = reuse_future
future.__init__(self, None, self.return_matrix, (type_, linear, quadratic, params))
else:
future = Future(self, None, self.return_matrix, (type_, linear, quadratic, params))
log.debug("Submitting new problem to: %s", self.id)
self.connection._submit(body, future)
return future
def check_problem(self, linear, quadratic):
"""Test if an Ising model matches the graph provided by the solver.
Args:
linear (list/dict): Linear terms of the model (h).
quadratic (dict of (int, int):float): Quadratic terms of the model (J).
Returns:
boolean
"""
for key, value in _uniform_iterator(linear):
if value != 0 and key not in self.nodes:
return False
for key, value in _uniform_iterator(quadratic):
if value != 0 and tuple(key) not in self.edges:
return False
return True
def retrieve_problem(self, id_):
"""Resume polling for a problem previously submitted.
Args:
id_: Identification of the query.
Returns:
:obj: `Future`
"""
future = Future(self, id_, self.return_matrix, None)
self.connection._poll(future)
return future
def _text_format(self, solver, lin, quad):
"""Perform the legacy problem encoding.
Deprecated encoding method; included only for reference.
Args:
solver: solver requested.
lin: linear terms of the model.
quad: Quadratic terms of the model.
Returns:
data: text formatted problem
"""
data = ''
counter = 0
for index, value in _uniform_iterator(lin):
if value != 0:
data = data + '{} {} {}\n'.format(index, index, value)
counter += 1
for (index1, index2), value in six.iteritems(quad):
if value != 0:
data = data + '{} {} {}\n'.format(index1, index2, value)
counter += 1
data = '{} {}\n'.format(max(solver.nodes) + 1, counter) + data
return data
def _base64_format(self, solver, lin, quad):
"""Encode the problem for submission to a given solver.
Args:
solver: solver requested.
lin: linear terms of the model.
quad: Quadratic terms of the model.
Returns:
encoded submission dictionary
"""
# Encode linear terms. The coefficients of the linear terms of the objective
# are encoded as an array of little endian 64 bit doubles.
# This array is then base64 encoded into a string safe for json.
# The order of the terms is determined by the _encoding_qubits property
# specified by the server.
lin = [_uniform_get(lin, qubit, 0) for qubit in solver._encoding_qubits]
lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin))
# Encode the coefficients of the quadratic terms of the objective
# in the same manner as the linear terms, in the order given by the
# _encoding_couplers property
quad = [quad.get(edge, 0) + quad.get((edge[1], edge[0]), 0)
for edge in solver._encoding_couplers]
quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad))
# The name for this encoding is 'qp' and is explicitly included in the
# message for easier extension in the future.
return {
'format': 'qp',
'lin': lin.decode('utf-8'),
'quad': quad.decode('utf-8')
}
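# Illustrative sketch of the 'qp' coefficient packing used above (standalone example with
# hypothetical values; not part of the original API): coefficients are packed as
# little-endian doubles and then base64 encoded.
if __name__ == "__main__":
    _coeffs = [0.5, -1.0, 0.25]
    _encoded = base64.b64encode(struct.pack('<' + 'd' * len(_coeffs), *_coeffs))
    print(_encoded.decode('utf-8'))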
class Future:
"""An object for a pending SAPI call.
Waits for a request to complete and parses the message returned.
The future will block until it resolves when any data value is accessed.
The method :meth:`done` can be used to query for resolution without blocking.
:meth:`wait` and :meth:`wait_multiple` can be used to block on a variable
number of jobs for a given amount of time.
Note:
Only constructed by :obj:`Solver` objects.
Args:
solver: The solver that is going to fulfil this future.
id_: Identification of the query we are waiting for. (May be None and filled in later.)
return_matrix: Request return values as numpy matrices.
"""
def __init__(self, solver, id_, return_matrix, submission_data):
self.solver = solver
# Store the query data in case the problem needs to be resubmitted
self._submission_data = submission_data
# Has the client tried to cancel this job
self._cancel_requested = False
self._cancel_sent = False
self._single_cancel_lock = threading.Lock() # Make sure we only call cancel once
# Should the results be decoded as python lists or numpy matrices
if return_matrix and not _numpy:
raise ValueError("Matrix result requested without numpy.")
self.return_matrix = return_matrix
#: The id the server will use to identify this problem, None until the id is actually known
self.id = id_
#: `datetime` corresponding to the time when the problem was accepted by the server (None before then)
self.time_received = None
#: `datetime` corresponding to the time when the problem was completed by the server (None before then)
self.time_solved = None
# Track how long it took us to parse the data
self.parse_time = None
# Data from the server before it is parsed
self._message = None
#: Status flag most recently returned by the server
self.remote_status = None
# Data from the server after it is parsed (either data or an error)
self._result = None
self.error = None
# Event(s) to signal when the results are ready
self._results_ready_event = threading.Event()
self._other_events = []
def _set_message(self, message):
"""Complete the future with a message from the server.
The message from the server may actually be an error.
Args:
message (dict): Data from the server from trying to complete query.
"""
self._message = message
self._signal_ready()
def _set_error(self, error, exc_info=None):
"""Complete the future with an error.
Args:
error: An error string or exception object.
exc_info: Stack trace info from sys module for reraising exceptions nicely.
"""
self.error = error
self._exc_info = exc_info
self._signal_ready()
def _signal_ready(self):
"""Signal all the events waiting on this future."""
self._results_ready_event.set()
[ev.set() for ev in self._other_events]
def _add_event(self, event):
"""Add an event to be signaled after this event completes."""
self._other_events.append(event)
if self.done():
event.set()
def _remove_event(self, event):
"""Remove a completion event from this future."""
self._other_events.remove(event)
@staticmethod
def wait_multiple(futures, min_done=None, timeout=float('inf')):
"""Wait for multiple Future objects to complete.
Python doesn't provide a multi-wait, but we can jury-rig something reasonably
efficient using an event object.
Args:
futures (list of Future): list of objects to wait on
min_done (int): Stop waiting when this many results are ready
timeout (float): Maximum number of seconds to wait
Returns:
boolean: True if the minimum number of results have been reached.
"""
if min_done is None:
min_done = len(futures)
# Track the exit conditions
finish = time.time() + timeout
done = 0
# Keep track of which futures haven't finished
remaining = list(futures)
# Insert our event into all the futures
event = threading.Event()
[f._add_event(event) for f in remaining]
# Check the exit conditions
while done < min_done and finish > time.time():
# Prepare to wait on any of the jobs finishing
event.clear()
# Check if any of the jobs have finished. Do this after the clear just in
# case one finished and we erased its signal by calling clear above
finished_futures = {f for f in remaining if f.done()}
if len(finished_futures) > 0:
# If we did make a mistake resetting the event, undo that now
# so that we double check the finished list before a wait blocks
event.set()
# Update our exit conditions
done += len(finished_futures)
remaining = [f for f in remaining if f not in finished_futures]
continue
# Block on any of the jobs finishing
wait_time = finish - time.time() if abs(finish) != float('inf') else None
event.wait(wait_time)
# Clean up after ourselves
[f._remove_event(event) for f in futures]
return done >= min_done
def wait(self, timeout=None):
"""Wait for the results to be available.
Args:
timeout (float): Maximum number of seconds to wait
"""
return self._results_ready_event.wait(timeout)
def done(self):
"""Test whether a response has arrived."""
return self._message is not None or self.error is not None
def cancel(self):
"""Try to cancel the problem corresponding to this result.
An effort will be made to prevent the execution of the corresponding problem
but there are no guarantees.
"""
# Don't need to cancel something already finished
if self.done():
return
with self._single_cancel_lock:
# Already done
if self._cancel_requested:
return
# Set the cancel flag
self._cancel_requested = True
# The cancel request will be sent here, or by the solver when it
# gets a status update for this problem (in the case where the id hasn't been set yet)
if self.id is not None and not self._cancel_sent:
self._cancel_sent = True
self.solver.connection._cancel(self.id, self)
@property
def energies(self):
"""The energy buffer, blocks if needed.
Returns:
list or numpy matrix of doubles.
"""
result = self._load_result()
return result['energies']
@property
def samples(self):
"""The state buffer, blocks if needed.
Returns:
list of lists or numpy matrix.
"""
result = self._load_result()
return result['solutions']
@property
def occurrences(self):
"""The occurrences buffer, blocks if needed.
Returns:
list or numpy matrix of doubles.
"""
result = self._load_result()
if 'num_occurrences' in result:
return result['num_occurrences']
elif self.return_matrix:
return np.ones((len(result['solutions']),))
else:
return [1] * len(result['solutions'])
@property
def timing(self):
"""Information about the time the solver took in operation.
The response is a mapping from string keys to numeric values.
The exact keys used depend on the solver.
Returns:
dict
"""
result = self._load_result()
return result['timing']
def __getitem__(self, key):
"""Provide dwave_sapi2 compatible access to results.
Args:
key: keywords for result fields.
"""
if key == 'energies':
return self.energies
elif key in ['solutions', 'samples']:
return self.samples
elif key in ['occurrences', 'num_occurrences']:
return self.occurrences
elif key == 'timing':
return self.timing
else:
raise KeyError('{} is not a property of response object'.format(key))
def _load_result(self):
"""Get the result, waiting and decoding as needed."""
if self._result is None:
# Wait for the query response
self._results_ready_event.wait()
# Check for other error conditions
if self.error is not None:
if self._exc_info is not None:
six.reraise(*self._exc_info)
if isinstance(self.error, Exception):
raise self.error
raise RuntimeError(self.error)
# If someone else took care of this while we were waiting
if self._result is not None:
return self._result
self._decode()
return self._result
def _decode(self):
"""Choose the right decoding method based on format and environment."""
start = time.time()
try:
if self._message['type'] not in ['qubo', 'ising']:
raise ValueError('Unknown problem format used.')
# If no format is set we fall back to legacy encoding
if 'format' not in self._message['answer']:
if _numpy:
return self._decode_legacy_numpy()
return self._decode_legacy()
# If format is set, it must be qp
if self._message['answer']['format'] != 'qp':
raise ValueError('Data format returned by server not understood.')
if _numpy:
return self._decode_qp_numpy()
return self._decode_qp()
finally:
self.parse_time = time.time() - start
def _decode_legacy(self):
"""Decode old format, without numpy.
The legacy format, included mostly for information and contrast, used
pure json for most of the data, with a dense encoding used for the
samples themselves.
"""
# Most of the data can be used as is
self._result = self._message['answer']
# Measure the shape of the binary data returned
num_solutions = len(self._result['energies'])
active_variables = self._result['active_variables']
total_variables = self._result['num_variable']
# Decode the solutions, which will be a continuous run of bits.
# It was treated as a raw byte string and base64 encoded.
binary = base64.b64decode(self._result['solutions']) # Undo the base64 encoding
byte_buffer = struct.unpack('B' * len(binary), binary) # Read out the byte array
bits = []
for byte in byte_buffer:
bits.extend(reversed(self._decode_byte(byte))) # Turn the bytes back into bits
# Figure out the null value for output
default = 3 if self._message['type'] == 'qubo' else 0
# Pull out a bit for each active variable, keep our spot in the
# bit array between solutions using `index`
index = 0
solutions = []
for solution_index in range(num_solutions):
# Use the default value for any variables that are not active
solution = [default] * total_variables
for i in active_variables:
solution[i] = bits[index]
index += 1
# Make sure we are in the right variable space
if self._message['type'] == 'ising':
values = {0: -1, 1: 1}
solution = [values.get(v, None) for v in solution]
solutions.append(solution)
self._result['solutions'] = solutions
def _decode_legacy_numpy(self):
"""Decode old format, using numpy.
Decodes the same format as _decode_legacy, but gains some speed using numpy.
"""
# Load number lists into numpy buffers
res = self._result = self._message['answer']
if self.return_matrix:
res['energies'] = np.array(res['energies'], dtype=float)
if 'num_occurrences' in res:
res['num_occurrences'] = np.array(res['num_occurrences'], dtype=int)
res['active_variables'] = np.array(res['active_variables'], dtype=int)
# Measure the shape of the data
num_solutions = len(res['energies'])
active_variables = res['active_variables']
num_variables = len(active_variables)
# Decode the solutions, which will be a continuous run of bits
byte_type = np.dtype(np.uint8)
byte_type = byte_type.newbyteorder('<')
bits = np.unpackbits(np.frombuffer(base64.b64decode(res['solutions']), dtype=byte_type))
# Clip off the extra bits from encoding
bits = np.delete(bits, range(num_solutions * num_variables, bits.size))
bits = np.reshape(bits, (num_solutions, num_variables))
# Switch from bits to spins
default = 3
if self._message['type'] == 'ising':
bits = bits.astype(np.int8)
bits *= 2
bits -= 1
default = 0
# Fill in the missing variables
solutions = np.full((num_solutions, res['num_variables']), default, dtype=np.int8)
solutions[:, active_variables] = bits
res['solutions'] = solutions
if not self.return_matrix:
res['solutions'] = res['solutions'].tolist()
def _decode_qp(self):
"""Decode qp format, without numpy.
The 'qp' format is the current encoding used for problems and samples.
In this encoding the reply is generally json, but the samples, energy,
and histogram data (the occurrence count of each solution), are all
base64 encoded arrays.
"""
# Decode the simple buffers
res = self._result = self._message['answer']
res['active_variables'] = self._decode_ints(res['active_variables'])
active_variables = res['active_variables']
if 'num_occurrences' in res:
res['num_occurrences'] = self._decode_ints(res['num_occurrences'])
res['energies'] = self._decode_doubles(res['energies'])
# Measure out the size of the binary solution data
num_solutions = len(res['energies'])
num_variables = len(res['active_variables'])
solution_bytes = -(-num_variables // 8) # equivalent to int(math.ceil(num_variables / 8.))
total_variables = res['num_variables']
# Figure out the null value for output
default = 3 if self._message['type'] == 'qubo' else 0
# Decode the solutions, which will be byte aligned in binary format
binary = base64.b64decode(res['solutions'])
solutions = []
for solution_index in range(num_solutions):
# Grab the section of the buffer related to the current
buffer_index = solution_index * solution_bytes
solution_buffer = binary[buffer_index:buffer_index + solution_bytes]
bytes = struct.unpack('B' * solution_bytes, solution_buffer)
# Start from the default (inactive) value for every variable
solution = [default] * total_variables
index = 0
for byte in bytes:
# Parse each byte and read out however many bits are still needed
values = self._decode_byte(byte)
for _ in range(min(8, len(active_variables) - index)):
i = active_variables[index]
index += 1
solution[i] = values.pop()
# Switch to the right variable space
if self._message['type'] == 'ising':
values = {0: -1, 1: 1}
solution = [values.get(v, default) for v in solution]
solutions.append(solution)
res['solutions'] = solutions
def _decode_byte(self, byte):
"""Helper for _decode_qp, turns a single byte into a list of bits.
Args:
byte: byte to be decoded
Returns:
list of bits corresponding to byte
"""
bits = []
for _ in range(8):
bits.append(byte & 1)
byte >>= 1
return bits
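# For example (sketch): _decode_byte(0b00000101) -> [1, 0, 1, 0, 0, 0, 0, 0],
# least-significant bit first.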
def _decode_ints(self, message):
"""Helper for _decode_qp, decodes an int array.
The int array is stored as little endian 32 bit integers.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
"""
binary = base64.b64decode(message)
return struct.unpack('<' + ('i' * (len(binary) // 4)), binary)
def _decode_doubles(self, message):
"""Helper for _decode_qp, decodes a double array.
The double array is stored as little endian 64 bit doubles.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
Args:
message: the double array
Returns:
decoded double array
"""
binary = base64.b64decode(message)
return struct.unpack('<' + ('d' * (len(binary) // 8)), binary)
def _decode_qp_numpy(self):
"""Decode qp format, with numpy."""
res = self._result = self._message['answer']
# Build some little endian type encodings
double_type = np.dtype(np.double)
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.morphology import binary_erosion, distance_transform_edt
from scipy.ndimage import find_objects
import random
from random import uniform, random, randint, getrandbits
from scipy import interpolate
import copy
from scipy.ndimage.filters import generic_filter
try:
import edt
except Exception:
pass
try:
import dataset_iterator.helpers as dih
except ImportError:
dih = None
from .helpers import ensure_multiplicity
def batch_wise_fun(fun):
#return lambda batch : np.stack([fun(batch[i]) for i in range(batch.shape[0])], 0)
def func(batch):
for b in range(batch.shape[0]):
batch[b] = fun(batch[b])
return batch
return func
def apply_and_stack_channel(*funcs):
return lambda batch : np.concatenate([fun(batch) for fun in funcs], -1)
def identity(batch):
return batch
def level_set(label_img, max_distance=None, dtype=np.float32):
if not np.any(label_img): # empty image
baseline = np.ones_like(label_img, dtype=dtype)
if max_distance is not None:
return baseline * max_distance # base line = max possible distance value
else:
return baseline * max(label_img.shape)
inside = distance_transform_edt(label_img).astype(dtype, copy=False) # edm inside
outside = distance_transform_edt(np.where(label_img, False, True)).astype(dtype, copy=False)
if max_distance is not None:
inside[inside > max_distance] = max_distance
outside[outside>max_distance] = max_distance
return outside - inside
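# Illustrative usage sketch (not part of the original module): level_set on a tiny
# single-pixel object; the result is negative inside the object and positive outside.
if __name__ == "__main__":
    _lbl = np.zeros((5, 5), dtype=np.uint8)
    _lbl[2, 2] = 1
    print(level_set(_lbl))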
def unet_weight_map(batch, wo=10, sigma=5, max_background_ratio=0, set_contours_to_zero=False, dtype=np.float32):
"""Implementation of Unet weight map as in <NAME>., <NAME>., & <NAME>. (2015, October).
U-net: Convolutional networks for biomedical image segmentation.
Parameters
----------
batch : type
ND array of shape (batch, Y, X, nchan) of labeld images
if nchan>1 function is applied separately on each channel
wo : float
cf Unet paper
sigma : float
cf Unet paper
max_background_ratio : bool
limits the ratio (background volume / foreground volume).
useful when foreground is rare, in which case the weight of forground will be: max_background_ratio / (1 + max_background_ratio)
if 0, not limit
set_contours_to_zero : bool
if true, weight of object contours is set to zero
dtype : numpy.dtype
weight map data type
Returns
-------
type
numpy nd array of same shape as batch
"""
if batch.shape[-1]>1:
wms = [unet_weight_map(batch[...,i:i+1], wo, sigma, max_background_ratio, True, dtype) for i in range(batch.shape[-1])]
return np.concatenate(wms, axis=-1)
else:
s2 = sigma * sigma * 2
wm = weight_map_mask_class_balance(batch, max_background_ratio, True, dtype)
if wo>0 or set_contours_to_zero:
for i in range(batch.shape[0]):
im = batch[i]
labels = np.unique(im)
labels = labels[labels!=0]
if labels.shape[0]>1 and wo>0:
edms=[distance_transform_edt(np.invert(im==l)) for l in labels]
edm = np.concatenate(edms, axis=-1)
edm = np.partition(edm, 1)[...,:2] # get the 2 min values
edm = np.sum(edm, axis=-1, keepdims=True)
"""
Simple Horten Wing as used by Richards. Baseline and simplified models
"""
import numpy as np
from cases.hangar.horten_wing import HortenWing
import sharpy.utils.algebra as algebra
class Baseline(HortenWing):
def set_properties(self):
# Wing geometry
self.span = 20.0 # [m]
self.sweep_LE = 20 * np.pi / 180 # [rad] Leading Edge Sweep
self.c_root = 1.0 # [m] Root chord - Richards
self.taper_ratio = 0.25 # Richards
self.thrust_nodes = [self.n_node_fuselage - 1,
self.n_node_fuselage + self.n_node_wing + 1]
self.loc_cg = 0.45 # CG position wrt to LE (from sectional analysis)
# EA is the reference in NATASHA - defined with respect to the midchord. SHARPy defines it wrt the LE and as a
# fraction of the local chord
self.main_ea_root = 0.33
self.main_ea_tip = 0.33
self.n_mass = 2 * self.n_elem_wing
# FUSELAGE GEOMETRY
self.fuselage_width = 1.65/2
self.c_fuselage = self.c_root
# WASH OUT
self.washout_root = 0*np.pi/180
self.washout_tip = -2 * np.pi / 180
# Horseshoe wake
self.horseshoe = False
self.wake_type = 2
self.dt_factor = 1
self.dt = 1 / self.M / self.u_inf * self.dt_factor
# Dynamics
self.n_tstep = int(self.physical_time/self.dt)
self.gust_intensity = 0.1
# Numerics
self.tolerance = 1e-12
self.fsi_tolerance = 1e-10
self.relaxation_factor = 0.2
def update_mass_stiffness(self, sigma=1., sigma_mass=1.):
"""
Sets the mass and stiffness properties of the default wing.
Returns:
"""
n_elem_fuselage = self.n_elem_fuselage
n_elem_wing = self.n_elem_wing
n_node_wing = self.n_node_wing
n_node_fuselage = self.n_node_fuselage
c_root = self.c_root
taper_ratio = self.taper_ratio
# Local chord to root chord initialisation
c_bar_temp = np.linspace(c_root, taper_ratio * c_root, n_elem_wing)
# Structural properties at the wing root section from Richards 2016
ea = 1e6
ga = 1e6
gj = 4.24e5
eiy = 3.84e5
eiz = 2.46e7
root_i_beam = IBeam()
root_i_beam.build(c_root)
root_i_beam.rotation_axes = np.array([0, self.main_ea_root-0.25, 0])
root_airfoil = Airfoil()
root_airfoil.build(c_root)
root_airfoil.rotation_axes = np.array([0, self.main_ea_root-0.25, 0])
mu_0 = root_i_beam.mass + root_airfoil.mass
j_xx = root_i_beam.ixx + root_airfoil.ixx
j_yy = root_i_beam.iyy + root_airfoil.iyy
j_zz = root_i_beam.izz + root_airfoil.izz
# Number of stiffnesses used
n_stiffness = self.n_stiffness
# Initialise the stiffness database
base_stiffness = self.base_stiffness
stiffness_root = sigma * np.diag([ea, ga, ga, gj, eiy, eiz])
stiffness_tip = taper_ratio ** 2 * stiffness_root
# Assume a linear variation in the stiffness. Richards et al. use VABS on the linearly tapered wing to find the
# spanwise properties
alpha = np.linspace(0, 1, self.n_elem_wing)
for i_elem in range(0, self.n_elem_wing):
base_stiffness[i_elem + 1, :, :] = stiffness_root*(1-alpha[i_elem]**2) + stiffness_tip*alpha[i_elem]**2
base_stiffness[0] = base_stiffness[1]
# Mass variation along the span
# Right wing centre of mass - wrt to 0.25c
cm = (root_airfoil.centre_mass * root_airfoil.mass + root_i_beam.centre_mass * root_i_beam.mass) \
/ np.sum(root_airfoil.mass + root_i_beam.mass)
cg = np.array([0, -(cm[0] + 0.25 * self.c_root - self.main_ea_root), 0]) * 1
n_mass = self.n_mass
# sigma_mass = 1.25
# Initialise database
base_mass = self.base_mass
mass_root_right = np.diag([mu_0, mu_0, mu_0, j_xx, j_yy, j_zz]) * sigma_mass
mass_root_right[:3, -3:] = -algebra.skew(cg) * mu_0
mass_root_right[-3:, :3] = algebra.skew(cg) * mu_0
mass_root_left = np.diag([mu_0, mu_0, mu_0, j_xx, j_yy, j_zz]) * sigma_mass
mass_root_left[:3, -3:] = -algebra.skew(-cg) * mu_0
mass_root_left[-3:, :3] = algebra.skew(-cg) * mu_0
mass_tip_right = taper_ratio * mass_root_right
mass_tip_left = taper_ratio * mass_root_left
ixx_dummy = []
iyy_dummy = []
izz_dummy = []
for i_elem in range(self.n_elem_wing):
# Create full cross section
c_bar = self.c_root * ((1-alpha[i_elem]) + self.taper_ratio * alpha[i_elem])
x_section = WingCrossSection(c_bar)
print(i_elem)
print('Section Mass: %.2f ' %x_section.mass)
print('Linear Mass: %.2f' % (mu_0 * (1-alpha[i_elem]) + mu_0 * self.taper_ratio * alpha[i_elem]))
print('Section Ixx: %.4f' % x_section.ixx)
print('Section Iyy: %.4f' % x_section.iyy)
print('Section Izz: %.4f' % x_section.izz)
print('Linear Ixx: %.2f' % (j_xx * (1-alpha[i_elem]) + j_xx * self.taper_ratio * alpha[i_elem]))
# base_mass[i_elem, :, :] = mass_root_right*(1-alpha[i_elem]) + mass_tip_right*alpha[i_elem]
# base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1] = mass_root_left*(1-alpha[i_elem]) + mass_tip_left*alpha[i_elem]
base_mass[i_elem, :, :] = np.diag([x_section.mass, x_section.mass, x_section.mass,
x_section.ixx, x_section.iyy, x_section.izz])
cg = np.array([0, -(x_section.centre_mass[0] + (0.25 - self.main_ea_root) * c_bar / self.c_root), 0]) * 1
base_mass[i_elem, :3, -3:] = -algebra.skew(cg) * x_section.mass
base_mass[i_elem, -3:, :3] = algebra.skew(cg) * x_section.mass
base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1, :, :] = np.diag([x_section.mass, x_section.mass, x_section.mass,
x_section.ixx, x_section.iyy, x_section.izz])
cg = np.array([0, -(x_section.centre_mass[0] + (0.25 - self.main_ea_root) * c_bar / self.c_root), 0]) * 1
base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1, :3, -3:] = -algebra.skew(-cg) * x_section.mass
base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1, -3:, :3] = algebra.skew(-cg) * x_section.mass
ixx_dummy.append(x_section.ixx)
iyy_dummy.append(x_section.iyy)
izz_dummy.append(x_section.izz)
# for item in x_section.items:
# plt.plot(item.y, item.z)
# plt.scatter(x_section.centre_mass[0], x_section.centre_mass[1])
# plt.show()
# print(x_section.centre_mass)
# print(cg)
# plt.plot(range(self.n_elem_wing), ixx_dummy)
# plt.plot(range(self.n_elem_wing), iyy_dummy)
# plt.plot(range(self.n_elem_wing), izz_dummy)
# plt.show()
# Lumped mass initialisation
lumped_mass_nodes = self.lumped_mass_nodes
lumped_mass = self.lumped_mass
lumped_mass_inertia = self.lumped_mass_inertia
lumped_mass_position = self.lumped_mass_position
# Lumped masses nodal position
# 0 - Right engine
# 1 - Left engine
# 2 - Fuselage
lumped_mass_nodes[0] = 2
lumped_mass_nodes[1] = n_node_fuselage + n_node_wing + 1
lumped_mass_nodes[2] = 0
# Lumped mass value from Richards 2013
lumped_mass[0:2] = 51.445 / 9.81
lumped_mass[2] = 150 / 9.81
# lumped_mass_position[2] = [0, 0, -10.]
# Lumped mass inertia
lumped_mass_inertia[0, :, :] = np.diag([0.29547, 0.29322, 0.29547])
lumped_mass_inertia[1, :, :] = np.diag([0.29547, 0.29322, 0.29547])
lumped_mass_inertia[2, :, :] = np.diag([0.5, 1, 1]) * lumped_mass[2]
# Define class attributes
self.lumped_mass = lumped_mass * 1
self.lumped_mass_nodes = lumped_mass_nodes * 1
self.lumped_mass_inertia = lumped_mass_inertia * 1
self.lumped_mass_position = lumped_mass_position * 1
self.base_stiffness = base_stiffness
self.base_mass = base_mass
class CrossSection(object):
def __init__(self):
self.rho = 2770
self.rotation_axes = np.array([0, 0.33-0.25, 0])
self.y = np.ndarray((2,))
self.z = np.ndarray((2,))
self.t = np.ndarray((2,))
@property
def mass(self):
"""
Mass of the I beam per unit length
"""
return np.sum(self.t * self.elem_length) * self.rho
@property
def ixx(self):
ixx_ = np.sum(self.elem_length * self.t * self.rho * (self.elem_cm_y ** 2 + self.elem_cm_z ** 2))
return ixx_ + self.mass * (self.centre_mass[0] - self.rotation_axes[1]) ** 2
@property
def elem_length(self):
elem_length = np.sqrt(np.diff(self.y) ** 2 + np.diff(self.z) ** 2)
return elem_length
@property
def elem_cm_y(self):
elem_cm_y_ = np.ndarray((self.n_elem, ))
elem_cm_y_[:] = 0.5 * (self.y[:-1] + self.y[1:])
return elem_cm_y_
@property
def elem_cm_z(self):
elem_cm_z_ = np.ndarray((self.n_elem, ))
elem_cm_z_[:] = 0.5 * (self.z[:-1] + self.z[1:])
return elem_cm_z_
@property
def centre_mass(self):
y_cm = np.sum(self.elem_cm_y * self.elem_length) / np.sum(self.elem_length)
z_cm = np.sum(self.elem_cm_z * self.elem_length) / np.sum(self.elem_length)
return np.array([y_cm, z_cm])
@property
def iyy(self):
x_dom = np.linspace(-0.5, 0.5, 100)
x_cg = 0.5 * (x_dom[:-1].copy() + x_dom[1:].copy())
dx = np.diff(x_dom)[0]
iyy_ = 0
for elem in range(len(self.elem_length)):
z_cg = np.ones_like(x_cg) * self.elem_cm_z[elem]
iyy_ += np.sum(self.elem_length[elem] * self.t[elem] * dx * self.rho * (x_cg ** 2 + z_cg ** 2))
return iyy_ #np.sum(self.elem_length * self.t * self.rho * 1 * self.elem_cm_z ** 2)
@property
def izz(self):
x_dom = np.linspace(-0.5, 0.5, 100)
x_cg = 0.5 * (x_dom[:-1].copy() + x_dom[1:].copy())
dx = np.diff(x_dom)[0]
izz_ = 0
for elem in range(len(self.elem_length)):
y_cg = np.ones_like(x_cg) * self.elem_cm_y[elem]
izz_ += np.sum(self.elem_length[elem] * self.t[elem] * dx * self.rho * (x_cg ** 2 + y_cg ** 2))
return izz_ #np.sum(self.elem_length * self.t * self.rho * 1 * self.elem_cm_y ** 2)
@property
def n_node(self):
return self.y.shape[0]
@property
def n_elem(self):
return self.n_node - 1
def build(self, y, z, t):
self.y = y
self.z = z
self.t = t
class IBeam(CrossSection):
def build(self, c_root):
t_skin = 0.127e-2
t_c = 0.12
w_I = 10e-2 * c_root # Width of the Ibeam
self.rho = 2770
self.y = np.ndarray((self.n_node, ))
self.z = np.ndarray((self.n_node, ))
self.t = np.ndarray((self.n_node, ))
z_max = t_c * c_root
y = np.array([-w_I/2, w_I/2, 0, 0, -w_I/2, w_I/2])
z = np.array([z_max/2, z_max/2, z_max/2, -z_max/2, -z_max/2, -z_max/2])
t = np.array([t_skin, 0, t_skin, 0, t_skin])
self.y = y
self.z = z
self.t = t
class Airfoil(CrossSection):
def build(self, c_root):
t_c = 0.12
t_skin = 0.127e-2 * 1.5
y_dom = np.linspace(0, c_root, 100)
y = np.concatenate((y_dom, y_dom[:-1][::-1]))
z_dom = 5 * t_c * (0.2969 * np.sqrt(y_dom/c_root) -
0.1260 * y_dom/c_root -
0.3516 * (y_dom/c_root) ** 2 +
0.2843 * (y_dom/c_root) ** 3 -
0.1015 * (y_dom/c_root) ** 4) * c_root
        z = np.concatenate((z_dom, -z_dom[:-1][::-1]))
        # Note: the original source is truncated here; the assignments below are
        # an assumed completion so the class exposes y/z/t like IBeam.build does.
        self.y = y
        self.z = z
        self.t = t_skin * np.ones((y.shape[0] - 1,))
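# A minimal usage sketch (assumed, not part of the original model): build the
# root I-beam spar on its own and print the per-unit-span sectional properties
# that update_mass_stiffness() combines with the skin contribution.
if __name__ == '__main__':
    root_spar = IBeam()
    root_spar.build(c_root=1.0)  # assumed 1 m root chord for illustration
    print('spar mass per unit span: %.3f kg/m' % root_spar.mass)
    print('spar centre of mass (y, z): %s m' % root_spar.centre_mass)
    print('spar Ixx about the rotation axis: %.4e kg m' % root_spar.ixx)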
import arff
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
The available locations are "Gaimersheim", "Munich" and "Ingolstadt"
"""
location = 'Gaimersheim'
openfile = 'Interpolated_data/data_' + location + '.json'
df = pd.read_json(openfile)
# Extracting all sensors values
acc_x = np.array(df["acceleration_x"]["values"])
acc_y = np.array(df["acceleration_y"]["values"])
acc_z = np.array(df["acceleration_z"]["values"])
acc_pedal = np.array(df["accelerator_pedal"]["values"])
brake_pressure = np.array(df["brake_pressure"]["values"])
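# A minimal follow-up sketch (assumed, not from the original script): plot the
# three interpolated acceleration channels against sample index as a quick
# sanity check of the extracted signals.
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(acc_x, label='acceleration_x')
ax.plot(acc_y, label='acceleration_y')
ax.plot(acc_z, label='acceleration_z')
ax.set_xlabel('sample index')
ax.set_ylabel('acceleration')
ax.set_title(location)
ax.legend()
plt.show()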
# pylint: disable=C0301
from struct import Struct, pack
from abc import abstractmethod
import inspect
from typing import List
import numpy as np
from numpy import zeros, searchsorted, allclose
from pyNastran.utils.numpy_utils import integer_types, float_types
from pyNastran.op2.result_objects.op2_objects import BaseElement, get_times_dtype
from pyNastran.f06.f06_formatting import (
write_floats_13e, write_floats_12e,
write_float_13e, # write_float_12e,
_eigenvalue_header,
)
from pyNastran.op2.op2_interface.write_utils import set_table3_field
SORT2_TABLE_NAME_MAP = {
'OEF2' : 'OEF1',
'OEFATO2' : 'OEFATO1',
'OEFCRM2' : 'OEFCRM1',
'OEFPSD2' : 'OEFPSD1',
'OEFRMS2' : 'OEFRMS1',
'OEFNO2' : 'OEFNO1',
}
TABLE_NAME_TO_TABLE_CODE = {
'OEF1' : 4,
}
class ForceObject(BaseElement):
def __init__(self, data_code, isubcase, apply_data_code=True):
self.element_type = None
self.element_name = None
self.nonlinear_factor = np.nan
self.element = None
self._times = None
BaseElement.__init__(self, data_code, isubcase, apply_data_code=apply_data_code)
def finalize(self):
"""it's required that the object be in SORT1"""
self.set_as_sort1()
def set_as_sort1(self):
"""the data is in SORT1, but the flags are wrong"""
if self.is_sort1:
return
self.table_name = SORT2_TABLE_NAME_MAP[self.table_name]
self.sort_bits[1] = 0 # sort1
self.sort_method = 1
assert self.is_sort1 is True, self.is_sort1
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
raise NotImplementedError()
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def _write_table_3(self, op2, op2_ascii, new_result, itable, itime): #itable=-3, itime=0):
import inspect
from struct import pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_table_3: %s\n' % (self.__class__.__name__, call_frame[1][3]))
#print('new_result=%s itable=%s' % (new_result, itable))
if new_result and itable != -3:
header = [
4, 146, 4,
]
else:
header = [
4, itable, 4,
4, 1, 4,
4, 0, 4,
4, 146, 4,
]
op2.write(pack(b'%ii' % len(header), *header))
op2_ascii.write('table_3_header = %s\n' % header)
approach_code = self.approach_code
table_code = self.table_code
isubcase = self.isubcase
element_type = self.element_type
assert isinstance(self.element_type, int), self.element_type
#[
#'aCode', 'tCode', 'element_type', 'isubcase',
#'???', '???', '???', 'load_set'
#'format_code', 'num_wide', 's_code', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', 'Title', 'subtitle', 'label']
#random_code = self.random_code
format_code = self.format_code
s_code = 0 # self.s_code
num_wide = self.num_wide
acoustic_flag = 0
thermal = 0
title = b'%-128s' % self.title.encode('ascii')
subtitle = b'%-128s' % self.subtitle.encode('ascii')
label = b'%-128s' % self.label.encode('ascii')
ftable3 = b'50i 128s 128s 128s'
#oCode = 0
load_set = 0
#print(self.code_information())
ftable3 = b'i' * 50 + b'128s 128s 128s'
field6 = 0
field7 = 0
if self.analysis_code == 1:
field5 = self.loadIDs[itime]
elif self.analysis_code == 2:
field5 = self.modes[itime]
field6 = self.eigns[itime]
field7 = self.cycles[itime]
assert isinstance(field6, float), type(field6)
assert isinstance(field7, float), type(field7)
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
elif self.analysis_code == 5:
try:
field5 = self.freqs[itime]
except AttributeError: # pragma: no cover
print(self)
raise
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 6:
if hasattr(self, 'times'):
field5 = self.times[itime]
#elif hasattr(self, 'dts'):
#field5 = self.times[itime]
else: # pragma: no cover
print(self.get_stats())
                raise NotImplementedError('cant find times or dts on analysis_code=6')
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 7: # pre-buckling
field5 = self.loadIDs[itime] # load set number
elif self.analysis_code == 8: # post-buckling
if hasattr(self, 'lsdvmns'):
field5 = self.lsdvmns[itime] # load set number
elif hasattr(self, 'loadIDs'):
field5 = self.loadIDs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find lsdvmns or loadIDs on analysis_code=8')
if hasattr(self, 'eigns'):
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find eigns or eigrs on analysis_code=8')
assert isinstance(field6, float_types), type(field6)
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
elif self.analysis_code == 9: # complex eigenvalues
field5 = self.modes[itime]
if hasattr(self, 'eigns'):
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
                raise NotImplementedError('cant find eigns or eigrs on analysis_code=9')
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
field7 = self.eigis[itime]
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
elif self.analysis_code == 10: # nonlinear statics
field5 = self.load_steps[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5; load step
elif self.analysis_code == 11: # old geometric nonlinear statics
field5 = self.loadIDs[itime] # load set number
else:
raise NotImplementedError(self.analysis_code)
table3 = [
approach_code, table_code, element_type, isubcase, field5,
field6, field7, load_set, format_code, num_wide,
s_code, acoustic_flag, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, thermal, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
title, subtitle, label,
]
assert table3[22] == thermal
n = 0
for v in table3:
if isinstance(v, (int, float, np.float32)):
n += 4
elif isinstance(v, str):
#print(len(v), v)
n += len(v)
else:
#print('write_table_3', v)
n += len(v)
assert n == 584, n
data = [584] + table3 + [584]
fmt = b'i' + ftable3 + b'i'
#print(fmt)
#print(data)
#f.write(pack(fascii, '%s header 3c' % self.table_name, fmt, data))
op2_ascii.write('%s header 3c = %s\n' % (self.table_name, data))
op2.write(pack(fmt, *data))
class RealForceObject(ForceObject):
def __init__(self, data_code, isubcase, apply_data_code=True):
ForceObject.__init__(self, data_code, isubcase, apply_data_code=apply_data_code)
@property
def is_real(self):
return True
@property
def is_complex(self):
return False
"""
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( T R I A 3 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
3 STRAIN 1 20345.4805 -2
7.1402
2 0.9025 -12
7.1402
3 20342.2461 -2
20345.4805 ***
4 STRAIN 1 16806.9277 -2
38.8327
2 0.9865 -2
38.8327
3 16804.4199 -2
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( T R I A 6 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
5 STRAIN 1 21850.3184 -2
166984.4219
2 0.7301 -2
166984.4219
3 21847.9902 -2
166984.4219 ***
6 STRAIN 1 18939.8340 -2
130371.3828
2 0.7599 -1
130371.3828
3 18937.7734 -2
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( Q U A D 4 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
1 STRAIN 1 18869.6621 -2
16.2471
2 1.0418 -2
16.2471
3 18866.6074 -2
18869.6621 ***
1 CC227: CANTILEVERED COMPOSITE PLATE 3 LAYER SYMM PLY CC227 DECEMBER 5, 2011 MSC.NASTRAN 6/17/05 PAGE 15
FAILURE CRITERION IS STRAIN, STRESS ALLOWABLES, LIST STRESSES
0
F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( Q U A D 8 )
ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG
ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES
2 STRAIN 1 14123.7451 -2
31.4861
2 1.0430 -2
31.4861
3 14122.1221 -2
"""
class FailureIndicesArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.nelements = 0 # result specific
def build(self):
"""sizes the vectorized attributes of the FailureIndices"""
if self.is_built:
return
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(self.ntimes, dtype=dtype)
self.failure_theory = np.full(self.nelements, '', dtype='U8')
self.element_layer = zeros((self.nelements, 2), dtype=idtype)
#[failure_stress_for_ply, interlaminar_stress, max_value]
self.data = zeros((self.ntimes, self.nelements, 3), dtype=fdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
element_layer = [self.element_layer[:, 0], self.element_layer[:, 1]]
if self.nonlinear_factor not in (None, np.nan):
# Time 0.00 0.05
# ElementID NodeID Item
# 2 1 failure_index_for_ply (direct stress/strain) 0.0 5.431871e-14
# 2 failure_index_for_bonding (interlaminar stresss) 0.0 3.271738e-16
# 3 max_value NaN NaN
# 1 failure_index_for_ply (direct stress/strain) 0.0 5.484873e-30
# 2 failure_index_for_bonding (interlaminar stresss) 0.0 3.271738e-16
# 3 max_value NaN NaN
# 1 failure_index_for_ply (direct stress/strain) 0.0 5.431871e-14
# 2 failure_index_for_bonding (interlaminar stresss) NaN NaN
# 3 max_value 0.0 5.431871e-14
column_names, column_values = self._build_dataframe_transient_header()
names = ['ElementID', 'Layer', 'Item']
data_frame = self._build_pandas_transient_element_node(
column_values, column_names, headers,
element_layer, self.data, names=names,
from_tuples=False, from_array=True)
#column_names, column_values = self._build_dataframe_transient_header()
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=element_layer, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'Layer', 'Item']
else:
#Static failure_index_for_ply (direct stress/strain) failure_index_for_bonding (interlaminar stresss) max_value
#ElementID Layer
#101 1 7.153059e-07 0.0 NaN
# 2 1.276696e-07 0.0 NaN
# 3 7.153059e-07 NaN 7.153059e-07
element_layer = [self.element_layer[:, 0], self.element_layer[:, 1]]
index = pd.MultiIndex.from_arrays(element_layer, names=['ElementID', 'Layer'])
data_frame = pd.DataFrame(self.data[0], columns=headers, index=index)
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def get_headers(self) -> List[str]:
#headers = ['eid', 'failure_theory', 'ply', 'failure_index_for_ply (direct stress/strain)',
#'failure_index_for_bonding (interlaminar stresss)', 'failure_index_for_element', 'flag']
headers = ['failure_index_for_ply (direct stress/strain)',
'failure_index_for_bonding (interlaminar stresss)', 'max_value']
return headers
def __eq__(self, table): # pragma: no cover
return True
def add_sort1(self, dt, eid, failure_theory, ply_id, failure_stress_for_ply, flag,
interlaminar_stress, max_value, nine):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element_layer[self.ielement] = [eid, ply_id]
self.failure_theory[self.ielement] = failure_theory
self.data[self.itime, self.ielement, :] = [failure_stress_for_ply, interlaminar_stress, max_value]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
ntimes = self.data.shape[0]
nelements = self.data.shape[1]
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
return [] # raise NotImplementedError('this should be overwritten by %s' % (self.__class__.__name__))
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
f06_file.write('skipping FailureIndices f06\n')
return page_num
#NotImplementedError(self.code_information())
#asd
#if self.is_sort1:
#page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file, msg_temp)
#else:
#raise NotImplementedError(self.code_information())
#page_num = self._write_sort2_as_sort2(header, page_stamp, page_num, f06_file, msg_temp)
#' F A I L U R E I N D I C E S F O R L A Y E R E D C O M P O S I T E E L E M E N T S ( T R I A 3 )\n'
#' ELEMENT FAILURE PLY FP=FAILURE INDEX FOR PLY FB=FAILURE INDEX FOR BONDING FAILURE INDEX FOR ELEMENT FLAG\n'
#' ID THEORY ID (DIRECT STRESSES/STRAINS) (INTER-LAMINAR STRESSES) MAX OF FP,FB FOR ALL PLIES\n'
#' 1 HOFFMAN 101 6.987186E-02 \n'
#' 1.687182E-02 \n'
#' 102 9.048269E-02 \n'
#' 1.721401E-02 \n'
#return page_num
class RealSpringDamperForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.nelements = 0 # result specific
#if not is_sort1:
#raise NotImplementedError('SORT2')
@classmethod
def add_static_case(cls, table_name, element_name, element, data, isubcase,
is_sort1=True, is_random=False, is_msc=True,
random_code=0, title='', subtitle='', label=''):
analysis_code = 1 # static
data_code = oef_data_code(table_name, analysis_code,
is_sort1=is_sort1, is_random=is_random,
random_code=random_code,
title=title, subtitle=subtitle, label=label,
is_msc=is_msc)
data_code['loadIDs'] = [0] # TODO: ???
data_code['data_names'] = []
# I'm only sure about the 1s in the strains and the
# corresponding 0s in the stresses.
#if is_stress:
#data_code['stress_bits'] = [0, 0, 0, 0]
#data_code['s_code'] = 0
#else:
#data_code['stress_bits'] = [0, 1, 0, 1]
#data_code['s_code'] = 1 # strain?
element_name_to_element_type = {
'CELAS1' : 11,
'CELAS2' : 12,
'CELAS3' : 13,
'CELAS4' : 14,
}
element_type = element_name_to_element_type[element_name]
data_code['element_name'] = element_name
data_code['element_type'] = element_type
#data_code['load_set'] = 1
ntimes = data.shape[0]
nnodes = data.shape[1]
dt = None
obj = cls(data_code, is_sort1, isubcase, dt)
obj.element = element
obj.data = data
obj.ntimes = ntimes
obj.ntotal = nnodes
obj._times = [None]
obj.is_built = True
return obj
def build(self):
"""sizes the vectorized attributes of the RealSpringDamperForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self.build_data(self.ntimes, self.nelements, dtype, idtype, fdtype)
def build_data(self, ntimes, nelements, dtype, idtype, fdtype):
"""actually performs the build step"""
self.ntimes = ntimes
self.nelements = nelements
self._times = zeros(ntimes, dtype=dtype)
self.element = zeros(nelements, dtype=idtype)
#[force]
self.data = zeros((ntimes, nelements, 1), dtype=fdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2 3
#Freq 1.482246e-10 3.353940e-09 1.482246e-10
#Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
#Radians 9.313226e-10 2.107342e-08 9.313226e-10
#ElementID Item
#30 spring_force 2.388744e-19 -1.268392e-10 -3.341473e-19
#31 spring_force 2.781767e-19 -3.034770e-11 -4.433221e-19
#32 spring_force 0.000000e+00 0.000000e+00 0.000000e+00
#33 spring_force 0.000000e+00 0.000000e+00 0.000000e+00
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
#Static spring_force
#ElementID
#30 0.0
#31 0.0
#32 0.0
#33 0.0
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
is_nan = (
self.nonlinear_factor is not None and
np.isnan(self.nonlinear_factor) and
np.isnan(table.nonlinear_factor)
)
if not is_nan:
assert self.nonlinear_factor == table.nonlinear_factor
assert self.ntotal == table.ntotal
assert self.table_name == table.table_name, 'table_name=%r table.table_name=%r' % (self.table_name, table.table_name)
assert self.approach_code == table.approach_code
if self.nonlinear_factor not in (None, np.nan):
assert np.array_equal(self._times, table._times), 'ename=%s-%s times=%s table.times=%s' % (
self.element_name, self.element_type, self._times, table._times)
if not np.array_equal(self.element, table.element):
assert self.element.shape == table.element.shape, 'shape=%s element.shape=%s' % (self.element.shape, table.element.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\nEid1, Eid2\n' % str(self.code_information())
for eid, eid2 in zip(self.element, table.element):
msg += '%s, %s\n' % (eid, eid2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
                        force1 = t1[0]
                        force2 = t2[0]
                        if not allclose(t1, t2):
                            #if not np.array_equal(t1, t2):
                            msg += '%s\n (%s)\n (%s)\n' % (
                                eid, force1, force2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, force):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#print('dt=%s eid=%s' % (dt, eid))
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [force]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
ntimes = self.data.shape[0]
nelements = self.data.shape[1]
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
raise NotImplementedError('this should be overwritten by %s' % (self.__class__.__name__))
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
if self.is_sort1:
page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file, msg_temp)
else:
raise NotImplementedError(self.code_information())
#page_num = self._write_sort2_as_sort2(header, page_stamp, page_num, f06_file, msg_temp)
return page_num
def _write_sort1_as_sort1(self, header, page_stamp, page_num, f06_file, msg_temp):
ntimes = self.data.shape[0]
eids = self.element
nwrite = len(eids)
nrows = nwrite // 4
nleftover = nwrite - nrows * 4
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
stress = self.data[itime, :, 0]
out = []
for eid, stressi in zip(eids, stress):
out.append([eid, write_float_13e(stressi)])
for i in range(0, nrows * 4, 4):
f06_file.write(' %10i %13s %10i %13s %10i %13s %10i %13s\n' % (
tuple(out[i] + out[i + 1] + out[i + 2] + out[i + 3])))
i = nrows * 4
if nleftover == 3:
f06_file.write(' %10i %13s %10i %13s %10i %13s\n' % (
tuple(out[i] + out[i + 1] + out[i + 2])))
elif nleftover == 2:
f06_file.write(' %10i %13s %10i %13s\n' % (
tuple(out[i] + out[i + 1])))
elif nleftover == 1:
f06_file.write(' %10i %13s\n' % tuple(out[i]))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'if')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
force = self.data[itime, :, 0]
for eid, forcei in zip(eids_device, force):
data = [eid, forcei]
op2_ascii.write(' eid=%s force=%s\n' % tuple(data))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
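# Illustrative sketch (not part of pyNastran): the OP2 layout written above is a
# sequence of Fortran unformatted records, i.e. each payload is framed by its
# byte length.  The [4, value, 4] triplets in the headers are single-integer
# records written with this framing; the assumed helper below shows the idea.
def _pack_fortran_int_record(values, endian=b'<'):
    """Frames a list of ints as one Fortran record: <nbytes><payload><nbytes>."""
    payload = pack(endian + b'%ii' % len(values), *values)
    nbytes = len(payload)
    marker = pack(endian + b'i', nbytes)
    return marker + payload + marker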
class RealSpringForceArray(RealSpringDamperForceArray):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = ['spring_force']
return headers
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
if self.element_type == 11: # CELAS1
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 1 )\n']
elif self.element_type == 12: # CELAS2
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 2 )\n']
elif self.element_type == 13: # CELAS3
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 3 )\n']
elif self.element_type == 14: # CELAS4
msg = [' F O R C E S I N S C A L A R S P R I N G S ( C E L A S 4 )\n']
else: # pragma: no cover
msg = 'element_name=%s element_type=%s' % (self.element_name, self.element_type)
raise NotImplementedError(msg)
msg += [
' ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE\n'
' ID. ID. ID. ID.\n'
]
return msg
class RealDamperForceArray(RealSpringDamperForceArray):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = ['damper_force']
return headers
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
if self.element_type == 20: # CDAMP1
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 1 )\n']
elif self.element_type == 21: # CDAMP2
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 2 )\n']
elif self.element_type == 22: # CDAMP3
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 3 )\n']
elif self.element_type == 23: # CDAMP4
msg = [' F O R C E S I N S C A L A R D A M P E R S ( C D A M P 4 )\n']
else: # pragma: no cover
msg = 'element_name=%s element_type=%s' % (self.element_name, self.element_type)
raise NotImplementedError(msg)
if is_sort1:
msg += [
' ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE ELEMENT FORCE\n'
' ID. ID. ID. ID.\n'
]
else:
msg += [
' AXIAL AXIAL\n'
' TIME FORCE TORQUE TIME FORCE TORQUE\n'
]
return msg
class RealRodForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.nelements = 0 # result specific
@classmethod
def add_static_case(cls, table_name, element_name, element, data, isubcase,
is_sort1=True, is_random=False, is_msc=True,
random_code=0, title='', subtitle='', label=''):
analysis_code = 1 # static
data_code = oef_data_code(table_name, analysis_code,
is_sort1=is_sort1, is_random=is_random,
random_code=random_code,
title=title, subtitle=subtitle, label=label,
is_msc=is_msc)
data_code['loadIDs'] = [0] # TODO: ???
data_code['data_names'] = []
# I'm only sure about the 1s in the strains and the
# corresponding 0s in the stresses.
#if is_stress:
#data_code['stress_bits'] = [0, 0, 0, 0]
#data_code['s_code'] = 0
#else:
#data_code['stress_bits'] = [0, 1, 0, 1]
#data_code['s_code'] = 1 # strain?
element_name_to_element_type = {
'CROD' : 1,
'CTUBE' : 3,
'CONROD' : 10,
}
element_type = element_name_to_element_type[element_name]
data_code['element_name'] = element_name
data_code['element_type'] = element_type
#data_code['load_set'] = 1
ntimes = data.shape[0]
nnodes = data.shape[1]
dt = None
obj = cls(data_code, is_sort1, isubcase, dt)
obj.element = element
obj.data = data
obj.ntimes = ntimes
obj.ntotal = nnodes
obj._times = [None]
obj.is_built = True
return obj
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = ['axial', 'torsion']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def _get_msgs(self):
base_msg = [' ELEMENT AXIAL TORSIONAL ELEMENT AXIAL TORSIONAL\n',
' ID. FORCE MOMENT ID. FORCE MOMENT\n']
crod_msg = [' F O R C E S I N R O D E L E M E N T S ( C R O D )\n', ]
conrod_msg = [' F O R C E S I N R O D E L E M E N T S ( C O N R O D )\n', ]
ctube_msg = [' F O R C E S I N R O D E L E M E N T S ( C T U B E )\n', ]
crod_msg += base_msg
conrod_msg += base_msg
ctube_msg += base_msg
return crod_msg, conrod_msg, ctube_msg
def build(self):
"""sizes the vectorized attributes of the RealRodForceArray"""
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self.build_data(self.ntimes, self.nelements, float_fmt='float32')
def build_data(self, ntimes, nelements, float_fmt='float32'):
"""actually performs the build step"""
self.ntimes = ntimes
self.nelements = nelements
#self.ntotal = ntimes * nelements
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(ntimes, dtype=dtype)
self.element = zeros(nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((ntimes, nelements, 2), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
#Static axial SMa torsion SMt
#ElementID
#14 0.0 1.401298e-45 0.0 1.401298e-45
#15 0.0 1.401298e-45 0.0 1.401298e-45
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def add_sort1(self, dt, eid, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [axial, torque]
self.ielement += 1
if self.ielement == self.nelements:
self.ielement = 0
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
        msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True):
crod_msg, conrod_msg, ctube_msg = self._get_msgs()
if 'CROD' in self.element_name:
msg = crod_msg
elif 'CONROD' in self.element_name:
msg = conrod_msg
elif 'CTUBE' in self.element_name:
msg = ctube_msg
return self.element_name, msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, msg_temp) = self.get_f06_header(is_mag_phase)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
is_odd = False
nwrite = len(eids)
if len(eids) % 2 == 1:
nwrite -= 1
is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
out = []
for eid, axiali, torsioni in zip(eids, axial, torsion):
[axiali, torsioni] = write_floats_13e([axiali, torsioni])
out.append([eid, axiali, torsioni])
for i in range(0, nwrite, 2):
out_line = ' %8i %-13s %-13s %8i %-13s %s\n' % tuple(out[i] + out[i + 1])
f06_file.write(out_line)
if is_odd:
out_line = ' %8i %-13s %s\n' % tuple(out[-1])
f06_file.write(out_line)
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
is_nan = (
self.nonlinear_factor is not None and
np.isnan(self.nonlinear_factor) and
np.isnan(table.nonlinear_factor)
)
if not is_nan:
assert self.nonlinear_factor == table.nonlinear_factor
assert self.ntotal == table.ntotal
assert self.table_name == table.table_name, 'table_name=%r table.table_name=%r' % (self.table_name, table.table_name)
assert self.approach_code == table.approach_code
if self.nonlinear_factor not in (None, np.nan):
assert np.array_equal(self._times, table._times), 'ename=%s-%s times=%s table.times=%s' % (
self.element_name, self.element_type, self._times, table._times)
if not np.array_equal(self.element, table.element):
assert self.element.shape == table.element.shape, 'element shape=%s table.shape=%s' % (self.element.shape, table.element.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += 'Eid\n'
for eid, eid2 in zip(self.element, table.element):
msg += '%s, %s\n' % (eid, eid2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(axial1, torque1) = t1
(axial2, torque2) = t2
if not np.array_equal(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
axial1, torque1,
axial2, torque2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i2f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
#print('eids3', eids3)
for eid, axiali, torsioni in zip(eids_device, axial, torsion):
data = [eid, axiali, torsioni]
op2_ascii.write(' eid=%s axial=%s torsion=%s\n' % tuple(data))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
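# A minimal construction sketch (assumed usage, not taken from pyNastran's own
# examples; it relies on oef_data_code() being available from this module).
# RealRodForceArray.add_static_case() builds a static CROD force table directly
# from arrays: data follows the build_data() layout, data[ntimes, nelements, 2]
# holding [axial, torsion], with a single "time" for the static case.
def _example_static_crod_forces():
    eids = np.array([1, 2, 3], dtype='int32')
    data = np.zeros((1, 3, 2), dtype='float32')
    data[0, :, 0] = [100., 250., -50.]   # axial force per element
    data[0, :, 1] = [0., 5., 0.]         # torsional moment per element
    return RealRodForceArray.add_static_case(
        'OEF1', 'CROD', eids, data, isubcase=1,
        title='STATIC', subtitle='', label='')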
class RealCBeamForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
#ForceObject.__init__(self, data_code, isubcase)
RealForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
self.itime = 0
self.nelements = 0 # result specific
#if is_sort1:
##sort1
#pass
#else:
#raise NotImplementedError('SORT2')
def build(self):
"""sizes the vectorized attributes of the RealCBeamForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (self.ntimes, self.nelements, self.ntotal, self.subtitle))
if self.is_built:
return
nnodes = 11
#self.names = []
#self.nelements //= nnodes
self.nelements //= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(self.ntimes, dtype)
self.element = zeros(self.ntotal, idtype)
self.element_node = zeros((self.ntotal, 2), idtype)
# the number is messed up because of the offset for the element's properties
if not (self.nelements * nnodes) == self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (self.ntimes,
self.nelements, nnodes,
self.nelements * nnodes,
self.ntotal)
raise RuntimeError(msg)
#[sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq]
self.data = zeros((self.ntimes, self.ntotal, 8), fdtype)
def finalize(self):
sd = self.data[0, :, 0]
i_sd_zero = np.where(sd != 0.0)[0]
i_node_zero = np.where(self.element_node[:, 1] != 0)[0]
assert i_node_zero.max() > 0, 'CBEAM element_node hasnt been filled'
i = np.union1d(i_sd_zero, i_node_zero)
#self.nelements = len(self.element) // 11
self.element = self.element[i]
self.element_node = self.element_node[i, :]
self.data = self.data[:, i, :]
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
element_location = [
self.element_node[:, 0],
self.data[0, :, 0],
]
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2 3
#Freq 1.482246e-10 3.353940e-09 1.482246e-10
#Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
#Radians 9.313226e-10 2.107342e-08 9.313226e-10
#ElementID Location Item
#12 0.0 bending_moment1 1.505494e-13 -2.554764e-07 -5.272747e-13
# bending_moment2 -2.215085e-13 -2.532377e-07 3.462328e-13
# shear1 1.505494e-13 -2.554763e-07 -5.272747e-13
# shear2 -2.215085e-13 -2.532379e-07 3.462328e-13
# axial_force 1.294136e-15 -1.670896e-09 4.759476e-16
# total_torque -4.240346e-16 2.742446e-09 1.522254e-15
# warping_torque 0.000000e+00 0.000000e+00 0.000000e+00
# 1.0 bending_moment1 0.000000e+00 -1.076669e-13 1.009742e-28
# bending_moment2 -5.048710e-29 1.704975e-13 0.000000e+00
# shear1 1.505494e-13 -2.554763e-07 -5.272747e-13
# shear2 -2.215085e-13 -2.532379e-07 3.462328e-13
# axial_force 1.294136e-15 -1.670896e-09 4.759476e-16
# total_torque -4.240346e-16 2.742446e-09 1.522254e-15
# warping_torque 0.000000e+00 0.000000e+00 0.000000e+00
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers[1:], element_location, self.data[:, :, 1:], from_tuples=False, from_array=True)
data_frame.index.names = ['ElementID', 'Location', 'Item']
else:
df1 = pd.DataFrame(element_location).T
df1.columns = ['ElementID', 'Location']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
data_frame = df1.join([df2])
#self.data_frame = data_frame.reset_index().replace({'NodeID': {0:'CEN'}}).set_index(['ElementID', 'NodeID'])
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
                        (sd1, bm11, bm21, ts11, ts21, af1, ttrq1, wtrq1) = t1
                        (sd2, bm12, bm22, ts12, ts22, af2, ttrq2, wtrq2) = t2
                        if not np.allclose(t1, t2):
                            #if not np.array_equal(t1, t2):
                            msg += '%s\n (%s, %s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
                                eid,
                                sd1, bm11, bm21, ts11, ts21, af1, ttrq1, wtrq1,
                                sd2, bm12, bm22, ts12, ts22, af2, ttrq2, wtrq2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_new_element_sort1(self, dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
return self.add_sort1(dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq)
def add_sort1(self, dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.data[self.itime, self.itotal, :] = [sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq]
self.element[self.itotal] = eid
self.element_node[self.itotal, :] = [eid, nid]
self.itotal += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
else:
msg.append(' type=%s nelements=%i; table_name=%r\n' % (
self.__class__.__name__, nelements, self.table_name))
#msg.append(' eType, cid\n')
msg.append(' data: [ntimes, nelements, 8] where 8=[%s]\n' % str(', '.join(self.get_headers())))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
msg.append(' is_sort1=%s is_sort2=%s\n' % (self.is_sort1, self.is_sort2))
msg.append(' CBEAM\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#name = self.data_code['name']
#if name == 'freq':
#name = 'FREQUENCY'
#else: # mode
#raise RuntimeError(name)
#if is_sort1:
msg_temp = [
' F O R C E S I N B E A M E L E M E N T S ( C B E A M )\n',
' STAT DIST/ - BENDING MOMENTS - - WEB SHEARS - AXIAL TOTAL WARPING\n',
' ELEMENT-ID GRID LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE TORQUE\n']
#else:
#raise NotImplementedError('CBEAM-SORT2')
if self.is_sort1:
#assert self.is_sort1 is True, str(self)
#if is_sort1:
page_num = self._write_sort1_as_sort1(f06_file, page_num, page_stamp, header, msg_temp)
#else:
#self._write_sort1_as_sort2(f06_file, page_num, page_stamp, header, msg_temp)
else:
print('skipping %s because its sort2' % self.__class__.__name__)
#assert self.is_sort1 is True, str(self)
return page_num - 1
def get_headers(self) -> List[str]:
headers = [
'sd', 'bending_moment1', 'bending_moment2', 'shear1', 'shear2',
'axial_force', 'total_torque', 'warping_torque', ]
return headers
def _write_sort1_as_sort1(self, f06_file, page_num, page_stamp, header, msg_temp):
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
long_form = False
if nids.min() == 0:
msg = header + [
' F O R C E S I N B E A M E L E M E N T S ( C B E A M )\n',
' STAT DIST/ - BENDING MOMENTS - - WEB SHEARS - AXIAL TOTAL WARPING\n',
' ELEMENT-ID GRID LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE TORQUE\n']
long_form = True
#times = self._times
ntimes = self.data.shape[0]
for itime in range(ntimes):
if self.nonlinear_factor not in (None, np.nan):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq
assert self.is_sort1 is True, str(self)
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
ttrq = self.data[itime, :, 6]
wtrq = self.data[itime, :, 7]
for eid, nid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi in zip(eids, nids, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
vals = (bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi)
vals2 = write_floats_13e(vals)
(sbm1i, sbm2i, sts1i, sts2i, safi, sttrqi, swtrq) = vals2
if long_form:
f06_file.write(' %8i %.3f %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, sdi, sbm1i, sbm2i, sts1i, sts2i, safi, sttrqi, swtrq))
else:
if sdi == 0.:
f06_file.write('0 %8i\n' % eid)
f06_file.write(' %8i %.3f %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
nid, sdi, sbm1i, sbm2i, sts1i, sts2i, safi, sttrqi, swtrq))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
#long_form = False
#if nids.min() == 0:
#long_form = True
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
#xxbs = self.xxb
#print(xxbs)
eids_device = eids * 10 + self.device_code
ueids = np.unique(eids)
#ieid = np.searchsorted(eids, ueids)
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = len(ueids)
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
if self.is_sort1:
struct1 = Struct(endian + b'2i 8f')
struct2 = Struct(endian + b'i 8f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
ttrq = self.data[itime, :, 6]
wtrq = self.data[itime, :, 7]
icount = 0
nwide = 0
ielement = 0
assert len(eids) == len(sd)
for eid, nid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi in zip(eids, nids, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
if icount == 0:
eid_device = eids_device[ielement]
nid = nids[ielement]
data = [eid_device, nid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi] # 10
op2.write(struct1.pack(*data))
ielement += 1
icount = 1
elif nid > 0 and icount > 0:
# 11 total nodes, with 1, 11 getting an nid; the other 9 being
# xxb sections
data = [0, 0., 0., 0., 0., 0., 0., 0., 0.]
#print('***adding %s\n' % (10-icount))
for unused_i in range(10 - icount):
op2.write(struct2.pack(*data))
nwide += len(data)
eid_device2 = eids_device[ielement]
assert eid_device == eid_device2
nid = nids[ielement]
data = [nid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi] # 9
op2.write(struct2.pack(*data))
ielement += 1
icount = 0
else:
raise RuntimeError('CBEAM op2 writer')
#data = [0, xxb, sxc, sxd, sxe, sxf, smax, smin, smt, smc] # 10
#op2.write(struct2.pack(*data))
#icount += 1
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
nwide += len(data)
assert ntotal == nwide, 'ntotal=%s nwide=%s' % (ntotal, nwide)
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealCShearForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
RealForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if not is_sort1:
#raise NotImplementedError('SORT2')
@property
def nnodes_per_element(self) -> int:
return 1
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self) -> List[str]:
headers = [
'force41', 'force21', 'force12', 'force32', 'force23', 'force43',
'force34', 'force14',
'kick_force1', 'shear12', 'kick_force2', 'shear23',
'kick_force3', 'shear34', 'kick_force4', 'shear41',
]
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the RealCShearForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[force41, force21, force12, force32, force23, force43,
# force34, force14,
# kick_force1, shear12, kick_force2, shear23,
# kick_force3, shear34, kick_force4, shear41]
self.data = zeros((self.ntimes, self.ntotal, 16), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2 3
#Freq 1.482246e-10 3.353940e-09 1.482246e-10
#Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
#Radians 9.313226e-10 2.107342e-08 9.313226e-10
#ElementID Item
#22 force41 -4.025374e-14 2.935730e-08 1.017620e-13
# force21 -4.025374e-14 2.935730e-08 1.017620e-13
# force12 4.025374e-14 -2.935730e-08 -1.017620e-13
# force32 4.025374e-14 -2.935730e-08 -1.017620e-13
# force23 -4.025374e-14 2.935730e-08 1.017620e-13
# force43 -4.025374e-14 2.935730e-08 1.017620e-13
# force34 4.025374e-14 -2.935730e-08 -1.017620e-13
# force14 4.025374e-14 -2.935730e-08 -1.017620e-13
# kick_force1 -0.000000e+00 0.000000e+00 0.000000e+00
# shear12 -8.050749e-14 5.871460e-08 2.035239e-13
# kick_force2 -0.000000e+00 0.000000e+00 0.000000e+00
# shear23 -8.050749e-14 5.871460e-08 2.035239e-13
# kick_force3 -0.000000e+00 0.000000e+00 0.000000e+00
# shear34 -8.050749e-14 5.871460e-08 2.035239e-13
# kick_force4 -0.000000e+00 0.000000e+00 0.000000e+00
# shear41 -8.050749e-14 5.871460e-08 2.035239e-13
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
#Static axial SMa torsion SMt
#ElementID
#14 0.0 1.401298e-45 0.0 1.401298e-45
#15 0.0 1.401298e-45 0.0 1.401298e-45
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
is_nan = (
self.nonlinear_factor is not None and
np.isnan(self.nonlinear_factor) and
np.isnan(table.nonlinear_factor)
)
if not is_nan:
assert self.nonlinear_factor == table.nonlinear_factor
assert self.ntotal == table.ntotal
assert self.table_name == table.table_name, 'table_name=%r table.table_name=%r' % (self.table_name, table.table_name)
assert self.approach_code == table.approach_code
if self.nonlinear_factor not in (None, np.nan):
assert np.array_equal(self._times, table._times), 'ename=%s-%s times=%s table.times=%s' % (
self.element_name, self.element_type, self._times, table._times)
if not np.array_equal(self.element, table.element):
assert self.element.shape == table.element.shape, 'element shape=%s table.shape=%s' % (self.element.shape, table.element.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += 'Eid\n'
for eid1, eid2 in zip(self.element, table.element):
msg += '%s, %s\n' % (eid1, eid2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(force41a, force14a, force21a, force12a, force32a, force23a, force43a, force34a, kick_force1a, kick_force2a, kick_force3a, kick_force4a, shear12a, shear23a, shear34a, shear41a) = t1
(force41b, force14b, force21b, force12b, force32b, force23b, force43b, force34b, kick_force1b, kick_force2b, kick_force3b, kick_force4b, shear12b, shear23b, shear34b, shear41b) = t2
if not np.array_equal(t1, t2):
msg += (
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n'
' (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
force41a, force14a, force21a, force12a, force32a, force23a, force43a, force34a, kick_force1a, kick_force2a, kick_force3a, kick_force4a, shear12a, shear23a, shear34a, shear41a,
force41b, force14b, force21b, force12b, force32b, force23b, force43b, force34b, kick_force1b, kick_force2b, kick_force3b, kick_force4b, shear12b, shear23b, shear34b, shear41b
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid,
force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [
force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = [
' F O R C E S A C T I N G O N S H E A R P A N E L E L E M E N T S (CSHEAR)\n'
' \n'
' ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======\n'
' ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1\n'
' ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41\n'
]
#(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
f14 = self.data[itime, :, 0]
f12 = self.data[itime, :, 1]
f21 = self.data[itime, :, 2]
f23 = self.data[itime, :, 3]
f32 = self.data[itime, :, 4]
f34 = self.data[itime, :, 5]
f43 = self.data[itime, :, 6]
f41 = self.data[itime, :, 7]
kick1 = self.data[itime, :, 8]
tau12 = self.data[itime, :, 9]
kick2 = self.data[itime, :, 10]
tau23 = self.data[itime, :, 11]
kick3 = self.data[itime, :, 12]
tau34 = self.data[itime, :, 13]
kick4 = self.data[itime, :, 14]
tau41 = self.data[itime, :, 15]
#zip_in = [
#f14, f12, f21, f23, f32, f34, f43, f41,
#kick1, tau12, kick2, tau23, kick3, tau34, kick4, tau41,
#]
for (eid, f14i, f12i, f21i, f23i, f32i, f34i, f43i, f41i,
kick1i, tau12i, kick2i, tau23i, kick3i, tau34i, kick4i, tau41i) in zip(
eids, f14, f12, f21, f23, f32, f34, f43, f41,
kick1, tau12, kick2, tau23, kick3, tau34, kick4, tau41):
vals2 = write_floats_12e([
f14i, f12i, f21i, f23i, f32i, f34i, f43i, f41i,
kick1i, tau12i, kick2i, tau23i, kick3i, tau34i, kick4i, tau41i])
[
f14i, f12i,
f21i, f23i,
f32i, f34i,
f43i, f41i,
kick1i, tau12i, kick2i, tau23i,
kick3i, tau34i, kick4i, tau41i
] = vals2
f06_file.write(
'0%13i%-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, f14i, f12i, f21i, f23i, f32i, f34i, f43i, f41i,
kick1i, tau12i, kick2i, tau23i, kick3i, tau34i, kick4i, tau41i))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
unused_eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids = self.element
eids_device = self.element * 10 + self.device_code
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 16f')
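            # one CSHEAR record per element: eid_device followed by the 16
            # corner-force / kick / shear terms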
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
#print('3, %s' % itable)
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
#print('4, %s' % itable)
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
f14 = self.data[itime, :, 0]
f12 = self.data[itime, :, 1]
f21 = self.data[itime, :, 2]
f23 = self.data[itime, :, 3]
f32 = self.data[itime, :, 4]
f34 = self.data[itime, :, 5]
f43 = self.data[itime, :, 6]
f41 = self.data[itime, :, 7]
kick1 = self.data[itime, :, 8]
tau12 = self.data[itime, :, 9]
kick2 = self.data[itime, :, 10]
tau23 = self.data[itime, :, 11]
kick3 = self.data[itime, :, 12]
tau34 = self.data[itime, :, 13]
kick4 = self.data[itime, :, 14]
tau41 = self.data[itime, :, 15]
for (eid, eid_device, f14i, f12i, f21i, f23i, f32i, f34i, f43i, f41i,
kick1i, tau12i, kick2i, tau23i, kick3i, tau34i, kick4i, tau41i) in zip(
eids, eids_device, f14, f12, f21, f23, f32, f34, f43, f41,
kick1, tau12, kick2, tau23, kick3, tau34, kick4, tau41):
op2.write(struct1.pack(
eid_device, f14i, f12i, f21i, f23i, f32i, f34i, f43i, f41i,
kick1i, tau12i, kick2i, tau23i, kick3i, tau34i, kick4i, tau41i))
vals2 = write_floats_12e([
f14i, f12i, f21i, f23i, f32i, f34i, f43i, f41i,
kick1i, tau12i, kick2i, tau23i, kick3i, tau34i, kick4i, tau41i])
[
f14i, f12i,
f21i, f23i,
f32i, f34i,
f43i, f41i,
kick1i, tau12i, kick2i, tau23i,
kick3i, tau34i, kick4i, tau41i
] = vals2
op2_ascii.write(
'0%13i%-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, f14i, f12i, f21i, f23i, f32i, f34i, f43i, f41i,
kick1i, tau12i, kick2i, tau23i, kick3i, tau34i, kick4i, tau41i))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealViscForceArray(RealForceObject): # 24-CVISC
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if not is_sort1:
#raise NotImplementedError('SORT2')
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = ['axial', 'torsion']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the RealViscForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((self.ntimes, self.ntotal, 2), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2 3
#Freq 1.482246e-10 3.353940e-09 1.482246e-10
#Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
#Radians 9.313226e-10 2.107342e-08 9.313226e-10
#ElementID Item
#50 axial -0.0 -0.0 0.0
# torsion 0.0 0.0 -0.0
#51 axial 0.0 -0.0 -0.0
# torsion -0.0 0.0 0.0
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
#Static axial torsion
#ElementID
#14 0.0 0.0
#15 0.0 0.0
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def add_sort1(self, dt, eid, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [axial, torque]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
if is_sort1:
msg = [
' F O R C E S I N V I S C E L E M E N T S ( C V I S C )\n'
' \n'
' ELEMENT AXIAL TORSIONAL ELEMENT AXIAL TORSIONAL\n'
' ID. FORCE MOMENT ID. FORCE MOMENT\n'
]
else:
msg = [
' F O R C E S I N V I S C E L E M E N T S ( C V I S C )\n'
' \n'
' AXIAL AXIAL\n'
' TIME FORCE TORQUE TIME FORCE TORQUE\n'
#' 0.0 0.0 0.0 1.000000E+00 -5.642718E-04 0.0\n'
#' 2.000000E+00 -1.905584E-06 0.0 3.000000E+00 9.472010E-07 0.0\n'
]
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
is_odd = False
nwrite = len(eids)
if len(eids) % 2 == 1:
nwrite -= 1
is_odd = True
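        # CVISC forces are printed two elements per line; a trailing odd element
        # gets its own shorter line after the main loop.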
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
out = []
for eid, axiali, torsioni in zip(eids, axial, torsion):
[axiali, torsioni] = write_floats_13e([axiali, torsioni])
out.append([eid, axiali, torsioni])
for i in range(0, nwrite, 2):
out_line = ' %8i %-13s %-13s %8i %-13s %s\n' % tuple(out[i] + out[i + 1])
f06_file.write(out_line)
if is_odd:
out_line = ' %8i %-13s %s\n' % tuple(out[-1])
f06_file.write(out_line)
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
is_nan = (
self.nonlinear_factor is not None and
np.isnan(self.nonlinear_factor) and
np.isnan(table.nonlinear_factor)
)
if not is_nan:
assert self.nonlinear_factor == table.nonlinear_factor
assert self.ntotal == table.ntotal
assert self.table_name == table.table_name, 'table_name=%r table.table_name=%r' % (self.table_name, table.table_name)
assert self.approach_code == table.approach_code
if not np.array_equal(self.element, table.element):
assert self.element.shape == table.element.shape, 'element shape=%s table.shape=%s' % (self.element.shape, table.element.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += 'Eid\n'
for eid1, eid2 in zip(self.element, table.element):
msg += '%s, %s\n' % (eid1, eid2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(axial1, torque1) = t1
(axial2, torque2) = t2
if not np.array_equal(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
axial1, torque1,
axial2, torque2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
class RealPlateForceArray(RealForceObject): # 33-CQUAD4, 74-CTRIA3
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.dt = dt
self.nelements = 0
assert self.element_name != 'RBAR', self.data_code
#if is_sort1:
#if dt is not None:
#self.add = self.add_sort1
#else:
#assert dt is not None
#self.add = self.add_sort2
def _get_msgs(self):
raise NotImplementedError()
def get_headers(self) -> List[str]:
return ['mx', 'my', 'mxy', 'bmx', 'bmy', 'bmxy', 'tx', 'ty']
def build(self):
"""sizes the vectorized attributes of the RealPlateForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
#self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.ntotal, dtype=idtype)
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.data = zeros((self.ntimes, self.ntotal, 8), dtype=fdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
assert 0 not in self.element
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2 3
#Freq 1.482246e-10 3.353940e-09 1.482246e-10
#Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
#Radians 9.313226e-10 2.107342e-08 9.313226e-10
#ElementID Item
#8 mx -5.467631e-14 -1.406068e-07 1.351960e-13
# my -8.983144e-14 -3.912936e-07 9.707208e-14
# mxy 2.767353e-13 -4.950616e-08 -5.985472e-13
# bmx 7.616284e-14 -2.809588e-08 -1.051987e-13
# bmy 4.245138e-14 -6.567249e-09 -6.066584e-14
# bmxy -1.233790e-14 3.561397e-09 1.840837e-14
# tx 2.601638e-13 -9.601510e-08 -3.611116e-13
# ty -5.825233e-14 -7.382687e-09 9.038553e-14
#9 mx 5.444685e-15 -1.014145e-07 -4.500100e-14
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
#Static axial SMa torsion SMt
#ElementID
#14 0.0 1.401298e-45 0.0 1.401298e-45
#15 0.0 1.401298e-45 0.0 1.401298e-45
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.element_node):
(eid, nid) = e
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(fiber_dist1, oxx1, oyy1, txy1, angle1, majorP1, minorP1, ovm1) = t1
(fiber_dist2, oxx2, oyy2, txy2, angle2, majorP2, minorP2, ovm2) = t2
# vm stress can be NaN for some reason...
if not np.array_equal(t1[:-1], t2[:-1]):
msg += '(%s, %s) (%s, %s, %s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid, nid,
fiber_dist1, oxx1, oyy1, txy1, angle1, majorP1, minorP1, ovm1,
fiber_dist2, oxx2, oyy2, txy2, angle2, majorP2, minorP2, ovm2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
#def add_new_eid_sort1(self, dt, eid, axial, SMa, torsion, SMt):
#self._times[self.itime] = dt
#self.element[self.ielement] = eid
#self.data[self.itime, self.ielement, :] = [axial, SMa, torsion, SMt]
#self.ielement += 1
def add_sort1(self, dt, eid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.itotal] = eid
self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.itotal += 1
def add_sort2(self, eid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
raise NotImplementedError('SORT2')
#if dt not in self.mx:
#self.add_new_transient(dt)
#self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
@property
def nnodes_per_element(self):
return 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True):
if 'CTRIA3' in self.element_name:
msg = [
' F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 3
elif 'CQUAD4' in self.element_name:
msg = [
' F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID GRID-ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 4
elif 'CTRIAR' in self.element_name:
msg = [
' F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 3
elif 'CQUADR' in self.element_name:
msg = [
' F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID GRID-ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 4
else:
msg = f'element_name={self.element_name} self.element_type={self.element_type}'
raise NotImplementedError(msg)
return self.element_name, nnodes, msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, nnodes, msg_temp) = self.get_f06_header(is_mag_phase)
# write the f06
ntimes = self.data.shape[0]
eids = self.element
cen_word = 'CEN/%i' % nnodes
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
if self.element_type in [74, 83, 227, 228]:
# 74, 83 CTRIA3
# 227 CTRIAR linear
# 228 CQUADR linear
for eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi] = write_floats_13e(
[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi])
# ctria3
# 8 -7.954568E+01 2.560061E+03 -4.476376E+01 1.925648E+00 1.914048E+00 3.593237E-01 8.491534E+00 5.596094E-01 #
f06_file.write(' %8i %18s %13s %13s %13s %13s %13s %13s %s\n' % (
eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi))
elif self.element_type == 33:
for eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi] = write_floats_13e(
[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi])
# cquad4
#0 6 CEN/4 1.072685E+01 2.504399E+03 -2.455727E+01 -5.017930E+00 -2.081427E+01 -5.902618E-01 -9.126162E+00 4.194400E+01#
#Fmt = '% 8i ' + '%27.20E ' * 8 + '\n'
#f06_file.write(Fmt % (eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi))
#
f06_file.write('0 %8i %8s %13s %13s %13s %13s %13s %13s %13s %s\n' % (
eid, cen_word, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi))
else:
raise NotImplementedError(f'element_name={self.element_name} element_type={self.element_type}')
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
        #print(self.get_stats())  # debug output; not needed when writing the OP2
#if 'CTRIA3' in self.element_name:
#nnodes = 3
#elif 'CQUAD4' in self.element_name:
#nnodes = 4
#elif 'CTRIAR' in self.element_name:
#nnodes = 4 # ???
#elif 'CQUADR' in self.element_name:
#nnodes = 5 # ???
#else: # pragma: no cover
#raise NotImplementedError(self.code_information())
#print("nnodes_all =", nnodes_all)
#cen_word_ascii = 'CEN/%i' % nnodes
#cen_word = b'CEN/%i' % nnodes
eids = self.element
#cen_word = 'CEN/%i' % nnodes
#msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
#msg.append(' data.shape=%s\n' % str(self.data.shape).replace('L', ''))
eids = self.element
eids_device = eids * 10 + self.device_code
nelements = len(eids)
assert nelements > 0, eids
#print('nelements =', nelements)
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert nnodes > 1, nnodes
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
#[fiber_dist, oxx, oyy, txy, angle, majorP, minorP, ovm]
if self.is_sort1:
structi = Struct(endian + b'i 8f')
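            # one record per element: eid_device + [mx, my, mxy, bmx, bmy, bmxy, tx, ty]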
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
nwide = 0
for eid_device, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids_device, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
data = [eid_device, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi]
#[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi] = write_floats_13e(
# [mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi])
op2.write(structi.pack(*data))
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data[1:])))
nwide += len(data)
assert nwide == ntotal, "nwide=%s ntotal=%s" % (nwide, ntotal)
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealPlateBilinearForceArray(RealForceObject): # 144-CQUAD4
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.dt = dt
self.nelements = 0
#if is_sort1:
#if dt is not None:
#self.add = self.add_sort1
#else:
#assert dt is not None
#self.add = self.add_sort2
def _get_msgs(self):
raise NotImplementedError()
def get_headers(self) -> List[str]:
return ['mx', 'my', 'mxy', 'bmx', 'bmy', 'bmxy', 'tx', 'ty']
def build(self):
"""sizes the vectorized attributes of the RealPlateBilinearForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
#self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.ntotal, 2), dtype='int32')
# -MEMBRANE FORCES- -BENDING MOMENTS- -TRANSVERSE SHEAR FORCES -
# FX FY FXY MX MY MXY QX QY
#[fx, fy, fxy, mx, my, mxy, qx, qy]
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.data = zeros((self.ntimes, self.ntotal, 8), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
node = pd.Series(self.element_node[:, 1])
        node.replace({0: 'CEN'}, inplace=True)  # map node id 0 to the 'CEN' (center) label
element_node = [self.element_node[:, 0], node]
if self.nonlinear_factor not in (None, np.nan):
# Mode 1 2 3
# Freq 1.482246e-10 3.353940e-09 1.482246e-10
# Eigenvalue -8.673617e-19 4.440892e-16 8.673617e-19
# Radians 9.313226e-10 2.107342e-08 9.313226e-10
# ElementID NodeID Item
# 6 0 mx 2.515537e-13 -2.294306e-07 -3.626725e-13
# my 2.916815e-13 -7.220319e-08 -5.030049e-13
# mxy -2.356622e-14 4.391171e-07 -5.960345e-14
# bmx 4.138377e-14 -1.861012e-08 -5.586283e-14
# bmy 5.991298e-15 -2.471926e-09 -5.400710e-15
# bmxy 4.511364e-15 -1.190845e-09 -5.546569e-15
# tx 1.122732e-13 -5.563460e-08 -1.523176e-13
# ty -1.164320e-14 4.813929e-09 1.023404e-14
# 4 mx 3.839208e-13 -4.580973e-07 -4.949736e-13
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers, element_node, self.data, from_tuples=False, from_array=True)
else:
df1 = pd.DataFrame(element_node).T
df1.columns = ['ElementID', 'NodeID']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
data_frame = df1.join(df2)
data_frame = data_frame.reset_index().set_index(['ElementID', 'NodeID'])
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.element_node):
(eid, nid) = e
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1) = t1
(mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2) = t2
if not np.array_equal(t1, t2):
msg += '(%s, %s) (%s, %s, %s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid, nid,
mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1,
mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, term, nid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element_node[self.itotal] = [eid, nid]
self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.itotal += 1
def add_sort2(self, eid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
raise NotImplementedError('SORT2')
#if dt not in self.mx:
#self.add_new_transient(dt)
#self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
@property
def nnodes_per_element(self):
if self.element_type == 144: # CQUAD4
nnodes_element = 5
elif self.element_type == 64: # CQUAD8
nnodes_element = 5
elif self.element_type == 82: # CQUADR
nnodes_element = 5
elif self.element_type == 75: # CTRIA6
nnodes_element = 4
elif self.element_type == 70: # CTRIAR
nnodes_element = 4
else:
raise NotImplementedError('element_type=%s element_name=%s' % (self.element_type, self.element_name))
return nnodes_element
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i ntotal=%i nnodes/element=%i\n'
% (self.__class__.__name__, ntimes, nelements, ntotal, self.nnodes_per_element))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i ntotal=%i nnodes/element=%i\n'
% (self.__class__.__name__, nelements, ntotal, self.nnodes_per_element))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True):
# if 'CTRIA3' in self.element_name:
# msg = [
# ' F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'
# ' \n'
# ' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
# ' ID FX FY FXY MX MY MXY QX QY\n'
# ]
# nnodes = 3
if self.element_type == 70:
            # CTRIAR
element_name = 'CTRIAR'
msg = [
' F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID GRID-ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 6
elif self.element_type == 75:
            # CTRIA6
element_name = 'CTRIA6'
msg = [
' F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 6 )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID GRID-ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 6
elif self.element_type == 64:
            # CQUAD8
element_name = 'CQUAD8'
msg = [
' F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID GRID-ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 8
elif self.element_type == 82:
            # CQUADR
element_name = 'CQUADR'
msg = [
' F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID GRID-ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 4
elif self.element_type == 144:
# CQUAD4
element_name = 'CQUAD4'
msg = [
' F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 ) OPTION = BILIN \n'
' \n'
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID GRID-ID FX FY FXY MX MY MXY QX QY\n'
]
nnodes = 4
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
return element_name, nnodes, msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, nnodes, msg_temp) = self.get_f06_header(is_mag_phase)
# write the f06
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
cen_word = 'CEN/%i' % nnodes
if self.element_type in [64, 82, 144]: # CQUAD8, CQUADR, CQUAD4
cyci = [0, 1, 2, 3, 4]
#cyc = cycle([0, 1, 2, 3, 4]) # TODO: this is totally broken...
nnodes_per_eid = 5
elif self.element_type in [70, 75]: # CTRIAR, CTRIA6
cyci = [0, 1, 2, 3]
#cyc = cycle([0, 1, 2, 3]) # TODO: this is totally broken...
nnodes_per_eid = 4
else:
raise NotImplementedError(self.element_type)
        # TODO: this shouldn't be necessary
cyc = cyci * (len(eids) // nnodes_per_eid)
assert len(eids) % nnodes_per_eid == 0
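        # cyc repeats [0 .. nnodes_per_eid-1] over the flattened element_node rows:
        # i == 0 marks the CEN/ row of each element, i > 0 the corner-grid rows.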
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
for i, eid, nid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(cyc, eids, nids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi] = write_floats_13e(
[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi])
# ctria3
# 8 -7.954568E+01 2.560061E+03 -4.476376E+01 1.925648E+00 1.914048E+00 3.593237E-01 8.491534E+00 5.596094E-01 #
if i == 0:
f06_file.write(
'0 %8i %s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, cen_word, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi))
else:
f06_file.write(
' %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
nid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi))
# else:
# raise NotImplementedError(self.element_type)
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
(unused_elem_name, nnodes, unused_msg_temp) = self.get_f06_header(is_mag_phase)
# write the f06
#ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
if self.element_type in [64, 82, 144]: # CQUAD8, CQUADR, CQUAD4
cyci = [0, 1, 2, 3, 4]
#cyc = cycle([0, 1, 2, 3, 4]) # TODO: this is totally broken...
nnodes_per_eid = 5
elif self.element_type in [70, 75]: # CTRIAR, CTRIA6
cyci = [0, 1, 2, 3]
#cyc = cycle([0, 1, 2, 3]) # TODO: this is totally broken...
nnodes_per_eid = 4
else:
raise NotImplementedError(self.element_type)
        # TODO: this shouldn't be necessary
cyc = cyci * (len(eids) // nnodes_per_eid)
assert len(eids) % nnodes_per_eid == 0
#print("nnodes_all =", nnodes_all)
#cen_word_ascii = 'CEN/%i' % nnodes
cen_word = b'CEN/'
#msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
#msg.append(' data.shape=%s\n' % str(self.data.shape).replace('L', ''))
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
eids_device = eids * 10 + self.device_code
nelements = len(np.unique(eids))
#print('nelements =', nelements)
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
assert nnodes > 1, nnodes
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
#[fiber_dist, oxx, oyy, txy, angle, majorP, minorP, ovm]
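        # struct1 packs the per-element CEN/ record (eid_device, b'CEN/', nnodes,
        # 8 force terms); struct2 packs each corner-grid record (nid, 8 force terms).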
if self.is_sort1:
struct1 = Struct(endian + b'i4s i 8f')
struct2 = Struct(endian + b'i 8f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
nwide = 0
for i, eid, eid_device, nid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(cyc, eids, eids_device, nids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
#[mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi] = write_floats_13e(
# [mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi])
if i == 0:
data = [eid_device, cen_word, nnodes, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi]
op2.write(struct1.pack(*data))
op2_ascii.write(
'0 %8i %s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, cen_word, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi))
else:
data = [nid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi]
op2.write(struct2.pack(*data))
op2_ascii.write(
' %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
nid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi))
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
nwide += len(data)
assert nwide == ntotal, "nwide=%s ntotal=%s" % (nwide, ntotal)
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealCBarFastForceArray(RealForceObject):
"""
34-CBAR
119-CFAST
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
RealForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
@property
def nnodes_per_element(self) -> int:
return 1
def get_headers(self) -> List[str]:
headers = [
'bending_moment_a1', 'bending_moment_a2',
'bending_moment_b1', 'bending_moment_b2',
'shear1', 'shear2',
'axial', 'torque']
return headers
def build(self):
"""sizes the vectorized attributes of the RealCBarForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
if self.is_sort1: # or self.table_name in ['OEFRMS2']
ntimes = self.ntimes
nelements = self.nelements
ntotal = self.ntotal
self._build(ntimes, nelements, ntotal, self._times_dtype)
else:
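            # SORT2 tables arrive transposed, so the reader's ntimes/nelements
            # counters swap roles here and the time axis is exposed under the
            # analysis-method name (e.g. freqs).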
ntimes = self.nelements
nelements = self.ntimes
ntotal = nelements * 2
name = self.analysis_method + 's'
self._build(ntimes, nelements, ntotal, self._times_dtype)
setattr(self, name, self._times)
self.data_code['name'] = self.analysis_method
self.data_names[0] = self.analysis_method
#print(f'data_names -> {self.data_names}')
def _build(self, ntimes, nelements, ntotal, dtype):
self.ntimes = ntimes
self.nelements = nelements
self.ntotal = ntotal
#print(f"*ntimes={ntimes} nelements={nelements} ntotal={ntotal} data_names={self.data_names}")
unused_dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(ntimes, dtype=dtype)
self.element = zeros(nelements, dtype=idtype)
#[bending_moment_a1, bending_moment_a2, bending_moment_b1, bending_moment_b2, shear1, shear2, axial, torque]
self.data = zeros((ntimes, ntotal, 8), dtype=fdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def add_sort1(self, dt, eid, bending_moment_a1, bending_moment_a2,
bending_moment_b1, bending_moment_b2, shear1, shear2, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#[eid, bending_moment_a1, bending_moment_a2,
#bending_moment_b1, bending_moment_b2, shear1, shear2, axial, torque] = data
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [
bending_moment_a1, bending_moment_a2,
bending_moment_b1, bending_moment_b2,
shear1, shear2, axial, torque]
self.ielement += 1
def add_sort2(self, dt, eid, bending_moment_a1, bending_moment_a2,
bending_moment_b1, bending_moment_b2, shear1, shear2, axial, torque):
"""unvectorized method for adding SORT2 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#[eid, bending_moment_a1, bending_moment_a2,
#bending_moment_b1, bending_moment_b2, shear1, shear2, axial, torque] = data
itime = self.ielement
ielement = self.itime
#print(f'{self.table_name} itime={itime} ielement={ielement} time={dt} eid={eid} axial={axial}')
self._times[itime] = dt
self.element[ielement] = eid
self.data[itime, ielement, :] = [
bending_moment_a1, bending_moment_a2,
bending_moment_b1, bending_moment_b2,
shear1, shear2, axial, torque]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def eid_to_element_node_index(self, eids):
ind = searchsorted(eids, self.element)
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
words = self._words()
#msg = []
#header[1] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
eids = self.element
#f06_file.write(''.join(words))
ntimes = self.data.shape[0]
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + words))
bm1a = self.data[itime, :, 0]
bm2a = self.data[itime, :, 1]
bm1b = self.data[itime, :, 2]
bm2b = self.data[itime, :, 3]
ts1 = self.data[itime, :, 4]
ts2 = self.data[itime, :, 5]
af = self.data[itime, :, 6]
trq = self.data[itime, :, 7]
for eid, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(
eids, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
[bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi] = write_floats_13e([
bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi])
f06_file.write(' %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi))
            f06_file.write(page_stamp % page_num)
            page_num += 1
        return page_num - 1
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(bm1a1, bm2a1, bm1b1, bm2b1, ts11, ts21, af1, trq1) = t1
(bm1a2, bm2a2, bm1b2, bm2b2, ts12, ts22, af2, trq2) = t2
if not np.array_equal(t1, t2):
msg += '(%s) (%s, %s, %s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
bm1a1, bm2a1, bm1b1, bm2b1, ts11, ts21, af1, trq1,
bm1a2, bm2a2, bm1b2, bm2b2, ts12, ts22, af2, trq2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
eids_device = eids * 10 + self.device_code
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 8f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
bm1a = self.data[itime, :, 0]
bm2a = self.data[itime, :, 1]
bm1b = self.data[itime, :, 2]
bm2b = self.data[itime, :, 3]
ts1 = self.data[itime, :, 4]
ts2 = self.data[itime, :, 5]
af = self.data[itime, :, 6]
trq = self.data[itime, :, 7]
for eid_device, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(
eids_device, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
data = [eid_device, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
@abstractmethod
def _words(self) -> List[str]:
return []
class RealCBarForceArray(RealCBarFastForceArray): # 34-CBAR
"""34-CBAR"""
def __init__(self, data_code, is_sort1, isubcase, dt):
RealCBarFastForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
@classmethod
def add_static_case(cls, table_name, element, data, isubcase,
is_sort1=True, is_random=False, is_msc=True,
random_code=0, title='', subtitle='', label=''):
analysis_code = 1 # static
data_code = oef_data_code(table_name, analysis_code,
is_sort1=is_sort1, is_random=is_random,
random_code=random_code,
title=title, subtitle=subtitle, label=label,
is_msc=is_msc)
data_code['loadIDs'] = [0] # TODO: ???
data_code['data_names'] = []
# I'm only sure about the 1s in the strains and the
# corresponding 0s in the stresses.
#if is_stress:
#data_code['stress_bits'] = [0, 0, 0, 0]
#data_code['s_code'] = 0
#else:
#data_code['stress_bits'] = [0, 1, 0, 1]
#data_code['s_code'] = 1 # strain?
data_code['element_name'] = 'CBAR'
data_code['element_type'] = 34
#data_code['load_set'] = 1
ntimes = data.shape[0]
nnodes = data.shape[1]
dt = None
obj = cls(data_code, is_sort1, isubcase, dt)
obj.element = element
obj.data = data
obj.ntimes = ntimes
obj.ntotal = nnodes
obj._times = [None]
obj.is_built = True
return obj
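    # Minimal usage sketch for add_static_case (the table name and subcase id are
    # illustrative assumptions, not taken from this module):
    #   element = np.array([1, 2, 3], dtype='int32')
    #   data = np.zeros((1, 3, 8), dtype='float32')  # [ntimes, nelements, 8 headers]
    #   cbar_force = RealCBarForceArray.add_static_case('OEF1X', element, data, isubcase=1)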
def _words(self) -> List[str]:
words = [' F O R C E S I N B A R E L E M E N T S ( C B A R )\n',
'0 ELEMENT BEND-MOMENT END-A BEND-MOMENT END-B - SHEAR - AXIAL\n',
' ID. PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n']
return words
class RealCWeldForceArray(RealCBarFastForceArray):  # 117-CWELD
"""117-CWELD"""
def __init__(self, data_code, is_sort1, isubcase, dt):
RealCBarFastForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
class RealCFastForceArrayNX(RealCBarFastForceArray):  # 119-CFAST
"""119-CFAST"""
def __init__(self, data_code, is_sort1, isubcase, dt):
RealCBarFastForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
class RealConeAxForceArray(RealForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
RealForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if not is_sort1:
raise NotImplementedError('SORT2; code_info=\n%s' % self.code_information())
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self) -> List[str]:
headers = [
'hopa', 'bmu', 'bmv', 'tm', 'su', 'sv'
]
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the RealConeAxForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[hopa, bmu, bmv, tm, su, sv]
self.data = zeros((self.ntimes, self.ntotal, 6), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data, items=column_values,
major_axis=self.element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['ElementID', 'Item']
else:
df1 = pd.DataFrame(self.element)
df1.columns = ['ElementID']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
self.data_frame = df1.join([df2])
#print(self.data_frame)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.element):
eid = e
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(hopa1, bmu1, bmv1, tm1, su1, sv1) = t1
(hopa2, bmu2, bmv2, tm2, su2, sv2) = t2
if not np.array_equal(t1, t2):
msg += (
'%s (%s, %s, %s, %s, %s, %s)\n'
' (%s, %s, %s, %s, %s, %s)\n' % (
eid,
hopa1, bmu1, bmv1, tm1, su1, sv1,
hopa2, bmu2, bmv2, tm2, su2, sv2,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, hopa, bmu, bmv, tm, su, sv):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [hopa, bmu, bmv, tm, su, sv]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = [
' F O R C E S I N A X I S - S Y M M E T R I C C O N I C A L S H E L L E L E M E N T S (CCONEAX)\n'
' \n'
' ELEMENT HARMONIC POINT BEND-MOMENT BEND-MOMENT TWIST-MOMENT SHEAR SHEAR\n'
' ID. NUMBER ANGLE V U V U\n'
#' 101 0 5.864739E-09 1.759422E-09 0.0 0.0 0.0'
#' 101 0.0000 5.864739E-09 1.759422E-09 0.0 0.0 0.0'
]
#(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
hopa = self.data[itime, :, 0]
bmu = self.data[itime, :, 1]
bmv = self.data[itime, :, 2]
tm = self.data[itime, :, 3]
su = self.data[itime, :, 4]
sv = self.data[itime, :, 5]
for (eid, hopai, bmui, bmvi, tmi, sui, svi) in zip(
eids, hopa, bmu, bmv, tm, su, sv):
if hopai > 0.1:
raise NotImplementedError(hopai)
vals2 = write_floats_13e([hopai, bmui, bmvi, tmi, sui, svi])
[hopai, bmui, bmvi, tmi, sui, svi] = vals2
# TODO: hopa is probably the wrong type
# hopa # Mu Mv twist Vy Vu
f06_file.write(' %8i %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, 0.0, hopai, bmui, bmvi, tmi, sui, svi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class RealCBar100ForceArray(RealForceObject): # 100-CBAR
"""
CBAR-34s are converted to CBAR-100s when you have PLOAD1s
(distributed bar loads). The number of stations by default is 2,
but with a CBARAO, you can change this (max of 8 points; 6 internal
points).
If you use a CBARO without PLOAD1s, you wil turn CBAR-34s into
CBAR-100s as well.
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if not is_sort1:
raise NotImplementedError('SORT2; code_info=\n%s' % self.code_information())
def get_headers(self) -> List[str]:
headers = [
'station', 'bending_moment1', 'bending_moment2', 'shear1', 'shear2', 'axial', 'torque'
]
return headers
def build(self):
"""sizes the vectorized attributes of the RealCBar100ForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
# [station, bending_moment1, bending_moment2, shear1, shear2, axial, torque]
self.data = zeros((self.ntimes, self.ntotal, 7), dtype='float32')
#def finalize(self):
#sd = self.data[0, :, 0]
#i_sd_zero = np.where(sd != 0.0)[0]
#i_node_zero = np.where(self.element_node[:, 1] != 0)[0]
#assert i_node_zero.max() > 0, 'CBAR element_node hasnt been filled'
#i = np.union1d(i_sd_zero, i_node_zero)
#self.element = self.element[i]
#self.element_node = self.element_node[i, :]
#self.data = self.data[:, i, :]
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
element_location = [
self.element,
self.data[0, :, 0],
]
if self.nonlinear_factor not in (None, np.nan):
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data[:, :, 1:], items=column_values,
major_axis=element_location, minor_axis=headers[1:]).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['ElementID', 'Location', 'Item']
else:
df1 = pd.DataFrame(element_location).T
df1.columns = ['ElementID', 'Location']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
self.data_frame = df1.join([df2])
#self.data_frame = self.data_frame.reset_index().replace({'NodeID': {0:'CEN'}}).set_index(['ElementID', 'NodeID'])
#print(self.data_frame)
def add_sort1(self, dt, eid, sd, bm1, bm2, ts1, ts2, af, trq):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
# station, bending_moment1, bending_moment2, shear1, shear2, axial, torque
self.data[self.itime, self.ielement, :] = [sd, bm1, bm2, ts1, ts2, af, trq]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
msg = [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
return msg
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def eid_to_element_node_index(self, eids):
ind = searchsorted(eids, self.element)
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#' F O R C E D I S T R I B U T I O N I N B A R E L E M E N T S ( C B A R )'
#'0 ELEMENT STATION BEND-MOMENT SHEAR FORCE AXIAL'
#' ID. (PCT) PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE'
#' 10 0.000 0.0 -5.982597E+06 0.0 -7.851454E+03 0.0 0.0'
#' 10 1.000 0.0 1.868857E+06 0.0 -7.851454E+03 0.0 0.0'
#' 11 0.000 0.0 1.868857E+06 0.0 -7.851454E+03 0.0 0.0'
#' 11 0.050 0.0 2.261429E+06 0.0 -7.851454E+03 0.0 0.0'
#' 11 0.100 0.0 2.654002E+06 0.0 -7.851454E+03 0.0 0.0'
words = [
' F O R C E D I S T R I B U T I O N I N B A R E L E M E N T S ( C B A R )\n'
'0 ELEMENT STATION BEND-MOMENT SHEAR FORCE AXIAL\n'
' ID. (PCT) PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n']
# ' 15893 0.000 1.998833E+02 9.004551E+01 2.316835E+00 1.461960E+00 -2.662207E+03 9.795244E-02'
#msg = []
#header[1] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
eids = self.element
#f.write(''.join(words))
ntimes = self.data.shape[0]
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + words))
# sd, bm1, bm2, ts1, ts2, af, trq
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
trq = self.data[itime, :, 6]
for eid, sdi, bm1i, bm2i, ts1i, ts2i, afi, trqi in zip(eids, sd, bm1, bm2, ts1, ts2, af, trq):
[bm1i, bm2i, ts1i, ts2i, afi, trqi] = write_floats_13e([
bm1i, bm2i, ts1i, ts2i, afi, trqi])
f06_file.write(
' %8i %4.3f %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, sdi, bm1i, bm2i, ts1i, ts2i, afi, trqi))
f06_file.write(page_stamp % page_num)
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i7f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
# sd, bm1, bm2, ts1, ts2, af, trq
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
trq = self.data[itime, :, 6]
for eid, eid_device, sdi, bm1i, bm2i, ts1i, ts2i, afi, trqi in zip(
eids, eids_device, sd, bm1, bm2, ts1, ts2, af, trq):
[sbm1i, sbm2i, sts1i, sts2i, safi, strqi] = write_floats_13e([
bm1i, bm2i, ts1i, ts2i, afi, trqi])
op2_ascii.write(
' %8i %4.3f %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, sdi, sbm1i, sbm2i, sts1i, sts2i, safi, strqi))
data = [eid_device, sdi, bm1i, bm2i, ts1i, ts2i, afi, trqi]
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(sd1, bm11, bm21, ts11, ts21, af1, trq1) = t1
(sd2, bm12, bm22, ts12, ts22, af2, trq2) = t2
if not np.array_equal(t1, t2):
msg += '(%s) (%s, %s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
sd1, bm11, bm21, ts11, ts21, af1, trq1,
sd2, bm12, bm22, ts12, ts22, af2, trq2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
class RealCGapForceArray(RealForceObject): # 38-CGAP
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
RealForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if not is_sort1:
raise NotImplementedError('SORT2; code_info=\n%s' % self.code_information())
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self) -> List[str]:
headers = [
'fx', 'sfy', 'sfz', 'u', 'v', 'w', 'sv', 'sw'
]
return headers
def build(self):
"""sizes the vectorized attributes of the RealCGapForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
# [fx, sfy, sfz, u, v, w, sv, sw]
self.data = zeros((self.ntimes, self.ntotal, 8), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
# LoadStep 1.0
# ElementID Item
# 101 fx 33333.332031
# sfy 0.000000
# sfz 0.000000
# u 0.000115
# v 0.000000
# w 0.000000
# sv 0.000000
# sw 0.000000
# 102 fx -0.000002
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
# Static fx sfy sfz u v w sv sw
# ElementID
# 1 1.253610e-10 -0.0 0.0 0.250722 -0.852163 0.0 0.0 0.0
# 21 1.253610e-10 -0.0 0.0 0.250722 -0.852163 0.0 0.0 0.0
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.element):
eid = e
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(fx1, sfy1, sfz1, u1, v1, w1, sv1, sw1) = t1
(fx2, sfy2, sfz2, u2, v2, w2, sv2, sw2) = t2
if not np.array_equal(t1, t2):
msg += (
'%s (%s, %s, %s, %s, %s, %s, %s, %s)\n'
' (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
fx1, sfy1, sfz1, u1, v1, w1, sv1, sw1,
fx2, sfy2, sfz2, u2, v2, w2, sv2, sw2,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, fx, sfy, sfz, u, v, w, sv, sw):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [fx, sfy, sfz, u, v, w, sv, sw]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = [
' F O R C E S I N G A P E L E M E N T S ( C G A P )\n'
' ELEMENT - F O R C E S I N E L E M S Y S T E M - - D I S P L A C E M E N T S I N E L E M S Y S T E M -\n'
' ID COMP-X SHEAR-Y SHEAR-Z AXIAL-U TOTAL-V TOTAL-W SLIP-V SLIP-W\n'
#' 101 3.333333E+04 0.0 0.0 1.149425E-04 0.0 0.0 0.0 0.0\n'
]
##(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
#ntimes, ntotal = self.data.shape[:1]
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
# [fx, sfy, sfz, u, v, w, sv, sw]
fx = self.data[itime, :, 0]
sfy = self.data[itime, :, 1]
sfz = self.data[itime, :, 2]
u = self.data[itime, :, 3]
v = self.data[itime, :, 4]
w = self.data[itime, :, 5]
sv = self.data[itime, :, 6]
sw = self.data[itime, :, 7]
for (eid, fxi, sfyi, sfzi, ui, vi, wi, svi, swi) in zip(eids, fx, sfy, sfz, u, v, w, sv, sw):
vals2 = write_floats_12e([fxi, sfyi, sfzi, ui, vi, wi, svi, swi])
[fxi, sfyi, sfzi, ui, vi, wi, svi, swi] = vals2
f06_file.write('0%13i%-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, fxi, sfyi, sfzi, ui, vi, wi, svi, swi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class RealBendForceArray(RealForceObject): # 69-CBEND
def __init__(self, data_code, is_sort1, isubcase, dt):
RealForceObject.__init__(self, data_code, isubcase)
self.nelements = 0 # result specific
def get_headers(self) -> List[str]:
headers = [
'bending_moment_1a', 'bending_moment_2a', 'shear_1a', 'shear_2a', 'axial_a', 'torque_a',
'bending_moment_1b', 'bending_moment_2b', 'shear_1b', 'shear_2b', 'axial_b', 'torque_b',
]
return headers
def build(self):
"""sizes the vectorized attributes of the RealBendForceArray"""
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.nelements, 3), dtype='int32')
#[bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a
# bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b]
self.data = zeros((self.ntimes, self.nelements, 12), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
# TODO: add NodeA, NodeB
element = self.element_node[:, 0]
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, element, self.data)
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=element, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'Item']
else:
df1 = pd.DataFrame(self.element_node)
df1.columns = ['ElementID', 'NodeA', 'NodeB']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
data_frame = df1.join(df2)
self.data_frame = data_frame
def add_sort1(self, dt, eid,
nid_a, bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
nid_b, bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element_node[self.ielement] = [eid, nid_a, nid_b]
self.data[self.itime, self.ielement, :] = [
bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b,
]
self.ielement += 1
if self.ielement == self.nelements:
self.ielement = 0
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = [
' F O R C E S I N B E N D E L E M E N T S ( C B E N D )\n'
' - BENDING MOMENTS - - SHEARS - AXIAL\n'
' ELEMENT-ID GRID END PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n'
#'0 6901 6901 A 0.0 0.0 0.0 0.0 0.0 -6.305720E-16'
#' 6902 B -5.000000E-01 5.000000E-01 1.000000E+00 -1.000000E+00 -5.000000E-07 -1.666537E-07'
]
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nid_a = self.element_node[:, 1]
nid_b = self.element_node[:, 2]
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
bending_moment_1a = self.data[itime, :, 0]
bending_moment_2a = self.data[itime, :, 1]
shear_1a = self.data[itime, :, 2]
shear_2a = self.data[itime, :, 3]
axial_a = self.data[itime, :, 4]
torque_a = self.data[itime, :, 5]
bending_moment_1b = self.data[itime, :, 6]
bending_moment_2b = self.data[itime, :, 7]
shear_1b = self.data[itime, :, 8]
shear_2b = self.data[itime, :, 9]
axial_b = self.data[itime, :, 10]
torque_b = self.data[itime, :, 11]
for (eid,
nid_ai, bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
nid_bi, bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi) in zip(
eids,
nid_a, bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
nid_b, bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b):
[bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi] = write_floats_13e(
[bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi])
f06_file.write(
'0 %8i%8i A %13s %13s %13s %13s %13s %13s\n'
' %8i B %13s %13s %13s %13s %13s %13s\n' % (
eid, nid_ai, bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
nid_bi, bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi
))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.element_node, table.element_node):
assert self.element_node.shape == table.element_node.shape, 'element_node shape=%s table.shape=%s' % (self.element_node.shape, table.element_node.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += 'Eid, Nid_A, Nid_B\n'
for (eid1, nida1, nidb1), (eid2, nida2, nidb2) in zip(self.element_node, table.element_node):
msg += '(%s, %s, %s), (%s, %s, %s)\n' % (eid1, nida1, nidb1, eid2, nida2, nidb2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
eids = self.element_node[:, 0]
for itime in range(self.ntimes):
for ie, eid in enumerate(eids):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(bending_moment_1a1, bending_moment_2a1, shear_1a1, shear_2a1, axial_a1, torque_a1,
bending_moment_1b1, bending_moment_2b1, shear_1b1, shear_2b1, axial_b1, torque_b1) = t1
(bending_moment_1a2, bending_moment_2a2, shear_1a2, shear_2a2, axial_a2, torque_a2,
bending_moment_1b2, bending_moment_2b2, shear_1b2, shear_2b2, axial_b2, torque_b2) = t2
if not np.array_equal(t1, t2):
msg += '(%s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
bending_moment_1a1, bending_moment_2a1, shear_1a1, shear_2a1, axial_a1, torque_a1,
bending_moment_1b1, bending_moment_2b1, shear_1b1, shear_2b1, axial_b1, torque_b1,
bending_moment_1a2, bending_moment_2a2, shear_1a2, shear_2a2, axial_a2, torque_a2,
bending_moment_1b2, bending_moment_2b2, shear_1b2, shear_2b2, axial_b2, torque_b2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
class RealSolidPressureForceArray(RealForceObject): # 77-PENTA_PR,78-TETRA_PR
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
RealForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if not is_sort1:
#raise NotImplementedError('SORT2; code_info=\n%s' % self.code_information())
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self) -> List[str]:
headers = [
'ax', 'ay', 'az', 'vx', 'vy', 'vz', 'pressure'
]
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the RealSolidPressureForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype=idtype)
#[ax, ay, az, vx, vy, vz, pressure]
self.data = zeros((self.ntimes, self.ntotal, 7), dtype=fdtype)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(ax1, ay1, az1, vx1, vy1, vz1, pressure1) = t1
(ax2, ay2, az2, vx2, vy2, vz2, pressure2) = t2
if not np.array_equal(t1, t2):
msg += (
'%s (%s, %s, %s, %s, %s, %s, %s)\n'
' (%s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
ax1, ay1, az1, vx1, vy1, vz1, pressure1,
ax2, ay2, az2, vx2, vy2, vz2, pressure2,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, etype, ax, ay, az, vx, vy, vz, pressure):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [ax, ay, az, vx, vy, vz, pressure]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
#(ntimes, ntotal, two) = self.data.shape
if self.is_sort1:
page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file)
else:
raise NotImplementedError('SORT2; code_info=\n%s' % self.code_information())
return page_num
def _write_sort2_as_sort1(self, header, page_stamp, page_num=1, f=None):
msg_temp = [' P E A K A C C E L E R A T I O N S A N D P R E S S U R E S\n',
' \n',
' TIME EL-TYPE X-ACCELERATION Y-ACCELERATION Z-ACCELERATION PRESSURE (DB)\n']
ntimes = self.data.shape[0]
eids = self.element
etype = self.element_name
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg_temp))
ax = self.data[itime, :, 0]
ay = self.data[itime, :, 1]
az = self.data[itime, :, 2]
vx = self.data[itime, :, 3]
vy = self.data[itime, :, 4]
vz = self.data[itime, :, 5]
pressure = self.data[itime, :, 6]
for (eid, vxi, vyi, vzi, axi, ayi, azi, pressurei) in zip(
eids, vx, vy, vz, ax, ay, az, pressure):
vals2 = write_floats_13e([axi, ayi, azi, pressurei])
[sax, say, saz, spressure] = vals2
#etype = 'PENPR'
f.write('0%13s %5s %-13s %-13s %-13s %s\n' % (eid, etype, sax, say, saz, spressure))
f.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def _write_sort1_as_sort1(self, header, page_stamp, page_num=1, f=None):
msg_temp = [' P E A K A C C E L E R A T I O N S A N D P R E S S U R E S\n',
' \n',
' ELEMENT-ID EL-TYPE X-ACCELERATION Y-ACCELERATION Z-ACCELERATION PRESSURE (DB)\n'] # TODO: bad line...
ntimes = self.data.shape[0]
eids = self.element
etype = self.element_name
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg_temp))
ax = self.data[itime, :, 0]
ay = self.data[itime, :, 1]
az = self.data[itime, :, 2]
vx = self.data[itime, :, 3]
vy = self.data[itime, :, 4]
vz = self.data[itime, :, 5]
pressure = self.data[itime, :, 6]
for (eid, vxi, vyi, vzi, axi, ayi, azi, pressurei) in zip(
eids, vx, vy, vz, ax, ay, az, pressure):
vals2 = write_floats_13e([axi, ayi, azi, pressurei])
[sax, say, saz, spressure] = vals2
#etype = 'PENPR'
f.write('0%13s %5s %-13s %-13s %-13s %s\n' % (eid, etype, sax, say, saz, spressure))
f.write(page_stamp % page_num)
page_num += 1
return page_num - 1
# F:\work\pyNastran\examples\Dropbox\move_tpl\beamp11.op2
class RealCBeamForceVUArray(RealForceObject): # 191-VUBEAM
"""
**ELTYPE = 191 Beam view element (VUBEAM)**
2 PARENT I Parent p-element identification number
3 COORD I Coordinate system identification number
4 ICORD CHAR4 Flat/curved and so on
TCODE,7 = 0 Real
5 VUGRID I VU grid ID for output grid
6 POSIT RS x/L position of VU grid identification number
7 FORCEX RS Force x
8 SHEARY RS Shear force y
9 SHEARZ RS Shear force z
10 TORSION RS Torsional moment x
11 BENDY RS Bending moment y
12 BENDZ RS Bending moment z
DIRECT TRANSIENT RESPONSE ADAPTIVITY INDEX= 1
0 PVAL ID= 1 SUBCASE= 1
VU-ELEMENT ID = 100001002
F O R C E S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )
TIME = 0.000000E+00, P-ELEMENT ID = 1, OUTPUT COORD. ID = 0, P OF EDGES = 1
VUGRID VUGRID DIST/ - BENDING MOMENTS - -WEB SHEARS - AXIAL TOTAL
ID. LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE
111001002 0.333 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
111001003 0.667 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
F O R C E S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )
TIME = 1.000000E+00, P-ELEMENT ID = 1, OUTPUT COORD. ID = 0, P OF EDGES = 1
VUGRID VUGRID DIST/ - BENDING MOMENTS - -WEB SHEARS - AXIAL TOTAL
ID. LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE
111001002 0.333 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 9.982032E-01 0.000000E+00
111001003 0.667 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 9.982032E-01 0.000000E+00
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
RealForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2; code_info=\n%s' % self.code_information())
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self) -> List[str]:
headers = ['xxb', 'fx', 'fy', 'fz', 'mx', 'my', 'mz']
return headers
def build(self):
"""sizes the vectorized attributes of the RealCBeamForceVUArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.ntotal, 2), dtype='int32')
self.parent_coord = zeros((self.ntotal, 2), dtype='int32')
#[xxb, fx, fy, fz, mx, my, mz]
self.data = zeros((self.ntimes, self.ntotal, 7), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Mode 1 2
#Freq 1.214849e-07 1.169559e-07
#Eigenvalue -5.826450e-13 -5.400125e-13
#Radians 7.633119e-07 7.348554e-07
#ElementID NodeID Item
#100001001 111001001 xxb 0.000000e+00 0.000000e+00
# fx -2.363981e-14 1.091556e-15
# fy 1.041715e-13 5.642625e-14
# fz 2.040026e-14 1.133024e-12
# mx -1.903338e-15 5.201282e-16
# my 8.236364e-14 2.141288e-13
# mz -2.247944e-14 9.947491e-14
# 111001002 xxb 3.333333e-01 3.333333e-01
# fx -2.278856e-14 5.621586e-16
# fy -2.092285e-14 -7.903003e-14
# fz -6.230423e-14 2.318665e-12
# mx 7.229356e-16 -1.005680e-16
# my 3.688462e-14 7.122293e-14
# mz -7.785338e-15 3.304847e-14
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers, self.element_node, self.data)
else:
data_frame = pd.Panel(self.data,
major_axis=self.element_node[:, 0], minor_axis=headers).to_frame()
data_frame.columns.names = ['Static']
data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
is_nan = (
self.nonlinear_factor is not None and
np.isnan(self.nonlinear_factor))
"""
File name : aerodynamic coefficients
Author : <NAME>
Email : <EMAIL>
Date : September/2020
Last edit : September/2020
Language : Python 3.8 or >
Aeronautical Institute of Technology - Airbus Brazil
Description:
-
Inputs:
-
Outputs:
-
TODO's:
-
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
import array
import scipy.io as spio
from sklearn.preprocessing import normalize
from framework.baseline_aircraft import *
# =============================================================================
# CLASSES
# =============================================================================
# =============================================================================
# FUNCTIONS
# =============================================================================
def loadmat(filename):
'''
this function should be called instead of scipy.io.loadmat directly,
as it cures the problem of not properly recovering python dictionaries
from mat files. It calls _check_keys to cure all entries
which are still mat-objects
'''
data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
def _check_keys(mat_dict):
'''
checks if entries in the dictionary are mat-objects. If yes,
_todict is called to change them to nested dictionaries
'''
for key in mat_dict:
if isinstance(mat_dict[key], spio.matlab.mio5_params.mat_struct):
mat_dict[key] = _todict(mat_dict[key])
return mat_dict
def _todict(matobj):
'''
A recursive function which constructs nested dictionaries from mat-objects
'''
out = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, spio.matlab.mio5_params.mat_struct):
out[strg] = _todict(elem)
else:
out[strg] = elem
return out
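# Hedged usage sketch ('NN_CL.mat' mirrors the commented-out calls further down;
# the 'W1' key is purely illustrative):
#
# nn = loadmat('Aerodynamics/NN_CL.mat')
# weights = nn['NN_CL']['W1'] # nested mat_structs arrive as plain dicts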
def logical(varin):
if varin == 0:
varout = 0
else:
varout = 1
return varout
def aerodynamic_coefficients_ANN(vehicle, altitude, mach, CL, alpha_deg, switch_neural_network):
CL_input = CL
aircraft = vehicle['aircraft']
wing = vehicle['wing']
inputs_neural_network = {
'mach': mach,
'altitude': altitude,
'angle_of_attack': alpha_deg*np.pi/180,
'aspect_ratio': wing['aspect_ratio'],
'taper_ratio': wing['taper_ratio'],
'leading_edge_sweep': wing['sweep_c_4']*np.pi/180,
'inboard_wing_dihedral': 3*np.pi/180,
'outboard_wing_dihedral': 5*np.pi/180,
'break_position': wing['semi_span_kink'],
'wing_area': wing['area'],
'wing_root_airfoil_incidence': wing['root_incidence']*np.pi/180,
'wing_break_airfoil_incidence': wing['kink_incidence']*np.pi/180,
'wing_tip_airfoil_incidence': wing['tip_incidence']*np.pi/180,
'root_airfoil_leading_edge_radius': wing['leading_edge_radius'][0],
'root_airfoil_thickness_ratio': wing['thickness_ratio'][0],
'root_airfoil_thickness_line_angle_trailing_edge': wing['thickness_line_angle_trailing_edge'][0],
'root_airfoil_thickness_to_chord_maximum_ratio': wing['thickness_to_chord_maximum_ratio'][0],
'root_airfoil_camber_line_angle_leading_edge': wing['camber_line_angle_leading_edge'][0],
'root_airfoil_camber_line_angle_trailing_edge': wing['camber_line_angle_trailing_edge'][0],
'root_airfoil_maximum_camber': wing['maximum_camber'][0],
'root_airfoil_camber_at_maximum_thickness_chordwise_position': wing['camber_at_maximum_thickness_chordwise_position'][0],
'root_airfoil_maximum_camber_chordwise_position ': wing['maximum_camber_chordwise_position'][0],
'break_airfoil_leading_edge_radius': wing['leading_edge_radius'][1],
'break_airfoil_thickness_ratio': wing['thickness_ratio'][1],
'break_airfoil_thickness_line_angle_trailing_edge': wing['thickness_line_angle_trailing_edge'][1],
'break_airfoil_maximum_thickness_chordwise_position': wing['thickness_to_chord_maximum_ratio'][1],
'break_airfoil_camber_line_angle_leading_edge': wing['camber_line_angle_leading_edge'][1],
'break_airfoil_camber_line_angle_trailing_edge': wing['camber_line_angle_trailing_edge'][1],
'break_airfoil_maximum_camber': wing['maximum_camber'][1],
'break_airfoil_camber_at_maximum_thickness_chordwise_position': wing['camber_at_maximum_thickness_chordwise_position'][1],
'break_airfoil_maximum_camber_chordwise_position ': wing['maximum_camber_chordwise_position'][1],
'tip_airfoil_leading_edge_radius': wing['leading_edge_radius'][2],
'tip_airfoil_thickness_ratio': wing['thickness_ratio'][2],
'tip_airfoil_thickness_line_angle_trailing_edge': wing['thickness_line_angle_trailing_edge'][2],
'tip_airfoil_maximum_thickness_chordwise_position': wing['thickness_to_chord_maximum_ratio'][2],
'tip_airfoil_camber_line_angle_leading_edge': wing['camber_line_angle_leading_edge'][2],
'tip_airfoil_camber_line_angle_trailing_edge': wing['camber_line_angle_trailing_edge'][2],
'tip_airfoil_maximum_camber': wing['maximum_camber'][2],
'tip_airfoil_camber_at_maximum_thickness_chordwise_position': wing['camber_at_maximum_thickness_chordwise_position'][2],
'tip_airfoil_maximum_camber_chordwise_position ': wing['maximum_camber_chordwise_position'][2]
}
# NN_induced = loadmat('Aerodynamics/NN_CDind.mat')
# np.save('NN_induced.npy', NN_induced)
# NN_wave = loadmat('Aerodynamics/NN_CDwave.mat')
# np.save('NN_wave.npy', NN_wave)
# NN_cd0 = loadmat('Aerodynamics/NN_CDfp.mat')
# np.save('NN_cd0.npy', NN_cd0)
# NN_CL = loadmat('Aerodynamics/NN_CL.mat')
# np.save('NN_CL.npy', NN_CL)
NN_induced = np.load('Aerodynamics/NN_induced.npy',
allow_pickle=True).item()
NN_wave = np.load('Aerodynamics/NN_wave.npy', allow_pickle=True).item()
NN_cd0 = np.load('Aerodynamics/NN_cd0.npy', allow_pickle=True).item()
NN_CL = np.load('Aerodynamics/NN_CL.npy', allow_pickle=True).item()
CLout, Alpha, CDfp, CDwave, CDind, grad_CL, grad_CDfp, grad_CDwave, grad_CDind = ANN_aerodynamics_main(
CL_input,
inputs_neural_network,
switch_neural_network,
NN_induced,
NN_wave,
NN_cd0,
NN_CL
)
CDfp = 1.04*CDfp
CDwing = CDfp + CDwave + CDind
return CDwing, CLout
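# Hedged usage sketch: `vehicle` is the nested dict built elsewhere in the
# framework (keys 'aircraft' and 'wing' with the geometry fields referenced
# above); the flight condition values and switch setting are placeholders.
#
# CDwing, CL = aerodynamic_coefficients_ANN(vehicle, altitude=11000.0, mach=0.78,
# CL=0.5, alpha_deg=2.0, switch_neural_network=0)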
def ANN_aerodynamics_main(
CL_input,
inputs_neural_network,
switch_neural_network,
NN_ind,
NN_wave,
NN_cd0,
NN_CL,
):
# Source: <NAME> and <NAME>
# Aeronautical Institute of Technology
sizes = len(inputs_neural_network)
# if sizes != 40 :
# print('\n The number of input variables should be 40.')
# print('\n Check the size of input_neural_network columns.')
m = 1
# DEFINE VARIABLE BOUNDS
# Flight conditions
mach = np.array([0.2, 0.85]) # 1 - Flight Mach number
altitude = np.array([0, 13000]) # 2 - Flight altitude [m]
alpha = np.array([-5, 10])*np.pi/180 # 3 - Angle of attack [rad]
# Wing planform
aspect_ratio = np.array([7, 12])
import os
import argparse
from PIL import Image
import numpy as np
from utils.metrics import iou_stats
def parse_argument():
parser = argparse.ArgumentParser(
description='Benchmark over 2D-3D-Semantics on segmentation, '\
+'depth and surface normals estimation')
parser.add_argument('--pred_dir', type=str, default='',
help='/path/to/prediction.')
parser.add_argument('--gt_dir', type=str, default='',
help='/path/to/ground-truths.')
parser.add_argument('--depth_unit', type=float, default=512.0,
help='Each pixel value difference means 1/depth_unit meters.')
parser.add_argument('--num_classes', type=int, default=21,
help='number of segmentation classes.')
parser.add_argument('--string_replace', type=str, default=',',
help='replace the first string with the second one.')
parser.add_argument('--train_segmentation', action='store_true',
help='enable/disable to benchmark segmentation on mIoU.')
parser.add_argument('--train_depth', action='store_true',
help='enable/disable to benchmark depth.')
parser.add_argument('--train_normal', action='store_true',
help='enable/disable to benchmark surface normal.')
args = parser.parse_args()
return args
def benchmark_segmentation(pred_dir, gt_dir, num_classes, string_replace):
"""Benchmark segmentaion on mean Intersection over Union (mIoU).
"""
print('Benchmarking semantic segmentation.')
assert(os.path.isdir(pred_dir))
assert(os.path.isdir(gt_dir))
tp_fn = np.zeros(num_classes, dtype=np.float64)
tp_fp = np.zeros(num_classes, dtype=np.float64)
tp = np.zeros(num_classes, dtype=np.float64)
for dirpath, dirnames, filenames in os.walk(pred_dir):
for filename in filenames:
predname = os.path.join(dirpath, filename)
gtname = predname.replace(pred_dir, gt_dir)
if string_replace != '':
stra, strb = string_replace.split(',')
gtname = gtname.replace(stra, strb)
pred = np.asarray(
Image.open(predname).convert(mode='L'),
dtype=np.uint8
)
gt = np.asarray(
Image.open(gtname).convert(mode='L'),
dtype=np.uint8
)
_tp_fn, _tp_fp, _tp = iou_stats(
pred,
gt,
num_classes=num_classes,
background=0
)
tp_fn += _tp_fn
tp_fp += _tp_fp
tp += _tp
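# per-class IoU = TP / (TP + FP + FN); since tp_fn = TP + FN and tp_fp = TP + FP,
# the union is tp_fn + tp_fp - tp (the 1e-12 guards against classes with no pixels)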
iou = tp / (tp_fn + tp_fp - tp + 1e-12) * 100.0
class_names = ['beam', 'board', 'bookcase', 'ceiling', 'chair', 'clutter',
'column', 'door', 'floor', 'sofa', 'table', 'wall', 'window']
for i in range(num_classes):
print('class {:10s}: {:02d}, acc: {:4.4f}%'.format(
class_names[i], i, iou[i])
)
mean_iou = iou.sum() / num_classes
print('mean IOU: {:4.4f}%'.format(mean_iou))
mean_pixel_acc = tp.sum() / (tp_fp.sum() + 1e-12) * 100.0
print('mean Pixel Acc: {:4.4f}%'.format(mean_pixel_acc))
def benchmark_depth(pred_dir, gt_dir, string_replace):
"""Benchmark depth estimation.
"""
print('Benchmarking depth estimations.')
assert(os.path.isdir(pred_dir))
assert(os.path.isdir(gt_dir))
N = 0.0
rmse_linear = 0.0
rmse_log = 0.0
absrel = 0.0
sqrrel = 0.0
thresholds = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
powers = [1/8.0, 1/4.0, 1/2.0, 1.0, 2.0, 3.0]
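# delta accuracy: fraction of pixels where max(pred/gt, gt/pred) < 1.25**p; the
# usual depth benchmarks report p = 1, 2, 3, this script also evaluates p = 1/8, 1/4, 1/2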
for dirpath, dirnames, filenames in os.walk(pred_dir):
for filename in filenames:
predname = os.path.join(dirpath, filename)
gtname = predname.replace(pred_dir, gt_dir)
if string_replace != '':
stra, strb = string_replace.split(',')
gtname = gtname.replace(stra, strb)
pred = np.asarray(
Image.open(predname).convert(mode='I'),
dtype=np.int32)
gt = np.asarray(
Image.open(gtname).convert(mode='I'),
dtype=np.int32)
pred = np.reshape(pred, (-1,))
gt = np.reshape(gt, (-1,))
#mask = np.logical_and(gt >= 51, gt <= 26560)
mask = gt < 2**16-1
pred = np.clip(pred, 51, 26560)
pred = pred[mask].astype(np.float32)/args.depth_unit
gt = gt[mask].astype(np.float32)/args.depth_unit
rmse_linear += np.sum((pred-gt)**2)
rmse_log += np.sum(
(np.log(np.maximum(pred, 1e-12))-np.log(np.maximum(gt, 1e-12)))**2)
absrel += np.sum(np.abs(pred-gt)/gt)
sqrrel += np.sum((pred-gt)**2/gt)
th = np.maximum(pred/gt, gt/pred)
for i in range(len(thresholds)):
#thresholds[i] += np.sum(th < 1.25**(i+1))
thresholds[i] += np.sum(th < 1.25**powers[i])
N += pred.shape[0]
rmse_linear = np.sqrt(rmse_linear/N)
rmse_log = np.sqrt(rmse_log/N)
absrel = absrel / N
sqrrel = sqrrel / N
for i in range(len(thresholds)):
thresholds[i] = thresholds[i] / N
print('RMSE(lin): {:.4f}'.format(rmse_linear))
print('RMSE(log): {:.4f}'.format(rmse_log))
print('abs rel: {:.4f}'.format(absrel))
print('sqr rel: {:.4f}'.format(sqrrel))
for i in range(len(thresholds)):
print('\sigma < 1.25^{:.4f}: {:.4f}'.format(powers[i], thresholds[i]))
print('\sigma < 1.25: {:.4f}'.format(thresholds[3]))
print('\sigma < 1.25^2: {:.4f}'.format(thresholds[4]))
print('\sigma < 1.25^3: {:.4f}'.format(thresholds[5]))
def benchmark_normal(pred_dir, gt_dir, string_replace):
"""Benchmark surface normal estimations.
"""
print('Benchmarking surface normal estimations.')
assert(os.path.isdir(pred_dir))
assert(os.path.isdir(gt_dir))
N = 0.0
angles = []
for dirpath, dirnames, filenames in os.walk(pred_dir):
for filename in filenames:
predname = os.path.join(dirpath, filename)
gtname = predname.replace(pred_dir, gt_dir)
if string_replace != '':
stra, strb = string_replace.split(',')
gtname = gtname.replace(stra, strb)
pred = np.asarray(
Image.open(predname).convert(mode='RGB'),
dtype=np.uint8)
gt = np.asarray(
Image.open(gtname).convert(mode='RGB'),
dtype=np.uint8)
pred = np.reshape(pred, (-1,3))
gt = np.reshape(gt, (-1,3))
mask = np.any(gt != 128, axis=-1)
pred = pred[mask, :].astype(np.float32)-127.5
gt = gt[mask, :].astype(np.float32)-127.5
pred = pred / (np.linalg.norm(pred, axis=-1, keepdims=True)+1e-12)
gt = gt / (np.linalg.norm(gt, axis=-1, keepdims=True)+1e-12)
cos = np.sum(pred*gt, axis=-1)
abs_cos = np.abs(cos)
assert(not (abs_cos-1 > 1e-5).any())
cos = np.clip(cos, -1, 1)
angles.append(cos)
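# per-pixel angular error: arccos of the dot product between predicted and
# ground-truth unit normals, reported below in degrees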
angles = np.concatenate(angles, axis=0)
angles = np.arccos(angles)*(180.0/np.pi)
print('Angle Mean: {:.4f}'.format(np.mean(angles)))
print('Angle Median: {:.4f}'.format(np.median(angles)))
print('Angles within 2.8125: {:.4f}%'.format(np.mean(angles <= 2.8125)*100.0))
print('Angles within 5.625: {:.4f}%'.format(np.mean(angles <= 5.625)*100.0))
import numpy as np
import scipy as sp
from scipy import integrate, stats  # integrate must be loaded for sp.integrate.quad below
from itertools import chain
from math import log, pi, sqrt, exp
from numpy.linalg import pinv, det
"""
This code provides united representation of probability distributions (or functions in general),
whether continuous or discrete, along with their domains. It allows to perform different
operations, like integration, in an unified way, without knowing the nature of underlying
distribution, so the code which uses only these operation would work for any such distribution.
"""
class DiscreteDomain:
"""
A discrete domain -- a set of points.
"""
def __init__(self, li=None):
"""
:param li: list of point is the domain
:return: None
"""
self.values = set(li if li else [])
def integrate(self, f):
"""
Integrate over the domain.
:param f: function to integrate
:return: integral value
"""
return sum(map(f, self.values))
def __contains__(self, val):
return val in self.values
class IntervalDomain:
"""
An (open) interval on real line.
"""
def __init__(self, begin=-np.inf, end=+np.inf):
self.begin = begin
self.end = end
def integrate(self, f):
"""
Integrate over the domain.
:param f: function to integrate
:return: integral value
"""
return sp.integrate.quad(f, self.begin, self.end)[0]
def __contains__(self, val):
"""
Check if a point is in the domain.
:param val: target point
:return: True if point is in the domain, False otherwise
"""
return val > self.begin and val < self.end
class UnionDomain:
"""
Union of domains.
"""
def __init__(self, *domains):
flattened_domains = []
for domain in domains:
if isinstance(domain, UnionDomain):
flattened_domains += domain.domains
else:flattened_domains.append(domain)
self.domains = flattened_domains
def integrate(self, f):
"""
Integrate over the domain.
:param f: function to integrate
:return: integral value
"""
return sum(map(lambda domain: domain.integrate(f), self.domains))
def __contains__(self, val):
"""
Check if a point is in the domain.
:param val: target point
:return: True if point is in the domain, False otherwise
"""
return any(map(lambda x: val in x, self.domains))
class ProductDomain:
"""
Cartesian product of domains.
"""
def __init__(self, *domains):
flattened_domains = []
for domain in domains:
if isinstance(domain, ProductDomain):
flattened_domains += domain.domains
else:flattened_domains.append(domain)
self.domains = flattened_domains
def integrate(self, f):
"""
Integrate over the domain.
:param f: function to integrate
:return: integral value
"""
if len(self.domains) == 1:
return self.domains[0].integrate(f)
reduced_domain = ProductDomain(*self.domains[1:])
return reduced_domain.integrate(lambda *args: self.domains[0].integrate(lambda x: f(x, *args)))
def integrate_along(self, f, axis):
"""
Integrate along one specified axis.
:param f: function to integrate
:param axis: selected axis
:return: integral value
"""
reduced_domain = ProductDomain(*(self.domains[:axis] + self.domains[axis+1:]))
target_domain = self.domains[axis]
g = lambda *args: target_domain.integrate(lambda x: f(*(args[:axis] + (x,) + args[axis:])))
return g, reduced_domain
def __contains__(self, val):
"""
Check if a point is in the domain.
:param val: target point
:return: True if point is in the domain, False otherwise
"""
return all([val[i] in self.domains[i] for i in range(len(self.domains))])
def __getitem__(self, pos):
return self.domains[pos]
def __iter__(self):
return self.domains.__iter__()
class MathFunction:
"""
Stores a Python function and its domain. Represents a mathematical function with
domain information provided, so e.g. integration can be performed.
"""
def __init__(self, f, domain):
self.f = f
self.domain = domain
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
def integrate(self):
"""
Integrate over all the domain.
:return: integral value
"""
return self.domain.integrate(self.f)
def integrate_along(self, axis):
"""
Integrate along one specified axis.
:param axis: selected axis
:return: integral value
"""
return MathFunction(*self.domain.integrate_along(axis))
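# Hedged usage sketch: the same integrate() call works for a continuous and a
# discrete function because the domain supplies the integration rule.
#
# gauss = MathFunction(lambda x: exp(-x * x / 2) / sqrt(2 * pi), IntervalDomain())
# print(gauss.integrate()) # ~1.0 via scipy quadrature
# die = MathFunction(lambda k: 1 / 6, DiscreteDomain([1, 2, 3, 4, 5, 6]))
# print(die.integrate()) # exactly 1.0 via summation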
class MathDistribution:
"""
Stores a distribution (scipy-style) and its domain.
"""
def __init__(self, distr, domain):
self.distr = distr
self.domain = domain
def __call__(self, *args, **kwargs):
return self.distr(*args, **kwargs)
class MultivariateGaussianDistribution(MathDistribution):
"""
Gaussian (normal) multivariate distribution.
"""
def __init__(self, mean, cov):
self.mean = mean
self.cov = np.matrix(cov)
self.dim = len(self.mean)
assert self.cov.shape[0] == self.cov.shape[1]
        domain = ProductDomain(*[IntervalDomain(-np.inf, +np.inf) for _ in range(len(self.mean))])
super().__init__(stats.multivariate_normal(self.mean, self.cov), domain)
def reduce(self, assignment):
if all([x is None for x in assignment]):
return MultivariateGaussianDistribution(self.mean, self.cov)
# reordering variables, so that non-reduced variables go before reduced
reduced_idx = [i for i in range(len(assignment)) if assignment[i] is not None]
non_reduced_idx = [i for i in range(len(assignment)) if assignment[i] is None]
x = np.matrix([assignment[idx] for idx in reduced_idx]).T
new_idx = non_reduced_idx + reduced_idx
mean1 = np.matrix([self.mean[idx] for idx in non_reduced_idx]).T
mean2 = np.matrix([self.mean[idx] for idx in reduced_idx]).T
cov11 = self.cov[non_reduced_idx][:, non_reduced_idx]
cov22 = self.cov[reduced_idx][:, reduced_idx]
cov12 = self.cov[non_reduced_idx][:, reduced_idx]
mean = mean1 + cov12 * pinv(cov22) * (x - mean2)
cov = cov11 - cov12 * pinv(cov22) * cov12.T
return MultivariateGaussianDistribution(np.array(mean.T), cov)
def marginalize(self, marginalized):
non_marginalized = [i for i in range(self.dim) if i not in marginalized]
mean = self.mean[non_marginalized]
cov = self.cov[non_marginalized][:, non_marginalized]
return MultivariateGaussianDistribution(mean, cov)
def rvs(self, *args, **kwargs):
return self.distr.rvs(*args, **kwargs)
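# Hypothetical usage sketch (not part of the original module): marginalizing a 2-D Gaussian.
# Assumes numpy (np) and scipy.stats (stats) are imported at the top of this file.
_mvg = MultivariateGaussianDistribution(np.array([0.0, 1.0]), [[2.0, 0.3], [0.3, 1.0]])
_marg = _mvg.marginalize([1])  # integrate out the second variable
# _marg.mean -> array([0.]) and _marg.cov -> matrix([[2.]]), i.e. the first marginal is N(0, 2).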
class LinearGaussianDistribution:
"""
Univariate gaussian distribution.
"""
def __init__(self, w0, w, variance):
self.w0 = w0
self.w = w
self.variance = variance
self.dim = len(w) + 1
@property
def scale(self):
return sqrt(self.variance)
def pdf(self, x):
x = np.atleast_1d(x)
u = x[1:]
x = x[0]
return stats.norm.pdf(x, loc=np.dot(u, self.w) + self.w0, scale=self.scale)
def __mul__(self, other):
if isinstance(other, LinearGaussianDistribution):
other = other.canonical_form
return self.canonical_form * other
def rvs(self, size=1):
assert self.dim == 1
return stats.norm.rvs(size=size, loc=self.w0, scale=self.scale)
def reduce(self, assignment):
if assignment[0] is not None:
return self.canonical_form.reduce(assignment)
reduced = [i - 1 for i in range(1, len(assignment)) if assignment[i] is not None]
non_reduced = [i - 1 for i in range(1, len(assignment)) if assignment[i] is None]
reduced_values = np.array([x for x in assignment if x is not None])
w0 = self.w0 + np.dot(self.w[reduced], reduced_values)
w = self.w[non_reduced]
return LinearGaussianDistribution(w0, w, self.variance)
@property
def canonical_form(self):
w = np.matrix(np.hstack([[-1.], self.w]), copy=False).T
        return QuadraticCanonicalForm(K=w*w.T/self.variance, h=-self.w0*w.T/self.variance,
                                      g=-0.5*(self.w0 * self.w0 / self.variance) - 0.5*log(2*pi*self.variance))
def marginalize(self, *args, **kwargs):
return self.canonical_form.marginalize(*args, **kwargs)
@staticmethod
def mle(data):
"""
Maximum Likelihood Estimation
:param data: data in the form [(x, u0, u1, ... , un)], preferably numpy array
:return: LinearGaussianDistribution with estimated parameters
"""
data = np.asarray(data)
u = data[:, 1:]
x = data[:, 0]
dim = data.shape[1]
covs = np.matrix(np.atleast_2d(np.cov(np.transpose(data))), copy=False)
means = np.mean(data, axis=0)
A = np.matrix(
|
np.zeros((dim, dim))
|
numpy.zeros
|
import numpy as np
import pandas as pd
import random
import pickle
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import torch
from torch.nn.utils.rnn import pad_sequence
from utils import _get_parcel, _get_behavioral
from cc_utils import _get_clip_labels
K_RUNS = 4
K_SEED = 330
def _get_clip_seq(df, subject_list, args):
'''
return:
X: input seq (batch_size x time x feat_size)
y: label seq (batch_size x time)
X_len: len of each seq (batch_size x 1)
batch_size <-> number of sequences
time <-> max length after padding
'''
features = [ii for ii in df.columns if 'feat' in ii]
X = []
y = []
for subject in subject_list:
for i_class in range(args.k_class):
if i_class==0: # split test-retest into 4
seqs = df[(df['Subject']==subject) &
(df['y'] == 0)][features].values
label_seqs = df[(df['Subject']==subject) &
(df['y'] == 0)]['y'].values
k_time = int(seqs.shape[0]/K_RUNS)
for i_run in range(K_RUNS):
seq = seqs[i_run*k_time:(i_run+1)*k_time, :]
label_seq = label_seqs[i_run*k_time:(i_run+1)*k_time]
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
y.append(torch.LongTensor(label_seq))
else:
seq = df[(df['Subject']==subject) &
(df['y'] == i_class)][features].values
label_seq = df[(df['Subject']==subject) &
(df['y'] == i_class)]['y'].values
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
y.append(torch.LongTensor(label_seq))
X_len = torch.LongTensor([len(seq) for seq in X])
# pad sequences
X = pad_sequence(X, batch_first=True, padding_value=0)
y = pad_sequence(y, batch_first=True, padding_value=-100)
return X.to(args.device), X_len.to(args.device), y.to(args.device)
def _clip_class_df(args):
'''
data for 15-way clip classification
args.roi: number of ROIs
args.net: number of subnetworks (7 or 17)
args.subnet: subnetwork; 'wb' if all subnetworks
args.invert_flag: all-but-one subnetwork
args.r_roi: number of random ROIs to pick
args.r_seed: random seed for picking ROIs
save each timepoint as feature vector
append class label based on clip
return:
pandas df
'''
load_path = (args.input_data + '/data_MOVIE_runs_%s' %(args.roi_name) +
'_%d_net_%d_ts.pkl' %(args.roi, args.net))
with open(load_path, 'rb') as f:
data = pickle.load(f)
# where are the clips within the run?
timing_file = pd.read_csv('data/videoclip_tr_lookup.csv')
'''
main
'''
clip_y = _get_clip_labels()
table = []
for run in range(K_RUNS):
print('loading run %d/%d' %(run+1, K_RUNS))
run_name = 'MOVIE%d' %(run+1) #MOVIEx_7T_yz
# timing file for run
timing_df = timing_file[
timing_file['run'].str.contains(run_name)]
timing_df = timing_df.reset_index(drop=True)
for subject in data:
# get subject data (time x roi x run)
vox_ts = data[subject][:, :, run]
for jj, clip in timing_df.iterrows():
start = int(np.floor(clip['start_tr']))
stop = int(np.ceil(clip['stop_tr']))
clip_length = stop - start
# assign label to clip
y = clip_y[clip['clip_name']]
for t in range(clip_length):
act = vox_ts[t + start, :]
t_data = {}
t_data['Subject'] = subject
t_data['timepoint'] = t
for feat in range(vox_ts.shape[1]):
t_data['feat_%d' %(feat)] = act[feat]
t_data['y'] = y
table.append(t_data)
df = pd.DataFrame(table)
df['Subject'] = df['Subject'].astype(int)
return df
def _clip_class_rest_df(args, run):
'''
data for 15 clip + rest visualization
each run is saved individually
run: 0, 1, 2, 3 (one of the 4 runs)
args.roi: number of ROIs
args.net: number of subnetworks (7 or 17)
args.subnet: subnetwork; 'wb' if all subnetworks
args.invert_flag: all-but-one subnetwork
args.r_roi: number of random ROIs to pick
args.r_seed: random seed for picking ROIs
save each timepoint as feature vector
append class label based on clip
return:
pandas df
'''
# optional arguments
d = vars(args)
if 'invert_flag' not in d:
args.invert_flag = False
if 'r_roi' not in d:
args.r_roi = 0
args.r_seed = 0
load_path = (args.input_data + '/data_MOVIE_runs_' +
'roi_%d_net_%d_ts.pkl' %(args.roi, args.net))
with open(load_path, 'rb') as f:
data = pickle.load(f)
# where are the clips within the run?
timing_file = pd.read_csv('data/videoclip_tr_lookup.csv')
# pick either all ROIs or subnetworks
if args.subnet!='wb':
if 'minus' in args.subnet:
# remove 'minus_' prefix
args.subnet = args.subnet.split('minus_')[1]
_, nw_info = _get_parcel(args.roi, args.net)
# ***roi ts sorted in preprocessing
nw_info = np.sort(nw_info)
idx = (nw_info == args.subnet)
else:
idx = np.ones(args.roi).astype(bool)
# all-but-one subnetwork
if args.subnet and args.invert_flag:
idx = ~idx
# if random selection,
# overwrite everything above
if args.r_roi > 0:
random.seed(args.r_seed)
idx = np.zeros(args.roi).astype(bool)
# random sample without replacement
samp = random.sample(range(args.roi), k=args.r_roi)
idx[samp] = True
'''
main
'''
print('loading run %d' %(run+1))
run_name = 'MOVIE%d' %(run+1) #MOVIEx_7T_yz
timing_df = timing_file[timing_file['run'].str.contains(run_name)]
timing_df = timing_df.reset_index(drop=True)
# get unique id for each segment including rest segments
length = data[list(data.keys())[0]][:, :, run].shape[0]
k_class = len(timing_df)
y_vec = np.ones(length)*len(timing_df)
for jj, clip in timing_df.iterrows():
start = int(np.floor(clip['start_tr']))
if jj==0:
tag = k_class
y_vec[:start] = tag
tag += 1
else:
y_vec[stop:start] = tag
tag += 1
stop = int(np.ceil(clip['stop_tr']))
clip_length = stop - start
y_vec[start:stop] = jj
y_vec[stop:] = tag
table = []
for subject in data:
roi_ts = data[subject][:, idx, run]
for t in range(roi_ts.shape[0]):
act = roi_ts[t, :]
t_data = {}
t_data['Subject'] = subject
t_data['timepoint'] = t
t_data['y'] = y_vec[t]
for feat in range(roi_ts.shape[1]):
t_data['feat_%d' %(feat)] = act[feat]
table.append(t_data)
df = pd.DataFrame(table)
df['Subject'] = df['Subject'].astype(int)
return df
def _get_bhv_seq(df, subject_list, args):
'''
return:
X: input seq (batch_size x time x feat_size)
y: label seq (batch_size x time)
in {0, 1, ..} if args.mode=='class'
in R if args.mode=='reg'
c: clip seq (batch_size x time)
X_len: len of each seq (batch_size x 1)
batch_size <-> number of sequences
time <-> max length after padding
'''
# optional arguments
d = vars(args)
# regression or classification
if 'mode' not in d:
args.mode = 'class'
if args.mode=='class':
label = 'y'
elif args.mode=='reg':
label = args.bhv
# permutation test
if 'shuffle' not in d:
args.shuffle = False
if args.shuffle:
# different shuffle for each iteration
np.random.seed(args.i_seed)
# get scores for all participants without bhv_df
train_label = df[(df['Subject'].isin(subject_list)) &
(df['c']==1) & (df['timepoint']==0)][label].values
np.random.shuffle(train_label) # inplace
k_clip = len(np.unique(df['c']))
features = [ii for ii in df.columns if 'feat' in ii]
X = []
y = []
c = []
for ii, subject in enumerate(subject_list):
for i_clip in range(k_clip):
if i_clip==0: #handle test retest differently
seqs = df[(df['Subject']==subject) &
(df['c'] == 0)][features].values
if args.shuffle:
label_seqs = np.ones(seqs.shape[0])*train_label[ii]
else:
label_seqs = df[(df['Subject']==subject) &
(df['c'] == 0)][label].values
clip_seqs = df[(df['Subject']==subject) &
(df['c'] == 0)]['c'].values
k_time = int(seqs.shape[0]/K_RUNS)
for i_run in range(K_RUNS):
seq = seqs[i_run*k_time:(i_run+1)*k_time, :]
label_seq = label_seqs[i_run*k_time:(i_run+1)*k_time]
clip_seq = clip_seqs[i_run*k_time:(i_run+1)*k_time]
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
if args.mode=='class':
y.append(torch.LongTensor(label_seq))
elif args.mode=='reg':
y.append(torch.FloatTensor(label_seq))
c.append(torch.LongTensor(clip_seq))
else:
seq = df[(df['Subject']==subject) &
(df['c'] == i_clip)][features].values
if args.shuffle:
label_seq = np.ones(seq.shape[0])*train_label[ii]
else:
label_seq = df[(df['Subject']==subject) &
(df['c'] == i_clip)][label].values
clip_seq = df[(df['Subject']==subject) &
(df['c'] == i_clip)]['c'].values
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
if args.mode=='class':
y.append(torch.LongTensor(label_seq))
elif args.mode=='reg':
y.append(torch.FloatTensor(label_seq))
c.append(torch.LongTensor(clip_seq))
X_len = torch.LongTensor([len(seq) for seq in X])
# pad sequences
X = pad_sequence(X, batch_first=True, padding_value=0)
y = pad_sequence(y, batch_first=True, padding_value=-100)
c = pad_sequence(c, batch_first=True, padding_value=-100)
return (X.to(args.device), X_len.to(args.device),
y.to(args.device), c.to(args.device))
def _group_bhv_df(args, subject_list):
'''
based on behavioral score,
group participants into clusters
if k_class==2:
group top cutoff and bot cutoff
if k_class > 2:
use k_means for grouping
return:
if args.mode=='class'
bhv_df: ['Subject', bhv, 'y']
if args.mode=='reg'
bhv_df: ['Subject', bhv, 'y']
*** return 'y' in reg mode
for kfold balancing
'''
# for kfold balancing
if args.mode=='reg':
args.k_class = 2
# get behavioral data for subject_list
bhv_df = _get_behavioral(subject_list)
bhv_df = bhv_df[['Subject', args.bhv]]
'''
***normalize bhv scores
must be explicitly done for pytorch
'''
b = bhv_df[args.bhv].values
bhv_df[args.bhv] = (b - np.min(b))/(np.max(b) - np.min(b))
# reduce subjects by picking top and bottom 'cutoff' percent
_x = np.sort(bhv_df[args.bhv].values)
percentile = int(np.floor(args.cutoff*len(subject_list)))
bot_cut = _x[percentile]
top_cut = _x[-percentile]
bhv_df = bhv_df[(bhv_df[args.bhv] >= top_cut) |
(bhv_df[args.bhv] <= bot_cut)]
'''
behavioral groups: into 'k_class'
'''
if args.k_class > 2:
_x = bhv_df[[args.bhv]].values
model = KMeans(n_clusters=args.k_class,
random_state=K_SEED)
y = model.fit_predict(_x)
# each participant assigned a label
bhv_df['y'] = y
else:
b = bhv_df[args.bhv].values
y = [1 if ii>=top_cut else 0 for ii in b]
bhv_df['y'] = np.array(y)
return bhv_df
def _bhv_class_df(args):
'''
data for k_class bhv classification
*** used for both classification and regression
    args.mode: 'class' or 'reg'
args.roi: number of ROIs
args.net: number of subnetworks (7 or 17)
args.subnet: subnetwork; 'wb' if all subnetworks
args.bhv: behavioral measure
args.k_class: number of behavioral groups
args.cutoff: percentile for participant cutoff
args.invert_flag: all-but-one subnetwork
save each timepoint as feature vector
append 'c' based on clip
append 'y' based on behavioral group
'''
# optional arguments
d = vars(args)
if 'invert_flag' not in d:
args.invert_flag = False
if 'mode' not in d:
args.mode = 'class'
load_path = (args.input_data + '/data_MOVIE_runs_' +
'roi_%d_net_%d_ts.pkl' %(args.roi, args.net))
with open(load_path, 'rb') as f:
data = pickle.load(f)
subject_list = np.sort(list(data.keys()))
bhv_df = _group_bhv_df(args, subject_list)
cutoff_list = bhv_df['Subject'].values.astype(str)
# where are the clips within the run?
timing_file = pd.read_csv('data/videoclip_tr_lookup.csv')
# pick either all ROIs or subnetworks
if args.subnet!='wb':
if 'minus' in args.subnet:
# remove 'minus_' prefix
args.subnet = args.subnet.split('minus_')[1]
_, nw_info = _get_parcel(args.roi, args.net)
# ***roi ts sorted in preprocessing
nw_info = np.sort(nw_info)
idx = (nw_info == args.subnet)
else:
idx = np.ones(args.roi).astype(bool)
# all-but-one subnetwork
if args.subnet and args.invert_flag:
idx = ~idx
'''
main
'''
clip_y = _get_clip_labels()
table = []
for run in range(K_RUNS):
print('loading run %d/%d' %(run+1, K_RUNS))
run_name = 'MOVIE%d' %(run+1) #MOVIEx_7T_yz
# timing file for run
timing_df = timing_file[
timing_file['run'].str.contains(run_name)]
timing_df = timing_df.reset_index(drop=True)
for subject in data:
if subject in cutoff_list:
# get subject data (time x roi x run)
roi_ts = data[subject][:, idx, run]
for jj, clip in timing_df.iterrows():
start = int(np.floor(clip['start_tr']))
stop = int(np.ceil(clip['stop_tr']))
clip_length = stop - start
# assign label to clip
c = clip_y[clip['clip_name']]
for t in range(clip_length):
act = roi_ts[t + start, :]
t_data = {}
t_data['Subject'] = subject
t_data['timepoint'] = t
for feat in range(roi_ts.shape[1]):
t_data['feat_%d' %(feat)] = act[feat]
t_data['c'] = c
table.append(t_data)
df = pd.DataFrame(table)
df['Subject'] = df['Subject'].astype(int)
# merges on all subject rows!
df = df.merge(bhv_df, on='Subject', how='inner')
return df, bhv_df
def _get_bhv_cpm_seq(data_df, subject_list, args):
'''
return:
X: input seq (batch_size x (FC_size))
y: label seq (batch_size)
in {0, 1, ..} if args.mode=='class'
in R if args.mode=='reg'
c: clip seq (batch_size)
X_len: len of each seq (batch_size x 1)
batch_size <-> number of sequences
time <-> max length after padding
'''
# optional arguments
d = vars(args)
if 'mode' not in d:
args.mode = 'class'
k_clip = len(np.unique(data_df['c']))
features = [ii for ii in data_df.columns if 'feat' in ii]
X, y, b, c = [], [], [], []
for subject in subject_list:
for i_clip in range(k_clip):
if i_clip==0: #split test retest into 4
seqs = data_df[(data_df['Subject']==subject) &
(data_df['c'] == 0)][features].values
label_seqs = data_df[(data_df['Subject']==subject) &
(data_df['c'] == 0)]['y'].values
bhv_seqs = data_df[(data_df['Subject']==subject) &
(data_df['c'] == 0)][args.bhv].values
clip_seqs = data_df[(data_df['Subject']==subject) &
(data_df['c'] == 0)]['c'].values
k_time = int(seqs.shape[0]/K_RUNS)
for i_run in range(K_RUNS):
seq = seqs[i_run*k_time:(i_run+1)*k_time, :]
label_seq = label_seqs[i_run*k_time:(i_run+1)*k_time]
bhv_seq = bhv_seqs[i_run*k_time:(i_run+1)*k_time]
clip_seq = clip_seqs[i_run*k_time:(i_run+1)*k_time]
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
FC = np.corrcoef(seq.T)
vecFC = FC[np.triu_indices_from(FC, k=1)]
X.append(vecFC)
# sanity check
if (np.all(label_seq==label_seq[0]) and
np.all(bhv_seq==bhv_seq[0]) and
np.all(clip_seq==clip_seq[0])):
y.append(label_seq[0])
b.append(bhv_seq[0])
c.append(clip_seq[0])
else:
print('FATAL ERROR')
else:
seq = data_df[(data_df['Subject']==subject) &
(data_df['c'] == i_clip)][features].values
label_seq = data_df[(data_df['Subject']==subject) &
(data_df['c'] == i_clip)]['y'].values
bhv_seq = data_df[(data_df['Subject']==subject) &
(data_df['c'] == i_clip)][args.bhv].values
clip_seq = data_df[(data_df['Subject']==subject) &
(data_df['c'] == i_clip)]['c'].values
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
FC = np.corrcoef(seq.T)
vecFC = FC[np.triu_indices_from(FC, k=1)]
X.append(vecFC)
# sanity check
if (np.all(label_seq==label_seq[0]) and
np.all(bhv_seq==bhv_seq[0]) and
np.all(clip_seq==clip_seq[0])):
y.append(label_seq[0])
b.append(bhv_seq[0])
c.append(clip_seq[0])
else:
print('FATAL ERROR')
if args.mode=='class':
return np.array(X), np.array(y),
|
np.array(b)
|
numpy.array
|
import math
import os
import awswrangler as wr
import boto3
import fsspec
import geopandas as gpd
import numpy as np
import regionmask
import utm
import xarray as xr
from pyproj import Transformer
from s3fs import S3FileSystem
fs = S3FileSystem(requester_pays=True)
def save_to_zarr(ds, url, list_of_variables=None, mode='w', append_dim=None):
"""
Avoid chunking errors while saving a dataset to zarr file
list_of_variables is a list of variables to store, everything else will be dropped
if None then the dataset will be stored as is
"""
mapper = fsspec.get_mapper(url)
if not list_of_variables:
list_of_variables = list(ds.keys())
for v in list_of_variables:
if 'chunks' in ds[v].encoding:
del ds[v].encoding['chunks']
ds[list_of_variables].to_zarr(mapper, mode=mode, append_dim=append_dim, consolidated=True)
def get_transformer(p1=4326, p2=32610):
"""
default p1 p2 transforms from lat/lon to Landsat coordinates
"""
    return Transformer.from_crs(p1, p2)
def get_x_from_latlon(lat, lon, transformer):
x, y = transformer.transform(lat, lon)
return x
def get_y_from_latlon(lat, lon, transformer):
x, y = transformer.transform(lat, lon)
return y
def convert_long3_to_long1(long3):
# see https://confluence.ecmwf.int/pages/viewpage.action?pageId=149337515
long1 = (long3 + 180) % 360 - 180
return long1
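# Illustrative check (not in the original module) of the 0-360 to -180-180 longitude conversion:
assert convert_long3_to_long1(350.0) == -10.0
assert convert_long3_to_long1(10.0) == 10.0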
def open_zarr_file(uri, file_system='s3', consolidated=None):
if not uri.startswith(f'{file_system}://'):
uri = f'{file_system}://{uri}'
mapper = fsspec.get_mapper(uri)
ds = xr.open_zarr(mapper, consolidated=consolidated)
return ds
def open_glah14_data(do_convert_long3_to_long1=True):
data = open_zarr_file("s3://carbonplan-climatetrace/intermediate/glah14.zarr")
if do_convert_long3_to_long1:
data["lon"] = convert_long3_to_long1(data.lon)
return data
def open_glah01_data():
fs = S3FileSystem()
uris = [
f's3://{f}'
for f in fs.ls('s3://carbonplan-climatetrace/intermediate/glah01/')
if not f.endswith('/')
]
ds_list = [open_zarr_file(uri) for uri in uris]
ds = xr.concat(ds_list, dim='record_index').chunk({'record_index': 500})
for k in ds:
_ = ds[k].encoding.pop('chunks', None)
return ds
def align_coords_by_dim_groups(ds_list, dim):
"""
Split ds in ds_list into groups along dimension dim, where ds are in the same group if there is
any overlap (defined as the exact same coordinate value) between the ds and the rest of the group.
Then, align the coordinate values within each group by reindexing each ds in a group with the union
of all ds within that group.
As an example, two ds spanning lat 0-5 and 2-7 would be in the same group along dim=lat, and will be
re-indexed to 0-7. A ds spanning lat 10-15 would be in a separate group. Output is a flattened list
from all groups.
Examples
--------
x1 = xr.Dataset(
{
"temp": (("y", "x"), np.zeros((2,3))),
},
coords={"y": [2, 3], "x": [40, 50, 60]}
)
x2 = xr.Dataset(
{
"temp": (("y", "x"), np.zeros((3,3))),
},
coords={"y": [1, 2, 3], "x": [10, 20, 30]}
)
x3 = xr.Dataset(
{
"temp": (("y", "x"), np.zeros((3,2))),
},
coords={"y": [4, 5, 6], "x": [20, 30]}
)
x4 = xr.Dataset(
{
"temp": (("y", "x"), np.zeros((2,2))),
},
coords={"y": [5, 6], "x": [50, 60]}
)
ds_list = [x1, x2, x3, x4]
for dim in ['x', 'y']:
ds_list = align_coords_by_dim_groups(ds_list, dim)
x = xr.combine_by_coords(ds_list)
print(x.temp)
<xarray.DataArray 'temp' (y: 6, x: 6)>
array([[ 0., 0., 0., nan, nan, nan],
[ 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.],
[nan, 0., 0., nan, nan, nan],
[nan, 0., 0., nan, 0., 0.],
[nan, 0., 0., nan, 0., 0.]])
Coordinates:
* y (y) int64 1 2 3 4 5 6
* x (x) int64 10 20 30 40 50 60
"""
# initiate the groups list and the union index list with the first element
groups = [[ds_list[0]]]
union_indexes = [set(ds_list[0].coords[dim].values)]
for ds in ds_list[1:]:
found = False
ds_index = set(ds.coords[dim].values)
for i, index in enumerate(union_indexes):
            # if there is any overlap between this element and the ones already in the group, add it to the group
if len(ds_index.intersection(index)) > 0:
groups[i].append(ds)
union_indexes[i] = ds_index.union(index)
found = True
break
# else start a new group
if not found:
groups.append([ds])
union_indexes.append(set(ds.coords[dim].values))
out = []
for group, union_index in zip(groups, union_indexes):
for ds in group:
out.append(ds.reindex({dim: sorted(union_index)}))
return out
def open_and_combine_lat_lon_data(folder, tiles=None, lat_lon_box=None, consolidated=None):
"""
Load lat lon data stored as 10x10 degree tiles in folder
If tiles is none, load all data available
If no file is available, return None
"""
fs = S3FileSystem()
if not tiles:
tiles = [
os.path.splitext(os.path.split(path)[-1])[0]
for path in fs.ls(folder)
if not path.endswith('/')
]
uris = [f'{folder}{tile}.zarr' for tile in tiles]
ds_list = []
for uri in uris:
if fs.exists(uri):
da = open_zarr_file(uri, consolidated=consolidated)
# sort lat/lon
if da.lat[0] > da.lat[-1]:
da = da.reindex(lat=da.lat[::-1])
if da.lon[0] > da.lon[-1]:
                da = da.reindex(lon=da.lon[::-1])
# drop extra dimensions
if 'spatial_ref' in da:
da = da.drop_vars('spatial_ref')
# crop to lat/lon box to save on memory
if lat_lon_box is not None:
[min_lat, max_lat, min_lon, max_lon] = lat_lon_box
da = da.sel(lat=slice(min_lat, max_lat), lon=slice(min_lon, max_lon))
if da.dims['lat'] > 0 and da.dims['lon'] > 0:
ds_list.append(da)
if len(ds_list) > 0:
for dim in ['lat', 'lon']:
ds_list = align_coords_by_dim_groups(ds_list, dim)
ds = xr.combine_by_coords(ds_list, combine_attrs="drop_conflicts")
return ds # .chunk({'lat': 2000, 'lon': 2000})
return None
def open_srtm_data(tiles=None):
"""
Load SRTM data stored as 10x10 degree tiles
If tiles is none, load all data available
"""
folder = 's3://carbonplan-climatetrace/intermediate/srtm/'
ds = open_and_combine_lat_lon_data(folder, tiles)
return ds
def open_ecoregion_data(tiles=None, lat_lon_box=None):
"""
Load ecoregion data stored as 10x10 degree tiles
If tiles is none, load all data available
"""
folder = 's3://carbonplan-climatetrace/intermediate/ecoregions_mask/'
return open_and_combine_lat_lon_data(folder, tiles, lat_lon_box=lat_lon_box)
def open_igbp_data(tiles=None, lat_lon_box=None):
"""
Load igbp data stored as 10x10 degree tiles
If tiles is none, load all data available
"""
folder = 's3://carbonplan-climatetrace/intermediate/igbp/'
return open_and_combine_lat_lon_data(folder, tiles, lat_lon_box=lat_lon_box)
def open_burned_area_data(tiles):
"""
Load MODIS burned area data stored as 10x10 degree tiles
If tiles is none, load all data available
"""
folder = 's3://carbonplan-climatetrace/intermediate/modis_burned_area/'
return open_and_combine_lat_lon_data(folder, tiles)
def open_global_igbp_data(lat_lon_box=None):
"""
Load igbp data stored as a global dataset
"""
fs = S3FileSystem()
mapper = fs.get_mapper('s3://carbonplan-climatetrace/intermediate/global_igbp.zarr')
global_igbp = xr.open_zarr(mapper, consolidated=True)
if global_igbp.lat[0] > global_igbp.lat[-1]:
global_igbp = global_igbp.reindex(lat=global_igbp.lat[::-1])
if global_igbp.lon[0] > global_igbp.lon[-1]:
        global_igbp = global_igbp.reindex(lon=global_igbp.lon[::-1])
if lat_lon_box:
[min_lat, max_lat, min_lon, max_lon] = lat_lon_box
global_igbp = global_igbp.sel(lat=slice(min_lat, max_lat), lon=slice(min_lon, max_lon))
return global_igbp.drop_vars(['spatial_ref'])
def open_gez_data():
fp = "s3://carbonplan-climatetrace/inputs/shapes/gez_2010_wgs84.shp"
gez = gpd.read_file(fp)
return gez
def preprocess_gez_data():
"""
From FAO GEZ 2010 data, process to get a shapefile with two shapes, one for the tropic and one for
anything that is not tropic.
"""
fs = S3FileSystem()
gez = open_gez_data()
gez['is_tropics'] = gez.gez_name.apply(lambda x: (x.split()[0]) == 'Tropical').astype(np.int8)
    tropic = gez.dissolve(by='is_tropics')
tropic.to_file("tropics.shp")
s3_folder = 's3://carbonplan-climatetrace/inputs/shapes/'
files = [f'tropics.{ext}' for ext in ['cpg', 'dbf', 'prj', 'shp', 'shx']]
for f in files:
fs.put(f, s3_folder + f)
os.remove(f)
def write_parquet(df, out_path, access_key_id, secret_access_key):
wr.s3.to_parquet(
df=df,
index=True,
path=out_path,
boto3_session=boto3.Session(
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
),
)
def find_matching_records(data, lats, lons, years=None, dtype=None):
"""
    find records in data that are nearest to the locations specified in lats and lons
    lat and lon must be coordinates in data (an xarray dataset/dataarray)
"""
if dtype is not None:
if years is not None:
assert 'year' in data
return (
data.sel(lat=lats, lon=lons, year=years, method="nearest")
.drop_vars(["lat", "lon", "year"])
.astype(dtype)
)
return (
data.sel(lat=lats, lon=lons, method="nearest").drop_vars(["lat", "lon"]).astype(dtype)
)
else:
if years is not None:
assert 'year' in data
return data.sel(lat=lats, lon=lons, year=years, method="nearest").drop_vars(
["lat", "lon", "year"]
)
return data.sel(lat=lats, lon=lons, method="nearest").drop_vars(["lat", "lon"])
def get_lat_lon_tags_from_tile_path(tile_path):
"""
tile_path may be the full path including gs://, folder names, and extension
outputs a lat lon tag eg, (50N, 120W)
"""
fn = os.path.splitext(os.path.split(tile_path)[-1])[0]
lat, lon = fn.split('_')
return lat, lon
def parse_bounding_box_from_lat_lon_tags(lat, lon):
"""
    lat lon strings denoting the upper left corner of a 10x10 degree box, e.g. (50N, 120W)
"""
# the tile name denotes the upper left corner of each tile
if lat.endswith('N'):
max_lat = float(lat[:-1])
elif lat.endswith('S'):
max_lat = -1 * float(lat[:-1])
# each tile covers 10 degree x 10 degree
min_lat = max_lat - 10
if lon.endswith('E'):
min_lon = float(lon[:-1])
elif lon.endswith('W'):
min_lon = -1 * float(lon[:-1])
max_lon = min_lon + 10
return min_lat, max_lat, min_lon, max_lon
def get_lat_lon_tags_from_bounding_box(max_lat, min_lon):
lat_tag = str(abs(math.ceil(max_lat))).zfill(2)
if max_lat >= 0:
lat_tag += 'N'
else:
lat_tag += 'S'
lon_tag = str(abs(math.floor(min_lon))).zfill(3)
if min_lon >= 0:
lon_tag += 'E'
else:
lon_tag += 'W'
return lat_tag, lon_tag
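# Illustrative round trip between tile tags and bounding boxes (not in the original module):
# '50N', '120W' names the tile whose upper-left corner sits at lat 50, lon -120.
_bbox = parse_bounding_box_from_lat_lon_tags('50N', '120W')     # (40.0, 50.0, -120.0, -110.0)
_tags = get_lat_lon_tags_from_bounding_box(_bbox[1], _bbox[2])  # ('50N', '120W')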
def subset_data_for_bounding_box(data, min_lat, max_lat, min_lon, max_lon):
"""
Return a subset of data within the bounding lat/lon box
The function assumes that lat/lon are not coordinates in the data
"""
sub = data.where(
(data.lat > min_lat) & (data.lat <= max_lat) & (data.lon > min_lon) & (data.lon <= max_lon),
drop=True,
)
return sub
def find_tiles_for_bounding_box(min_lat, max_lat, min_lon, max_lon):
"""
return a list of 10x10 degree tile names covering the bounding box
the tile names are in the format of {lat}_{lon} where lat, lon represent the upper left corner
ocean tiles are removed
"""
fs = S3FileSystem()
folder = 's3://carbonplan-climatetrace/intermediate/ecoregions_mask/'
available_tiles = [
os.path.splitext(os.path.split(path)[-1])[0]
for path in fs.ls(folder)
if not path.endswith('/')
]
step = 10
lat_start = math.ceil(min_lat / step) * step
lat_stop = math.ceil(max_lat / step) * step
all_lat_tiles = np.arange(start=lat_start, stop=lat_stop + 1, step=step)
if min_lat == lat_start:
all_lat_tiles = all_lat_tiles[1:]
lon_start = math.floor(min_lon / step) * step
lon_stop = math.floor(max_lon / step) * step
all_lon_tiles = np.arange(start=lon_start, stop=lon_stop + 1, step=step)
if max_lon == lon_stop:
all_lon_tiles = all_lon_tiles[:-1]
out = []
for lat in all_lat_tiles:
for lon in all_lon_tiles:
lat_tag, lon_tag = get_lat_lon_tags_from_bounding_box(lat, lon)
fn = f'{lat_tag}_{lon_tag}'
if fn in available_tiles:
out.append(fn)
return out
# create utm band letter / latitude dictionary
# latitude represents southern edge of letter band
BAND_NUMBERS = list(
|
np.arange(-80, 80, 8)
|
numpy.arange
|
"""Bearing Element module.
This module defines the BearingElement classes which will be used to represent the rotor
bearings and seals. There are 7 different classes to represent bearings options,
and 2 element options with 8 or 12 degrees of freedom.
"""
import os
import warnings
from inspect import signature
import numpy as np
import toml
from plotly import graph_objects as go
from scipy import interpolate as interpolate
from ross.element import Element
from ross.fluid_flow import fluid_flow as flow
from ross.fluid_flow.fluid_flow_coefficients import (
calculate_stiffness_and_damping_coefficients,
)
from ross.units import Q_, check_units
from ross.utils import read_table_file
__all__ = [
"BearingElement",
"SealElement",
"BallBearingElement",
"RollerBearingElement",
"BearingFluidFlow",
"BearingElement6DoF",
"MagneticBearingElement",
]
class BearingElement(Element):
"""A bearing element.
This class will create a bearing element.
Parameters can be a constant value or speed dependent.
For speed dependent parameters, each argument should be passed
as an array and the correspondent speed values should also be
passed as an array.
    Values for each parameter will be interpolated for the speed.
Parameters
----------
n : int
Node which the bearing will be located in
kxx : float, array, pint.Quantity
Direct stiffness in the x direction (N/m).
cxx : float, array, pint.Quantity
Direct damping in the x direction (N*s/m).
kyy : float, array, pint.Quantity, optional
Direct stiffness in the y direction (N/m).
(defaults to kxx)
cyy : float, array, pint.Quantity, optional
Direct damping in the y direction (N*s/m).
(defaults to cxx)
kxy : float, array, pint.Quantity, optional
Cross coupled stiffness in the x direction (N/m).
(defaults to 0)
cxy : float, array, pint.Quantity, optional
Cross coupled damping in the x direction (N*s/m).
(defaults to 0)
kyx : float, array, pint.Quantity, optional
Cross coupled stiffness in the y direction (N/m).
(defaults to 0)
cyx : float, array, pint.Quantity, optional
Cross coupled damping in the y direction (N*s/m).
(defaults to 0)
frequency : array, pint.Quantity, optional
Array with the frequencies (rad/s).
tag : str, optional
A tag to name the element
Default is None.
n_link : int, optional
Node to which the bearing will connect. If None the bearing is
connected to ground.
Default is None.
scale_factor : float, optional
The scale factor is used to scale the bearing drawing.
Default is 1.
color : str, optional
A color to be used when the element is represented.
Default is '#355d7a' (Cardinal).
Examples
--------
>>> # A bearing element located in the first rotor node, with these
>>> # following stiffness and damping coefficients and speed range from
>>> # 0 to 200 rad/s
>>> import ross as rs
>>> kxx = 1e6
>>> kyy = 0.8e6
>>> cxx = 2e2
>>> cyy = 1.5e2
>>> frequency = np.linspace(0, 200, 11)
>>> bearing0 = rs.BearingElement(n=0, kxx=kxx, kyy=kyy, cxx=cxx, cyy=cyy, frequency=frequency)
>>> bearing0.K(frequency) # doctest: +ELLIPSIS
array([[[1000000., 1000000., ...
>>> bearing0.C(frequency) # doctest: +ELLIPSIS
array([[[200., 200., ...
"""
@check_units
def __init__(
self,
n,
kxx,
cxx,
kyy=None,
kxy=0,
kyx=0,
cyy=None,
cxy=0,
cyx=0,
frequency=None,
tag=None,
n_link=None,
scale_factor=1,
color="#355d7a",
**kwargs,
):
if frequency is not None:
self.frequency = np.array(frequency, dtype=np.float64)
else:
self.frequency = frequency
args = ["kxx", "kyy", "kxy", "kyx", "cxx", "cyy", "cxy", "cyx"]
# all args to coefficients
args_dict = locals()
if kyy is None:
args_dict["kyy"] = kxx
if cyy is None:
args_dict["cyy"] = cxx
# check coefficients len for consistency
coefficients_len = []
for arg in args:
coefficient, interpolated = self._process_coefficient(args_dict[arg])
setattr(self, arg, coefficient)
setattr(self, f"{arg}_interpolated", interpolated)
coefficients_len.append(len(coefficient))
if frequency is not None and type(frequency) != float:
coefficients_len.append(len(args_dict["frequency"]))
if len(set(coefficients_len)) > 1:
raise ValueError(
"Arguments (coefficients and frequency)"
" must have the same dimension"
)
else:
for c in coefficients_len:
if c != 1:
raise ValueError(
"Arguments (coefficients and frequency)"
" must have the same dimension"
)
self.n = n
self.n_link = n_link
self.n_l = n
self.n_r = n
self.tag = tag
self.color = color
self.scale_factor = scale_factor
self.dof_global_index = None
def _process_coefficient(self, coefficient):
"""Helper function used to process the coefficient data."""
interpolated = None
if isinstance(coefficient, (int, float)):
if self.frequency is not None and type(self.frequency) != float:
coefficient = [coefficient for _ in range(len(self.frequency))]
else:
coefficient = [coefficient]
if len(coefficient) > 1:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
interpolated = interpolate.UnivariateSpline(
self.frequency, coefficient
)
# dfitpack.error is not exposed by scipy
# so a bare except is used
except:
try:
if len(self.frequency) in (2, 3):
interpolated = interpolate.interp1d(
self.frequency,
coefficient,
kind=len(self.frequency) - 1,
fill_value="extrapolate",
)
except:
raise ValueError(
"Arguments (coefficients and frequency)"
" must have the same dimension"
)
else:
interpolated = interpolate.interp1d(
[0, 1],
[coefficient[0], coefficient[0]],
kind="linear",
fill_value="extrapolate",
)
return coefficient, interpolated
def plot(
self,
coefficients=None,
frequency_units="rad/s",
stiffness_units="N/m",
damping_units="N*s/m",
fig=None,
**kwargs,
):
"""Plot coefficient vs frequency.
Parameters
----------
coefficients : list, str
List or str with the coefficients to plot.
frequency_units : str
Frequency units.
Default is rad/s.
        stiffness_units : str
            Stiffness units.
            Default is N/m.
        damping_units : str
            Damping units.
            Default is N*s/m.
**kwargs : optional
Additional key word arguments can be passed to change the plot layout only
(e.g. width=1000, height=800, ...).
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
Example
-------
>>> bearing = bearing_example()
>>> fig = bearing.plot('kxx')
>>> # fig.show()
"""
if fig is None:
fig = go.Figure()
if isinstance(coefficients, str):
coefficients = [coefficients]
# check coefficients consistency
coefficients_set = set([coeff[0] for coeff in coefficients])
if len(coefficients_set) > 1:
raise ValueError("Can only plot stiffness or damping in the same plot.")
coeff_to_plot = coefficients_set.pop()
if coeff_to_plot == "k":
default_units = "N/m"
y_units = stiffness_units
else:
default_units = "N*s/m"
y_units = damping_units
frequency_range = np.linspace(min(self.frequency), max(self.frequency), 30)
for coeff in coefficients:
y_value = (
Q_(
getattr(self, f"{coeff}_interpolated")(frequency_range),
default_units,
)
.to(y_units)
.m
)
frequency_range = Q_(frequency_range, "rad/s").to(frequency_units).m
fig.add_trace(
go.Scatter(
x=frequency_range,
y=y_value,
mode="lines",
showlegend=False,
hovertemplate=f"Frequency ({frequency_units}): %{{x:.2f}}<br> Coefficient ({y_units}): %{{y:.3e}}",
)
)
fig.update_xaxes(title_text=f"Frequency ({frequency_units})")
fig.update_yaxes(exponentformat="power")
fig.update_layout(**kwargs)
return fig
def __repr__(self):
"""Return a string representation of a bearing element.
Returns
-------
A string representation of a bearing element object.
Examples
--------
>>> bearing = bearing_example()
>>> bearing # doctest: +ELLIPSIS
BearingElement(n=0, n_link=None,
kxx=[...
"""
return (
f"{self.__class__.__name__}"
f"(n={self.n}, n_link={self.n_link},\n"
f" kxx={self.kxx}, kxy={self.kxy},\n"
f" kyx={self.kyx}, kyy={self.kyy},\n"
f" cxx={self.cxx}, cxy={self.cxy},\n"
f" cyx={self.cyx}, cyy={self.cyy},\n"
f" frequency={self.frequency}, tag={self.tag!r})"
)
def __eq__(self, other):
"""Equality method for comparasions.
Parameters
----------
other: object
The second object to be compared with.
Returns
-------
bool
True if the comparison is true; False otherwise.
Examples
--------
>>> bearing1 = bearing_example()
>>> bearing2 = bearing_example()
>>> bearing1 == bearing2
True
"""
compared_attributes = [
"kxx",
"kyy",
"kxy",
"kyx",
"cxx",
"cyy",
"cxy",
"cyx",
"frequency",
]
if isinstance(other, self.__class__):
init_args = []
for arg in signature(self.__init__).parameters:
if arg not in ["kwargs"]:
init_args.append(arg)
init_args_comparison = []
for arg in init_args:
comparison = getattr(self, arg) == getattr(other, arg)
try:
comparison = all(comparison)
except TypeError:
pass
init_args_comparison.append(comparison)
init_args_comparison = all(init_args_comparison)
attributes_comparison = all(
(
(
np.array(getattr(self, attr)) == np.array(getattr(other, attr))
).all()
for attr in compared_attributes
)
)
return init_args_comparison and attributes_comparison
return False
def __hash__(self):
return hash(self.tag)
def save(self, file):
try:
data = toml.load(file)
except FileNotFoundError:
data = {}
# save initialization args and coefficients
args = list(signature(self.__init__).parameters)
args += [
"kxx",
"kyy",
"kxy",
"kyx",
"cxx",
"cyy",
"cxy",
"cyx",
]
brg_data = {}
for arg in args:
if arg not in ["kwargs"]:
brg_data[arg] = self.__dict__[arg]
# change np.array to lists so that we can save in .toml as list(floats)
for k, v in brg_data.items():
if isinstance(v, np.generic):
brg_data[k] = brg_data[k].item()
elif isinstance(v, np.ndarray):
brg_data[k] = brg_data[k].tolist()
# case for a container with np.float (e.g. list(np.float))
else:
try:
brg_data[k] = [i.item() for i in brg_data[k]]
except (TypeError, AttributeError):
pass
data[f"{self.__class__.__name__}_{self.tag}"] = brg_data
with open(file, "w") as f:
toml.dump(data, f)
def dof_mapping(self):
"""Degrees of freedom mapping.
Returns a dictionary with a mapping between degree of freedom and its
index.
Returns
-------
dof_mapping : dict
A dictionary containing the degrees of freedom and their indexes.
Examples
--------
The numbering of the degrees of freedom for each node.
Being the following their ordering for a node:
x_0 - horizontal translation
y_0 - vertical translation
>>> bearing = bearing_example()
>>> bearing.dof_mapping()
{'x_0': 0, 'y_0': 1}
"""
return dict(x_0=0, y_0=1)
def M(self):
"""Mass matrix for an instance of a bearing element.
This method returns the mass matrix for an instance of a bearing
element.
Returns
-------
M : np.ndarray
Mass matrix (kg).
Examples
--------
>>> bearing = bearing_example()
>>> bearing.M()
array([[0., 0.],
[0., 0.]])
"""
M = np.zeros_like(self.K(0))
return M
def K(self, frequency):
"""Stiffness matrix for an instance of a bearing element.
This method returns the stiffness matrix for an instance of a bearing
element.
Parameters
----------
frequency : float
The excitation frequency (rad/s).
Returns
-------
K : np.ndarray
A 2x2 matrix of floats containing the kxx, kxy, kyx, and kyy values.
Examples
--------
>>> bearing = bearing_example()
>>> bearing.K(0)
array([[1000000., 0.],
[ 0., 800000.]])
"""
kxx = self.kxx_interpolated(frequency)
kyy = self.kyy_interpolated(frequency)
kxy = self.kxy_interpolated(frequency)
kyx = self.kyx_interpolated(frequency)
K = np.array([[kxx, kxy], [kyx, kyy]])
if self.n_link is not None:
# fmt: off
K = np.vstack((np.hstack([K, -K]),
np.hstack([-K, K])))
# fmt: on
return K
def C(self, frequency):
"""Damping matrix for an instance of a bearing element.
This method returns the damping matrix for an instance of a bearing
element.
Parameters
----------
frequency : float
The excitation frequency (rad/s).
Returns
-------
C : np.ndarray
A 2x2 matrix of floats containing the cxx, cxy, cyx, and cyy values (N*s/m).
Examples
--------
>>> bearing = bearing_example()
>>> bearing.C(0)
array([[200., 0.],
[ 0., 150.]])
"""
cxx = self.cxx_interpolated(frequency)
cyy = self.cyy_interpolated(frequency)
cxy = self.cxy_interpolated(frequency)
cyx = self.cyx_interpolated(frequency)
C =
|
np.array([[cxx, cxy], [cyx, cyy]])
|
numpy.array
|
"""
Functions for handling radian values.
"""
from matplotlib.ticker import ScalarFormatter
from numpy import pi
import numpy as np
two_pi = 2.0 * pi
# Circular differences
def cdiffsc(a, b, degrees=False):
"""Smallest circular difference between two scalar angles."""
    CIRC_MAX = 360 if degrees else two_pi
delta = (a % CIRC_MAX) - (b % CIRC_MAX)
mag = abs(delta)
if mag > CIRC_MAX / 2:
if delta > 0:
return delta - CIRC_MAX
else:
return CIRC_MAX - mag
else:
return delta
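# Illustrative checks (not part of the original module): the smallest signed difference
# wraps around the circle, e.g. 350 deg vs 10 deg is -20 deg rather than +340 deg.
assert cdiffsc(350, 10, degrees=True) == -20
assert abs(cdiffsc(np.radians(350), np.radians(10)) + np.radians(20)) < 1e-12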
def cdiff(u, v):
"""Smallest circular difference between two angle arrays."""
if not (np.iterable(u) or np.iterable(v)):
return cdiffsc(u, v)
delta = np.fmod(u, two_pi) - np.fmod(v, two_pi)
mag = np.absolute(delta)
res =
|
np.empty_like(delta)
|
numpy.empty_like
|
from milk.supervised.lasso import lasso_learner
import milk.supervised.lasso
import numpy as np
def test_lasso_smoke():
np.random.seed(3)
    for i in range(8):
X = np.random.rand(100,10)
Y = np.random.rand(5,10)
B = np.random.rand(5,100)
before = np.linalg.norm(Y - np.dot(B,X))
        B = milk.supervised.lasso.lasso(X, Y)
after = np.linalg.norm(Y - np.dot(B,X))
assert after < before
assert np.all(~
|
np.isnan(B)
|
numpy.isnan
|
from harold import (State, Transfer, feedback, lqr, matrix_slice,
concatenate_state_matrices)
from numpy import array, eye
from numpy.testing import assert_equal, assert_almost_equal, assert_raises
def test_feedback_wrong_inputs():
G = Transfer(1, [1, 1])
H = Transfer([3, 1], [1, 2], dt=0.01)
assert_raises(ValueError, feedback, G, H)
assert_raises(ValueError, feedback, G, [1, 2])
assert_raises(ValueError, feedback, G, 5+1j)
def test_feedback_wellposedness():
G = State(eye(3), [[1], [1], [1]], [1, 1, 1], [1])
assert_raises(ValueError, feedback, G, 1+0j)
def test_feedback_static_static():
G = State(5)
H = Transfer(4)
assert_almost_equal(feedback(G, G).d, array([[10.208333333333334]]))
assert_almost_equal(feedback(G, H).d,
|
array([[10.263157894736842]])
|
numpy.array
|
import pymesh
import numpy as np
from scipy import sparse
np.set_printoptions(precision=4, linewidth=250, suppress=True)
# mesh = pymesh.load_mesh("../data/simple_cube.obj")
mesh = pymesh.load_mesh("../data/simple_strange_cube.obj")
# mesh = pymesh.load_mesh("../data/teapot.obj") # carefull teapot seems to have double vertices! my gradient does not work for this
num_V = len(mesh.vertices)
print("num_vertices", num_V)
mesh.enable_connectivity()
neighbours = mesh.get_vertex_adjacent_vertices(0)
print(neighbours)
# print("control teapot:", 2659, 2683, 2773, 2837, 2937, 2984)
print("control simple_cube:", 1, 2, 4, 6, 7)
assembler = pymesh.Assembler(mesh)
L = assembler.assemble("laplacian")
print(type(L))
print(np.shape(L))
def build_Laplacian(mesh, num_V, anchor_weight=1, is_pymesh=False):
"""
Build a Laplacian sparse matrix between the vertices of a triangular mesh
This mainly follow work from:
"Laplacian Mesh Optimization" (<NAME> et al. 2006)
and an implementation from:
https://github.com/bmershon/laplacian-meshes/blob/master/LaplacianMesh.py
:param mesh:
:param num_V: num_vertices
:return:
"""
# declare variables to build sparse matrix
I = []
J = []
V = []
    # find anchoring points (boundary vertices, i.e. vertices with an incident edge that does not have two triangles attached to it)
anchors = []
    # build sparse Laplacian matrix with cotangent weights
# for vertex in range(num_V):
for vertex in range(num_V):
# get neighbors vertices of "vertex" -> found here:
# https://stackoverflow.com/questions/12374781/how-to-find-all-neighbors-of-a-given-point-in-a-delaunay-triangulation-using-sci
if is_pymesh:
v_neighbors = mesh.get_vertex_adjacent_vertices(vertex)
else:
v_neighbors = mesh.vertex_neighbor_vertices[1] \
[mesh.vertex_neighbor_vertices[0][vertex]:mesh.vertex_neighbor_vertices[0][vertex + 1]]
weights = []
z = len(v_neighbors)
I = I + ([vertex] * (z + 1)) # repeated row
J = J + v_neighbors.tolist() + [vertex] # column indices and this row
is_anchor = False
# for v_neighbor in v_neighbors:
for v_neighbor in v_neighbors:
if is_pymesh:
# get faces that touches the vertex
vertex_faces = mesh.get_vertex_adjacent_faces(vertex)
# get faces that touches the second vertex
v_neigh_faces = mesh.get_vertex_adjacent_faces(v_neighbor)
# keep only the faces that has the two vertices in common
common_faces = vertex_faces[np.nonzero(
|
np.in1d(vertex_faces, v_neigh_faces)
|
numpy.in1d
|
import numpy as np
import time
"""
input hidden output
vector network vector
f1
f2 w1
f3 w2 sum
f4 w3
f5 wn
fn
[1, [5x5]
2, [1] - w1 [5x5]
3, [1] - w2 [5x5]
4, . [1] - w3 = [5x5]
5, [1] - wn [5x5]
n]
^^^^ WRONG ^^^
|factors| |hidden in| |hidden out|
[1,
2,
[1,2,3,4,5] (dot) 3, = [55]
4,
5] [55, [1
-----> 55, ----> [55,55,55] (dot) 1 = score
55] 1]
[1,
2,
(dot) 3, = [55]
4,
5,]
basically
1x5 multiplied by a 5xN = 1xN where N is the number of weights
and that can be converted to a sum in the final step
"""
def doNothing(x = None, y = None):
pass
def sigmoid(x):
return 1 / (1 + np.exp(-x))
activation_function = sigmoid
class scoringMatrix:
def __init__(self, num_of_factors, num_of_weights, learning_rate):
self.num_of_factors = num_of_factors
self.num_of_weights = num_of_weights
self.learning_rate = learning_rate
self.create_weight_matrice()
def create_weight_matrice(self):
self.weights_in_hidden = np.random.rand(self.num_of_factors, self.num_of_weights)
self.weights_hidden_out = np.random.rand(self.num_of_weights, 1)
def train(self, input_vector, target_vector):#Changed input
# input_vector and target_vector can be tuple, list or ndarray
input_vector = np.array(input_vector, ndmin=2)
target_vector = np.array(target_vector, ndmin=2)
#input
output_vector1 = np.dot(input_vector, self.weights_in_hidden)
output_vector_hidden = output_vector1
#output
output_vector2 = np.dot(output_vector_hidden, self.weights_hidden_out)
output_errors = target_vector - output_vector2
"""
I have the error
i get the relative error
I multiply the last or hidden out by a certain function
sigma()
I multiply the second or hidden in by a certain function
sigma()*(1 - sigma())
so if i get close to zero my error reaches zero and
relative is zero
if i am above
error is positive
and sigmoid > 0.5
and i want to lower the weights
error is negative
sigmoid is < 0.5
and i want to increase the weights
"""
error = output_errors[0,0]
relative_error = error/target_vector[0,0]
#print(output_vector2)
#print("Output Error", output_errors)
#----------------------------PART 1------------------------------#
coefficient_of_error = -1 * error * (sigmoid(error)) * self.learning_rate
"""
if( coefficient_of_error > 0):
print("+++++++++++++")
else:
print("-------------")
"""
self.weights_hidden_out = (1 - coefficient_of_error) * self.weights_hidden_out
#----------------------------PART 2------------------------------#
hidden_errors = np.dot(output_errors, self.weights_hidden_out.T) #returns [value1, value2, value3, ... valueN]
identity_matrix = np.zeros((self.num_of_weights,self.num_of_weights))
i = 0
for err in hidden_errors[0]:
coefficient_of_error = -1* err * (sigmoid(err)) * self.learning_rate * (1-sigmoid(err)) #normalize it
identity_matrix[i][i] = coefficient_of_error
i+=1
tmp = (np.dot(self.weights_in_hidden , identity_matrix)) #matrix that has been modified
self.weights_in_hidden += tmp
def run(self, input_vector):
output_vector = np.dot(input_vector, self.weights_in_hidden)
output_vector = np.dot(output_vector, self.weights_hidden_out)
return output_vector
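# Hypothetical usage sketch (not part of the original script): train the small scoring network on a
# single 5-factor example and read back its score; the numbers here are illustrative only.
_sm = scoringMatrix(num_of_factors=5, num_of_weights=3, learning_rate=0.01)
for _ in range(100):
    _sm.train([1, 2, 3, 4, 5], [[55]])
print(_sm.run([1, 2, 3, 4, 5]))  # prints the network's current score for this input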
class scoringMatrixOverTime:
def __init__(self, num_of_factors = None, num_of_weights = None, learning_rate = None, method = None, in_matrix = None, out_matrix = None):
if learning_rate is not None:
self.num_of_factors = num_of_factors
self.num_of_weights = num_of_weights
self.learning_rate = learning_rate
self.method = method
self.create_weight_matrice()#CHANGED
else:
self.weights_in_hidden = in_matrix
self.weights_hidden_out = out_matrix
def create_weight_matrice(self, weight_in = None, weight_out = None):
self.weights_in_hidden = weight_in
if (weight_in) is None:
print("NONE")
self.weights_in_hidden = np.random.rand(self.num_of_factors, self.num_of_weights)
#for i in range(len(self.weights_in_hidden)):
# if (i%2 == 0):
# self.weights_in_hidden[i] *= -1
self.weights_hidden_out = weight_out #__________________THIS IS THE CAUSE OF MY PAIN____a frickin s
if (weight_out) is None:
print("NONE")
self.weights_hidden_out = np.random.rand(self.num_of_weights, 1)
def train(self, input_set, target_vector, start_value = 0):#Changed input
# input_vector and target_vector can be tuple, list or ndarray
output_vector2 = np.zeros(shape = (1,1))
output_vector2[0,0] = start_value
temp_sum = 0#TEMP
#RUN THROUGH AND FIGURE OUT ERROR IF WE JUST INTIALLY RAN IT#
#------------------------------------------------------------------------------------------------------#
target_vector = np.array(target_vector, ndmin=2)
for input_vector in input_set:
input_vector = np.array(input_vector, ndmin=2)
temp_sum += input_vector[0,self.num_of_factors-1]#----------------------TEMP------------------
#input
output_vector1 =
|
np.dot(input_vector, self.weights_in_hidden)
|
numpy.dot
|
import numpy as np
import mars.tensor as mt
import time
CN = 1000000000.0
def simple():
N = 200_000_000
a = np.random.uniform(-1, 1, size=(N, 2))
t1 = time.time_ns()
npy_norm = np.linalg.norm(a, axis=1)
t2 = time.time_ns()
npy_less = npy_norm < 1
t3 = time.time_ns()
npy_ag = npy_less.sum() * 4 / N
t4 = time.time_ns()
print(f"Numpy Time : Norm = {(t2 - t1) / CN}, "
f"Less Than 1 = {(t3 - t2) / CN}, "
f"Aggregation = {(t4 - t3) / CN},"
f"Total = {(t4 - t1) / CN}")
a = mt.random.uniform(-1, 1, size=(N, 2))
t1 = time.time_ns()
mt_norm = mt.linalg.norm(a, axis=1)
t2 = time.time_ns()
mt_less = mt_norm < 1
t3 = time.time_ns()
mt_ag = mt_less.sum() * 4 / N
mt_ag.execute()
t4 = time.time_ns()
# print(((mt.linalg.norm(a, axis=1) < 1)
# .sum() * 4 / N).execute())
print(f"Mars Time : Norm = {(t2 - t1) / CN}, "
f"Less Than 1 = {(t3 - t2) / CN}, "
f"Aggregation = {(t4 - t3) / CN}, "
f", Total = {(t4 - t1) / CN}")
def matmul():
row = 30_000
col = 2
a = np.arange(row * col)
b = np.reshape(a, [row, col])
c = np.reshape(a, [col, row])
t1 = time.time_ns()
d = np.matmul(b, c)
sum = d.sum()
print(f"Numpy Mat Mul Time [{row}] x [{row}] => SUM {sum}, Time = {(time.time_ns() - t1) / CN}")
a = mt.arange(row * col)
b = mt.reshape(a, [row, col])
c = mt.reshape(a, [col, row])
t1 = time.time_ns()
d: mt = mt.matmul(b, c)
sum = d.sum().execute()
print(f"Mars Mat Mul Time [{row}] x [{row}] => SUM {sum}, Time = {(time.time_ns() - t1) / CN}")
def scalar_mul():
row = 100_000_000
col = 2
a = np.arange(row * col)
b =
|
np.reshape(a, [row, col])
|
numpy.reshape
|
'''Additional functions
prediction standard errors and confidence intervals
A: josef pktd
'''
import numpy as np
from scipy import stats
def atleast_2dcol(x):
''' convert array_like to 2d from 1d or 0d
not tested because not used
'''
x = np.asarray(x)
if (x.ndim == 1):
x = x[:, None]
elif (x.ndim == 0):
x = np.atleast_2d(x)
    elif (x.ndim > 2):
raise ValueError('too many dimensions')
return x
def wls_prediction_std(res, exog=None, weights=None, alpha=0.05):
'''calculate standard deviation and confidence interval for prediction
applies to WLS and OLS, not to general GLS,
that is independently but not identically distributed observations
Parameters
----------
res : regression result instance
results of WLS or OLS regression required attributes see notes
exog : array_like (optional)
exogenous variables for points to predict
weights : scalar or array_like (optional)
weights as defined for WLS (inverse of variance of observation)
    alpha : float (default: alpha = 0.05)
confidence level for two-sided hypothesis
Returns
-------
predstd : array_like, 1d
standard error of prediction
same length as rows of exog
interval_l, interval_u : array_like
        lower and upper confidence bounds
Notes
-----
The result instance needs to have at least the following
res.model.predict() : predicted values or
res.fittedvalues : values used in estimation
res.cov_params() : covariance matrix of parameter estimates
If exog is 1d, then it is interpreted as one observation,
i.e. a row vector.
testing status: not compared with other packages
References
----------
Greene p.111 for OLS, extended to WLS by analogy
'''
# work around current bug:
# fit doesn't attach results to model, predict broken
#res.model.results
covb = res.cov_params()
if exog is None:
exog = res.model.exog
predicted = res.fittedvalues
else:
exog = np.atleast_2d(exog)
if covb.shape[1] != exog.shape[1]:
raise ValueError('wrong shape of exog')
predicted = res.model.predict(res.params, exog)
if weights is None:
weights = res.model.weights
# full covariance:
#predvar = res3.mse_resid + np.diag(np.dot(X2,np.dot(covb,X2.T)))
    # prediction variance only
predvar = res.mse_resid/weights + (exog * np.dot(covb, exog.T).T).sum(1)
predstd =
|
np.sqrt(predvar)
|
numpy.sqrt
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import numpy as np
from datetime import datetime as dt
from cuda import cudart
import tensorrt as trt
os.environ['TF_ENABLE_DEPRECATION_WARNINGS'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
np.random.seed(97)
tf.compat.v1.set_random_seed(97)
epsilon = 1e-6
nBatchSize, nSequenceLength, nInputDim, nHiddenDim = 2, 4, 7, 5
inputX = np.random.rand(nBatchSize, nSequenceLength, nInputDim).astype(np.float32).reshape([nBatchSize, nSequenceLength, nInputDim])
inputH = np.random.rand(nBatchSize, nHiddenDim).astype(np.float32).reshape([nBatchSize, nHiddenDim])
inputC = np.random.rand(nBatchSize, nHiddenDim).astype(np.float32).reshape([nBatchSize, nHiddenDim])
def check(a, b, weak=False, info=""):
if weak:
res = np.all(np.abs(a - b) < epsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + epsilon))
print("check %s:" % info, res, diff0, diff1)
def printArray(x, info="", n=5):
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
#print('\t',x.reshape(-1)[:n])
# for debug
def smallTest():
def sigmoid(x):
return 1 / (1 + np.exp(-x))
para = np.load('test?.npz')
weight = [np.split(i, [nInputDim], axis=0) for i in np.split(para['?/kernel:0'], 4, axis=1)]
bias = np.split(para['?/bias:0'], 4)
h, c = h0, c0
for t in range(nSequenceLength):
x = x0[:, t, :]
it = sigmoid(np.matmul(x, weight[0][0]) + np.matmul(h, weight[0][1]) + bias[0])
ct_ = np.tanh(np.matmul(x, weight[1][0]) + np.matmul(h, weight[1][1]) + bias[1])
ft = sigmoid(np.matmul(x, weight[2][0]) + np.matmul(h, weight[2][1]) + bias[2])
ot = sigmoid(np.matmul(x, weight[3][0]) + np.matmul(h, weight[3][1]) + bias[3])
ct = ft * c0 + it * ct_
ht = ot * np.tanh(ct)
print("ht=\n", ht, "\nct=\n", ct)
h = ht
c = ct
print("here")
return
def test1():
print("\ntf.keras.layers.LSTM 或 tf.keras.layers.LSTMCell + tf.keras.layers.RNN")
# TensorFlow part ----------------------------------------------------------
x = tf.compat.v1.placeholder(tf.float32, [None, nSequenceLength, nInputDim], name='x')
h0 = tf.compat.v1.placeholder(tf.float32, [None, nHiddenDim], name='h0')
c0 = tf.compat.v1.placeholder(tf.float32, [None, nHiddenDim], name='c0')
if True:
# Use tf.keras.layers.LSTM
lstm = tf.compat.v1.keras.layers.LSTM( \
nHiddenDim,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1),
recurrent_initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1),
unit_forget_bias=False,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
implementation=1,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False
)
else:
# Equivalent implementation using tf.keras.layers.LSTMCell + tf.keras.layers.RNN
cell = tf.keras.layers.LSTMCell( \
nHiddenDim,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1),
recurrent_initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1),
unit_forget_bias=False,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
)
lstm = tf.keras.layers.RNN( \
cell,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False
)
y, h1, c1 = lstm(x, initial_state=[h0, c0])
tfConfig = tf.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf.compat.v1.Session(config=tfConfig)
sess.run(tf.compat.v1.global_variables_initializer())
outputTF, outputTFh1, outputTFc1 = sess.run([y, h1, c1], feed_dict={x: inputX, h0: inputH, c0: inputC})
tfPara = {}
print("Weight:")
for i in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES):
name, value = i.name, sess.run(i)
print(name, value.shape)
tfPara[name] = value
np.savez("test1.npz", **tfPara)
sess.close()
# TensorRT part ------------------------------------------------------------
if True:
# Implemented with a Loop structure, so dynamic shape mode is supported
# The weights are written as constant tensors here, so the Refit feature is not supported
# The two equivalent TensorRT implementations do not correspond one-to-one with the two equivalent TensorFlow implementations above; all four combinations produce correct results
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 4 << 30
inputT0 = network.add_input('inputT0', trt.DataType.FLOAT, (-1, -1, nInputDim))
inputT1 = network.add_input('inputT1', trt.DataType.FLOAT, (-1, nHiddenDim))
inputT2 = network.add_input('inputT2', trt.DataType.FLOAT, (-1, nHiddenDim))
profile.set_shape(inputT0.name, (1, 1, nInputDim), (nBatchSize, nSequenceLength, nInputDim), (nBatchSize * 2, nSequenceLength * 2, nInputDim))
profile.set_shape(inputT1.name, (1, nHiddenDim), (nBatchSize, nHiddenDim), (nBatchSize * 2, nHiddenDim))
profile.set_shape(inputT2.name, (1, nHiddenDim), (nBatchSize, nHiddenDim), (nBatchSize * 2, nHiddenDim))
config.add_optimization_profile(profile)
para = np.load('test1.npz')  # numpy.load
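# A hedged sketch of how the loaded weights could be split per LSTM gate,
# mirroring smallTest() above. The exact key prefix inside test1.npz is not shown
# here, so '<layer>' is a placeholder for whatever name TensorFlow saved.
weight = [np.split(w, [nInputDim], axis=0) for w in np.split(para['<layer>/kernel:0'], 4, axis=1)]
bias = np.split(para['<layer>/bias:0'], 4)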
import os
import sys
import json
import torch
import numpy as np
from torch import nn
import matplotlib
# matplotlib.use("pgf")
matplotlib.rcParams.update({
# 'font.family': 'serif',
'font.size':12,
})
from matplotlib import pyplot as plt
import pytorch_lightning as pl
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import TensorBoardLogger
seed_everything(42)
import DiffNet
from DiffNet.networks.wgan import GoodNetwork
from DiffNet.DiffNetFDM import DiffNetFDM
from DiffNet.datasets.single_instances.klsum import Dataset
class Poisson(DiffNetFDM):
"""docstring for Poisson"""
def __init__(self, network, dataset, **kwargs):
super(Poisson, self).__init__(network, dataset, **kwargs)
def test(self):
x = torch.linspace(0, 1., 64)
y = torch.linspace(0, 1., 64)
xv, yv = torch.meshgrid(x, y)
print("x = ", xv)
print("y = ", yv)
print("sobelx = ", self.sobelx)
print("sobely = ", self.sobely)
print("sobelxx = ", self.sobelxx)
print("sobelyy = ", self.sobelyy)
sinx = torch.sin(np.pi * xv).type_as(next(self.network.parameters()))
dxsinx = nn.functional.conv2d(sinx.unsqueeze(0).unsqueeze(0), self.sobelx)
dysinx = nn.functional.conv2d(sinx.unsqueeze(0).unsqueeze(0), self.sobely)
fig, axs = plt.subplots(2, 2, figsize=(2*2,1.2*2),
subplot_kw={'aspect': 'auto'}, sharex=True, sharey=True, squeeze=True)
for ax_row in axs:
for ax in ax_row:
ax.set_xticks([])
ax.set_yticks([])
im0 = axs[0][0].imshow(sinx.squeeze().detach().cpu(),cmap='jet')
fig.colorbar(im0, ax=axs[0,0])
im1 = axs[1][0].imshow(dxsinx.squeeze().detach().cpu(),cmap='jet')
fig.colorbar(im1, ax=axs[1,0])
im1 = axs[1][1].imshow(dysinx.squeeze().detach().cpu(),cmap='jet')
fig.colorbar(im1, ax=axs[1,1])
plt.savefig(os.path.join(self.logger[0].log_dir, 'check_' + str(self.current_epoch) + '.png'))
# self.logger[0].experiment.add_figure('Contour Plots', fig, self.current_epoch)
plt.close('all')
exit()
def loss(self, u, inputs_tensor, forcing_tensor):
# self.test()
f = forcing_tensor # renaming variable
# extract diffusivity and boundary conditions here
nu = inputs_tensor[:,0:1,:,:]
bc1 = inputs_tensor[:,1:2,:,:]
bc2 = inputs_tensor[:,2:3,:,:]
# apply boundary conditions
u = torch.where(bc1>0.5,1.0+u*0.0,u)
u = torch.where(bc2>0.5,u*0.0,u)
u_x = nn.functional.conv2d(u, self.sobelx)
u_y = nn.functional.conv2d(u, self.sobely)
u_xx = nn.functional.conv2d(u, self.sobelxx)
u_yy = nn.functional.conv2d(u, self.sobelyy)
u_laplacian = u_xx + u_yy
nu_x = nn.functional.conv2d(nu, self.sobelx)
nu_y = nn.functional.conv2d(nu, self.sobely)
gradU_DOT_gradNU = torch.mul(u_x, nu_x) + torch.mul(u_y, nu_y)
# print("size of nu_x = ", nu_x.shape)
# print("size of nu[:,:,1:-1,1:-1] = ", nu[:,:,1:-1,1:-1].shape)
# print("size of u_x = ", u_x.shape)
# print("size of u_laplacian = ", u_laplacian.shape)
# exit()
res = gradU_DOT_gradNU + torch.mul(nu[:,:,1:-1,1:-1], u_laplacian)
# print("res size = ", (res.view(u.shape[0], -1)).shape)
# loss1 = torch.norm(res.view(u.shape[0], -1), p=1, dim=1)
# loss2 = torch.norm(res.view(u.shape[0], -1), p=2, dim=1)
# print("loss1 = ", loss1, ", size = ", loss1.shape)
# print("loss2 = ", loss2, ", size = ", loss2.shape)
# exit()
# return (0.1*loss1 + 0.9*loss2)
return torch.sum(res, 1)**2
# nu_gp = self.gauss_pt_evaluation(nu)
# f_gp = self.gauss_pt_evaluation(f)
# u_gp = self.gauss_pt_evaluation(u)
# u_x_gp = self.gauss_pt_evaluation_der_x(u)
# u_y_gp = self.gauss_pt_evaluation_der_y(u)
# transformation_jacobian = self.gpw.unsqueeze(-1).unsqueeze(-1).unsqueeze(0).type_as(nu_gp)
# res_elmwise = transformation_jacobian * (nu_gp * (u_x_gp**2 + u_y_gp**2) - (u_gp * f_gp))
# res_elmwise = torch.sum(res_elmwise, 1)
# loss = torch.mean(res_elmwise)
# return loss
def forward(self, batch):
inputs_tensor, forcing_tensor = batch
return self.network[0], inputs_tensor, forcing_tensor
def configure_optimizers(self):
"""
Configure optimizer for network parameters
"""
# lr = self.learning_rate
# opts = [torch.optim.LBFGS(self.network, lr=1.0, max_iter=10)]
# return opts, []
opts = [torch.optim.Adam(self.network.parameters(), lr=1e-1), torch.optim.LBFGS(self.network, lr=1.0, max_iter=10)]
schd = []
# schd = [torch.optim.lr_scheduler.MultiStepLR(opts[0], milestones=[2, 5, 8, 12, 16, 20], gamma=0.1)]
schd = [torch.optim.lr_scheduler.ExponentialLR(opts[0], gamma=0.5)]
return opts, schd
def training_step(self, batch, batch_idx, optimizer_idx):
u, inputs_tensor, forcing_tensor = self.forward(batch)
loss_val = self.loss(u, inputs_tensor, forcing_tensor).mean()
self.log('PDE_loss', loss_val.item())
self.log('loss', loss_val.item())
return loss_val
def on_epoch_end(self):
fig, axs = plt.subplots(1, 2, figsize=(2*2,1.2),
subplot_kw={'aspect': 'auto'}, sharex=True, sharey=True, squeeze=True)
for ax in axs:
ax.set_xticks([])
ax.set_yticks([])
self.network.eval()
inputs, forcing = self.dataset[0]
u, inputs_tensor, forcing_tensor = self.forward((inputs.unsqueeze(0).type_as(next(self.network.parameters())), forcing.unsqueeze(0).type_as(next(self.network.parameters()))))
f = forcing_tensor # renaming variable
# extract diffusivity and boundary conditions here
nu = inputs_tensor[:,0:1,:,:]
bc1 = inputs_tensor[:,1:2,:,:]
bc2 = inputs_tensor[:,2:3,:,:]
# apply boundary conditions
u = torch.where(bc1>0.5,1.0+u*0.0,u)
u = torch.where(bc2>0.5,u*0.0,u)
k = nu.squeeze().detach().cpu()
u = u.squeeze().detach().cpu()
im0 = axs[0].imshow(k,cmap='jet')
fig.colorbar(im0, ax=axs[0])
im1 = axs[1].imshow(u,cmap='jet')
fig.colorbar(im1, ax=axs[1])
plt.savefig(os.path.join(self.logger[0].log_dir, 'contour_' + str(self.current_epoch) + '.png'))
self.logger[0].experiment.add_figure('Contour Plots', fig, self.current_epoch)
plt.close('all')
def main():
u_tensor = np.ones((1,1,64,64))  # numpy.ones
# Definition of data structures for BDNE project
# Author : <NAME> <<EMAIL>>
# Imports for type-annotations
from __future__ import annotations
from typing import Tuple, List, Dict, Union, Callable, Any
# Import random to sample from the sets
import random
# Import numpy as part of the type-annotations
import numpy as np
# Import pandas to output the data as a dataframe
import pandas as pd
# Import Mapping to implement the experimental metadata as a mapping class
from collections.abc import Mapping
# Import the core BDNE ORM and configuration to deal with the database
import BDNE.db_orm as db
import BDNE.config as cfg
from BDNE.config import db_batch_size
#################################################################
# A cache class for storing data locally
#################################################################
class DBCache:
"""A basic cache class for database IDs- never kicks out old data unless told to"""
# Store the data in pd.DataFrame
_cache: pd.DataFrame
def __init__(self) -> None:
"""Set up pandas dataframe to store data internally"""
self._cache = pd.DataFrame()
def clear(self) -> None:
"""Empty the cache"""
self._cache = pd.DataFrame()
def __call__(self, ids: List[int]) -> Tuple[List[int], pd.DataFrame]:
"""Convenience function to retrieve a list of results"""
return self.check(ids)
def check(self, ids: List[int]) -> Tuple[List[int], pd.DataFrame]:
"""Look for hits with supplied ids, must be unique index"""
if len(ids) == 0:
return [], pd.DataFrame()
ids = np.array(ids)
# Get from cache
cached = self._cache[self._cache.index.isin(ids)]
# List not found items to be read in
not_found = np.setdiff1d(ids, cached.index.to_numpy()).tolist()
return not_found, cached
def update(self, ids: np.ndarray, data: pd.DataFrame) -> None:
"""Update the cache for the ids provided with the data provided"""
# Convert to integer
ids = ids.astype('int')
# Make sure not to update existing data
missing = np.setdiff1d(ids, self._cache.index.to_numpy())
# update index
old_index = data.index
data.index = ids
# Create new dataframe
self._cache = self._cache.append(data[data.index.isin(missing)])
# Restore
data.index = old_index
def __len__(self) -> int:
"""Return the amount of memory (in bytes) used by the cache."""
return int(self._cache.memory_usage(deep=True).sum())
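# A minimal usage sketch for DBCache (ids and data are hypothetical): check()
# splits the requested ids into misses and cache hits; update() stores rows
# fetched for the misses so later checks can be served locally.
#
#   cache = DBCache()
#   missing, hits = cache.check([1, 2, 3])            # cold cache: everything missing
#   cache.update(np.array(missing), fetched_rows)     # fetched_rows is a pd.DataFrame
#   missing, hits = cache.check([1, 2, 3])            # now answered from the cache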
#################################################################
# A single entity class
#################################################################
class Entity:
"""Class to store all of the data for a given entity.
Typical Usage:
w = Entity(1000);
print(w);"""
# The database ID of this entity
db_id: int = None
# The sample ID (with additional information about the wider sample the entity is from)
_sample_id: int = None
# An internal container for the experimental data associated with this object
experiment_container = []
def __repr__(self) -> str:
"""Return information about this entity, including all experiments (if cached)."""
r = "{} ID={}".format(self.__class__.__name__, self.db_id)
if len(self.experiment_container) > 0:
r += " {}".format([i[0] for i in self.experiment_container])
return r
def __init__(self, db_id: int = None) -> None:
"""Initialise the entity class as empty, or with an entity id."""
if db_id is None:
return
# ID given
self.db_id = db_id
def sample(self) -> Dict[str, str]:
"""Return data about the sample that this entity is associated with."""
if self._sample_id is None:
self._sample_id = cfg.session.query(db.Entity.sampleID).filter(db.Entity.ID == self.db_id).first()[0]
# Set up database query to retrieve
stm = cfg.session.query(db.Sample.ID, db.Sample.supplier, db.Sample.material, db.Sample.preparation_date,
db.Sample.preparation_method, db.Sample.substrate).filter(
db.Sample.ID == self._sample_id).first()
# Zip to dictionary
keys = ['ID', 'Supplier', 'Material', 'Preparation_date', 'Preparation_method', 'Substrate']
return dict(zip(keys, stm))
def populate_from_db(self) -> None:
"""Retrieve all experiments associated with this entity ID"""
stm = cfg.session.query(db.Experiment.type, db.Measurement.ID).join(db.Measurement).join(db.Object).\
join(db.Entity).filter(db.Entity.ID == self.db_id)
# Check whether this entity exists
if not stm.all():
raise KeyError('No Entity exists with ID {}'.format(self.db_id))
self.experiment_container = stm.all()
def experiments(self) -> List[str]:
"""List all experiments associated with this entity"""
if not self.experiment_container:
self.populate_from_db()
return [i[0] for i in self.experiment_container]
# TODO: Find type hint for sqlalchemy session.query
def get(self, experiment: Union[int, str]):
"""Get a single experiment associated with this entity by experiment number or name"""
# Check if we have downloaded experiment list yet
if not self.experiment_container:
self.populate_from_db()
# Check type of experiment
if type(experiment) is int:
exp_id = self.experiment_container[experiment][1]
elif type(experiment) is str:
exp_id = [i[1] for i in self.experiment_container if i[0] == experiment]
# Check how many datasets are associated with this
if len(exp_id) == 0:
raise KeyError('Experiment {} not present for Entity ID {}'.format(experiment, self.db_id))
elif len(exp_id) == 1:
exp_id = exp_id[0]
else:
raise KeyError('Experiment {} ambiguous for Entity ID {}'.format(experiment, self.db_id))
else:
raise TypeError('Experiment must be defined as an integer or a string')
# Retrieve experiment results from database
stm = cfg.session.query(db.Measurement.data).filter(db.Measurement.ID == exp_id)
if len(stm.all()) == 1:
return stm.all()[0][0]
else:
raise KeyError('Measurement ID {} not found in database'.format(exp_id))
def get_data(self):
"""Get all the measurement data associated with this entity"""
# Check if we have downloaded experiment list yet
if not self.experiment_container:
self.populate_from_db()
# Retrieve experiment results from database
measurement_container = list(map(list, zip(*self.experiment_container)))[1]
#Return all experiment IDs and experimental data associated with this entity
#replace db.Experiment.ID with db.Experiment.type to index by experiment names not experiment IDs
stm = cfg.session.query(db.Experiment.type,db.Measurement.data)\
.select_from(db.Measurement) \
.join(db.Object).join(db.Entity).join(db.Experiment)\
.filter(db.Measurement.ID.in_(measurement_container))
return stm.all()
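# A short usage sketch for Entity (the id and experiment name are hypothetical):
#
#   e = Entity(1000)
#   e.experiments()      # names of all experiments recorded for this entity
#   e.get('spectra')     # raw measurement data for one experiment
#   e.get_data()         # (experiment type, data) pairs for every measurement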
#################################################################
# EntityCollection (a collection of Entities)
#################################################################
class EntityCollection:
"""A collection of entities.
Lazy handling, stores only db_ids for the entities and returns either an entity, a set of entities,
or a set of measurements.
Typical usage:
``w = EntityCollection();
w.load_sample(25);``"""
# Database IDs associated with entities in this set
db_ids: List[int] = []
# Cursor to use as iterator
cursor: int = -1
# Count how many batches of entities have been retrieved already
batch_no = 0
def __repr__(self) -> str:
"""Return string describing collection"""
if self.db_ids:
return "{} IDs={}".format(self.__class__.__name__, len(self.db_ids))
else:
return f'Empty {self.__class__.__name__}'
def __init__(self, start_id: List[int] = None) -> None:
"""Set up wire collection, either blank or with a set of initial entity IDs."""
self.db_ids = start_id
def __len__(self) -> int:
"""The number of entities in this collection"""
return len(self.db_ids)
def __del__(self) -> None:
# Clear up the data in collections
pass
def __getstate__(self) -> List[int]:
"""Select what gets pickled"""
return self.db_ids
def __setstate__(self, state: List[int]) -> None:
"""Only restore db_ids"""
self.db_ids = state
def load_sample(self, sample_id: int) -> None:
"""Load a sample ID into the EntityCollection class"""
stm = cfg.session.query(db.Entity.ID).filter(db.Entity.sampleID == sample_id)
self.db_ids = [i[0] for i in stm.all()]
if not self.db_ids:
raise Warning('No Entities found with sample ID {}'.format(sample_id))
def load_entity_group(self, entity_group_id: int) -> None:
"""Load an entityGroup ID into the EntityCollection class"""
stm = cfg.session.query(db.EntityGroupEntity.entityID).filter(db.EntityGroup.ID == entity_group_id)
self.db_ids = [i[0] for i in stm.all()]
# Check if any entities are returned
if not self.db_ids:
raise Warning('No Entities found with sample ID {}'.format(entity_group_id))
def sample(self, number_to_sample: int = 0) -> Union[Entity, EntityCollection]:
"""Return a random subset of k entities from the EntityCollection."""
if number_to_sample > 0:
wid = random.sample(self.db_ids, k=number_to_sample)
# Select - return either an Entity or a EntityCollection
if len(wid) == 1:
return Entity(wid[0])
else:
return EntityCollection(wid)
else:
raise TypeError('Argument to sample must be an integer.')
def mask(self, id_set: Union[EntityCollection, MeasurementCollection]) -> EntityCollection:
"""Create a new entity set from an intersection with other entity ids"""
if type(id_set) is EntityCollection:
id_set = id_set.db_ids
elif type(id_set) is MeasurementCollection:
id_set = id_set.entity_ids
else:
raise TypeError('Mask must be passed as either a MeasurementCollection or another EntityCollection')
# Create an intersection between the local IDs and the remote ID set
intersection = set(self.db_ids).intersection(id_set)
return EntityCollection(list(intersection))
def logical_mask(self, mask: np.ndarray) -> EntityCollection:
"""Create a new wire collection using a logical mask"""
new_ids = np.array(self.db_ids)[mask].tolist()
return EntityCollection(new_ids)
def get_entity(self, id: int) -> Entity:
"""Return a single entity"""
return Entity(self.db_ids[id])
def get_measurement(self, experiment_name: str) -> MeasurementCollection:
"""Return a MeasurementCollection (when a string is passed)"""
# Make a copy of the db_ids
all_db_ids = self.db_ids.copy()
# Create empty lists to hold the ids
measurement_ids = []
entity_ids = []
# Pop ids to get
while len(all_db_ids) > 0:
# For the final batch, take everything that is left; for earlier batches, take db_batch_size ids at a time
if len(all_db_ids) < db_batch_size:
sub_query = all_db_ids
all_db_ids = []
else:
sub_query = all_db_ids[0:db_batch_size]
all_db_ids = all_db_ids[db_batch_size:]
# Create statement
stm = cfg.session.query(db.Measurement.ID, db.Entity.ID).select_from(db.Measurement). \
join(db.Object).join(db.Entity).join(db.Experiment). \
filter(db.Entity.ID.in_(sub_query), db.Experiment.type == experiment_name)
# Execute statement
ret = stm.all()
# Add returned to lists
measurement_ids.extend([i[0] for i in ret])
entity_ids.extend([i[1] for i in ret])
# Return a MeasurementCollection
return MeasurementCollection(measurement_ids=measurement_ids, entity_ids=entity_ids)
def get_batch(self) -> pd.DataFrame:
"""Return a dataframe with a batch of entities and their data,
categorised into columns based on experiment type."""
# Make a copy of the db_ids
all_db_ids = self.db_ids.copy()
all_db_ids.sort()
sub_query = all_db_ids[db_batch_size*self.batch_no:db_batch_size*(self.batch_no+1)]
# Create a query statement
stm = cfg.session.query(db.Entity.ID, db.Experiment.ID, db.Measurement.data).select_from(db.Measurement) \
.join(db.Object).join(db.Entity).join(db.Experiment) \
.filter(db.Entity.ID.in_(sub_query))
# Execute the statement
query = stm.all()
# Turn the returned data into a pandas dataframe
df = pd.DataFrame(query, columns=["nanoobject", "experiment", "data"])
# Use a pivot table to rearrange df into a useful sparse matrix
# There may be entities which did not have a specific experiment done on them; the locations corresponding to these will be filled with NaN values.
df = df.pivot(index='nanoobject', columns='experiment', values='data')
# increment batch number by 1, and return the results
self.batch_no += 1
# Return a dataframe
return df
def get_data(self) -> pd.DataFrame:
"""Return a dataframe with all the measurement data related to this entity collection,
with rows representing entities and columns representing different types of experiments."""
# Create statement
stm = cfg.session.query(db.Entity.ID,db.Experiment.ID,db.Measurement.data).select_from(db.Measurement) \
.join(db.Object).join(db.Entity).join(db.Experiment)
# Execute the statement
query = stm.all()
# Turn the returned data into a pandas dataframe
df = pd.DataFrame(query, columns=["nanoobject", "experiment", "data"])
# Use a pivot table to rearrange df into a useful sparse matrix
# There may be entities which did not have a specific experiment done on them; the locations corresponding to these will be filled with NaN values.
df = df.pivot(index='nanoobject', columns='experiment', values='data')
# Return a dataframe
return df
def __next__(self) -> Entity:
"""To iterate over each entity in the Collection"""
self.cursor = self.cursor + 1
# Check for end of list
if self.cursor == len(self.db_ids):
self.cursor = 0
raise StopIteration()
return self.get_entity(self.cursor)
def __iter__(self) -> EntityCollection:
# Return self
return self
def __add__(self, other: EntityCollection) -> EntityCollection:
"""Combine two entityCollections and return a merged set"""
return EntityCollection(self.db_ids + other.db_ids)
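# A short usage sketch for EntityCollection (the sample id is hypothetical):
#
#   w = EntityCollection()
#   w.load_sample(25)                        # all entities belonging to sample 25
#   sub = w.sample(10)                       # random subset of 10 entities
#   spectra = w.get_measurement('spectra')   # lazy MeasurementCollection
#   df = w.get_batch()                       # one batch pivoted into a DataFrame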
#################################################################
# MeasurementCollection
#################################################################
class MeasurementCollection:
"""A class to hold a collection of related measurement.
Uses lazy loading, holding only the database IDs and associated entity IDs until a get()
or collect() is issued.
Typical Usage:
w = EntityCollection();
w.load_entity_group(4);
e = w.get_measurement('spectra'); # A MeasurementCollection"""
# Database IDs for the measurements
db_ids: List[int] = []
# Associated entity IDs
entity_ids: List[int] = []
# Cursor for use as an iterator
cursor: int = -1
# Internal link to cache
_db_cache: DBCache = DBCache()
# Cache switch
_use_cache: bool = True
def __init__(self, measurement_ids: Union[np.array, List[int]] = None,
entity_ids: Union[np.array, List[int]] = None) -> None:
"""Initialise with a list of measurement_IDs and entity_ids"""
if len(measurement_ids) == len(entity_ids):
self.db_ids = measurement_ids
self.entity_ids = entity_ids
else:
raise RuntimeError('Both measurement_id and entity_id must be provided with the same length.')
def __repr__(self) -> str:
"""Return string representation"""
if len(self.db_ids) > 0:
return "{} IDs={}".format(self.__class__.__name__, len(self.db_ids))
else:
return f"Empty {self.__class__.__name__}"
def __len__(self) -> int:
"""Return the number of measurements in this collection"""
return len(self.db_ids)
def __del__(self) -> None:
# Remove the instance from memory and remove the associated Collection data
pass
def __getstate__(self) -> dict:
"""Only store/pickle entity and db_ids"""
return {'db_ids': self.db_ids, 'entity_ids': self.entity_ids}
def __setstate__(self, state: dict) -> None:
"""Only restore db_ids and entity ids"""
self.db_ids = state['db_ids']
self.entity_ids = state['entity_ids']
def sample(self, number: int = 1) -> pd.DataFrame:
"""Get a random selection of 'number' measurements"""
selected = random.choices(range(len(self.db_ids)), k=number)
return self._get(selected)
def _get(self, n: Union[range, list]) -> pd.DataFrame:
"""A cached function to return measurements from the set."""
# Convert ranges to a list
if type(n) is range:
n = list(n)
# Check if a list passed (must be)
if type(n) is not list:
raise NotImplementedError('n must be a list')
# Convert list to numpy array
n = np.array(n)
# Range check
if np.any(n > len(self.db_ids)) or np.any(n < 0):
raise KeyError('Index must be in range 0 to {}'.format(len(self.db_ids)))
# Convert indices to db_ids
to_get = [self.db_ids[i] for i in n]
# If zero length, return empty
if len(to_get) == 0:
# Nothing to return
return pd.DataFrame()
else:
# Multiple datasets to return
# Need to check cache
if self._use_cache:
(to_get, cached) = self._db_cache.check(to_get)
else:
cached = None
# Collect any remaining datasets rest
if len(to_get) > 0:
# TODO: Remove temporary table and use batched retrieve for large sets
# Initialise
db_data, entity, db_id, exp_id = [], [], [], []
while len(to_get) > 0:
if len(to_get) < db_batch_size:
sub_query = to_get
to_get = []
else:
sub_query = to_get[0:db_batch_size]
to_get = to_get[db_batch_size:]
# Assemble the query
stm = cfg.session.query(db.Measurement.data, db.Object.entity_id,
db.Measurement.ID, db.Measurement.experiment_ID).\
join(db.Object).filter(db.Measurement.ID.in_(sub_query))
# Collection from database
query_result = stm.all()
# Format from DB
db_data.extend([np.array(i[0])  # numpy.array
#Imports
from settings import *
import numpy as np
import midi_functions as mf
import _pickle as pickle
import os
import sys
import pretty_midi as pm
import mido
import operator
def programs_to_instrument_matrix(programs, instrument_attach_method, max_voices):
if instrument_attach_method == '1hot-instrument':
#very large, not recommended
instrument_feature_matrix = np.zeros((max_voices, 128))
for i, program in enumerate(programs):
instrument_feature_matrix[i, program] = 1
elif instrument_attach_method == '1hot-category':
#categories according to midi declaration, https://en.wikipedia.org/wiki/General_MIDI
#8 consecutive instruments make 1 category
instrument_feature_matrix = np.zeros((max_voices, 16))
for i, program in enumerate(programs):
instrument_feature_matrix[i, program//8] = 1
elif instrument_attach_method == 'khot-instrument':
#make a khot vector in log2 base for the instrument
#log2(128) = 7
instrument_feature_matrix = np.zeros((max_voices, 7))
for i, program in enumerate(programs):
p = program
for exponent in range(7):
if p % 2 == 0:
instrument_feature_matrix[i, exponent] = 1
p = p // 2
elif instrument_attach_method == 'khot-category':
#categories according to midi declaration, https://en.wikipedia.org/wiki/General_MIDI
#8 consecutive instruments make 1 category
#make a khot vector in log2 base for the category
#log2(16) = 4
instrument_feature_matrix = np.zeros((max_voices, 4))
for i, program in enumerate(programs):
p = program//8
for exponent in range(4):
if p % 2 == 1:
instrument_feature_matrix[i, exponent] = 1
p = p // 2
else:
print("Not implemented!")
return instrument_feature_matrix
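# Worked example (inputs hypothetical): with instrument_attach_method='khot-category'
# and programs=[0, 25], program 25 falls in category 25 // 8 == 3, whose binary
# expansion 0b11 sets the first two bits, giving the row [1, 1, 0, 0]; program 0 is
# category 0 and yields an all-zero row.
#
#   programs_to_instrument_matrix([0, 25], 'khot-category', 2)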
def rolls_to_midi(pianoroll, programs, save_folder, filename, bpm, velocity_roll=None, held_notes_roll=None):
#bpm is in quarter notes, so scale accordingly
bpm = bpm * (SMALLEST_NOTE / 4)
pianoroll = np.pad(np.copy(pianoroll), ((0,0),(low_crop,num_notes-high_crop)), mode='constant', constant_values=0)
if not os.path.exists(save_folder):
os.makedirs(save_folder)
midi = pm.PrettyMIDI(initial_tempo=bpm, resolution=1000)
midi.time_signature_changes.append(pm.TimeSignature(4, 4, 0))
for voice, program in enumerate(programs):
current_instrument = pm.Instrument(program=program)
current_pianoroll = pianoroll[voice::len(programs),:]
if velocity_roll is not None:
current_velocity_roll = np.copy(velocity_roll[voice::len(programs)])
#during the training, the velocities were scaled to be in the range 0,1
#scale it back to the actual velocity numbers
current_velocity_roll[np.where(current_velocity_roll < velocity_threshold_such_that_it_is_a_played_note)] = 0
current_velocity_roll[np.where(current_velocity_roll >= velocity_threshold_such_that_it_is_a_played_note)  # numpy.where
import zmq
import numpy as np
from python_zmq_server import *
tg1 = 0.0
tg2 = 0.0
# === enter the application code here ===
class Inner:
def __init__(self):
self.var_double_inner = 54.23
class Test:
def __init__(self):
self.var_int = 42
self.var_double = 12.34
self.var_complex = 7.2 + 1j*17.23
self.var_string = "Hello World!"
self.var_bool = True
self.var_nparray = np.array([1.2, 2, 3, 4, 5])
self.var_nparray_2d = np.array([[1.2, 2.8, 3.28, 44, 55], [18.2, 2.58, 3.3, 4.9, 5.1]])
self.var_nparray_complex = np.array([1.2+5.7j, 2.95+9.3j, 3.9+9.87j, 4.1+98.88j, 5.01+2.1j])
self.var_nparray_complex_2d = np.array([[1.2 + 5.7j, 2.95 + 9.3j, 3.9 + 9.87j, 4.1 + 98.88j, 5.01 + 2.1j],
[8.3 + 1.2j, 7.2 + 9.66j, 1.1 + 11.2j, 4.5 + 9.2j, 5.99 + 8.2j]])
self.var_dict = \
{'Name': 'Zap',
'Number': 25.3,
'Boolean': True,
'Array': self.var_nparray.tolist()
}
self.inner = Inner()
def test(self, pos1=1, pos2=2):
return pos1-pos2
def dict_return(self):
ar = np.array([1,2,3])  # numpy.array
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocessing mnist and cifar10/100 outputs for simple, precond, dropout.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow.compat.v1 as tf
def postprocess_mnist(workingdir):
"""preprocessing mnist and notmnist outputs.
Args:
workingdir: path to the working directory
"""
path = os.path.join(workingdir, 'proba_tab_*.npy')
if tf.gfile.IsDirectory(os.path.join(workingdir, 'mnist/temp')):
tf.gfile.DeleteRecursively(os.path.join(workingdir, 'mnist/temp'))
if tf.gfile.IsDirectory(os.path.join(workingdir, 'notmnist/temp')):
tf.gfile.DeleteRecursively(os.path.join(workingdir, 'notmnist/temp'))
tf.gfile.MakeDirs(os.path.join(workingdir, 'mnist/temp'))
tf.gfile.MakeDirs(os.path.join(workingdir, 'notmnist/temp'))
files_list = tf.gfile.Glob(path)
n = len(files_list)
for i in np.arange(n):
path = os.path.join(workingdir, 'proba_tab_' + str(i) + '.npy')
with tf.gfile.Open(path, 'rb') as f:
p = np.load(f)
p_mnist = p[:10000, :, :]
p_notmnist = p[10000:, :, :]
for k in np.arange(10):
path = os.path.join(workingdir, 'mnist/temp',
'proba_' + str(i) + '_' + str(k) + '.npy')
with tf.gfile.Open(path, 'wb') as f:
np.save(f, p_mnist[k*1000:(k+1)*1000, :, :])
path = os.path.join(workingdir, 'notmnist/temp',
'proba_' + str(i) + '_' + str(k) + '.npy')
with tf.gfile.Open(path, 'wb') as f:
np.save(f, p_notmnist[k*1000:(k+1)*1000, :, :])
for dataset in ['mnist', 'notmnist']:
for k in np.arange(10):
p_list = []
for i in np.arange(n):
path = os.path.join(workingdir, dataset, 'temp',
'proba_' + str(i) + '_' + str(k) + '.npy')
with tf.gfile.Open(path, 'rb') as f:
p = np.load(f)
p_list.append(p)
proba = np.concatenate(tuple(p_list), axis=-1)
path = os.path.join(workingdir, dataset, 'proba_' + str(k) + '.npy')
with tf.gfile.Open(path, 'wb') as f:
np.save(f, proba)
tf.gfile.DeleteRecursively(os.path.join(workingdir, dataset, 'temp'))
def postprocess_cifar(workingdir, dataset):
"""preprocessing cifar10 outputs.
Args:
workingdir: path to the working directory
dataset: string, 'cifar10' or 'cifar100'
"""
path = os.path.join(workingdir, 'proba_tab_*.npy')
if tf.gfile.IsDirectory(os.path.join(workingdir, dataset)):
tf.gfile.DeleteRecursively(os.path.join(workingdir, dataset))
if tf.gfile.IsDirectory(os.path.join(workingdir, 'temp')):
tf.gfile.DeleteRecursively(os.path.join(workingdir, 'temp'))
tf.gfile.MakeDirs(os.path.join(workingdir, dataset))
tf.gfile.MakeDirs(os.path.join(workingdir, 'temp'))
files_list = tf.gfile.Glob(path)
n = len(files_list)
for i in np.arange(n):
path = os.path.join(workingdir, 'proba_tab_' + str(i) + '.npy')
with tf.gfile.Open(path, 'rb') as f:
p = np.load(f)
for k in np.arange(10):
path = os.path.join(workingdir, 'temp',
'proba_' + str(i) + '_' + str(k) + '.npy')
with tf.gfile.Open(path, 'wb') as f:
np.save(f, p[k*1000:(k+1)*1000, :, :])
for k in np.arange(10):
p_list = []
for i in np.arange(n):
path = os.path.join(workingdir, 'temp',
'proba_' + str(i) + '_' + str(k) + '.npy')
with tf.gfile.Open(path, 'rb') as f:
p = np.load(f)
p_list.append(p)
proba = np.concatenate(tuple(p_list), axis=-1)
path = os.path.join(workingdir, dataset, 'proba_' + str(k) + '.npy')
with tf.gfile.Open(path, 'wb') as f:
np.save(f, proba)
tf.gfile.DeleteRecursively(os.path.join(workingdir, 'temp'))
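# Usage sketch (path hypothetical): gather the per-run probability files under a
# working directory into ten per-chunk files for the chosen dataset.
#
#   postprocess_cifar('/tmp/precond_runs', 'cifar10')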
def postprocess_bootstrap_mnist(workingdir):
"""preprocessing mnist bootstrap outputs.
Args:
workingdir: path to the working directory
"""
if tf.gfile.IsDirectory(os.path.join(workingdir, 'mnist')):
tf.gfile.DeleteRecursively(os.path.join(workingdir, 'mnist'))
if tf.gfile.IsDirectory(os.path.join(workingdir, 'notmnist')):
tf.gfile.DeleteRecursively(os.path.join(workingdir, 'notmnist'))
list_tasks = tf.gfile.ListDirectory(workingdir)
num_samples = len(list_tasks)
tf.gfile.MakeDirs(os.path.join(workingdir, 'mnist'))
tf.gfile.MakeDirs(os.path.join(workingdir, 'notmnist'))
for k in np.arange(10):
p_mnist_list = []
p_notmnist_list = []
for i in np.arange(1, num_samples + 1):
path_task = os.path.join(workingdir, 'task_' + str(i),
'proba_tab_' + str(i-1) + '.npy')
with tf.gfile.Open(path_task, 'rb') as f:
p = np.load(f)
p_mnist = p[:10000, :, :]
p_notmnist = p[10000:, :, :]
p_mnist_list.append(p_mnist[k*1000:(k+1)*1000, :, :])
p_notmnist_list.append(p_notmnist[k*1000:(k+1)*1000, :, :])
proba_mnist = np.concatenate(tuple(p_mnist_list), axis=-1)
proba_notmnist = np.concatenate(tuple(p_notmnist_list), axis=-1)
path = os.path.join(workingdir, 'mnist', 'proba_' + str(k) + '.npy')
with tf.gfile.Open(path, 'wb') as f:
np.save(f, proba_mnist)
path = os.path.join(workingdir, 'notmnist', 'proba_' + str(k) + '.npy')
with tf.gfile.Open(path, 'wb') as f:
np.save(f, proba_notmnist)
def postprocess_bootstrap_cifar(workingdir, dataset):
"""preprocessing cifar10 bootstrap outputs.
Args:
workingdir: path to the working directory
dataset: string, 'cifar10' or 'cifar100'
"""
if tf.gfile.IsDirectory(os.path.join(workingdir, dataset)):
tf.gfile.DeleteRecursively(os.path.join(workingdir, dataset))
list_tasks = tf.gfile.ListDirectory(workingdir)
num_samples = len(list_tasks)
tf.gfile.MakeDirs(os.path.join(workingdir, dataset))
for k in np.arange(10):
p_list = []
for i in np.arange(1, num_samples + 1):
path_task = os.path.join(workingdir, 'task_' + str(i),
'proba_tab_' + str(i-1) + '.npy')
with tf.gfile.Open(path_task, 'rb') as f:
p = np.load(f)  # numpy.load
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import sys
import unittest
import ray
import ray.experimental.array.remote as ra
import ray.experimental.array.distributed as da
if sys.version_info >= (3, 0):
from importlib import reload
class RemoteArrayTest(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testMethods(self):
for module in [
ra.core, ra.random, ra.linalg, da.core, da.random, da.linalg
]:
reload(module)
ray.init()
# test eye
object_id = ra.eye.remote(3)
val = ray.get(object_id)
assert_almost_equal(val, np.eye(3))
# test zeros
object_id = ra.zeros.remote([3, 4, 5])
val = ray.get(object_id)
assert_equal(val, np.zeros([3, 4, 5]))  # numpy.zeros
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from enum import Enum
from .matlab_utils import find, ismember
from .lp_tools.LP_formulation import LP_formulation
from .lp_tools.simplex_procedures import unsigned_simplex
from .parametric_line import parametric_line
class SCLP_formulation_type(Enum):
primal_classic = 0
dual_classic = 1
weiss = 2
not_bounded = 3
primal_MCLP = 4
dual_MCLP = 5
both_MCLP = 6
primal_infeasible = 7
dual_infeasible = 8
both_infeasible = 9
class SCLP_data_type(Enum):
linear = 0
primal_piecewise_linear = 1
dual_piecewise_linear = 2
piecewise_linear = 3
# We are going to extend this class
# Assume that a, b, c, d are matrices
class SCLP_formulation():
__slots__ = ["G", "F", "H", "a", "b", "c", "d", "alpha", "gamma", "T", "I", "J", "K", "L", "_formulation_type", "_data_type"]
def __init__(self, G, F, H, a, b, c, d, alpha, gamma, T):
self.G, self.F, self.H, self.a, self.b, self.c, self.d, self.alpha, self.gamma, self.T = G, F, H, a, b, c, d, alpha, gamma, T
self.K = G.shape[0]
self.J = G.shape[1]
self.I = H.shape[0]
self.L = F.shape[1]
if self.L == 0:
if self.I == 0:
if np.any(self.alpha < 0):
if np.any(self.gamma > 0):
self._formulation_type = SCLP_formulation_type.both_MCLP
else:
self._formulation_type = SCLP_formulation_type.primal_MCLP
else:
if np.any(self.gamma > 0):
self._formulation_type = SCLP_formulation_type.dual_MCLP
else:
self._formulation_type = SCLP_formulation_type.not_bounded
else:
self._formulation_type = SCLP_formulation_type.primal_classic
if np.any(self.alpha < 0):
self._formulation_type = SCLP_formulation_type.primal_MCLP
else:
if self.I == 0:
self._formulation_type = SCLP_formulation_type.dual_classic
if np.any(self.gamma > 0):
self._formulation_type = SCLP_formulation_type.dual_MCLP
else:
self._formulation_type = SCLP_formulation_type.weiss
# if isinstance(a, piecewise_data):
# if isinstance(c, piecewise_data):
# self._data_type = SCLP_data_type.piecewise_linear
# else:
# self._data_type = SCLP_data_type.primal_piecewise_linear
# else:
# if isinstance(c, piecewise_data):
# self._data_type = SCLP_data_type.dual_piecewise_linear
# else:
# self._data_type = SCLP_data_type.linear
@property
def data_type(self):
return self._data_type
@property
def formulation_type(self):
return self._formulation_type
def formulate_ratesLP(self, x_0, q_N):
Kset = find(x_0)
Jset = find(q_N)
DD = np.vstack((-np.hstack((0, self.c, self.d)), np.hstack((np.vstack(self.a), self.G, self.F)),
np.hstack((np.vstack(self.b), self.H, np.zeros((self.I, self.L))))))
DD = np.ascontiguousarray(DD)
pn = np.ascontiguousarray(np.hstack((np.arange(1, self.K + 1, dtype = np.int32),
-np.arange(self.J + 1, self.J + self.I + 1, dtype = np.int32))), dtype = np.int32)
psx = ismember(np.arange(0, self.K), Kset).astype(np.int32)
psu = -ismember(np.arange(self.J, self.J + self.I), Jset).astype(np.int32)
ps = np.hstack((psx, psu))
dn = np.ascontiguousarray(np.hstack((-np.arange(1, self.J + 1, dtype = np.int32),
np.arange(self.K + 1, self.K + self.L + 1, dtype = np.int32))), dtype = np.int32)
dsq = ismember(np.arange(0, self.J), Jset).astype(np.int32)
dsp = -ismember(np.arange(self.K, self.K + self.L), Kset).astype(np.int32)
ds = np.hstack((dsq, dsp))
return LP_formulation(DD, pn, dn), ps, ds
def get_primalBoundaryLP(self):
DD1 = np.vstack((-np.hstack((0, self.d)), np.hstack((np.vstack(self.alpha), self.F))))
pn1 = np.arange(1, self.K + 1, dtype = np.int32)
dn1 = np.arange(self.K + 1, self.K + self.L + 1, dtype = np.int32)
return LP_formulation(DD1, pn1, dn1)
def get_dualBoundaryLP(self):
DD1 = np.vstack((np.hstack((0, np.hstack(self.b))), np.hstack((np.vstack(-self.gamma), -self.H.transpose()))))
pn1 = np.arange(1, self.J + 1, dtype = np.int32)
dn1 = np.arange(self.J + 1, self.J + self.I + 1, dtype = np.int32)
return LP_formulation(DD1, pn1, dn1)
def get_generalBoundaryLP(self):
DD0 = np.ascontiguousarray(np.vstack((np.hstack((0, -self.gamma, np.zeros((self.L)), self.d)), np.hstack((self.alpha[...,np.newaxis], self.G, self.F)),
np.hstack((np.zeros((self.I, 1)), self.H, np.zeros((self.I, self.L)))))))
pn = np.ascontiguousarray(np.concatenate((np.arange(1, self.K + 1), -np.arange(self.J + 1, self.J + self.I + 1))), dtype = np.int32)
dn = np.ascontiguousarray(np.concatenate((-np.arange(1, self.J + 1), np.arange(self.K + 1, self.K + self.L + 1))), dtype = np.int32)
return LP_formulation(DD0, pn, dn)
def get_general_dualBoundaryLP(self):
DD0 = np.ascontiguousarray(np.vstack(
(np.hstack((0, -self.gamma, np.zeros((1, self.L)), self.d)), np.hstack((self.alpha + self.a * self.T, self.G, self.F)),
np.hstack((np.zeros((self.I, 1)), self.H, np.zeros((self.I, self.L)))))))
pn = np.ascontiguousarray(np.concatenate((np.arange(1, self.K + 1), -np.arange(self.J + 1, self.J + self.I + 1))), dtype = np.int32)
dn = np.ascontiguousarray(np.concatenate((-np.arange(1, self.J + 1), np.arange(self.K + 1, self.K + self.L + 1))), dtype = np.int32)
return LP_formulation(DD0, pn, dn)
def get_dualBoundaryLP_solution(self, tolerance = 0):
if self._formulation_type == SCLP_formulation_type.not_bounded or self._formulation_type == SCLP_formulation_type.dual_classic:
return np.ascontiguousarray(-self.gamma)
elif self._formulation_type == SCLP_formulation_type.primal_classic or self._formulation_type == SCLP_formulation_type.weiss:
LP_form = self.get_dualBoundaryLP()
LP_form, err = unsigned_simplex(LP_form, tolerance)
if err['result'] == 0:
q_N = np.zeros(self.J + self.I, order='C')
q_N[LP_form.prim_name - 1] = LP_form.simplex_dict[1:, 0]
return q_N
LP_form = self.get_generalBoundaryLP()
LP_form, err = unsigned_simplex(LP_form, tolerance)
if err['result'] == 0:
q_N = np.zeros(self.J + self.I, order='C')  # numpy.zeros
from __future__ import division
import abc
from numpy import ndarray
from pyproj import Proj
from pyproj import transform as proj_transform
import numpy as np
import numbers
from resippy.photogrammetry.dem.abstract_dem import AbstractDem
import resippy.utils.image_utils.image_utils as image_utils
from six import add_metaclass
@add_metaclass(abc.ABCMeta)
class AbstractEarthOverheadPointCalc:
"""
This is the Abstract Earth Overhead Point Calculator class. Concrete implementations for specific types of
Overhead Earth Point Calculators should be created for the specific Earth Overhead Image Objects they support.
"""
def __init__(self):
# TODO add parameters that specify the point calculator's altitude reference datum.
"""
This is used to initialize the point calculator. There following class variables store information about
the point calculator:
_lon_lat_center_approximate: The approximate (longitude, latitude) center of the image
_projection: The native projection of the point calculator
_bands_coregistered: If the image this point calculator supports has multiple bands this variable
specifies whether or not they are coregistered.
"""
self._lon_lat_center_approximate = None
self._projection = None
self._bands_coregistered = True
@abc.abstractmethod
def _lon_lat_alt_to_pixel_x_y_native(self,
lons, # type: ndarray
lats, # type: ndarray
alts, # type: ndarray
band=None # type: int
): # type: (...) -> (ndarray, ndarray)
"""
This is a protected abstract method that can be implemented for concrete implementations of this class.
A point calculator should implement either this method or _pixel_x_y_alt_to_lon_lat_native. If this method
is not implemented and _pixel_x_y_alt_to_lon_lat_native is, then this method can be solved for with iterative
methods.
:param lons: longitudes in the point calculator's native projection, provided as a numpy ndarray
:param lats: latitudes in the point calculator's native projection, provided as a numpy ndarray
:param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray
:param band: specific image band provided as an int. If this variable is None it assumes all bands are
coregistered
:return: (pixel_x, pixel_y) provided as a tuple of numpy ndarrays
"""
pass
@abc.abstractmethod
def _pixel_x_y_alt_to_lon_lat_native(self,
pixel_xs, # type: ndarray
pixel_ys, # type: ndarray
alts=None, # type: ndarray
band=None # type: int
): # type: (...) -> (ndarray, ndarray)
"""
This is a protected abstract method that can be implemented for concrete implementations of this class.
A point calculator should implement either this method or _lon_lat_alt_to_pixel_x_y_native. If this method
is not implemented and _lon_lat_alt_to_pixel_x_y_native is, then this method can be solved for with iterative
methods. This functionality is provided automatically within the pixel_x_y_alt_to_lon_lat method of this class.
:param pixel_xs: x pixels, provided as a numpy ndarray
:param pixel_ys: y pixels, provided as a numpy ndarray
:param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray
:param band: specific image band provided as an int. If this variable is None it assumes all bands are coregistered
:return: (longitudes, latitudes) in the point calculator's native projection, provided as a tuple of numpy
ndarrays
"""
pass
def lon_lat_alt_to_pixel_x_y(self,
lons, # type: ndarray
lats, # type: ndarray
alts, # type: ndarray
world_proj=None, # type: Proj
band=None # type: int
): # type: (...) -> (ndarray, ndarray)
"""
This method calculates pixel x / y values given longitude, latitude and altitude information. It uses
_lon_lat_alt_to_pixel_x_y_native under the hood, and provides some convenience to the user. These
conveniences include automatic handling of different projections, and also allows the user to input
longitudes, latitudes and altitudes as either 1d or 2d numpy arrays. The results will be output in the
same dimensions as the inputs.
:param lons: longitudes provided as a numpy ndarray, can be either 1d or 2d numpy array, or a single float value
:param lats: latitudes provided as a numpy ndarray, can be either 1d or 2d numpy array, or a single float value
:param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray
:param world_proj: projection of the input longitudes and latitudes
:param band: specific image band provided as an int. If this variable is None it assumes all bands are coregistered
:return: (pixel x, pixel y) as a tuple of numpy ndarrays (1d or 2d), or a tuple of float. The output will match the input
"""
# check for some errors up front
if alts is None:
alts = 0
# standardize inputs, make everything 1 dimensional ndarrays
lons_is_number = isinstance(lons, numbers.Number)
lats_is_number = isinstance(lats, numbers.Number)
alts_is_number = isinstance(alts, numbers.Number)
if lons_is_number or lats_is_number:
lons = np.array([lons])
lats = np.array([lats])
if alts_is_number:
alts = np.zeros(lons.shape) + alts
world_xyz_is_2d = False
# auto-detect if the world x-y-z arrays are 2d, and flatten the lon, lat and alt arrays if they are
# This is done to make all the vector math less complicated and keep it fast without needing to use loops
if np.ndim(lons) == 2:
world_xyz_is_2d = True
nx = np.shape(lons)[1]
ny = np.shape(lons)[0]
lons = np.reshape(lons, nx * ny)
lats = np.reshape(lats, nx * ny)
alts = np.reshape(alts, nx * ny)
# now actually do the calculations with everything in a standard form
if world_proj is None:
world_proj = self.get_projection()
if world_proj.srs != self.get_projection().srs:
lons, lats, alts = proj_transform(world_proj, self.get_projection(), lons, lats, alts)
pixel_coords = self._lon_lat_alt_to_pixel_x_y_native(lons, lats, alts, band)
if lons_is_number or lats_is_number:
pixel_coords = pixel_coords[0][0], pixel_coords[1][0]
# now transform everything back if it wasn't in a standard form coming in
# unflatten world_xyz arrays if the original inputs were 2d
if world_xyz_is_2d:
pixel_coords_x_2d = np.reshape(pixel_coords[0], (ny, nx))
pixel_coords_y_2d = np.reshape(pixel_coords[1], (ny, nx))
return pixel_coords_x_2d, pixel_coords_y_2d
return pixel_coords
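# A short usage sketch (the concrete point calculator `calc` and the WGS84 inputs
# are hypothetical): scalars, 1d arrays and 2d grids are all accepted, and the
# output shape matches the input.
#
#   px, py = calc.lon_lat_alt_to_pixel_x_y(-77.03, 38.89, 100.0)
#   px_grid, py_grid = calc.lon_lat_alt_to_pixel_x_y(lon_grid, lat_grid, alt_grid)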
def pixel_x_y_alt_to_lon_lat(self,
pixel_xs, # type: ndarray
pixel_ys, # type: ndarray
alts, # type: ndarray
world_proj=None, # type: Proj
band=None, # type: int
pixel_error_threshold=0.01, # type: float
max_iter=1000, # type: int
): # type: (...) -> (ndarray, ndarray)
"""
This will calculate a pixel's lon / lat location on earth. It uses _pixel_x_y_alt_to_lon_lat_native under the
hood, and if that method is not implemented it will solve iteratively using _pixel_x_y_alt_to_lon_lat_native_solver.
It provides conveniences to users such as automatic handling of world projections, and allows the user to input
either numbers, or 1d or 2d numpy arrays as inputs for pixel values and altitudes.
:param pixel_xs: x pixels, as either a float or 1d or 2d numpy array
:param pixel_ys: y pixels, as either a float or 1d or 2d numpy array
:param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray
:param world_proj: projection of the input longitudes and latitudes
:param band: band number of the image
:param pixel_error_threshold: pixel threshold to use if the iterative solver is used. Defaults to 0.01 pixels
:param max_iter: maximum iteration. This is used if the iterative solver does not converge to avoid entering
an infinite loop. Defaults to 1000 iterations.
:return: (lon, lat) as a tuple of float, or tuple of 1d or 2d numpy ndarray, to match the input of pixel_xs, and pixel_ys
"""
if world_proj is None:
world_proj = self.get_projection()
if self._pixel_x_y_alt_to_lon_lat_native(pixel_xs, pixel_ys, alts, band) is not None:
native_lons, native_lats = self._pixel_x_y_alt_to_lon_lat_native(pixel_xs, pixel_ys, alts, band=band)
else:
native_lons, native_lats = \
self._pixel_x_y_alt_to_lon_lat_native_solver(pixel_xs,
pixel_ys,
alts,
band=band,
max_pixel_error=pixel_error_threshold,
max_iter=max_iter)
if world_proj.srs != self.get_projection().srs:
lons, lats = proj_transform(self.get_projection(), world_proj, native_lons, native_lats)
return lons, lats
else:
return native_lons, native_lats
def _pixel_x_y_alt_to_lon_lat_native_solver(self,
pixel_xs, # type: ndarray
pixel_ys, # type: ndarray
alts, # type: ndarray
d_lon=None, # type: float
d_lat=None, # type: float
band=None, # type: int
max_pixel_error=0.01, # type: float
max_iter=1000, # type: int
): # type: (...) -> (ndarray, ndarray)
"""
This is a protected method that is used to solve for longitude, and latitude given pixel x, y and altitude values.
It uses an approximation of a newton method solver.
:param pixel_xs: pixel x values, as a 1d numpy ndarray
:param pixel_ys: pixel y values, as a 1d numpy ndarray
:param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray
:param d_lon: delta_longitude to use for the newton-like solver. If it is not provided this value will be calculated
:param d_lat: delta_latitude to use for the newton-like solver. If it is not provided this value will be calculated
:param band: image band as an int or None if all the bands are coregistered
:param max_pixel_error: maximum pixel error. Same as the description in pixel_x_y_alt_to_lon_lat
:param max_iter: Same as the description in pixel_x_y_alt_to_lon_lat
:return: (longitude, latitude) in the point calculator's native projection, as a tuple of numpy ndarrays
"""
n_pixels = np.shape(pixel_xs)
approximate_lon, approximate_lat = self.get_approximate_lon_lat_center()
# initial lons and lats to all be the approximate center of the image
lons, lats = np.zeros(n_pixels) + approximate_lon, np.zeros(n_pixels) + approximate_lat
if d_lon is None or d_lat is None:
d_pixel = 1
# we want the delta to be on the order of 1 pixel or so, maybe change this later to scale with the errors
machine_eps = np.finfo(lons.dtype).eps
machine_max_val = np.finfo(lons.dtype).max
machine_val_cutoff = machine_max_val / 4.0
float_nums = []
current_num = machine_eps
while current_num < machine_val_cutoff:
float_nums.append(current_num)
current_num = current_num * 2
float_nums = np.array(float_nums)
machine_lons = np.zeros(np.shape(float_nums))  # numpy.shape
#!/usr/bin/env python
import numpy as np
from decimal import Decimal
from random import uniform
from math import pi, sin, cos
def main():
title, lattice_constant, a, elements, numofatoms, selective_dynamics, selective_flags, direct_ac, atom_pos = read_poscar('POSCAR.H2x2')
dr = 0.3
dc = 0.5
b_pos = H_generator(lattice_constant, a, numofatoms, atom_pos, selective_flags, dr, dc)
write_poscar('POSCAR', title, lattice_constant, a, elements, numofatoms, selective_dynamics, selective_flags, direct_ac, b_pos)
return
def H_generator(lattice_constant, a, numofatoms, atom_pos, selective_flags, dr, dc):
natom = sum(numofatoms)
sc = np.linalg.norm(a[2,:])  # numpy.linalg.norm
import itk
import pydicom
import numpy as np
from scipy.signal import find_peaks
from skimage.morphology import disk
from skimage.morphology import dilation
import skimage.filters
import itkpocus.util
import skvideo.io
'''Preprocessing and device-specific IO for the Sonoque.'''
def _find_spacing(npimg):
'''
Finds the spacing (pixel dimension in mm) of a Sonoque image by detecting the ruler ticks of the overlay.
Parameters
----------
npimg : ndarray
single channel 0 to 255 (e.g. pydicom's pixel_array or a video frame)
Returns
-------
spacing : float
or None if the ruler cannot be detected
'''
tick_spacing = 5 # in mm
error_threshold = 5 # in pixels
ruler_size_threshold = 0.7 # percentage of vertical image
ticks_offset = 3 # in pixels, right of long ruler line
ruler_intensity_threshold = 80 # minimum brightness of ruler pixels/height in peak finding
ruler_thresh = npimg.shape[0] * ruler_size_threshold
col_count = np.sum(npimg > 0, axis=0)
ruler_col = np.argwhere(col_count > ruler_thresh)[0] + ticks_offset
ruler_ticks, _ = find_peaks(npimg[:, ruler_col].flatten(), height=ruler_intensity_threshold)
ruler_diffs = ruler_ticks[1:] - ruler_ticks[:-1]
if (np.max(ruler_diffs) - np.min(ruler_diffs)) > error_threshold :
return None # bad
spacing = tick_spacing / np.mean(ruler_diffs)
return spacing
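# Usage sketch (frame variable hypothetical): once the spacing is known, pixel
# distances convert directly to millimetres.
#
#   spacing = _find_spacing(npframe)        # mm per pixel, or None if no ruler found
#   depth_mm = npframe.shape[0] * spacing   # physical height of the imaged region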
def _find_crop(npimg):
'''
Calculates a crop that contains only the ultrasound portion of the image (overlay text may still be on portion).
Parameters
----------
npimg : ndarray
single channel 0 to 255 (e.g. pydicom's pixel_array or a video frame)
Returns
-------
crop : ndarray
(2x2 ndarray) [[topbound, bottombound], [leftbound, rightbound]]
'''
nonempty_threshold = 0.1 # percentage of nonzero pixels
background_threshold = 10 # pixel intensity
stuff_cols = np.sum(npimg > background_threshold, axis=0) > nonempty_threshold * npimg.shape[0]
height_min_crop = 0.05
width_min_crop = 0.19 # currently unused
midcol = npimg.shape[1]/2.0 # not worried if non-integer
zerocol = np.argwhere(stuff_cols == 0)
leftbound = np.max( (np.max(zerocol[zerocol < midcol])+1, int(npimg.shape[1] * width_min_crop) ))
rightbound = np.min( (np.min(zerocol[midcol < zerocol])-1, int(npimg.shape[1] * (1 - width_min_crop) )))
midrow = npimg.shape[0]/2.0
rowsum =
|
np.sum(npimg[:,leftbound:rightbound+1], axis=1)
|
numpy.sum
|
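# Hedged sketch of the column-bound logic used in _find_crop above, on a toy
# 30-column mask: columns 5-24 contain the ultrasound, so the bounds close in
# on that region (subject to the width_min_crop floor).
import numpy as np

def _example_crop_bounds():
    stuff_cols = np.zeros(30, dtype=bool)
    stuff_cols[5:25] = True
    midcol = 30 / 2.0
    width_min_crop = 0.19
    zerocol = np.argwhere(stuff_cols == 0)
    leftbound = np.max((np.max(zerocol[zerocol < midcol]) + 1, int(30 * width_min_crop)))
    rightbound = np.min((np.min(zerocol[midcol < zerocol]) - 1, int(30 * (1 - width_min_crop))))
    return leftbound, rightbound  # (5, 24) for this toy mask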
""" Module providing unit-testing for the
`~halotools.mock_observables.tpcf_one_two_halo_decomp` function.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.utils.misc import NumpyRNGContext
import pytest
from ..tpcf_one_two_halo_decomp import tpcf_one_two_halo_decomp
from ....custom_exceptions import HalotoolsError
__all__ = ['test_tpcf_one_two_halo_auto_periodic', 'test_tpcf_one_two_halo_cross_periodic']
# create toy data to test functions
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
fixed_seed = 43
@pytest.mark.slow
def test_tpcf_one_two_halo_auto_periodic():
"""
test the tpcf_one_two_halo autocorrelation with periodic boundary conditions
"""
Npts = 100
with NumpyRNGContext(fixed_seed):
IDs1 = np.random.randint(0, 11, Npts)
sample1 = np.random.random((Npts, 3))
result = tpcf_one_two_halo_decomp(sample1, IDs1, rbins, sample2=None,
randoms=None, period=period, estimator='Natural')
assert len(result) == 2, "wrong number of correlation functions returned."
@pytest.mark.slow
def test_tpcf_one_two_halo_cross_periodic():
"""
test the tpcf_one_two_halo cross-correlation with periodic boundary conditions
"""
Npts = 100
with NumpyRNGContext(fixed_seed):
IDs1 = np.random.randint(0, 11, Npts)
IDs2 = np.random.randint(0, 11, Npts)
sample1 = np.random.random((Npts, 3))
sample2 = np.random.random((Npts, 3))
result = tpcf_one_two_halo_decomp(sample1, IDs1, rbins, sample2=sample2,
sample2_host_halo_id=IDs2, randoms=None,
period=period,
estimator='Natural', approx_cell1_size=[rmax, rmax, rmax],
approx_cell2_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
assert len(result) == 6, "wrong number of correlation functions returned."
@pytest.mark.slow
def test_tpcf_one_two_halo_auto_nonperiodic():
"""
    test the tpcf_one_two_halo autocorrelation, non-periodic case with user-supplied randoms
"""
Npts, Nran = 100, 1000
with NumpyRNGContext(fixed_seed):
IDs1 = np.random.randint(0, 11, Npts)
sample1 = np.random.random((Npts, 3))
randoms = np.random.random((Nran, 3))
result = tpcf_one_two_halo_decomp(sample1, IDs1, rbins, sample2=None,
randoms=randoms, period=period, estimator='Natural')
assert len(result) == 2, "wrong number of correlation functions returned."
@pytest.mark.slow
def test_tpcf_one_two_halo_cross_nonperiodic():
"""
    test the tpcf_one_two_halo cross-correlation, non-periodic case with user-supplied randoms
"""
Npts, Nran = 100, 1000
with NumpyRNGContext(fixed_seed):
IDs1 = np.random.randint(0, 11, Npts)
IDs2 = np.random.randint(0, 11, Npts)
sample1 = np.random.random((Npts, 3))
sample2 = np.random.random((Npts, 3))
randoms = np.random.random((Nran, 3))
result = tpcf_one_two_halo_decomp(sample1, IDs1, rbins, sample2=sample2,
sample2_host_halo_id=IDs2, randoms=randoms,
period=period,
estimator='Natural', approx_cell1_size=[rmax, rmax, rmax],
approx_cell2_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
assert len(result) == 6, "wrong number of correlation functions returned."
def test_tpcf_decomposition_process_args1():
Npts = 100
with NumpyRNGContext(fixed_seed):
IDs1 = np.random.randint(0, 11, Npts)
IDs2 = np.random.randint(0, 11, Npts)
sample1 = np.random.random((Npts, 3))
sample2 = np.random.random((Npts, 3))
with pytest.raises(ValueError) as err:
result = tpcf_one_two_halo_decomp(sample1, IDs1, rbins, sample2=sample2,
sample2_host_halo_id=None, randoms=None,
period=period,
estimator='Natural', approx_cell1_size=[rmax, rmax, rmax],
approx_cell2_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
substr = ("If passing an input ``sample2``, must also pass sample2_host_halo_id")
assert substr in err.value.args[0]
def test_tpcf_decomposition_process_args2():
Npts = 100
with NumpyRNGContext(fixed_seed):
IDs1 = np.random.randint(0, 11, Npts-1)
IDs2 = np.random.randint(0, 11, Npts)
sample1 = np.random.random((Npts, 3))
sample2 = np.random.random((Npts, 3))
with pytest.raises(HalotoolsError) as err:
result = tpcf_one_two_halo_decomp(sample1, IDs1, rbins, sample2=sample2,
sample2_host_halo_id=IDs2, randoms=None,
period=period,
estimator='Natural', approx_cell1_size=[rmax, rmax, rmax],
approx_cell2_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
substr = ("same length as `sample1`")
assert substr in err.value.args[0]
def test_tpcf_decomposition_process_args3():
Npts = 100
with NumpyRNGContext(fixed_seed):
IDs1 =
|
np.random.randint(0, 11, Npts)
|
numpy.random.randint
|
"""
This code comes with a MIT license.
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
Please acknowledge and give reference if using the source code for your project(s)
"""
"""
Alpha-Compositing python algorithm
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2007"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Demo"
import pygame
import numpy
import timeit
def blend_texture_add(surface1_: pygame.Surface, surface2_: pygame.Surface,
set_alpha1_: (float, numpy.ndarray),
set_alpha2_: (float, numpy.ndarray), mask_: bool = False) -> pygame.Surface:
"""
:param surface1_: First layer texture
:param surface2_: Second layer texture
:param set_alpha1_: Alpha values for surface1 (can be a float or a numpy array)
    :param set_alpha2_: Alpha values for surface2 (can be a float or a numpy array)
:param mask_: True | False, create a mask from surface1 (only black pixels)
:return: Return a pygame surface (blend between surface1 & surface2)
"""
    assert isinstance(surface1_, pygame.Surface), \
        'Expecting Surface for argument surface1_ got %s ' % type(surface1_)
assert isinstance(surface2_, pygame.Surface), \
'Expecting Surface for argument surface2_ got %s ' % type(surface2_)
assert isinstance(set_alpha1_, (float, numpy.ndarray)), \
'Expecting float or numpy.ndarray for argument set_alpha1_ got %s ' % type(set_alpha1_)
    assert isinstance(set_alpha2_, (float, numpy.ndarray)), \
        'Expecting float or numpy.ndarray for argument set_alpha2_ got %s ' % type(set_alpha2_)
# sizes
w, h = surface1_.get_width(), surface1_.get_height()
# Create a BufferProxy for surface1_ and 2
# '3' returns a (surface-width, surface-height, 3) array of RGB color components.
# Each of the red, green, and blue components are unsigned bytes.
# Only 24-bit and 32-bit surfaces are supported.
# The color components must be in either RGB or BGR order within the pixel.
buffer1 = surface1_.get_view('3')
buffer2 = surface2_.get_view('3')
# Extract the alpha channel from surface1 and create
# a mask (array with black pixels flagged) alpha1_ <= 0
if isinstance(mask_, bool):
# Extract the surface1_ alpha channel and create a mask_ for (black pixel)
alpha1_ = numpy.array(surface1_.get_view('a'), dtype=numpy.uint8).transpose(1, 0) / 255
mask_alpha1 = alpha1_ <= 0
if isinstance(set_alpha1_, float):
# Create alpha channels alpha1 and alpha2
alpha1 = numpy.full((w, h, 1), set_alpha1_).transpose(1, 0, 2)
elif isinstance(set_alpha1_, numpy.ndarray):
alpha1 = set_alpha1_
if isinstance(set_alpha2_, float):
# Create alpha channels alpha1 and alpha2
alpha2 = numpy.full((w, h, 1), set_alpha2_).transpose(1, 0, 2)
elif isinstance(set_alpha2_, numpy.ndarray):
alpha2 = set_alpha2_
# ------------------- pre-multiplied -------------------
# 1) create arrays representing surface1_ and surface2_, swap row and column and normalize.
# 2 ) pre - multiplied alphas
rgb1 = (numpy.array(buffer1, dtype=numpy.uint8).transpose(1, 0, 2) / 255) * alpha1
rgb2 = (numpy.array(buffer2, dtype=numpy.uint8).transpose(1, 0, 2) / 255) * alpha2
# create the output array RGBA
new =
|
numpy.zeros((w, h, 4))
|
numpy.zeros
|
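# Hedged sketch (separate from the additive blend above): the standard
# premultiplied "over" operator on two normalized RGB layers with per-pixel
# alphas. The premultiplication step mirrors the rgb1/rgb2 preparation above.
import numpy as np

def over_composite(rgb_a, alpha_a, rgb_b, alpha_b):
    """Composite layer A over layer B; rgb (h, w, 3) and alpha (h, w, 1) in [0, 1]."""
    out_alpha = alpha_a + alpha_b * (1.0 - alpha_a)
    premultiplied = rgb_a * alpha_a + rgb_b * alpha_b * (1.0 - alpha_a)
    # un-premultiply where the output alpha is non-zero
    out_rgb = np.where(out_alpha > 0, premultiplied / np.maximum(out_alpha, 1e-12), 0.0)
    return out_rgb, out_alpha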
__author__ = "<NAME>"
import numpy as np
from matplotlib import pyplot as plt
import cv2
def myGaussianBlur(image):
if not isinstance(image, np.ndarray):
print("myGaussianBlur: Not a tensor. Was: Image=", image.__class__)
return None
imgShape = image.shape
if len(imgShape) == 2:
gaussianMask = (1.0 / 273) * np.array([[1, 4, 7, 4, 1],
[4, 16, 26, 16, 4],
[7, 26, 41, 26, 7],
[4, 16, 26, 16, 4],
[1, 4, 7, 4, 1]],
dtype=np.float64)
img = np.float64(image.copy())
        img = cv2.filter2D(img, -1, gaussianMask)  # filter2D returns a new array; keep the result
elif len(imgShape) == 3:
b, g, r = cv2.split(image)
img = cv2.merge((myGaussianBlur(b), myGaussianBlur(g), myGaussianBlur(r)))
else:
print("myGaussianBlur: Illegal image dimension. Length of shape can be 2 or 3 only")
return None
return img
def myGradient(image):
if not isinstance(image, np.ndarray):
print("myGradient: Not a tensor. Was: Image=", image.__class__)
return None
imgShape = image.shape
if len(imgShape) == 2:
img = np.float64(image.copy())
sobelGradientXMask = np.array([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]],
dtype=np.float64)
sobelGradientYMask = np.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]],
dtype=np.float64)
print("myGradient: Applying Sobel Gradient X Mask")
gradientX = cv2.filter2D(img, -1, sobelGradientXMask)
print("myGradient: Applying Sobel Gradient Y Mask")
gradientY = cv2.filter2D(img, -1, sobelGradientYMask)
return gradientX, gradientY
elif len(imgShape) == 3:
b, g, r = cv2.split(image)
bSobelX, bSobelY = myGradient(b)
gSobelX, gSobelY = myGradient(g)
rSobelX, rSobelY = myGradient(r)
gradientX = cv2.merge((bSobelX, gSobelX, rSobelX))
gradientY = cv2.merge((bSobelY, gSobelY, rSobelY))
return gradientX, gradientY
else:
print("myGradient: Illegal image dimension. Length of shape can be 2 or 3 only")
return None
def myPhase(gradientX, gradientY):
if not isinstance(gradientX, np.ndarray) or not isinstance(gradientY, np.ndarray):
print("myPhase: Not a tensor. Was: gradientX=", gradientX.__class__, "gradientY=", gradientY.__class__)
return None
gradientXShape = gradientX.shape
gradientYShape = gradientY.shape
if len(gradientXShape) == 2 and len(gradientYShape) == 2:
print("myPhase: Calculating Intensity Gradient")
intensityGradient = np.power((np.power(gradientX, 2) + np.power(gradientY, 2)), 0.5)
# intensityGradient = intensityGradient * 255 / np.max(intensityGradient)
print("myPhase: Calculating Phase")
# phase = np.arctan(np.divide(gradientY, gradientX, out=np.zeros_like(gradientY), where=gradientX != 0))
phase = np.arctan2(gradientY, gradientX)
        # Convert to degrees and fold the (-180, 180] range of arctan2 into [0, 180)
phase = phase * 180. / np.pi
phase[phase < 0] += 180
elif len(gradientXShape) == 3 and len(gradientYShape) == 3:
bX, gX, rX = cv2.split(gradientX)
bY, gY, rY = cv2.split(gradientY)
intensityGradientB, phaseB = myPhase(bX, bY)
intensityGradientG, phaseG = myPhase(gX, gY)
intensityGradientR, phaseR = myPhase(rX, rY)
intensityGradient = cv2.merge((intensityGradientB, intensityGradientG, intensityGradientR))
phase = cv2.merge((phaseB, phaseG, phaseR))
else:
print("myPhase: Illegal dimension. Only 2D and 3D are supported. Was: gradientX=", gradientXShape, "gradientY=", gradientYShape)
return None
return intensityGradient, phase
def myNonMaximumSuppression(intensityGradient, phase):
if not isinstance(intensityGradient, np.ndarray) or not isinstance(phase, np.ndarray):
print("myNonMaximumSuppression: Not a tensor. Was: intensityGradient=", intensityGradient.__class__, "phase=", phase.__class__)
return None
intensityGradientShape = intensityGradient.shape
phaseShape = phase.shape
if len(intensityGradientShape) == 2 and len(phaseShape) == 2:
nonMaxSuppress = np.zeros(intensityGradient.shape, dtype=np.int32)
# First step: round angles
phase[
|
np.logical_or(phase < 22.5, phase >= 157.5)
|
numpy.logical_or
|
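# Hedged, numpy-only sketch of the magnitude/phase step implemented by myPhase
# above, on a tiny pair of gradient arrays (no OpenCV required): magnitude is
# sqrt(gx**2 + gy**2) and the phase is folded into [0, 180) degrees.
import numpy as np

gx = np.array([[0.0, 1.0], [0.0, 1.0]])   # horizontal gradient on the right column
gy = np.array([[0.0, 0.0], [1.0, 1.0]])   # vertical gradient on the bottom row
magnitude = np.sqrt(gx ** 2 + gy ** 2)
phase = np.degrees(np.arctan2(gy, gx))
phase[phase < 0] += 180                    # fold (-180, 180] into [0, 180)
# magnitude[1, 1] == sqrt(2), phase[1, 1] == 45.0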
#!/usr/bin/env python3
#
# Accepts subsampled fastq files, a bam alignment of the whole fastq, and the reference genome.
# How to subsample the fastq files? Use the bam file to get the reads aligned to the reference,
# then subsample this set of reads down to the desired coverage.
import gzip
import logging
import os
import sys
from collections import defaultdict
from functools import partial
import numpy as np
import pysam
from Bio import SeqIO
from scipy import stats
class Contigs:
genome_size = int(0)
num_contigs = int(0)
def __init__(self, record):
Contigs.num_contigs += 1
self.name = record.id
self.sequence = str(record.seq)
self.length = int(len(self.sequence))
Contigs.genome_size += self.length
self.ref_prob_array = np.zeros(self.length)
self.ref_depth_array = np.zeros(self.length)
self.ref_covered_bases = float(0)
self.total_depth = float(0)
self.ref_depth = float(0)
def __hash__(self):
return hash(str(self.sequence))
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
# Not strictly necessary, but to avoid having both
# x==y and x!=y being True at the same time
return not (self == other)
# def weighted_ref_depth(self):
# if self.ref_covered_bases == 0:
# return 0
# return Contigs.weighted_cal(self.ref_depth, self.ref_covered_bases, self.length)
@staticmethod
def weighted_cal(success, sample_size, total):
return (success / float(sample_size)) * float(total)
@classmethod
def read_fasta(cls, fasta_file):
return [
Contigs(fasta_record) for fasta_record in SeqIO.parse(fasta_file, "fasta")
]
# def parallel_calculate_coverage(bamfile_name, contigs, min_qual, min_pid, min_paln, cores):
# with concurrent.futures.ThreadPoolExecutor(max_workers=cores) as executor:
# future = [executor.submit(calculate_coverage_per_contig, bamfile_name, min_qual, min_pid, min_paln, contig) for contig in contigs]
# for f in concurrent.futures.as_completed(future):
# result = f.result()
# ## When you don't wish to return anything, but monitor process for exceptions.
# jobs = concurrent.futures.wait(future, return_when='FIRST_EXCEPTION')
# if len(jobs.not_done) > 0:
# print(f'Some ({len(jobs.not_done)}) exceptions occurred')
# for job in jobs.not_done:
# print(f'{job.exception()}')
# sys.exit(1)
def calculate_coverage(
bamfile_name, contigs, read_set, min_qual, min_pid, min_paln, fh
):
covered_contig_lengths = [
calculate_coverage_per_contig(
bamfile_name, read_set, min_qual, min_pid, min_paln, contig, fh
)
for contig in contigs
]
return sum(covered_contig_lengths)
def calculate_coverage_per_contig(
bamfile_name, read_set, minQual, thresh_pid, thresh_paln, contig, fh
):
if len(read_set) == 0:
check_read = partial(keep_read, min_pid=thresh_pid, min_paln=thresh_paln)
else:
check_read = partial(
keep_read, min_pid=thresh_pid, min_paln=thresh_paln, read_set=read_set
)
with pysam.AlignmentFile(bamfile_name, mode="rb") as bamfile:
# https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignmentFile.count_coverage
cov_counts = bamfile.count_coverage(
contig.name,
start=0,
end=contig.length,
read_callback=check_read,
# quality_threshold is the minimum quality score (in phred) a base has to reach to be counted.
quality_threshold=minQual,
)
contig_len = 0
for i, ref_allele in enumerate(contig.sequence):
base_cov = defaultdict(int)
ref_prob = 0
ref_depth = 0
total_depth = 0
ref_allele = str(ref_allele).upper()
# if refseq has N, the prob is 'NA'
if ref_allele in ["A", "T", "C", "G"]:
base_cov["A"] = cov_counts[0][i]
base_cov["C"] = cov_counts[1][i]
base_cov["G"] = cov_counts[2][i]
base_cov["T"] = cov_counts[3][i]
total_depth = (
base_cov["A"] + base_cov["C"] + base_cov["G"] + base_cov["T"]
)
# prob 0 if no depth
if total_depth > 0:
contig.total_depth += total_depth
ref_depth = base_cov[ref_allele]
ref_prob = float(ref_depth) / float(total_depth)
# if ref_depth > 0:
# contig.ref_covered_bases += 1
# contig.ref_depth += ref_depth
else:
ref_prob = np.nan
ref_depth = np.nan
# if ref_prob < 1:
# Write output to a file, with columns as:
# CHROM,POS,REF,A,C,G,T,ref_prob,ref_depth,total_depth
fh.write(
f"{contig.name},"
f"{i},"
f"{ref_allele},"
f"{base_cov['A']},"
f"{base_cov['C']},"
f"{base_cov['G']},"
f"{base_cov['T']},"
f"{ref_prob},"
f"{ref_depth},"
f"{total_depth}"
f"\n"
)
contig_len = i + 1
# contig.ref_prob_array[i] = ref_prob
# contig.ref_depth_array[i] = ref_depth
return contig_len
def keep_read(aln, min_pid, min_paln, read_set=None):
if (read_set is not None) and (aln.query_name not in read_set):
return False
edit_dist = dict(aln.tags)["NM"]
aln_len = aln.query_alignment_length
read_length = aln.infer_read_length()
# MetaBAT: %ID is calculated from the CIGAR string and/or NM/MD fields
    # and == 100 * MatchedBases / (MatchedBases + Substitutions + Insertions + Deletions)
pid = 100 * (aln_len - edit_dist) / float(aln_len)
p_aln_len = aln_len * 100 / float(read_length)
return (min_pid <= pid <= 100) and (min_paln <= p_aln_len <= 100)
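# Hedged worked example of the MetaBAT-style thresholds above, with hypothetical
# numbers (150 bp read, 145 bp aligned, 3 mismatches):
def _example_keep_read_thresholds():
    aln_len, edit_dist, read_length = 145, 3, 150
    pid = 100 * (aln_len - edit_dist) / float(aln_len)    # ~97.9 %
    p_aln_len = aln_len * 100 / float(read_length)        # ~96.7 %
    # passes for, e.g., min_pid=95 and min_paln=90
    return (95 <= pid <= 100) and (90 <= p_aln_len <= 100)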
def distance_from_reference(prob_array):
# remove NANs from array
prob_array = remove_nan(prob_array)
# create a unit array of same length representing reference.
ref_array = np.ones_like(prob_array)
return (
root_mean_squared_error(ref_array, prob_array),
cos_sim(ref_array, prob_array),
)
def root_mean_squared_error(ref_array, prob_array):
""" Calculate mean squared error given two arrays of same length """
if len(ref_array) == len(prob_array):
return np.sqrt(np.square(np.subtract(ref_array, prob_array)).mean())
def cos_sim(ref_array, prob_array):
"""calculate cosine distance between reference and given."""
if len(ref_array) == len(prob_array):
return np.dot(ref_array, prob_array) / (
np.linalg.norm(ref_array) *
|
np.linalg.norm(prob_array)
|
numpy.linalg.norm
|
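# Hedged worked example of the two measures above on toy arrays (not real
# per-base probabilities): a sample matching the reference at 3 of 4 positions
# and half-right at the fourth.
import numpy as np

ref_toy = np.ones(4)
prob_toy = np.array([1.0, 1.0, 1.0, 0.5])
rmse_toy = np.sqrt(np.square(ref_toy - prob_toy).mean())  # 0.25
cos_toy = np.dot(ref_toy, prob_toy) / (np.linalg.norm(ref_toy) * np.linalg.norm(prob_toy))  # ~0.97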