| prompt (string, 15 – 655k chars) | completion (string, 3 – 32.4k chars) | api (string, 8 – 52 chars) |
|---|---|---|
import os
import numpy as np
from PIL import Image
import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
from tqdm import trange
import contextlib2
flags.DEFINE_string('dataset', '../dataset/train', 'Path to the training set')
flags.DEFINE_string('train_record_path', '../dataset/record/train.record', 'Path to store the training set records')
flags.DEFINE_string('val_record_path', '../dataset/record/val.record', 'Path to store the validation set records')
flags.DEFINE_string('test_record_path', '../dataset/record/test.record', 'Path to store the test set records')
flags.DEFINE_string('train_txt', '../dataset/train.txt', 'Path to the training set txt file')
flags.DEFINE_string('val_txt', '../dataset/val.txt', 'Path to the validation set txt file')
def get_files(file_dir, ratio=0.9):
images = []
labels = []
for image in os.listdir(file_dir):
image_path = os.path.join(file_dir, image)
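# Build the mask path: drop the trailing 'train' from file_dir, look in the sibling 'masks/' directory, and swap the image extension for '.png'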
label_path = file_dir[0:-5] + 'masks/' + image[0:-4] + '.png'
images.append(image_path)
labels.append(label_path)
# Split into training and validation sets by the given ratio
s1 = int(len(images) * ratio)  # np.int was removed from NumPy; the builtin int is equivalent here
# Combine the training and validation sets
tmp_list =
|
np.array([images, labels])
|
numpy.array
|
import os
import numpy as np
from astropy.io import fits as pyfits
from astropy.nddata import Cutout2D
from reproject import reproject_interp
from reproject.mosaicking import reproject_and_coadd, find_optimal_celestial_wcs
import fits_magic as fm
import utils
import glob
import sys
class polarisation_mosaic:
"""
Class to produce polarisation mosaics in Stokes Q, U and V.
"""
module_name = 'POLARISATION MOSAIC'
def __init__(self, file_=None, **kwargs):
self.default = utils.load_config(self, file_)
utils.set_mosdirs(self)
self.config_file_name = file_
def go(self):
"""
Function to generate the polarisation mosaics
"""
utils.gen_poldirs(self)
utils.collect_paramfiles(self)
veri = self.check_polimages()
utils.copy_polimages(self, veri)
utils.copy_polbeams(self)
cbeam = utils.get_common_psf(self, veri, format='array')
for sb in range(24):
qimages, uimages, pbimages = utils.get_polfiles(self, sb)
if len(qimages) != 0:
self.make_polmosaic(qimages, uimages, pbimages, sb, cbeam, pbclip=self.pol_pbclip)
else:
print('No data for subband ' + str(sb).zfill(2))
self.make_polcubes()
def check_polimages(self):
"""
Sort out any beams or planes that are unusable for the imaging
"""
# Collect the beam and noise parameters from the main parameter file
rms_array = np.full((40, 24, 2), np.nan)
bmaj_array = np.full((40, 24, 2), np.nan)
bmin_array = np.full((40, 24, 2), np.nan)
bpa_array = np.full((40, 24, 2), np.nan)
for beam in range(0, 40, 1):
try:
rms_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_imagestats')[:, 2, :]
bmaj_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_beamparams')[:, 0, :]
bmin_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_beamparams')[:, 1, :]
bpa_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_beamparams')[:, 2, :]
except KeyError:
print('Synthesised beam parameters and/or noise statistics of beam ' + str(beam).zfill(2) + ' are not available. Excluding beam!')
np.savetxt(self.polmosaicdir + '/Qrms.npy', rms_array[:,:,0])
np.savetxt(self.polmosaicdir + '/Qbmaj.npy', bmaj_array[:,:,0])
np.savetxt(self.polmosaicdir + '/Qbmin.npy', bmin_array[:,:,0])
np.savetxt(self.polmosaicdir + '/Qbpa.npy', bpa_array[:,:,0])
np.savetxt(self.polmosaicdir + '/Urms.npy', rms_array[:, :, 1])
np.savetxt(self.polmosaicdir + '/Ubmaj.npy', bmaj_array[:,:,1])
np.savetxt(self.polmosaicdir + '/Ubmin.npy', bmin_array[:,:,1])
np.savetxt(self.polmosaicdir + '/Ubpa.npy', bpa_array[:,:,1])
# Create an array for the accepted beams
accept_array = np.full((40, 24), True)
# Iterate through the rms and beam sizes of all cubes and filter the images
for b in range(40):
for sb in range(24):
if rms_array[b, sb, 0] > self.pol_rmsclip or np.isnan(rms_array[b, sb, 0]):
accept_array[b, sb] = False
else:
continue
for sb in range(24):
if rms_array[b, sb, 1] > self.pol_rmsclip or np.isnan(rms_array[b, sb, 1]):
accept_array[b, sb] = False
else:
continue
for sb in range(24):
if bmin_array[b, sb, 0] > self.pol_bmin or bmin_array[b, sb, 1] > self.pol_bmin:
accept_array[b, sb] = False
else:
continue
for sb in range(24):
if bmaj_array[b, sb, 0] > self.pol_bmaj or bmaj_array[b, sb, 1] > self.pol_bmaj:
accept_array[b, sb] = False
else:
continue
np.savetxt(self.polmosaicdir + '/accept_array.npy', accept_array)
# Generate the main array for accepting the beams
bacc_array = np.full(40, True, dtype=bool)
badim_array = np.zeros((40))
# Count the number of rejected planes for each beam and filter out any beam with more bad planes than allowed
for b in range(40):
badim_array[b] = len(np.where(accept_array[b, :] == False)[0])
if badim_array[b] > self.pol_badim:
bacc_array[b] = False
accept_array[b, :] = False
else:
continue
np.savetxt(self.polmosaicdir + '/badim.npy', badim_array)
np.savetxt(self.polmosaicdir + '/bacc.npy', bacc_array)
# Generate the array for accepting the subbands
sb_acc = np.full(24, True, dtype=bool)
for sb in range(24):
if np.sum(accept_array[:, sb]) < np.sum(bacc_array):
sb_acc[sb] = False
np.savetxt(self.polmosaicdir + '/sbacc.npy', sb_acc)
final_acc_arr = np.full((40, 24), True)
for b in range(40):
for sb in range(24):
if bacc_array[b] and sb_acc[sb]:
final_acc_arr[b,sb] = True
else:
final_acc_arr[b,sb] = False
np.savetxt(self.polmosaicdir + '/final_accept.npy', final_acc_arr)
return final_acc_arr
def make_polmosaic(self, qimages, uimages, pbimages, sb, psf, reference=None, pbclip=None):
"""
Function to generate the polarisation mosaic in Q and U
"""
# Set the directories for the mosaicking
utils.set_mosdirs(self)
# Get the common psf
common_psf = psf
qcorrimages = [] # to mosaic
ucorrimages = [] # to mosaic
qpbweights = [] # of the pixels
upbweights = [] # of the pixels
qrmsweights = [] # of the images themselves
urmsweights = [] # of the images themselves
qfreqs = []
ufreqs = []
# weight_images = []
for qimg, uimg, pb in zip(qimages, uimages, pbimages):
# prepare the images (squeeze, transfer_coordinates, reproject, regrid pbeam, correct...)
with pyfits.open(qimg) as f:
qimheader = f[0].header
qfreqs.append(qimheader['CRVAL3'])
qtg = qimheader['OBJECT']
with pyfits.open(uimg) as f:
uimheader = f[0].header
ufreqs.append(uimheader['CRVAL3'])
utg = uimheader['OBJECT']
qimg = fm.fits_squeeze(qimg) # remove extra dimensions
uimg = fm.fits_squeeze(uimg) # remove extra dimensions
pb = fm.fits_transfer_coordinates(qimg, pb) # transfer_coordinates
pb = fm.fits_squeeze(pb) # remove extra dimensions
with pyfits.open(qimg) as f:
qimheader = f[0].header
qimdata = f[0].data
with pyfits.open(uimg) as f:
uimheader = f[0].header
uimdata = f[0].data
with pyfits.open(pb) as f:
pbhdu = f[0]
autoclip = np.nanmin(f[0].data)
# reproject
qreproj_arr, qreproj_footprint = reproject_interp(pbhdu, qimheader)
ureproj_arr, ureproj_footprint = reproject_interp(pbhdu, uimheader)
pbclip = self.pol_pbclip or autoclip
print('PB is clipped at %f level' % pbclip)
qreproj_arr = np.float32(qreproj_arr)
ureproj_arr = np.float32(ureproj_arr)
qreproj_arr[qreproj_arr < pbclip] = np.nan
ureproj_arr[ureproj_arr < pbclip] = np.nan
qpb_regr_repr = pb.replace('.fits', '_repr.fits')
upb_regr_repr = pb.replace('.fits', '_repr.fits')
pyfits.writeto(qpb_regr_repr, qreproj_arr, qimheader, overwrite=True)
pyfits.writeto(upb_regr_repr, ureproj_arr, uimheader, overwrite=True)
# convolution with common psf
qreconvolved_image = qimg.replace('.fits', '_reconv.fits')
qreconvolved_image = fm.fits_reconvolve_psf(qimg, common_psf, out=qreconvolved_image)
ureconvolved_image = uimg.replace('.fits', '_reconv.fits')
ureconvolved_image = fm.fits_reconvolve_psf(uimg, common_psf, out=ureconvolved_image)
# PB correction
qpbcorr_image = qreconvolved_image.replace('_reconv.fits', '_pbcorr.fits')
qpbcorr_image = fm.fits_operation(qreconvolved_image, qreproj_arr, operation='/', out=qpbcorr_image)
upbcorr_image = ureconvolved_image.replace('_reconv.fits', '_pbcorr.fits')
upbcorr_image = fm.fits_operation(ureconvolved_image, ureproj_arr, operation='/', out=upbcorr_image)
# cropping
qcropped_image = qimg.replace('.fits', '_mos.fits')
qcropped_image, qcutout = fm.fits_crop(qpbcorr_image, out=qcropped_image)
qcorrimages.append(qcropped_image)
ucropped_image = uimg.replace('.fits', '_mos.fits')
ucropped_image, ucutout = fm.fits_crop(upbcorr_image, out=ucropped_image)
ucorrimages.append(ucropped_image)
# primary beam weights
qwg_arr = qreproj_arr - pbclip # the edges weight ~0
qwg_arr[np.isnan(qwg_arr)] = 0 # the NaNs weight 0
qwg_arr = qwg_arr / np.nanmax(qwg_arr) # normalize
qwcut = Cutout2D(qwg_arr, qcutout.input_position_original, qcutout.shape)
qpbweights.append(qwcut.data)
uwg_arr = ureproj_arr - pbclip # the edges weight ~0
uwg_arr[np.isnan(uwg_arr)] = 0 # the NaNs weight 0
uwg_arr = uwg_arr / np.nanmax(uwg_arr) # normalize
uwcut = Cutout2D(uwg_arr, ucutout.input_position_original, ucutout.shape)
upbweights.append(uwcut.data)
# weight the images by RMS noise over the edges
ql, qm = qimdata.shape[0]//10, qimdata.shape[1]//10
qmask = np.ones(qimdata.shape, dtype=bool)  # np.bool was removed from NumPy
qmask[ql:-ql,qm:-qm] = False
qimg_noise = np.nanstd(qimdata[qmask])
qimg_weight = 1 / qimg_noise**2
qrmsweights.append(qimg_weight)
ul, um = uimdata.shape[0]//10, uimdata.shape[1]//10
umask = np.ones(uimdata.shape, dtype=bool)  # np.bool was removed from NumPy
umask[ul:-ul,um:-um] = False
uimg_noise = np.nanstd(uimdata[umask])
uimg_weight = 1 / uimg_noise**2
urmsweights.append(uimg_weight)
# merge the image rms weights and the primary beam pixel weights:
qweights = [qp*qr/max(qrmsweights) for qp, qr in zip(qpbweights, qrmsweights)]
uweights = [up * ur / max(urmsweights) for up, ur in zip(upbweights, urmsweights)]
# create the wcs and footprint for the output mosaic
qwcs_out, qshape_out = find_optimal_celestial_wcs(qcorrimages, auto_rotate=False, reference=reference)
uwcs_out, ushape_out = find_optimal_celestial_wcs(ucorrimages, auto_rotate=False, reference=reference)
qarray, qfootprint = reproject_and_coadd(qcorrimages, qwcs_out, shape_out=qshape_out,
reproject_function=reproject_interp,
input_weights=qweights)
uarray, ufootprint = reproject_and_coadd(ucorrimages, uwcs_out, shape_out=ushape_out,
reproject_function=reproject_interp,
input_weights=uweights)
qarray = np.float32(qarray)
uarray = np.float32(uarray)
# insert common PSF into the header
qpsf = common_psf.to_header_keywords()
qhdr = qwcs_out.to_header()
qhdr.insert('RADESYS', ('FREQ', np.nanmean(qfreqs)))
qhdr.insert('RADESYS', ('BMAJ', qpsf['BMAJ']))
qhdr.insert('RADESYS', ('BMIN', qpsf['BMIN']))
qhdr.insert('RADESYS', ('BPA', qpsf['BPA']))
upsf = common_psf.to_header_keywords()
uhdr = uwcs_out.to_header()
uhdr.insert('RADESYS', ('FREQ', np.nanmean(ufreqs)))
uhdr.insert('RADESYS', ('BMAJ', upsf['BMAJ']))
uhdr.insert('RADESYS', ('BMIN', upsf['BMIN']))
uhdr.insert('RADESYS', ('BPA', upsf['BPA']))
pyfits.writeto(self.polmosaicdir + '/' + str(qtg).upper() + '_' + str(sb).zfill(2) + '_Q.fits', data=qarray,
header=qhdr, overwrite=True)
pyfits.writeto(self.polmosaicdir + '/' + str(utg).upper() + '_' + str(sb).zfill(2) + '_U.fits', data=uarray,
header=uhdr, overwrite=True)
utils.clean_polmosaic_tmp_data(self, sb)
def make_polcubes(self):
"""
Function to generate the cubes in Q and U from the polarisation mosaics
"""
# Set the directories for the mosaicking
utils.set_mosdirs(self)
# Get the fits files
Qmoss = sorted(glob.glob(self.polmosaicdir + '/*_[0-9][0-9]_Q.fits'))
Umoss = sorted(glob.glob(self.polmosaicdir + '/*_[0-9][0-9]_U.fits'))
# Check if the same number of mosaics is available for Q and U
Qmoss_chk = []
Umoss_chk = []
for qchk in Qmoss:
qnew = qchk.replace('_Q','')
Qmoss_chk.append(qnew)
for uchk in Umoss:
unew = uchk.replace('_U','')
Umoss_chk.append(unew)
if Qmoss_chk == Umoss_chk:
pass
else:
print('Different number of Q and U mosaics. Cannot generate cubes!')
sys.exit()
# Find the smallest mosaic image
allim = sorted(Qmoss + Umoss)
naxis1 = []
naxis2 = []
for mos in allim:
with pyfits.open(mos) as m:
imheader = m[0].header
naxis1.append(imheader['NAXIS1'])
naxis2.append(imheader['NAXIS2'])
impix = np.array(naxis1) * np.array(naxis2)
smim = allim[np.argmin(impix)]
# Reproject the rest of the images to this one
# Load the header of the reference image
hduref = pyfits.open(smim)[0]
hduref_hdr = hduref.header
# Reproject the other images to the reference
for image in allim:
hdu = pyfits.open(image)[0]
hdu_hdr = hdu.header
hduref_hdr['FREQ'] = hdu_hdr['FREQ']
repr_image = reproject_interp(hdu, hduref_hdr, return_footprint=False)
pyfits.writeto(image.replace('.fits','_repr.fits'), repr_image, hduref_hdr, overwrite=True)
# Generate a mask to limit the valid area for all images to the largest common valid one
allreprims = sorted(glob.glob(self.polmosaicdir + '/*_repr.fits'))
nall = len(allreprims)
# Generate an array for all the images
alldata = np.full((nall, hduref_hdr['NAXIS2'], hduref_hdr['NAXIS1']), np.nan)
for i, image in enumerate(allreprims):
hdu = pyfits.open(image)[0]
hdu_data = hdu.data
alldata[i,:,:] = hdu_data
# Generate the mask
immask = np.sum(alldata, axis=0)
immask[np.isfinite(immask)] = 1.0
# Apply the mask
for m, mimage in enumerate(allreprims):
mhdu = pyfits.open(mimage)[0]
mhdu_data = mhdu.data
mhdu_hdr = mhdu.header
mdata = mhdu_data*immask
pyfits.writeto(mimage.replace('_repr.fits','_mask.fits'), mdata, mhdu_hdr, overwrite=True)
# Finally create the frequency image cubes
qfinimages = sorted(glob.glob(self.polmosaicdir + '/*Q_mask.fits'))
ufinimages = sorted(glob.glob(self.polmosaicdir + '/*U_mask.fits'))
nq = len(qfinimages)
nu = len(ufinimages)
qdata = np.full((nq, hduref_hdr['NAXIS2'], hduref_hdr['NAXIS1']), np.nan)
udata = np.full((nu, hduref_hdr['NAXIS2'], hduref_hdr['NAXIS1']), np.nan)
freqs = []
# Generate the Q cube
for q, qim in enumerate(qfinimages):
qhdu = pyfits.open(qim)[0]
qhdu_data = qhdu.data
qhdu_hdr = qhdu.header
freqs.append(qhdu_hdr['FREQ'])
qdata[q,:,:] = qhdu_data
qhdu_hdr.insert('NAXIS2', ('NAXIS3', len(qfinimages)), after=True)
qhdu_hdr.insert('CTYPE2', ('CRPIX3', 1.0), after=True)
qhdu_hdr.insert('CRPIX3', ('CDELT3', 6250000.0), after=True)
qhdu_hdr.insert('CDELT3', ('CRVAL3', freqs[0]), after=True)
qhdu_hdr.insert('CRVAL3', ('CTYPE3', 'FREQ-OBS'), after=True)
pyfits.writeto(self.polmosaicdir + '/Qcube.fits', np.float32(qdata), qhdu_hdr, overwrite=True)
# Write the frequency file
with open(self.polmosaicdir + '/freq.txt', 'w') as f:
for item in freqs:
f.write("%s\n" % item)
# Generate the U cube
for u, uim in enumerate(ufinimages):
uhdu = pyfits.open(uim)[0]
uhdu_data = uhdu.data
uhdu_hdr = uhdu.header
udata[u,:,:] = uhdu_data
uhdu_hdr.insert('NAXIS2', ('NAXIS3', len(ufinimages)), after=True)
uhdu_hdr.insert('CTYPE2', ('CRPIX3', 1.0), after=True)
uhdu_hdr.insert('CRPIX3', ('CDELT3', 6250000.0), after=True)
uhdu_hdr.insert('CDELT3', ('CRVAL3', freqs[0]), after=True)
uhdu_hdr.insert('CRVAL3', ('CTYPE3', 'FREQ-OBS'), after=True)
pyfits.writeto(self.polmosaicdir + '/Ucube.fits', np.float32(udata), uhdu_hdr, overwrite=True)
# Write a file with the central coordinates of each pointing used
coord_arr =
|
np.full((40,3), np.nan)
|
numpy.full
|
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import gseapy as gp
import logging, sys
# In[3]:
np.seterr(divide='ignore')
print("GSEApy version: %s"%gp.__version__)
# In[15]:
# identical to gseapy.algorithm.enrichment_score_tensor
def enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,
scale=False, single=False, rs=np.random.RandomState()):
"""Next generation algorithm of GSEA and ssGSEA.
:param gene_mat: the ordered gene list(vector) or gene matrix.
:param cor_mat: correlation vector or matrix (e.g. signal to noise scores)
corresponding to the genes in the gene list or matrix.
:param dict gene_sets: gmt file dict.
:param float weighted_score_type: weighting by the correlation.
options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
:param int nperm: permutation times.
:param bool scale: If True, normalize the scores by the number of genes in gene_mat.
:param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
:param rs: Random state for initialize gene list shuffling.
Default: np.random.RandomState(seed=None)
:return:
ES: Enrichment score (real number between -1 and +1); for ssGSEA this is the (optionally scaled) sum of the running score.
ESNULL: Enrichment score calculated from random permutation
Hits_Indices: Indices of genes if genes are included in gene_set.
RES: Numerical vector containing the running enrichment score for
all locations in the gene list.
"""
# gene_mat -> 1d: prerank, ssGSEA or 2d: GSEA
keys = sorted(gene_sets.keys())
if weighted_score_type == 0:
# don't bother doing the calculation, just set to 1
cor_mat = np.ones(cor_mat.shape)
elif weighted_score_type > 0:
pass
else:
logging.error("Using negative values of weighted_score_type, not allowed")
sys.exit(0)
cor_mat = np.abs(cor_mat)
if cor_mat.ndim ==1:
# ssGSEA or Prerank
# gene sets -> M, genes -> N, permutations -> axis=2
N, M = len(gene_mat), len(keys)
# generate gene hits matrix
# for 1d ndarray of gene_mat, set assume_unique=True,
# means the input arrays are both assumed to be unique,
# which can speed up the calculation.
tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])
# index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]
# generate permuted hits matrix
perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))
# shuffle matrix, last matrix is not shuffled when nperm > 0
if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, perm_tag_tensor[:,:,:-1])
# missing hits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type
elif cor_mat.ndim == 2:
# GSEA
# 2d ndarray, gene_mat and cor_mat are shuffled already
# reshape matrix
cor_mat, gene_mat = cor_mat.T, gene_mat.T
# gene sets -> M, genes -> N, permutations -> axis=2
# don't use assume_unique=True with np.isin() on a 2d array:
# elements in gene_mat are not unique, which would cause unwanted results
perm_tag_tensor = np.stack([np.isin(gene_mat, gene_sets[key]) for key in keys], axis=0)
#index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]
# nohits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type
else:
logging.error("Program die because of unsupported input")
sys.exit(0)
# Nhint = tag_indicator.sum(1)
# Nmiss = N - Nhint
axis=1
P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)
P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)
REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)
# ssGSEA: scale es by gene numbers ?
# https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33
if scale: REStensor = REStensor / len(gene_mat)
if single:
#ssGSEA
esmatrix = np.sum(REStensor, axis=axis)
else:
#GSEA
esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)
esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)
es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]
return es, esnull, hit_ind, RES
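# Illustrative sketch (not part of the original notebook): a minimal call of the function
# above on toy data, just to show the expected shapes. The gene names and the gene set
# below are invented for the example.
_genes = np.array(['g1', 'g2', 'g3', 'g4', 'g5'])
_cors = np.array([2.0, 1.5, 1.0, 0.5, 0.1])  # e.g. signal-to-noise scores, already ranked
_sets = {'toy_set': ['g1', 'g3']}
_es, _esnull, _hits, _res = enrichment_score_tensor(_genes, _cors, _sets,
weighted_score_type=1, nperm=10)
# _es: one score per gene set; _esnull: nperm permutation scores per set;
# _hits: indices of g1 and g3; _res: running enrichment score along the ranked list.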
# In[16]:
gex = pd.read_table("./data/testSet_rand1200.gct", comment='#', index_col=0)
# In[18]:
gmt = gp.parser.gsea_gmt_parser("./data/randomSets.gmt")
# In[24]:
df = pd.DataFrame(index=sorted(gmt.keys()))
rs =
|
np.random.RandomState(0)
|
numpy.random.RandomState
|
#encoding: utf-8
import copy
import numpy as np
def count_star(A,N,neiN,motif,edge_adj):
print('Start searching for motif: ', motif)
n=0
a=copy.copy(A)
for i in range(N):
if (np.sum(a[i])>neiN-1):
#print('{} star center is {}'.format(motif,i))
n+=1
edge_num = neiN
while edge_num > 0:
for k in range(len(a[i])):
if a[i][k] >0:
edge_adj[i][k] += str(motif)
edge_adj[k][i] += str(motif)
edge_num -= 1
for j in range(i):
a[N-j-1][i]=0
x=np.nonzero(a[i])
nei_Index=x[0][:neiN]
a[i].fill(0)
for j in nei_Index:
a[j].fill(0)
for k in range(N):
a[k][j]=0
return n
def count_star_5and8(A,nodN,edge_adj):
n_motif5=0
n_motif8=0
for i in range(nodN):
x0 = np.nonzero(A[i])
x=x0[0]
degree=len(x)
if degree<3:
continue
for a_3 in range(degree-2):
for b_3 in range(a_3+1,degree-1):
for c_3 in range(b_3+1,degree):
n_motif5+=1
#print('star 5 center {} to {} {} {}'.format(i,x[a_3],x[b_3],x[c_3]))
edge_adj[i][x[a_3]] += '5'
edge_adj[x[a_3]][i] += '5'
edge_adj[i][x[b_3]] += '5'
edge_adj[x[b_3]][i] += '5'
edge_adj[i][x[c_3]] += '5'
edge_adj[x[c_3]][i] += '5'
if degree>=4:
for a_4 in range(degree-3):
for b_4 in range(a_4+1,degree-2):
for c_4 in range(b_4+1,degree-1):
for d_4 in range(c_4+1,degree):
n_motif8+=1
#print('star 8 center {} to {} {} {} {}'.format(i, x[a_4], x[b_4], x[c_4],x[d_4]))
edge_adj[i][x[a_4]] += '8'
edge_adj[x[a_4]][i] += '8'
edge_adj[i][x[b_4]] += '8'
edge_adj[x[b_4]][i] += '8'
edge_adj[i][x[c_4]] += '8'
edge_adj[x[c_4]][i] += '8'
edge_adj[i][x[d_4]] += '8'
edge_adj[x[d_4]][i] += '8'
return n_motif5,n_motif8
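# Illustrative sketch (not part of the original script): counting 3-star and 4-star motifs
# on a tiny star graph whose centre node 0 is connected to nodes 1, 2 and 3.
_A = np.array([[0, 1, 1, 1],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]])
_edge_adj = [['' for _ in range(4)] for _ in range(4)]
_n5, _n8 = count_star_5and8(_A, 4, _edge_adj)  # expected: one 3-star motif, no 4-star motif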
def find_next_2(a,N,i,rest,stack,motif,edge_adj,n):
if rest==0:
#print('Current search finished!', stack)
for j in range(len(stack)-1):
edge_adj[stack[j]][stack[j+1]]+=str(motif)
edge_adj[stack[j+1]][stack[j]] += str(motif)
return n+1
else:
if np.sum(a[i])>0:
x =
|
np.nonzero(a[i])
|
numpy.nonzero
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import numpy as np
from tqdm import tqdm
from disent.util.inout.files import AtomicSaveFile
# ========================================================================= #
# Save Numpy Files #
# ========================================================================= #
def save_dataset_array(array: np.ndarray, out_file: str, overwrite: bool = False, save_key: str = 'images'):
assert array.ndim == 4, f'invalid array shape, got: {array.shape}, must be: (N, H, W, C)'
assert array.dtype == 'uint8', f'invalid array dtype, got: {array.dtype}, must be: "uint8"'
# save the data
with AtomicSaveFile(out_file, overwrite=overwrite) as temp_file:
|
np.savez_compressed(temp_file, **{save_key: array})
|
numpy.savez_compressed
|
"""
QTNM base field module.
Provides the abstract classes, QtnmBaseField and QtnmBaseSolver.
New concrete implementations of this class should be compatible with
other python code within the Electron-Tracking package.
"""
from abc import ABC, abstractmethod
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import electron_mass as me, elementary_charge as qe
from scipy.integrate import solve_ivp
from utils import calculate_omega
class QtnmBaseSolver(ABC):
def __init__(self, charge=-qe, mass=me, b_field=1.0, calc_b_field=None):
self.mass = mass
self.charge = charge
self.b_field = b_field
self.calc_b_field = calc_b_field
if calc_b_field is not None:
# Handle cases where calc_b_field returns a single component
if np.size(calc_b_field(0, 0, 0)) == 1:
self.calc_b_field = lambda x, y, z: \
np.array([0.0, 0.0, calc_b_field(x, y, z)])
# If calc_b_field not provided, assume constant field, and store omega
if calc_b_field is None:
omega0 = calculate_omega(b_field, mass=mass, charge=charge)
if np.size(omega0) == 3:
self.omega0 = omega0
elif np.size(omega0) == 1:
self.omega0 = np.array([0, 0, omega0], dtype=float)
else:
raise ValueError('Calculate omega returned erroneous size')
def get_omega(self, pos=np.zeros(3)):
"""
Calculate omega as a function of position
"""
# Use pre-calculated value if possible
if self.calc_b_field is None:
return self.omega0
bfield = self.calc_b_field(pos[0], pos[1], pos[2])
return calculate_omega(bfield, mass=self.mass, charge=self.charge,
energy=0.0)
@abstractmethod
def rhs(self, t, x):
"""
Return RHS of equation as a function of time(t) and x(vars solved for)
"""
@abstractmethod
def rhs_1d(self, t, x):
"""
Return RHS of equation as a function of time(t) and x(vars solved for),
assuming a one dimensional B-field (in z-direction)
"""
def analytic_solution(self, time, x0=np.array([1.0, 0.0, 0.0]),
v0=np.array([0.0, 1.0, 0.0])):
"""
Return analytic solution as a function of time , assuming a uniform
field
"""
return None
def analytic_solution_1d(self, time, x0=np.array([1.0, 0.0, 0.0]),
v0=
|
np.array([0.0, 1.0, 0.0])
|
numpy.array
|
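# Illustrative sketch (an assumption, not part of the original package): a minimal concrete
# subclass of QtnmBaseSolver above with a Lorentz-force right-hand side, where the state
# vector is x = [position (3 values), velocity (3 values)].
class UniformFieldSolver(QtnmBaseSolver):
    def rhs(self, t, x):
        pos, vel = x[:3], x[3:]
        if self.calc_b_field is not None:
            b = self.calc_b_field(pos[0], pos[1], pos[2])
        else:
            b = np.asarray(self.b_field, dtype=float)
            if b.size == 1:
                b = np.array([0.0, 0.0, b.item()])
        acc = self.charge / self.mass * np.cross(vel, b)  # a = (q/m) v x B
        return np.concatenate([vel, acc])
    def rhs_1d(self, t, x):
        # Same equation of motion, assuming the field only has a z-component.
        return self.rhs(t, x)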
import cmsisdsp as dsp
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import entropy,tstd, tvar
from scipy.special import logsumexp
from scipy.linalg import cholesky,ldl,solve_triangular
from scipy import signal
def imToReal1D(a):
ar=np.zeros(np.array(a.shape) * 2)
ar[0::2]=a.real
ar[1::2]=a.imag
return(ar)
def realToIm1D(ar):
return(ar[0::2] + 1j * ar[1::2])
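# Illustrative round-trip check (not in the original test script): a complex vector
# survives the interleave / de-interleave conversion above.
_c = np.array([1.0 + 2.0j, -3.0 + 0.5j])
assert_allclose(realToIm1D(imToReal1D(_c)), _c)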
print("Max and AbsMax")
a=np.array([1.,-3.,4.,0.,-10.,8.])
i=dsp.arm_absmax_no_idx_f32(a)
print(i)
assert i==10.0
i=dsp.arm_absmax_no_idx_f64(a)
print(i)
assert i==10.0
r,i=dsp.arm_absmax_f64(a)
assert i==4
assert r==10.0
r,i=dsp.arm_max_f64(a)
assert i==5
assert r==8.0
i=dsp.arm_max_no_idx_f32(a)
print(i)
assert i==8
i=dsp.arm_max_no_idx_f64(a)
print(i)
assert i==8
print("Min and AbsMin")
a=np.array([1.,-3.,4.,0.5,-10.,8.])
i=dsp.arm_absmin_no_idx_f32(a)
print(i)
assert i==0.5
i=dsp.arm_absmin_no_idx_f64(a)
print(i)
assert i==0.5
r,i=dsp.arm_absmin_f64(a)
assert i==3
assert r==0.5
r,i=dsp.arm_min_f64(a)
assert i==4
assert r==-10
i=dsp.arm_min_no_idx_f32(a)
print(i)
assert i==-10
i=dsp.arm_min_no_idx_f64(a)
print(i)
assert i==-10
print("Barycenter")
a=[0] * 12
w=np.array([[2] * 12])
w[0,11]=3
a[0] =[0., 0., -0.951057]
a[1] =[0., 0., 0.951057]
a[2] =[-0.850651, 0., -0.425325]
a[3] =[0.850651, 0., 0.425325]
a[4] =[0.688191, -0.5, -0.425325]
a[5] =[0.688191, 0.5, -0.425325]
a[6] =[-0.688191, -0.5, 0.425325]
a[7] =[-0.688191, 0.5, 0.425325]
a[8] =[-0.262866, -0.809017, -0.425325]
a[9] =[-0.262866, 0.809017, -0.425325]
a[10]=[0.262866, -0.809017, 0.425325]
a[11]=[0.262866, 0.809017, 0.425325]
scaled=a * w.T
ref=np.sum(scaled,axis=0)/np.sum(w)
print(ref)
result=dsp.arm_barycenter_f32(np.array(a).reshape(12*3),w.reshape(12),12,3)
print(result)
assert_allclose(ref,result,1e-6)
print("Weighted sum")
nb=10
s = np.random.randn(nb)
w = np.random.randn(nb)
ref=np.dot(s,w)/np.sum(w)
print(ref)
res=dsp.arm_weighted_sum_f32(s,w)
print(res)
assert_allclose(ref,res,2e-5)
print("Entropy")
s = np.abs(np.random.randn(nb))
s = s / np.sum(s)
ref=entropy(s)
print(ref)
res=dsp.arm_entropy_f32(s)
print(res)
assert_allclose(ref,res,1e-6)
res=dsp.arm_entropy_f64(s)
print(res)
assert_allclose(ref,res,1e-10)
print("Kullback-Leibler")
sa = np.abs(np.random.randn(nb))
sa = sa / np.sum(sa)
sb = np.abs(np.random.randn(nb))
sb = sb / np.sum(sb)
ref=entropy(sa,sb)
print(ref)
res=dsp.arm_kullback_leibler_f32(sa,sb)
print(res)
assert_allclose(ref,res,1e-6)
res=dsp.arm_kullback_leibler_f64(sa,sb)
print(res)
assert_allclose(ref,res,1e-10)
print("Logsumexp")
s = np.abs(np.random.randn(nb))
s = s / np.sum(s)
ref=logsumexp(s)
print(ref)
res=dsp.arm_logsumexp_f32(s)
print(res)
assert_allclose(ref,res,1e-6)
print("Logsumexp dot prod")
sa = np.abs(np.random.randn(nb))
sa = sa / np.sum(sa)
sb = np.abs(np.random.randn(nb))
sb = sb / np.sum(sb)
d = 0.001
# It is a probability, so it must be in [0,1]
# but restricted to (d,1] so that the log exists
sa = (1-d)*sa + d
sb = (1-d)*sb + d
ref=np.log(np.dot(sa,sb))
print(ref)
sa = np.log(sa)
sb = np.log(sb)
res=dsp.arm_logsumexp_dot_prod_f32(sa,sb)
print(res)
assert_allclose(ref,res,3e-6)
print("vexp")
sa = np.random.randn(nb)
ref = np.exp(sa)
print(ref)
res=dsp.arm_vexp_f32(sa)
print(res)
assert_allclose(ref,res,1e-6)
res=dsp.arm_vexp_f64(sa)
print(res)
assert_allclose(ref,res,1e-10)
print("vlog")
sa = np.abs(np.random.randn(nb)) + 0.001
ref = np.log(sa)
print(ref)
res=dsp.arm_vlog_f32(sa)
print(res)
assert_allclose(ref,res,2e-5,1e-5)
res=dsp.arm_vlog_f64(sa)
print(res)
assert_allclose(ref,res,2e-9,1e-9)
print("Cholesky")
a=np.array([[4,12,-16],[12,37,-43],[-16,-43,98]])
ref=cholesky(a,lower=True)
print(ref)
status,res=dsp.arm_mat_cholesky_f32(a)
print(res)
assert_allclose(ref,res,1e-6,1e-6)
status,res=dsp.arm_mat_cholesky_f64(a)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("LDLT")
def swaprow(m,k,j):
tmp = np.copy(m[j,:])
m[j,:] = np.copy(m[k,:])
m[k,:] = tmp
return(m)
# F32 test
status,resl,resd,resperm=dsp.arm_mat_ldlt_f32(a)
n=3
p=np.identity(n)
for k in range(0,n):
p = swaprow(p,k,resperm[k])
res=resl.dot(resd).dot(resl.T)
permutedSrc=p.dot(a).dot(p.T)
print(res)
print(permutedSrc)
assert_allclose(permutedSrc,res,1e-5,1e-5)
# F64 test
print("LDLT F64")
status,resl,resd,resperm=dsp.arm_mat_ldlt_f64(a)
n=3
p=np.identity(n)
for k in range(0,n):
p = swaprow(p,k,resperm[k])
res=resl.dot(resd).dot(resl.T)
permutedSrc=p.dot(a).dot(p.T)
print(res)
print(permutedSrc)
assert_allclose(permutedSrc,res,1e-9,1e-9)
print("Solve lower triangular")
a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
b = np.array([[4,2,4,2],[8,4,8,4]]).T
x = solve_triangular(a, b,lower=True)
print(a)
print(b)
print(x)
b = np.array([[4,2,4,2],[8,4,8,4]]).T
status,res=dsp.arm_mat_solve_lower_triangular_f32(a,b)
print(res)
assert_allclose(x,res,1e-5,1e-5)
b = np.array([[4,2,4,2],[8,4,8,4]]).T
status,res=dsp.arm_mat_solve_lower_triangular_f64(a,b)
print(res)
assert_allclose(x,res,1e-9,1e-9)
print("Solve upper triangular")
a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
b = np.array([[4,2,4,2],[8,4,8,4]]).T
x = solve_triangular(a.T, b,lower=False)
print(a.T)
print(b)
print(x)
b = np.array([[4,2,4,2],[8,4,8,4]]).T
status,res=dsp.arm_mat_solve_upper_triangular_f32(a.T,b)
print(res)
assert_allclose(x,res,1e-5,1e-5)
b = np.array([[4,2,4,2],[8,4,8,4]]).T
status,res=dsp.arm_mat_solve_upper_triangular_f64(a.T,b)
print(res)
assert_allclose(x,res,1e-9,1e-9)
print("Mat mult f64")
a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
b = np.array([[4,2,4,2],[8,4,8,4]]).T
ref =a.dot(b)
print(ref)
status,res = dsp.arm_mat_mult_f64(a,b)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("mat sub f64")
a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
b = a.T
ref = a - b
print(ref)
status,res = dsp.arm_mat_sub_f64(a,b)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("abs f64")
s = np.random.randn(nb)
ref = np.abs(s)
res=dsp.arm_abs_f64(s)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("add f64")
sa = np.random.randn(nb)
sb = np.random.randn(nb)
ref = sa + sb
res=dsp.arm_add_f64(sa,sb)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("sub f64")
sa = np.random.randn(nb)
sb = np.random.randn(nb)
ref = sa - sb
res=dsp.arm_sub_f64(sa,sb)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("dot prod f64")
sa = np.random.randn(nb)
sb = np.random.randn(nb)
ref = sa.dot(sb)
res=dsp.arm_dot_prod_f64(sa,sb)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("mult f64")
sa = np.random.randn(nb)
sb = np.random.randn(nb)
ref = sa * sb
res=dsp.arm_mult_f64(sa,sb)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("negate f64")
sa = np.random.randn(nb)
ref = -sa
res=dsp.arm_negate_f64(sa)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("offset f64")
sa = np.random.randn(nb)
ref = sa + 0.1
res=dsp.arm_offset_f64(sa,0.1)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("scale f64")
sa = np.random.randn(nb)
ref = sa * 0.1
res=dsp.arm_scale_f64(sa,0.1)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("mean f64")
sa = np.random.randn(nb)
ref = np.mean(sa)
res=dsp.arm_mean_f64(sa)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("power f64")
sa = np.random.randn(nb)
ref = np.sum(sa * sa)
res=dsp.arm_power_f64(sa)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("std f64")
sa = np.random.randn(nb)
ref = tstd(sa)
res=dsp.arm_std_f64(sa)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("variance f64")
sa = np.random.randn(nb)
ref = tvar(sa)
res=dsp.arm_var_f64(sa)
print(ref)
print(res)
assert_allclose(ref,res,1e-10,1e-10)
print("fill f64")
nb=20
ref = np.ones(nb)*4.0
res = dsp.arm_fill_f64(4.0,nb)
|
assert_allclose(ref,res,1e-10,1e-10)
|
numpy.testing.assert_allclose
|
# -*- coding: utf-8 -*-
'''
author: ysoftman
python version : 3.x
desc : differentiation and gradient functions
'''
# pip3 install numpy matplotlib
import numpy as np
import matplotlib.pylab as plt
# for 3d graph
from mpl_toolkits.mplot3d import axes3d
from activation_function import graph
# Numerical differentiation
# Approximate the derivative of f at an arbitrary x from the local rate of change.
def numerical_differentiation(f, x):
h = 1e-4  # 1e-4; a much smaller h gets rounded to 0 by floating-point arithmetic
# Approximate the tangent (rate of change) of f from the two points x+h and x-h
return (f(x + h) - f(x - h)) / (2 * h)
def function_1(x):
return 0.01 * x**2 + 0.1 * x
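# Illustrative check (not in the original file): the exact derivative of function_1 is
# 0.02 * x + 0.1, so the numerical estimate at x = 5 should be very close to 0.2.
print(numerical_differentiation(function_1, 5.0))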
def function_2(x):
return x[0]**2 + x[1]**2
def function_3(x):
if x.ndim == 1:
return np.sum(x**2)
else:
return np.sum(x**2, axis=1)
def function_4(x1):
return x1**2 + 4.0**2
def function_5(x2):
return 3.0**2 + x2**2
# Compute the gradient
def numerical_gradient(f, x):
h = 1e-4  # 1e-4; a much smaller h gets rounded to 0 by floating-point arithmetic
grad =
|
np.zeros_like(x)
|
numpy.zeros_like
|
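# Illustrative sketch (an assumption, not the dataset's ground-truth continuation of the
# snippet above): a numerical gradient is typically completed by applying the same
# central-difference idea to each coordinate in turn. x is assumed to be a 1-d float array.
def numerical_gradient_sketch(f, x):
    h = 1e-4
    grad = np.zeros_like(x)
    for idx in range(x.size):
        tmp = x[idx]
        x[idx] = tmp + h
        fxh1 = f(x)  # f evaluated with coordinate idx shifted by +h
        x[idx] = tmp - h
        fxh2 = f(x)  # f evaluated with coordinate idx shifted by -h
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp  # restore the original value
    return grad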
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 16:02:02 2018
@author: gregz
"""
import matplotlib
matplotlib.use('agg')
import argparse as ap
import matplotlib.pyplot as plt
import numpy as np
import os.path as op
import sys
import cosmics
from astropy.convolution import Gaussian2DKernel, Gaussian1DKernel, convolve
from astropy.io import fits
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Moffat2D
from astropy.table import Table
from astropy.visualization import AsinhStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from copy import copy
from fiber_utils import bspline_x0
from input_utils import setup_logging
from photutils import detect_sources
from reducelrs2 import ReduceLRS2
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.gaussian_process.kernels import ConstantKernel
from sklearn.gaussian_process import GaussianProcessRegressor
from utils import biweight_location, biweight_midvariance
from wave_utils import get_new_wave, get_red_wave, get_single_shift
def get_script_path():
return op.dirname(op.realpath(sys.argv[0]))
DIRNAME = get_script_path()
parser = ap.ArgumentParser(add_help=True)
parser.add_argument("-f", "--filename",
help='''Filename that contains list of files''',
type=str, default=None)
parser.add_argument("-s", "--side",
help='''blue for LRS2-B and red for LRS2-R''',
type=str, default='blue')
parser.add_argument("-rc", "--recalculate_wavelength",
help='''recalculate_wavelength''',
action="count", default=0)
parser.add_argument("-em", "--emission",
help='''Find emission line object?''',
action="count", default=0)
parser.add_argument("-es", "--extract_side",
help='''blue for LRS2-B and red for LRS2-R''',
type=str, default='orange')
parser.add_argument("-we", "--wave_extract",
help='''blue for LRS2-B and red for LRS2-R''',
type=float, default=None)
args = parser.parse_args(args=None)
args.log = setup_logging('combine_amp_reductions')
attrs = ['filename', 'side']
for attr in attrs:
if getattr(args, attr) is None:
args.log.error('Need a "--%s" argument.' % attr)
sys.exit(1)
args.side = args.side.lower()
def make_avg_spec(wave, spec, binsize=35, knots=None):
if knots is None:
knots = wave.shape[1]
ind = np.argsort(wave.ravel())
N, D = wave.shape
wchunks = np.array_split(wave.ravel()[ind],
N * D / binsize)
schunks = np.array_split(spec.ravel()[ind],
N * D / binsize)
nwave = np.array([np.mean(chunk) for chunk in wchunks])
B, c = bspline_x0(nwave, nknots=knots)
nspec = np.array([biweight_location(chunk) for chunk in schunks])
sol = np.linalg.lstsq(c, nspec)[0]
smooth = np.dot(c, sol)
nwave, nind = np.unique(nwave, return_index=True)
return nwave, smooth[nind]
def safe_division(num, denom, eps=1e-8, fillval=0.0):
good = np.isfinite(denom) * (np.abs(denom) > eps)
div = num * 0.
if num.ndim == denom.ndim:
div[good] = num[good] / denom[good]
div[~good] = fillval
else:
div[:, good] = num[:, good] / denom[good]
div[:, ~good] = fillval
return div
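# Illustrative check (not in the original script): entries with a near-zero denominator
# fall back to fillval instead of producing inf or nan.
print(safe_division(np.array([1.0, 2.0]), np.array([2.0, 0.0])))  # -> [0.5 0. ]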
def rectify(wave, spec, lims, fac=2.5):
N, D = wave.shape
rect_wave = np.linspace(lims[0], lims[1], int(D*fac))
rect_spec = np.zeros((N, len(rect_wave)))
for i in np.arange(N):
dw = np.diff(wave[i])
dw = np.hstack([dw[0], dw])
I = interp1d(wave[i], spec[i] / dw, kind='quadratic',
bounds_error=False, fill_value=-999.)
rect_spec[i, :] = I(rect_wave)
return rect_wave, rect_spec
def gather_sn_fibers(fibconv, noise, cols):
hightolow = np.argsort(np.median(fibconv[:, cols], axis=1))[::-1]
s = 0.
ss = np.zeros((len(cols),))
nn = noise[cols]
inds = []
for ind in hightolow:
news = fibconv[ind, cols] + ss
newn = np.sqrt(nn**2 + noise[cols]**2)
rat = np.median(news / newn)
if rat > (s+0.5):
nn = newn
ss = news
s = rat
inds.append(ind)
else:
continue
return inds, s
def find_centroid(image, x, y, B):
G = Moffat2D()
G.alpha.value = 3.5
G.alpha.fixed = True
fit = LevMarLSQFitter()(G, x, y, image)
signal_to_noise = fit.amplitude.value / biweight_midvariance(image)
d = np.sqrt((x - fit.x_0.value)**2 + (y - fit.y_0.value)**2)
ratio = fit(x, y) / B
ind = np.argsort(ratio)
dthresh = np.interp(.01, ratio[ind], d[ind])
return (fit.x_0.value, fit.y_0.value, fit.alpha.value, fit.gamma.value,
fit.fwhm, signal_to_noise, dthresh)
def build_weight_matrix(x, y, sig=1.5):
d = np.sqrt((x - x[:, np.newaxis])**2 + (y - y[:, np.newaxis])**2)
G =
|
np.exp(-0.5 * (d / sig)**2)
|
numpy.exp
|
# -*- coding: UTF-8 -*-
"""
@version: 2.0
@author: Jonah
@file: features.py
@Created time: 2020/12/15 00:00
@Last Modified: 2021/12/24 21:59
"""
from plot_format import plot_norm
from collections import Counter
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import time
from tqdm import tqdm
import array
import csv
import sqlite3
from kmeans import KernelKMeans, ICA
from utils import *
from wave_freq import *
import warnings
from matplotlib.pylab import mpl
from plotwindow import PlotWindow
from scipy.signal import savgol_filter
warnings.filterwarnings("ignore")
mpl.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
class Features:
def __init__(self, color_1, color_2, time, trai, status, output, device):
self.color_1 = color_1
self.color_2 = color_2
self.Time = time
self.TRAI = trai
self.convert = lambda x, a, b: pow(x, a) * pow(10, b)
self.status = status
self.output = output
self.device = device
def __cal_linear_interval(self, tmp, interval):
"""
Use linear binning: get the first value of each order of magnitude and the bin width within each order
:param tmp: Energy/Amplitude/Duration in order of magnitude
:param interval: Number of bins in each order of magnitude
:return:
"""
tmp_max = int(max(tmp))
tmp_min = int(min(tmp))
mid = []
if tmp_min <= 0:
inter = [0] + [pow(10, i) for i in range(len(str(tmp_max)))]
else:
inter = [pow(10, i) for i in range(len(str(tmp_min)) - 1, len(str(tmp_max)))]
for idx in range(len(inter)):
try:
mid.extend([(inter[idx + 1] - inter[idx]) / interval])
except IndexError:
mid.extend([9 * inter[idx] / interval])
return inter, mid
def __cal_log_interval(self, tmp):
"""
Use logarithmic binning: get the exponents spanning each order of magnitude
:param tmp: Energy/Amplitude/Duration in order of magnitude
:return:
"""
tmp_min = math.floor(np.log10(min(tmp)))
tmp_max = math.ceil(np.log10(max(tmp)))
inter = [i for i in range(tmp_min, tmp_max + 1)]
return inter
def __cal_negtive_interval(self, res, interval):
"""
:param res:
:param interval:
:return:
"""
tmp = sorted(np.array(res))
tmp_min, tmp_max = math.floor(np.log10(min(tmp))), math.ceil(np.log10(max(tmp)))
inter = [pow(10, i) for i in range(tmp_min, tmp_max + 1)]
mid = [interval * pow(10, i) for i in range(tmp_min + 1, tmp_max + 2)]
return inter, mid
def __cal_linear(self, tmp, inter, mid, interval_num, idx=0):
"""
Calculate the probability density value at linear interval
:param tmp: Energy/Amplitude/Duration in order of magnitude
:param inter: The first number of each order of magnitude
:param mid: Bin spacing per order of magnitude
:param interval_num: Number of bins divided in each order of magnitude
:param idx:
:return:
"""
# Initialize the x coordinates (bin edges)
x = np.array([])
for i in inter:
if i != 0:
x = np.append(x, np.linspace(i, i * 10, interval_num, endpoint=False))
else:
x = np.append(x, np.linspace(i, 1, interval_num, endpoint=False))
# Initialize the y coordinates (counts per bin)
y = np.zeros(x.shape[0])
for i, n in Counter(tmp).items():
while True:
try:
if x[idx] <= i < x[idx + 1]:
y[idx] += n
break
except IndexError:
if x[idx] <= i:
y[idx] += n
break
idx += 1
# Filter out empty bins and compute the probability distribution values
x, y = x[y != 0], y[y != 0]
xx = np.zeros(x.shape[0])
yy = y / sum(y)
# Use the bin midpoint as the x coordinate of each segment
for idx in range(len(x) - 1):
xx[idx] = (x[idx] + x[idx + 1]) / 2
xx[-1] = x[-1] + pow(10, len(str(int(x[-1])))) * (0.9 / interval_num) / 2
# Compute the width of each bin to obtain the probability density values
interval = []
for i, j in enumerate(mid):
try:
# num = len(np.intersect1d(np.where(inter[i] <= xx)[0],
# np.where(xx < inter[i + 1])[0]))
num = len(np.where((inter[i] <= xx) & (xx < inter[i + 1]))[0])
interval.extend([j] * num)
except IndexError:
num = len(np.where(inter[i] <= xx)[0])
interval.extend([j] * num)
yy = yy / np.array(interval)
# # Take logs to obtain a linear relationship
# log_xx = np.log10(xx)
# log_yy = np.log10(yy)
# fit = np.polyfit(log_xx, log_yy, 1)
# alpha = abs(fit[0])
# fit_x = np.linspace(min(log_xx), max(log_xx), 100)
# fit_y = np.polyval(fit, fit_x)
return xx, yy
def __cal_log(self, tmp, inter, interval_num, idx=0):
"""
Calculate the probability density value at logarithmic interval
:param tmp: Energy/Amplitude/Duration in order of magnitude
:param inter: The first number of each order of magnitude
:param interval_num: Number of bins divided in each order of magnitude
:param idx:
:return:
"""
x, xx, interval = np.array([]), np.array([]), np.array([])
for i in inter:
logspace = np.logspace(i, i + 1, interval_num, endpoint=False)
tmp_inter = [logspace[i + 1] - logspace[i] for i in range(len(logspace) - 1)]
tmp_xx = [(logspace[i + 1] + logspace[i]) / 2 for i in range(len(logspace) - 1)]
tmp_inter.append(10 * logspace[0] - logspace[-1])
tmp_xx.append((10 * logspace[0] + logspace[-1]) / 2)
x = np.append(x, logspace)
interval = np.append(interval, np.array(tmp_inter))
xx = np.append(xx, np.array(tmp_xx))
y = np.zeros(x.shape[0])
for i, n in Counter(tmp).items():
while True:
try:
if x[idx] <= i < x[idx + 1]:
y[idx] += n
break
except IndexError:
if x[idx] <= i:
y[idx] += n
break
idx += 1
xx, y, interval = xx[y != 0], y[y != 0], interval[y != 0]
yy = y / (sum(y) * interval)
return xx, yy
def __cal_N_Naft(self, tmp, eny_lim):
N_ms, N_as = 0, 0
main_peak = np.where(eny_lim[0] < tmp)[0]
if len(main_peak):
for i in range(main_peak.shape[0] - 1):
if main_peak[i] >= eny_lim[1]:
continue
elif main_peak[i + 1] - main_peak[i] == 1:
N_ms += tmp[main_peak[i]]
continue
N_ms += tmp[main_peak[i]]
N_as += np.max(tmp[main_peak[i] + 1:main_peak[i + 1]])
if main_peak[-1] < tmp.shape[0] - 1:
N_as += np.max(tmp[main_peak[-1] + 1:])
N_ms += tmp[main_peak[-1]]
return N_ms + N_as, N_as
def __cal_OmiroLaw_helper(self, tmp, eny_lim):
res = [[] for _ in range(len(eny_lim))]
for idx in range(len(eny_lim)):
main_peak = np.where((eny_lim[idx][0] < tmp) & (tmp < eny_lim[idx][1]))[0]
if len(main_peak):
for i in range(main_peak.shape[0] - 1):
for j in range(main_peak[i] + 1, main_peak[i + 1] + 1):
if tmp[j] < eny_lim[idx][1]:
k = self.Time[j] - self.Time[main_peak[i]]
res[idx].append(k)
else:
break
if main_peak[-1] < tmp.shape[0] - 1:
for j in range(main_peak[-1] + 1, tmp.shape[0]):
k = self.Time[j] - self.Time[main_peak[-1]]
res[idx].append(k)
return res
def cal_PDF(self, tmp, xlabel, ylabel, LIM=None, INTERVAL_NUM=None, COLOR='black', FIT=False, bin_method='log'):
"""
Calculate Probability Density Distribution Function
:param tmp: Energy/Amplitude/Duration in order of magnitude of original data
:param xlabel: 'Amplitude (μV)', 'Duration (μs)', 'Energy (aJ)'
:param ylabel: 'PDF (A)', 'PDF (D)', 'PDF (E)'
:param LIM: Use in function fitting, support specific values or indexes,
value: [0, float('inf')], [100, 900], ...
index: [0, None], [11, -2], ...
:param INTERVAL_NUM: Number of bins divided in each order of magnitude
:param COLOR: Color when drawing with original data, population I and population II respectively
:param FIT: Whether to fit parameters, support True or False
:param bin_method: Method to divide the bin, Support linear partition and logarithmic partition
:return:
"""
if INTERVAL_NUM is None:
INTERVAL_NUM = 6
if LIM is None:
LIM = [0, None]
plotWindow = PlotWindow('PDF--%s' % xlabel, 6, 3.9)
fig = plotWindow.static_canvas.figure
fig.subplots_adjust(left=0.133, bottom=0.179, right=0.975, top=0.962)
fig.text(0.15, 0.2, self.status, fontdict={'family': 'Arial', 'fontweight': 'bold', 'fontsize': 12})
ax = fig.add_subplot()
if bin_method == 'linear':
inter, mid = self.__cal_linear_interval(tmp, INTERVAL_NUM)
xx, yy = self.__cal_linear(tmp, inter, mid, INTERVAL_NUM)
elif bin_method == 'log':
inter = self.__cal_log_interval(tmp)
xx, yy = self.__cal_log(tmp, inter, INTERVAL_NUM)
if FIT:
fit = np.polyfit(np.log10(xx[LIM[0]:LIM[1]]), np.log10(yy[LIM[0]:LIM[1]]), 1)
alpha, b = fit[0], fit[1]
fit_x = np.linspace(xx[LIM[0]], xx[-1], 100)
fit_y = self.convert(fit_x, alpha, b)
ax.plot(fit_x, fit_y, '-.', lw=1, color=COLOR)
ax.loglog(xx, yy, '.', marker='.', markersize=8, color=COLOR, label='slope-{:.2f}'.format(abs(alpha)))
plot_norm(ax, xlabel, ylabel, legend_loc='upper right', legend=True)
else:
ax.loglog(xx, yy, '.', marker='.', markersize=8, color=COLOR)
plot_norm(ax, xlabel, ylabel, legend=False)
with open('/'.join([self.output, self.status]) + '_%s.txt' % ylabel, 'w') as f:
f.write('{}, {}\n'.format(xlabel, ylabel))
for i, j in zip(xx, yy):
f.write('{}, {}\n'.format(i, j))
return plotWindow
def cal_CCDF(self, tmp, xlabel, ylabel, LIM=None, COLOR='black', FIT=False):
"""
Calculate Complementary Cumulative Distribution Function
:param tmp: Energy/Amplitude/Duration in order of magnitude of original data
:param xlabel: 'Amplitude (μV)', 'Duration (μs)', 'Energy (aJ)'
:param ylabel: 'CCDF (A)', 'CCDF (D)', 'CCDF (E)'
:param LIM: Use in function fitting, support specific values or indexes,
value: [0, float('inf')], [100, 900], ...
index: [0, None], [11, -2], ...
:param FIT: Whether to fit parameters, support True or False
:param COLOR: Color when drawing with original data, population I and population II respectively
:return:
"""
if LIM is None:
LIM = [0, float('inf')]
N = len(tmp)
plotWindow = PlotWindow('CCDF--%s' % xlabel, 6, 3.9)
fig = plotWindow.static_canvas.figure
fig.subplots_adjust(left=0.133, bottom=0.179, right=0.975, top=0.962)
fig.text(0.15, 0.2, self.status, fontdict={'family': 'Arial', 'fontweight': 'bold', 'fontsize': 12})
ax = fig.add_subplot()
xx, yy = [], []
for i in range(N - 1):
xx.append(np.mean([tmp[i], tmp[i + 1]]))
yy.append((N - i + 1) / N)
if FIT:
xx, yy = np.array(xx), np.array(yy)
fit_lim = np.where((xx > LIM[0]) & (xx < LIM[1]))[0]
fit = np.polyfit(np.log10(xx[fit_lim[0]:fit_lim[-1]]), np.log10(yy[fit_lim[0]:fit_lim[-1]]), 1)
alpha, b = fit[0], fit[1]
fit_x = np.linspace(xx[fit_lim[0]], xx[fit_lim[-1]], 100)
fit_y = self.convert(fit_x, alpha, b)
ax.plot(fit_x, fit_y, '-.', lw=1, color=COLOR)
ax.loglog(xx, yy, color=COLOR, label='slope-{:.2f}'.format(abs(alpha)))
plot_norm(ax, xlabel, ylabel, legend_loc='upper right')
else:
ax.loglog(xx, yy, color=COLOR)
plot_norm(ax, xlabel, ylabel, legend=False)
with open('/'.join([self.output, self.status]) + '_CCDF(%s).txt' % xlabel[0], 'w') as f:
f.write('{}, {}\n'.format(xlabel, ylabel))
for i, j in zip(xx, yy):
f.write('{}, {}\n'.format(i, j))
return plotWindow
def cal_ML(self, tmp, xlabel, ylabel, COLOR='black', ECOLOR=None):
"""
Calculate the maximum-likelihood estimate of the power-law exponent as a function of the lower cutoff
:param tmp: Energy/Amplitude/Duration in order of magnitude of original data
:param xlabel: 'Amplitude (μV)', 'Duration (μs)', 'Energy (aJ)'
:param ylabel: 'ML (A)', 'ML (D)', 'ML (E)'
:param COLOR: Color when drawing with original data, population I and population II respectively
:param ECOLOR: Line color of error bar, corresponding parameter COLOR
:return:
"""
if not ECOLOR:
ECOLOR = [0.7, 0.7, 0.7]
N = len(tmp)
plotWindow = PlotWindow('ML--%s' % xlabel, 6, 3.9)
fig = plotWindow.static_canvas.figure
fig.subplots_adjust(left=0.131, bottom=0.179, right=0.975, top=0.944)
fig.text(0.96, 0.2, self.status, fontdict={'family': 'Arial', 'fontweight': 'bold', 'fontsize': 12},
horizontalalignment="right")
ax = fig.add_subplot()
ax.set_xscale("log", nonposx='clip')
ML_y, Error_bar = [], []
for j in range(N):
valid_x = sorted(tmp)[j:]
E0 = valid_x[0]
Sum = np.sum(np.log(valid_x / E0))
N_prime = N - j
alpha = 1 + N_prime / Sum
error_bar = (alpha - 1) / pow(N_prime, 0.5)
ML_y.append(alpha)
Error_bar.append(error_bar)
ax.errorbar(sorted(tmp), ML_y, yerr=Error_bar, fmt='o', ecolor=ECOLOR, color=COLOR, elinewidth=1, capsize=2, ms=3)
plot_norm(ax, xlabel, ylabel, y_lim=[1.25, 3], legend=False)
with open('/'.join([self.output, self.status]) + '_ML(%s).txt' % xlabel[0], 'w') as f:
f.write('{}, {}, Error bar\n'.format(xlabel, ylabel))
for i, j, k in zip(tmp, ML_y, Error_bar):
f.write('{}, {}, {}\n'.format(i, j, k))
return plotWindow
def cal_contour(self, tmp_1, tmp_2, xlabel, ylabel, x_lim, y_lim, size_x=40, size_y=40, method='linear_bin',
padding=False, colorbar=False, clabel=False):
tmp_1, tmp_2 = 20 * np.log10(tmp_1), 20 *
|
np.log10(tmp_2)
|
numpy.log10
|
import numpy as np
from impedance.models.circuits.fitting import rmse
from impedance.models.circuits.elements import circuit_elements, K # noqa
def linKK(f, Z, c=0.85, max_M=50, fit_type='real', add_cap=False):
""" A method for implementing the Lin-KK test for validating linearity [1]
Parameters
----------
f: np.ndarray
measured frequencies
Z: np.ndarray of complex numbers
measured impedances
c: np.float
cutoff for mu
max_M: int
the maximum number of RC elements
fit_type: str
selects which components of data are fit ('real', 'imag', or
'complex')
add_cap: bool
option to add a serial capacitance that helps validate data with no
low-frequency intercept
Returns
-------
M: int
number of RC elements used
mu: np.float
under- or over-fitting measure
Z_fit: np.ndarray of complex numbers
impedance of fit at input frequencies
resids_real: np.ndarray
real component of the residuals of the fit at input frequencies
resids_imag: np.ndarray
imaginary component of the residuals of the fit at input frequencies
Notes
-----
The lin-KK method from Schönleber et al. [1] is a quick test for checking the
validity of EIS data. The validity of an impedance spectrum is analyzed by
its reproducibility by a Kramers-Kronig (KK) compliant equivalent circuit.
In particular, the model used in the lin-KK test is an ohmic resistor,
:math:`R_{Ohm}`, and :math:`M` RC elements.
.. math::
\\hat Z = R_{Ohm} + \\sum_{k=1}^{M} \\frac{R_k}{1 + j \\omega \\tau_k}
The :math:`M` time constants, :math:`\\tau_k`, are distributed
logarithmically,
.. math::
\\tau_1 = \\frac{1}{\\omega_{max}} ; \\tau_M = \\frac{1}{\\omega_{min}} ;
\\tau_k = 10^{\\log{(\\tau_{min})} + \\frac{k-1}{M-1}\\log{\\left(\\frac{\\tau_{max}}{\\tau_{min}}\\right)}}
and are not fit during the test (only :math:`R_{Ohm}` and :math:`R_{k}`
are free parameters).
In order to prevent under- or over-fitting, Schönleber et al. propose using
the ratio of positive resistor mass to negative resistor mass as a metric
for finding the optimal number of RC elements.
.. math::
\\mu = 1 - \\frac{\\sum_{R_k \\ge 0} |R_k|}{\\sum_{R_k < 0} |R_k|}
The argument :code:`c` defines the cutoff value for :math:`\\mu`. The
algorithm starts at :code:`M = 3` and iterates up to :code:`max_M` until a
:math:`\\mu < c` is reached. The default of 0.85 is simply a heuristic
value based off of the experience of Schönleber et al., but a lower value
may give better results.
If the argument :code:`c` is :code:`None`, then the automatic determination
of RC elements is turned off and the solution is calculated for
:code:`max_M` RC elements. This manual mode should be used with caution as
under- and over-fitting should be avoided.
[1] <NAME> et al. A Method for Improving the Robustness of
linear Kramers-Kronig Validity Tests. Electrochimica Acta 131, 20–27 (2014)
`doi: 10.1016/j.electacta.2014.01.034
<https://doi.org/10.1016/j.electacta.2014.01.034>`_.
"""
if c is not None:
M = 0
mu = 1
while mu > c and M <= max_M:
M += 1
ts = get_tc_distribution(f, M)
elements, mu = fit_linKK(f, ts, M, Z, fit_type, add_cap)
if M % 10 == 0:
print(M, mu, rmse(eval_linKK(elements, ts, f), Z))
else:
M = max_M
ts = get_tc_distribution(f, M)
elements, mu = fit_linKK(f, ts, M, Z, fit_type, add_cap)
Z_fit = eval_linKK(elements, ts, f)
resids_real = residuals_linKK(elements, ts, Z, f, residuals='real')
resids_imag = residuals_linKK(elements, ts, Z, f, residuals='imag')
return M, mu, Z_fit, resids_real, resids_imag
def get_tc_distribution(f, M):
""" Returns the distribution of time constants for the linKK method """
t_max = 1/(2 * np.pi * np.min(f))
t_min = 1/(2 * np.pi * np.max(f))
ts = np.zeros(shape=(M,))
ts[0] = t_min
ts[-1] = t_max
if M > 1:
for k in range(2, M):
ts[k-1] = 10**(np.log10(t_min) +
((k-1)/(M-1))*np.log10(t_max/t_min))
return ts
def fit_linKK(f, ts, M, Z, fit_type='real', add_cap=False):
""" Fits the linKK model using linear regression
Parameters
----------
f: np.ndarray
measured frequencies
ts: np.ndarray
logarithmically spaced time constants of RC elements
M: int
the number of RC elements
Z: np.ndarray of complex numbers
measured impedances
fit_type: str
selects which components of data are fit ('real', 'imag', or
'complex')
add_cap: bool
option to add a serial capacitance that helps validate data with no
low-frequency intercept
Returns
-------
elements: np.ndarray
values of fit :math:`R_k` in RC elements and series :math:`R_0`,
L, and optionally C.
mu: np.float
under- or over-fitting measure
Notes
-----
Since we have a system of equations, :math:`Ax ~= b`, that's linear wrt
:math:`R_k`, we can fit the model by calculating the pseudo-inverse of A.
:math:`Ax` is our model fit, :math:`\\hat{Z}`, and :math:`b` is the
normalized real or imaginary component of the impedance data,
:math:`Re(Z)/|Z|` or :math:`Im(Z)/|Z|`, respectively.
:math:`\\hat{Z} = R_0 + \\sum^M_{k=1}(R_k / |Z|(1 + j * w * \\tau_k))`.
:math:`x` is an (M+1) :math:`\\times` 1 matrix where the first row
contains :math:`R_0` and subsequent rows contain :math:`R_k` values.
A is an N :math:`\\times` (M+1) matrix, where N is the number of data
points, and M is the number of RC elements.
Examples
--------
Fitting the real part of data, the first column of A contains
values of :math:`\\frac{1}{|Z|}`, the second column contains
:math:`Re(1 / |Z| (1 + j * w * \\tau_1))`, the third contains
:math:`Re(1 / |Z| (1 + j * w * \\tau_2))` and so on. The :math:`R_k` values
within the x matrix are found using :code:`numpy.linalg.pinv` when
fit_type = 'real' or 'imag'. When fit_type = 'complex' the coefficients are
found "manually" using :math:`r = ||A'x - b'||^2 + ||A''x - b'||^2`
according to Eq 14 of Schonleber [1].
[1] <NAME>. et al. A Method for Improving the Robustness of
linear Kramers-Kronig Validity Tests. Electrochimica Acta 131, 20–27 (2014)
`doi: 10.1016/j.electacta.2014.01.034
<https://doi.org/10.1016/j.electacta.2014.01.034>`_.
"""
w = 2 * np.pi * f
# Fitting model has M RC elements plus 1 series resistance and 1 series
# inductance
a_re = np.zeros((f.size, M+2))
a_im = np.zeros((f.size, M+2))
if add_cap:
a_re = np.zeros((f.size, M+3))
a_im = np.zeros((f.size, M+3))
# Column for series capacitance. Real part = 0.
a_im[:, -2] = - 1 / (w * np.abs(Z))
# Column for series resistance, R_o in model. Imaginary part = 0.
a_re[:, 0] = 1 / np.abs(Z)
# Column for series inductance to capture inevitable contributions from
# the measurement system. Real part = 0.
a_im[:, -1] = w / np.abs(Z)
# Columns for series RC elements
for i, tau in enumerate(ts):
a_re[:, i+1] = K([1, tau], f).real / np.abs(Z)
a_im[:, i+1] = K([1, tau], f).imag / np.abs(Z)
if fit_type == 'real':
elements = np.linalg.pinv(a_re).dot(Z.real / np.abs(Z))
# After fitting real part, need to use imaginary component of fit to
# find values of series inductance and capacitance
a_im = np.zeros((f.size, 2))
a_im[:, -1] = w /
|
np.abs(Z)
|
numpy.abs
|
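The Notes in fit_linKK above describe fitting the R_k values by applying a pseudo-inverse to the normalized system A x ≈ b. A minimal self-contained sketch of that idea on synthetic data (the frequencies, impedance model and time constants below are illustrative assumptions, not values from the source):
import numpy as np

f = np.logspace(-1, 4, 50)                       # assumed measurement frequencies [Hz]
w = 2 * np.pi * f
Z = 10 + 25 / (1 + 1j * w * 1e-3)                # synthetic impedance: R_0 plus one RC element
ts = np.array([1e-4, 1e-3, 1e-2])                # assumed RC time constants

A = np.zeros((f.size, ts.size + 1))
A[:, 0] = 1 / np.abs(Z)                          # column for the series resistance R_0
A[:, 1:] = (1 / (1 + 1j * np.outer(w, ts))).real / np.abs(Z)[:, None]
b = Z.real / np.abs(Z)                           # normalized real part of the data
elements = np.linalg.pinv(A).dot(b)              # R_0 followed by the fitted R_k values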
import numpy as np
#import dataset
AbsErr =
|
np.loadtxt("data/test/dataErrorTestAbsolute.csv",delimiter=",")
|
numpy.loadtxt
|
"""Module for the Graph abstract class."""
from abc import ABC
import matplotlib.pyplot as plt
import numpy as np
from styles.style import Style
from util.savable_graph import SavableGraph
class Graph(ABC):
"""Abstract base class for all kind of graphs."""
def __init__(self,
title=None,
size=(10.5, 6),
color_offset=0):
"""Initialize the Graph object."""
self._figsize = size
self._title = title
# TODO(CustomColorOrder) Allow custom ordering of the color palette.
self.style = Style.load_from_yaml('default_style.yaml')
self._gridColor = '#AAAAAA'
self._gridWidth = 1.0
self._hide_labels = False
self._xticks = None
self._yticks = None
self._spines = set(['bottom', 'left'])
self._xlimit = None
self._ylimit = None
# Label
self._xlabel_fn = None
self._xlabel = ''
self._ylabel = ''
self._label_color = '#222222'
def size(self, width, height):
"""Set the size of the figure."""
self._figsize = (width, height)
return self
# def title(self, title):
# """Set the title of the graph."""
# self._title = title
# return self
def hide_labels(self):
"""Hide the labels of the graph."""
self._hide_labels = True
return self
def xticks(self, ticks):
"""Set the ticks for the x-axis."""
self._xticks = ticks
return self
def yticks(self, ticks):
"""Set the ticks for the y-axis."""
self._yticks = ticks
return self
def spines(self, spines):
"""Define what spines to show.
Can be any number out of the following:
'bottom', 'top', 'left', 'right'
"""
self._spines = self._spines | set(spines)
return self
def xlim(self, xmin, xmax):
"""Set the limit for the x-axis."""
self._xlimit = (xmin, xmax)
return self
def ylim(self, ymin, ymax):
"""Set the limit for the y-axis."""
self._ylimit = (ymin, ymax)
return self
def xlabel(self, xlabel):
"""Set label for the x-axis."""
self._xlabel = xlabel
return self
def ylabel(self, ylabel):
"""Set label for the y-axis."""
self._ylabel = ylabel
return self
def labels(self, xlabel, ylabel):
"""Set labels for both x- and y-axis."""
self.xlabel(xlabel)
self.ylabel(ylabel)
return self
def xlabel_fn(self, fn):
"""Apply the given function to all labels on the x-axis.
Can for example be used when labels should be mapped to a dictionary.
"""
self._xlabel_fn = fn
return self
def label_color(self, color):
"""Set the color for the labels of both axes."""
self._label_color = color
return self
def legends(self, legend_labels, position=None):
"""."""
self._legend_labels = legend_labels
return self
def _prepare_plot(self, x_min, x_max, y_min, y_max):
plt.close() # ??
fig, ax = plt.subplots(figsize=self._figsize,
facecolor='white',
edgecolor='white')
ax.axes.tick_params(labelcolor=self._label_color, labelsize='10')
#
if self._xticks is not None:
xticks = self._xticks
else:
step_size = (x_max - x_min) / 6. # FIXME Round maybe?
step_size = max(step_size, 0.1) # Prevent 0 values.
xticks =
|
np.arange(x_min, x_max + step_size, step_size)
|
numpy.arange
|
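The Graph setters above all return self so that calls can be chained. A minimal standalone sketch of this pattern (TinyBuilder is an illustrative toy class, not part of the graph package):
class TinyBuilder:
    """Toy illustration of the return-self chaining used by Graph."""
    def __init__(self):
        self.settings = {}
    def size(self, width, height):
        self.settings['size'] = (width, height)
        return self
    def labels(self, xlabel, ylabel):
        self.settings['labels'] = (xlabel, ylabel)
        return self

cfg = TinyBuilder().size(8, 4).labels('epoch', 'loss').settings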
"""Central data base keeping track of positions, velocities, relative positions, and distances of all simulated fishes
"""
import math
import random
import numpy as np
from scipy.spatial.distance import cdist
import sys
U_LED_DX = 86 # [mm] leds x-distance on BlueBot
U_LED_DZ = 86 # [mm] leds z-distance on BlueBot
class Environment():
"""Simulated fish environment
Fish get their visible neighbors and corresponding relative positions and distances from here. Fish also update their own positions after moving in here. Environmental tracking data is used for simulation analysis.
"""
def __init__(self, pos, vel, fish_specs, arena):
# Arguments
self.pos = pos # x, y, z, phi; [no_robots X 4]
self.vel = vel # pos_dot
self.v_range = fish_specs[0] # visual range, [mm]
self.w_blindspot = fish_specs[1] # width of blindspot, [mm]
self.r_sphere = fish_specs[2] # radius of blocking sphere for occlusion, [mm]
self.n_magnitude = fish_specs[3] # visual noise magnitude, [% of distance]
self.arena_size = arena # x, y, z
# Parameters
self.no_robots = self.pos.shape[0]
self.no_states = self.pos.shape[1]
# Initialize robot states
self.init_states()
# Initialize tracking
self.init_tracking()
# Initialize LEDs
#self.leds_pos = [np.zeros((3,3))]*self.no_robots # empty init, filled with update_leds() below
#for robot in range(self.no_robots):
# self.update_leds(robot)
def log_to_file(self, filename):
"""Logs tracking data to file
"""
#np.savetxt('./logfiles/{}_data.txt'.format(filename), self.tracking, fmt='%.2f', delimiter=',')
np.savetxt('./logfiles/{}.txt'.format(filename), self.tracking, fmt='%.2f', delimiter=',')
def init_tracking(self):
"""Initializes tracking
"""
pos = np.reshape(self.pos, (1,self.no_robots*self.no_states))
vel = np.reshape(self.vel, (1,self.no_robots*self.no_states))
self.tracking = np.concatenate((pos,vel), axis=1)
self.updates = 0
def update_tracking(self):
"""Updates tracking after every fish took a turn
"""
pos = np.reshape(self.pos, (1,self.no_robots*self.no_states))
vel = np.reshape(self.vel, (1,self.no_robots*self.no_states))
current_state = np.concatenate((pos,vel), axis=1)
self.tracking = np.concatenate((self.tracking,current_state), axis=0)
def update_leds(self, source_index):
""" Updates the position of the three leds based on self.pos, which is the position of led1
"""
pos = self.pos[source_index,:3]
phi = self.pos[source_index,3]
x1 = pos[0]
x2 = x1
x3 = x1 + math.cos(phi)*U_LED_DX
y1 = pos[1]
y2 = y1
y3 = y1 + math.sin(phi)*U_LED_DX
z1 = pos[2]
z2 = z1 + U_LED_DZ
z3 = z1
self.leds_pos[source_index] = np.array([[x1, x2, x3],[y1, y2, y3],[z1, z2, z3]])
def init_states(self):
"""Initializes fish positions and velocities
"""
# Restrict initial positions to arena size
self.pos[:,0] = np.clip(self.pos[:,0], 0, self.arena_size[0])
self.pos[:,1] = np.clip(self.pos[:,1], 0, self.arena_size[1])
self.pos[:,2] = np.clip(self.pos[:,2], 0, self.arena_size[2])
# Initial relative positions
a_ = np.reshape(self.pos, (1, self.no_robots*self.no_states))
a = np.tile(a_, (self.no_robots,1))
b = np.tile(self.pos, (1,self.no_robots))
self.rel_pos = a - b # [4*no_robots X no_robots]
# Initial distances
self.dist = cdist(self.pos[:,:3], self.pos[:,:3], 'euclidean') # without phi; [no_robots X no_robots]
def update_states(self, source_id, pos, vel): # add noise
"""Updates a fish state and affected realtive positions and distances
"""
# Position and velocity
self.pos[source_id,0] = np.clip(pos[0], 0, self.arena_size[0])
self.pos[source_id,1] = np.clip(pos[1], 0, self.arena_size[1])
self.pos[source_id,2] = np.clip(pos[2], 0, self.arena_size[2])
self.pos[source_id,3] = pos[3]
self.vel[source_id,:] = vel
# Relative positions
pos_others = np.reshape(self.pos, (1,self.no_robots*self.no_states))
pos_self = np.tile(self.pos[source_id,:], (1,self.no_robots))
rel_pos = pos_others - pos_self
self.rel_pos[source_id,:] = rel_pos # row
rel_pos_ = np.reshape(rel_pos, (self.no_robots, self.no_states))
self.rel_pos[:,source_id*self.no_states:source_id*self.no_states+self.no_states] = -rel_pos_ # columns
# Relative distances
dist = np.linalg.norm(rel_pos_[:,:3], axis=1) # without phi
self.dist[source_id,:] = dist
self.dist[:,source_id] = dist.T
# Update LEDs
#self.update_leds(source_id)
# Update tracking
self.updates += 1
if self.updates >= self.no_robots:
self.updates = 0
self.update_tracking()
def get_robots(self, source_id, visual_noise=False):
"""Provides visible neighbors and relative positions and distances to a fish
"""
robots = set(range(self.no_robots)) # all robots
robots.discard(source_id) # discard self
rel_pos = np.reshape(self.rel_pos[source_id], (self.no_robots, self.no_states))
return (robots, rel_pos, self.dist[source_id])
# perfect vision here
'''
self.visual_range(source_id, robots)
self.blind_spot(source_id, robots, rel_pos)
self.occlusions(source_id, robots, rel_pos)
leds = self.calc_relative_leds(source_id, robots)
if self.n_magnitude: # no overwrites of self.rel_pos and self.dist
n_rel_pos, n_dist = self.visual_noise(source_id, rel_pos)
return (robots, n_rel_pos, n_dist, leds)
return (robots, rel_pos, self.dist[source_id], leds)
'''
def visual_range(self, source_id, robots):
"""Deletes fishes outside of visible range
"""
conn_drop = 0.005
candidates = robots.copy()
for robot in candidates:
d_robot = self.dist[source_id][robot]
x = conn_drop * (d_robot - self.v_range)
if x < -5:
sigmoid = 1
elif x > 5:
sigmoid = 0
else:
sigmoid = 1 / (1 + math.exp(x))
prob = random.random()
if sigmoid < prob:
robots.remove(robot)
def blind_spot(self, source_id, robots, rel_pos):
"""Omits fishes within the blind spot behind own body
"""
r_blockage = self.w_blindspot/2
phi = self.pos[source_id,3]
phi_xy = [math.cos(phi), math.sin(phi)]
mag_phi =
|
np.linalg.norm(phi_xy)
|
numpy.linalg.norm
|
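A tiny numeric illustration (with assumed values) of the relative-position layout built in init_states and update_states above: row i stacks the [x, y, z, phi] blocks of every robot minus robot i's own state, and cdist gives the pairwise distances:
import numpy as np
from scipy.spatial.distance import cdist

pos = np.array([[0., 0., 0., 0.],
                [10., 0., 0., 0.]])              # two robots, columns are x, y, z, phi
a = np.tile(pos.reshape(1, -1), (2, 1))          # every robot's state, repeated per row
b = np.tile(pos, (1, 2))                         # own state, repeated per column block
rel_pos = a - b                                  # shape (2, 8), as in init_states
dist = cdist(pos[:, :3], pos[:, :3], 'euclidean')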
"""Gym environment for block pushing tasks (2D Shapes and 3D Cubes)."""
import numpy as np
import utils
import gym
from gym import spaces
from gym.utils import seeding
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from PIL import Image
import skimage.draw
def square(r0, c0, width, im_size):
rr, cc = [r0, r0 + width, r0 + width, r0], [c0, c0, c0 + width, c0 + width]
return skimage.draw.polygon(rr, cc, im_size)
def triangle(r0, c0, width, im_size):
rr, cc = [r0, r0 + width, r0 + width], [c0 + width // 2, c0, c0 + width]
return skimage.draw.polygon(rr, cc, im_size)
def fig2rgb_array(fig):
fig.canvas.draw()
buffer = fig.canvas.tostring_rgb()
width, height = fig.canvas.get_width_height()
return np.fromstring(buffer, dtype=np.uint8).reshape(height, width, 3)
def render_cubes(positions, width):
voxels = np.zeros((width, width, width), dtype=bool)
colors = np.empty(voxels.shape, dtype=object)
cols = ['purple', 'green', 'orange', 'blue', 'brown']
for i, pos in enumerate(positions):
voxels[pos[0], pos[1], 0] = True
colors[pos[0], pos[1], 0] = cols[i]
fig = plt.figure()
ax = Axes3D(fig)
ax.w_zaxis.set_pane_color((0.5, 0.5, 0.5, 1.0))
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_zaxis.line.set_lw(0.)
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.voxels(voxels, facecolors=colors, edgecolor='k')
im = fig2rgb_array(fig)
plt.close(fig)
im = np.array( # Crop and resize
Image.fromarray(im[215:455, 80:570]).resize((50, 50), Image.ANTIALIAS))
return im / 255.
class BlockPushing(gym.Env):
"""Gym environment for block pushing task."""
def __init__(self, width=5, height=5, render_type='cubes', num_objects=5,
seed=None):
self.width = width
self.height = height
self.render_type = render_type
self.num_objects = num_objects
self.num_actions = 4 * self.num_objects # Move NESW
self.colors = utils.get_colors(num_colors=max(9, self.num_objects))
self.np_random = None
self.game = None
self.target = None
# Initialize to pos outside of env for easier collision resolution.
self.objects = [[-1, -1] for _ in range(self.num_objects)]
# If True, then check for collisions and don't allow two
# objects to occupy the same position.
self.collisions = True
self.action_space = spaces.Discrete(self.num_actions)
self.observation_space = spaces.Box(
low=0, high=1,
shape=(3, self.width, self.height),
dtype=np.float32
)
self.seed(seed)
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def render(self):
if self.render_type == 'grid':
im = np.zeros((3, self.width, self.height))
for idx, pos in enumerate(self.objects):
im[:, pos[0], pos[1]] = self.colors[idx][:3]
return im
elif self.render_type == 'circles':
im = np.zeros((self.width * 10, self.height * 10, 3), dtype=np.float32)
for idx, pos in enumerate(self.objects):
rr, cc = skimage.draw.circle(
pos[0] * 10 + 5, pos[1] * 10 + 5, 5, im.shape)
im[rr, cc, :] = self.colors[idx][:3]
return im.transpose([2, 0, 1])
elif self.render_type == 'shapes':
im = np.zeros((self.width * 10, self.height * 10, 3), dtype=np.float32)
for idx, pos in enumerate(self.objects):
if idx % 3 == 0:
rr, cc = skimage.draw.circle(
pos[0] * 10 + 5, pos[1] * 10 + 5, 5, im.shape)
im[rr, cc, :] = self.colors[idx][:3]
elif idx % 3 == 1:
rr, cc = triangle(
pos[0] * 10, pos[1] * 10, 10, im.shape)
im[rr, cc, :] = self.colors[idx][:3]
else:
rr, cc = square(
pos[0] * 10, pos[1] * 10, 10, im.shape)
im[rr, cc, :] = self.colors[idx][:3]
return im.transpose([2, 0, 1])
elif self.render_type == 'cubes':
im = render_cubes(self.objects, self.width)
return im.transpose([2, 0, 1])
def get_state(self):
im = np.zeros(
(self.num_objects, self.width, self.height), dtype=np.int32)
for idx, pos in enumerate(self.objects):
im[idx, pos[0], pos[1]] = 1
return im
def reset(self):
self.objects = [[-1, -1] for _ in range(self.num_objects)]
# Randomize object position.
for i in range(len(self.objects)):
# Resample to ensure objects don't fall on same spot.
while not self.valid_pos(self.objects[i], i):
self.objects[i] = [
np.random.choice(
|
np.arange(self.width)
|
numpy.arange
|
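A small sketch of how the square helper defined near the top of this snippet rasterises into an image buffer (the buffer size is an assumption chosen for illustration):
import numpy as np
import skimage.draw

im = np.zeros((50, 50, 3), dtype=np.float32)
rr, cc = square(10, 10, 10, im.shape)            # pixel coordinates of a 10x10 square
im[rr, cc, :] = 1.0                              # paint the square white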
"""
Author: <NAME>, University of Leeds, Oct 2019
This function processes the HighD data into input-output data for a car-following model
In the data, we would have the following files:
1. Recording Meta Information (XX_recordingMeta.csv)
This file contains metadata for each recording. The metadata provides a general overview, e.g. of the time of recording,
the highway section considered and the total number of vehicles recorded.
2.Track Meta Information (XX_tracksMeta.csv)
This file contains an overview of all tracks. For each track there are summary values like the distance covered or
the average speed. The purpose of this file is to allow to filter tracks e.g. by class or driving direction.
3.Tracks (XX_tracks.csv)
This file contains all time dependent values for each track. Information such as current velocities, viewing ranges
and information about surrounding vehicles are included.
This function processes all of these files
TODO:
1. Empirical analysis: Identify cases/situations in the data
2. Throw the data to a DL
3. Simulate and see if it's reproduce the data
4. If not, then we separate the cases to each model
5.
1. Distance to the leading vehicle
2. Driver heterogeneity : different models for each class of drivers
3. Cooperative & uncooperative behaviours
4. Find out whether a car nearby is changing their lanes
5. Rare situations: in the data or not in the data but the model needs to
give estimates
"""
#import pickle
import pandas as pd
import numpy as np
#import os
minSec = 10 # in seconds, we focus on vehicles that stay at least minSec seconds in the data
#prject_path = '~/Documents/Research/highD/'
prject_path = 'C:/Research/highD/'
dynamic_col_to_use = [0,6,12,13,14,15,24]
static_col_to_use = [1,2,6,9,10,11]
nearby_index = [2,6]
uniqueID = 0 #give a unique ID to the vehicle being processed
Location = 2 #focus only on the location number 2 in the dataset
L = 0.424 # length of the location under study (in km)
NumLane = 2
"""
STAGE A: First, we process data into a line-by-line dataset of all related information
"""
print("Stage A")
Car_following_df = []
for i in range(1,60):
print("currently at file: " + str(i))
#file names:
if i <10:
record_name = prject_path + "data/0" + str(i) + "_recordingMeta.csv"
tracksMeta_name = prject_path + "./data/0" + str(i) + "_tracksMeta.csv"
track_name = prject_path + "./data/0" + str(i) + "_tracks.csv"
else:
record_name = prject_path + "./data/" + str(i) + "_recordingMeta.csv"
tracksMeta_name = prject_path + "./data/" + str(i) + "_tracksMeta.csv"
track_name = prject_path + "./data/" + str(i) + "_tracks.csv"
#Step A.1: Read the Record Metadata
recordMeta_df = pd.read_csv(record_name)
#only take the data in the morning (if we take the whole day there will be >1M data lines)
#if int(recordMeta_df["startTime"][0][1]) >12:
# continue
timestamp = pd.to_datetime(recordMeta_df["startTime"][0],format='%H:%M')
time_hour =np.array(timestamp.hour+timestamp.minute/60)
#only take data if it's on our location of interests
if recordMeta_df["locationId"][0] != Location:
continue
#Step A.2: Read the tracksMeta data (summary about each vehicle)
tracksMeta_df = pd.read_csv(tracksMeta_name)
#Read the track data (individual vehicle data)
all_track_df = pd.read_csv(track_name)
#loop through the tracksMeta line-by-line, each line is a vehicle
for l in range(0,len(tracksMeta_df.index)):
trackID = tracksMeta_df["id"][l]
drivingDirection = tracksMeta_df["drivingDirection"][l] #1 for upper lanes (drive to the left), and 2 for lower lanes (drive to the right)
numFrames = tracksMeta_df["numFrames"][l]
if numFrames < recordMeta_df["frameRate"][0]*minSec: #only focus on vehicles that we can observe for at least minSec seconds
continue
#sanity check
if trackID != tracksMeta_df.iloc[l,0]:
print("The trackID is not the same at line: " + str(l))
############################################################
# find all the static data of the vehicle (e.g. vehicle length, class, etc)
static_df_track = np.array(tracksMeta_df.iloc[l,static_col_to_use])
# convert categorical to binary variable (e.g Car vs Truck)
if static_df_track[2]=='Car':
static_df_track[2]=0
else: static_df_track[2]=1 #otherwise it should be a truck
#convert to float for speed
static_df_track=static_df_track.astype(float)
#META DATA OF static_df_track: width, height, class, minXSpeed,
#maxXSpeed, meanXSpeed
#Step A.3: Find the dynamic features of each vehicle
track_df = all_track_df[all_track_df["id"]==trackID].reset_index(drop=True)
# on the upper half of the video, the speed and acceleration are negative
# because it uses universal positioning
# we need to convert it the other way around
if drivingDirection==1:
track_df["xVelocity"]=-track_df["xVelocity"]
track_df["xAcceleration"]=-track_df["xAcceleration"]
track_df["precedingXVelocity"]=-track_df["precedingXVelocity"]
# loop through each line in the track data
for t in range(0,len(track_df.index)-1,recordMeta_df["frameRate"][0]): #loop by each second
#print('currently looking at line:' + str(t))
#################################################################
# collect all the dynamic vehicle data (e.g. position, speed, etc)
dynamic_df_track = np.array(track_df.iloc[t,dynamic_col_to_use])
# META DATA OF dynamic_df_track: XSpeed, Distance Headway,
#Time Headway, Time to Collision, Preceding XSpeed
frameID = dynamic_df_track[0]
laneID = dynamic_df_track[-1]
#Step A.4: Find traffic-related variables: Density and traffic mean speed
if drivingDirection==1:
traffic_density = len(all_track_df[(all_track_df["frame"]==frameID) & (all_track_df["laneId"] < NumLane+2)]) / (L*NumLane)
else: traffic_density = len(all_track_df[(all_track_df["frame"]==frameID) & (all_track_df["laneId"] > NumLane+1)]) / (L*NumLane)
if drivingDirection==1:
traffic_speed = -np.mean(all_track_df.loc[(all_track_df["frame"]==frameID) & (all_track_df["laneId"] < NumLane+2),"xVelocity"])
else: traffic_speed = np.mean(all_track_df.loc[(all_track_df["frame"]==frameID) & (all_track_df["laneId"] > NumLane+1),"xVelocity"])
#Step A.5: Now look at the all_track_df data to find the location
#and speed of surrounding vehicles
#for each vehicle we keep [x_location,speed]
if track_df["leftPrecedingId"][t]!=0:
leftPreceding_df = np.array(all_track_df.loc[(all_track_df["id"] == track_df["leftPrecedingId"][t]) & (all_track_df["frame"] == track_df["frame"][t])].values[0][nearby_index])
leftPreceding_df[0] = np.abs(leftPreceding_df[0]-track_df["x"][t])
leftPreceding_df[1]= np.abs(leftPreceding_df[1])
else: leftPreceding_df = np.array([0,0])
if track_df["leftFollowingId"][t]!=0:
leftFollowing_df = np.array(all_track_df.loc[(all_track_df["id"] == track_df["leftFollowingId"][t]) & (all_track_df["frame"] == track_df["frame"][t])].values[0][nearby_index])
leftFollowing_df[0] = np.abs(leftFollowing_df[0]-track_df["x"][t])
leftFollowing_df[1] = np.abs(leftFollowing_df[1])
else: leftFollowing_df = np.array([0,0])
if track_df["leftAlongsideId"][t]!=0:
leftAlongside_df = np.array(all_track_df.loc[(all_track_df["id"] == track_df["leftAlongsideId"][t]) & (all_track_df["frame"] == track_df["frame"][t])].values[0][nearby_index])
leftAlongside_df[0] = np.abs(leftAlongside_df[0]-track_df["x"][t])
leftAlongside_df[1] = np.abs(leftAlongside_df[1])
else: leftAlongside_df = np.array([0,0])
if track_df["rightPrecedingId"][t]!=0:
rightPreceding_df =
|
np.array(all_track_df.loc[(all_track_df["id"] == track_df["rightPrecedingId"][t]) & (all_track_df["frame"] == track_df["frame"][t])].values[0][nearby_index])
|
numpy.array
|
"""
Target Problem:
---------------
* To train a model to predict the brain connectivity for the next time point given the brain connectivity at current time point.
Proposed Solution (Machine Learning Pipeline):
----------------------------------------------
* K-NN
Input to Proposed Solution:
---------------------------
* Directories of training and testing data in csv file format
* These two types of data should be stored in n x m pattern in csv file format.
Typical Example:
----------------
n x m samples in training csv file (Explain n and m)
k x s samples in testing csv file (Explain k and s)
Output of Proposed Solution:
----------------------------
* Predictions generated by learning model for testing set
* They are stored in the "results_team12.csv" file. (Change the file name if needed)
Code Owner:
-----------
* Copyright © Team 12. All rights reserved.
* Copyright © Istanbul Technical University, Learning From Data Spring/Fall 2020. All rights reserved.
"""
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.neighbors import NearestNeighbors
from scipy.stats.stats import pearsonr
import random as r
r.seed(1)
np.random.seed(1)
import warnings
warnings.filterwarnings('ignore')
def load_data(csv):
"""
The method reads a data set from its csv file and drops the index column.
Parameters
----------
csv: directory of the csv file in which the data set is located
"""
# reading the data from the csv files
df = pd.read_csv(csv, sep=',')
# ignoring the index column of the data (0,...,149 or 0,...,79)
df = df.drop(columns=['ID'])
df_np = df.to_numpy()
return df_np
def train_model(train_t0, neighbourCount):
"""
The method creates a learning model and trains it by using training data.
Parameters
----------
train_t0: x
neighbourCount: number of neighbours in KNN
"""
nbrs = []
train_t0_single = np.transpose(train_t0)
for i in range(train_t0_single.shape[0]):
nbrs.append(NearestNeighbors(n_neighbors=neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1)))
return nbrs
def predict(train_t0, train_t1, test_t0, nbrs):
"""
The method makes predictions for testing data samples by using trained learning model.
Parameters
----------
train_t0: x
train_t1: y
test_t0: x_test
nbrs: Nearest Neighbors model for each feature
"""
train_t0_single = np.transpose(train_t0)
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
for i in range(train_t0_single.shape[0]):
distances, indices = nbrs[i].kneighbors(test_t0_single[i].reshape(-1,1))
distances = np.ones_like(distances)* 0.7 - distances
mul = np.multiply(distances, train_t1_single[i,indices])
pred = np.divide(np.mean(mul, axis =1), np.mean(distances, axis = 1))
prediction[:,i] = pred.reshape(-1)
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0
return prediction
def cv5(data_t0, data_t1, neighbourCount):
kf = KFold(n_splits=5 , shuffle = True, random_state=1)
prediction_all = np.zeros_like(data_t1)
mses= []
maes = []
pears = []
for trainIndex, testIndex in kf.split(data_t0):
train_t0, test_t0 = data_t0[trainIndex], data_t0[testIndex] #Split Data into train and test sets
train_t1, test_t1 = data_t1[trainIndex], data_t1[testIndex]
train_t0_single = np.transpose(train_t0) # Use features as rows and subjects as columns
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
preds = []
for i in range(train_t0_single.shape[0]): #Loop through each feature
nbrs = NearestNeighbors(n_neighbors= neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1))
distances, indices = nbrs.kneighbors(test_t0_single[i].reshape(-1,1))# Calculate the distances and indices of K closest neighbours of test subjects and train subjects in t0
distances = np.ones_like(distances)* 0.7 - distances # Set distances to (0.7 - d). Neighbours with low distance get larger values and vice versa
mul = np.multiply(distances, train_t1_single[i,indices]) # Use the changed distances as weights and multiply the corresponding t1 of the neighbours
pred = np.divide(np.mean(mul,axis =1),np.mean(distances, axis = 1)) #Take the mean of the weighted t1's and divide by the mean of distances to normalize
prediction[:,i] = pred.reshape(-1) #This is the prediction for this feature across all test subjects
preds.append(pred.reshape(-1))
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0 # Set nan locations to 0
preds = np.asarray(preds)
preds = np.transpose(preds)
mses.append( mean_squared_error(preds, test_t1) )
maes.append( mean_absolute_error(preds, test_t1) )
pears.append(pearsonr(preds.flatten(), test_t1.flatten())[0] )
prediction_all[testIndex] = prediction # Put all predictions for each CV fold into prediction_all
mse_error = mean_squared_error(data_t1, prediction_all)
mae_error = mean_absolute_error(data_t1, prediction_all)
print("mses: ", mses)
print("maes: ", maes)
print("pears", pears)
print("Average error of five fold cross validation MSE:",
|
np.sum(mses)
|
numpy.sum
|
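A tiny worked example (with assumed numbers) of the weighting used in predict and cv5 above: each neighbour gets weight 0.7 minus its distance, and the prediction is the weighted mean of the neighbours' t1 values normalised by the mean weight:
import numpy as np

distances = np.array([[0.1, 0.3]])               # distances to 2 nearest neighbours
t1_neigh = np.array([[2.0, 4.0]])                # the neighbours' t1 values
weights = np.ones_like(distances) * 0.7 - distances
pred = np.mean(weights * t1_neigh, axis=1) / np.mean(weights, axis=1)
# weights = [[0.6, 0.4]], so pred = (1.2 + 1.6) / 2 / 0.5 = 2.8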
import cv2
import math
import numpy as np
from skimage import transform as trans
def transform(data, center, output_size, scale, rotation):
scale_ratio = scale
rot = float(rotation) * np.pi / 180.0
#translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
t1 = trans.SimilarityTransform(scale=scale_ratio)
cx = center[0] * scale_ratio
cy = center[1] * scale_ratio
t2 = trans.SimilarityTransform(translation=(-1 * cx, -1 * cy))
t3 = trans.SimilarityTransform(rotation=rot)
t4 = trans.SimilarityTransform(translation=(output_size / 2,
output_size / 2))
t = t1 + t2 + t3 + t4
M = t.params[0:2]
cropped = cv2.warpAffine(data,
M, (output_size, output_size),
borderValue=0.0)
return cropped, M
def trans_points2d(pts, M):
new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
for i in range(pts.shape[0]):
pt = pts[i]
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
new_pt = np.dot(M, new_pt)
#print('new_pt', new_pt.shape, new_pt)
new_pts[i] = new_pt[0:2]
return new_pts
def trans_points3d(pts, M):
scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
#print(scale)
new_pts =
|
np.zeros(shape=pts.shape, dtype=np.float32)
|
numpy.zeros
|
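A small check (with an assumed scale and translation) of how the 2x3 matrix M produced by transform is applied by trans_points2d above: each point is extended with a 1 and multiplied by M:
import numpy as np
from skimage import transform as trans

t = trans.SimilarityTransform(scale=0.5) + trans.SimilarityTransform(translation=(10, 10))
M = t.params[0:2]                                # 2x3 affine matrix: scale, then shift
pts = np.array([[0., 0.], [20., 40.]], dtype=np.float32)
print(trans_points2d(pts, M))                    # -> [[10. 10.], [20. 30.]]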
from .common import Benchmark
import numpy as np
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.float_l1000 = [float(i) for i in range(1000)]
self.float64_l1000 = [np.float64(i) for i in range(1000)]
self.int_l1000 = list(range(1000))
self.l = [np.arange(1000), np.arange(1000)]
self.l_view = [memoryview(a) for a in self.l]
self.l10x10 = np.ones((10, 10))
self.float64_dtype = np.dtype(np.float64)
def time_array_1(self):
np.array(1)
def time_array_empty(self):
np.array([])
def time_array_l1(self):
np.array([1])
def time_array_l100(self):
np.array(self.l100)
def time_array_float_l1000(self):
np.array(self.float_l1000)
def time_array_float_l1000_dtype(self):
np.array(self.float_l1000, dtype=self.float64_dtype)
def time_array_float64_l1000(self):
np.array(self.float64_l1000)
def time_array_int_l1000(self):
np.array(self.int_l1000)
def time_array_l(self):
np.array(self.l)
def time_array_l_view(self):
np.array(self.l_view)
def time_vstack_l(self):
np.vstack(self.l)
def time_hstack_l(self):
np.hstack(self.l)
def time_dstack_l(self):
np.dstack(self.l)
def time_arange_100(self):
|
np.arange(100)
|
numpy.arange
|
import numpy as np
class RanSamMultiplePriorUser:
def __init__(self, features, target, power, prior_mask, count = 1):
self._features = features
self._target = target
self._count = count
self._power = power
self._prior_mask = prior_mask
def decision(self, disp, disp_type):
dist_to_target = (1 + np.dot(self._features[disp], self._features[self._target])) / 2
dist_to_target = dist_to_target ** self._power
dist_to_target = dist_to_target * self._prior_mask[disp_type]
dist_to_target = dist_to_target / np.sum(dist_to_target)
return disp[np.random.choice(dist_to_target.shape[0], self._count, p=dist_to_target, replace=False)]
def decision_ids(self, disp, disp_type):
dist_to_target = (1 + np.dot(self._features[disp], self._features[self._target])) / 2
dist_to_target = dist_to_target ** self._power
dist_to_target = dist_to_target * self._prior_mask[disp_type]
dist_to_target = dist_to_target / np.sum(dist_to_target)
return
|
np.random.choice(dist_to_target.shape[0], self._count, p=dist_to_target, replace=False)
|
numpy.random.choice
|
"""
Provides functions for parsing an output text file (for a batch calculation)
from the pattern index calculator implemented in interface.py and provides
functions for calculating various statistics.
Functions:
process_data, get_statistics, process_data_output,
compute_statistics_batchwise, compute_statistics_sizewise,
plot_statistics_batchwise, plot_statistics_sizewise
"""
import re
import numpy as np
import matplotlib
from matplotlib import pyplot as plotter
from .io import process_multibatch_output_file
font = {'family' : 'normal',
'size' : 28}
matplotlib.rc('font', **font)
PRI = "Pattern Recurrence Index"
TI = "Tangled Index"
TPRI = "Tangled Pattern Recurrence Index"
def process_data(data_file_name, random_file_name, batchwise=False, sizewise=False):
"""
Input the names of two files, random_file_name and data_file_name,
that contain the output from a batch calculation of the pattern
indices of a sequence of randomly sampled words and a sequence
of words from data.
It then computes various statistics of these batches depending on
the argument values. If batchwise = True, it first computes
the mean pattern index values for each index and each random sample,
and then calculates statistics of this population: mean, median, variance,
standard deviation, upper quartile, lower quartile, minimum, and maximum.
If sizewise = True, for each word size in the data it computes
these statistics for all the pattern indices of every word of that size.
Args:
data_file_name: String.
random_file_name: String.
batchwise: Boolean, defaults to False.
sizewise: Boolean, defaults to False.
"""
random_experiments = process_multibatch_output_file(random_file_name)
data_output = process_multibatch_output_file(data_file_name)
if batchwise:
random_sample_statistics_batchwise = compute_statistics_batchwise(
random_experiments)
plot_statistics_batchwise(random_sample_statistics_batchwise, data_output)
if sizewise:
data_processed = process_data_output(data_output)
random_sample_statistics_sizewise = compute_statistics_sizewise(
random_experiments)
plot_statistics_sizewise(random_sample_statistics_sizewise, data_processed)
def get_statistics(*sequences):
"""
Args:
sequences: List or Tuple of lists of floats or integers.
Returns:
A dictionary with statistic names as keys and statistic values
as values.
"""
means = [np.mean(sequence) for sequence in sequences]
standard_deviations = [np.std(sequence) for sequence in sequences]
medians = [np.percentile(sequence, 50) for sequence in sequences]
lower_quartiles = [np.percentile(sequence, 25) for sequence in sequences]
upper_quartiles = [np.percentile(sequence, 75) for sequence in sequences]
minimums = [np.percentile(sequence, 0) for sequence in sequences]
maximums = [
|
np.percentile(sequence, 100)
|
numpy.percentile
|
import math
import random
from copy import deepcopy
from os.path import basename
import cv2
import numpy
def resample():
return random.choice((cv2.INTER_LINEAR, cv2.INTER_CUBIC))
def resize(image, image_size):
h, w = image.shape[:2]
ratio = image_size / max(h, w)
if ratio != 1:
shape = (int(w * ratio), int(h * ratio))
image = cv2.resize(image, shape, interpolation=resample())
return image, image.shape[:2]
def xy2wh(x):
y = numpy.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xyn2xy(x, w, h, pad_w, pad_h):
y = numpy.copy(x)
y[:, 0] = w * x[:, 0] + pad_w # top left x
y[:, 1] = h * x[:, 1] + pad_h # top left y
return y
def whn2xy(x, w, h, pad_w, pad_h):
y = numpy.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + pad_w # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + pad_h # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + pad_w # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + pad_h # bottom right y
return y
def mask2box(mask, w, h):
x, y = mask.T
inside = (x >= 0) & (y >= 0) & (x <= w) & (y <= h)
x, y = x[inside], y[inside]
if any(x):
return numpy.array([x.min(), y.min(), x.max(), y.max()]), x, y
else:
return numpy.zeros((1, 4)), x, y
def box_ioa(box1, box2, eps=1E-7):
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
area1 = (numpy.minimum(b1_x2, b2_x2) - numpy.maximum(b1_x1, b2_x1)).clip(0)
area2 = (numpy.minimum(b1_y2, b2_y2) - numpy.maximum(b1_y1, b2_y1)).clip(0)
inter_area = area1 * area2
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
# Intersection over box2 area
return inter_area / box2_area
def masks2boxes(segments):
boxes = []
for s in segments:
x, y = s.T
boxes.append([x.min(), y.min(), x.max(), y.max()])
return xy2wh(
|
numpy.array(boxes)
|
numpy.array
|
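A quick numeric check (with an assumed box) of the corner-to-centre conversion implemented by xy2wh above:
import numpy

box = numpy.array([[10., 20., 30., 60.]])        # x1, y1, x2, y2
print(xy2wh(box))                                # -> [[20. 40. 20. 40.]] (xc, yc, w, h)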
import copy
import random
import cv2
import numpy as np
from alfworld.gen import constants
from alfworld.gen import goal_library as glib
def get_pose(event):
pose = event.pose
return (int(np.round(pose[0] / (1000 * constants.AGENT_STEP_SIZE))),
int(np.round(pose[1] / (1000 * constants.AGENT_STEP_SIZE))),
int(np.round(pose[2] / (1000 * 90))),
int(np.round(pose[3] / (1000))))
def get_object_data(metadata):
return [
{"objectName": obj["name"].split("(Clone)")[0], "position": obj["position"], "rotation": obj["rotation"]}
for obj in metadata["objects"]
if obj["pickupable"]
]
def imresize(image, size, rescale=True):
if image is None:
return None
if image.shape[0] != size[0] or image.shape[1] != size[1]:
image = cv2.resize(image, size)
if rescale:
if image.dtype != np.float32:
image = image.astype(np.float32)
image /= 255.0
return image
def depth_imresize(image, size, rescale=True, max_depth=constants.MAX_DEPTH):
if image is None:
return None
if image.shape[0] != size[0] or image.shape[1] != size[1]:
image = cv2.resize(image, size)
image[image > max_depth] = max_depth
if rescale:
if image.dtype != np.float32:
image = image.astype(np.float32)
image /= max_depth
return image
def get_camera_matrix(pose, camera_height):
assert(pose[2] in {0, 1, 2, 3})
sin_x = np.sin(pose[3] * np.pi / 180)
cos_x = np.cos(pose[3] * np.pi / 180)
x_rotation = np.matrix([
[1, 0, 0],
[0, cos_x, -sin_x],
[0, sin_x, cos_x]])
sin_y = np.sin(pose[2] * np.pi / 180)
cos_y =
|
np.cos(pose[2] * np.pi / 180)
|
numpy.cos
|
#!/usr/bin/env python
"""
This module provides functions for converting between different
types of OperatorStrings.
Note
----
Currently, this module only supports the conversion between strings
of fermions and strings of Majorana fermions.
"""
import warnings
import itertools as it
import numpy as np
import scipy.sparse as ss
import scipy.sparse.linalg as ssla
from .tools import sort_sign, compare
from .config import *
from .operatorstring import OperatorString
from .basis import Basis, Operator
from .algebra import product
# TODO: add support for permutations or alternative X,Y,Z orderings.
def _jordan_wigner(op_string):
# Convert a Pauli string to a Majorana string or vice-versa
# using the Jordan-Wigner transformation:
# a_i = (\prod_{j=1}^{i-1} Z_j) X_i
# b_i = (\prod_{j=1}^{i-1} Z_j) Y_i
# d_i = Z_i
# X_i = (\prod_{j=1}^{i-1} d_j) a_i
# Y_i = (\prod_{j=1}^{i-1} d_j) b_i
# Z_i = d_i
op_type = op_string.op_type
total_coeff = op_string.prefactor
if op_type == 'Pauli':
result = OperatorString([], [], op_type='Majorana')
num_orbitals = len(op_string.orbital_operators)
for ind_orb in range(num_orbitals):
orb_op = op_string.orbital_operators[ind_orb]
orb_label = op_string.orbital_labels[ind_orb]
if orb_op == 'X':
jw_ops = ['D']*orb_label + ['A']
jw_labels = np.arange(orb_label+1)
elif orb_op == 'Y':
jw_ops = ['D']*orb_label + ['B']
jw_labels = np.arange(orb_label+1)
elif orb_op == 'Z':
jw_ops = ['D']
jw_labels = [orb_label]
else:
raise ValueError('Invalid operator {} in OperatorString.'.format(orb_op))
jw_string = OperatorString(jw_ops, jw_labels, 'Majorana')
(coeff, result) = product(result, jw_string)
total_coeff *= coeff
elif op_type == 'Majorana':
result = OperatorString([], [], op_type='Pauli')
num_orbitals = len(op_string.orbital_operators)
for ind_orb in range(num_orbitals):
orb_op = op_string.orbital_operators[ind_orb]
orb_label = op_string.orbital_labels[ind_orb]
if orb_op == 'A':
jw_ops = ['Z']*orb_label + ['X']
jw_labels = np.arange(orb_label+1)
elif orb_op == 'B':
jw_ops = ['Z']*orb_label + ['Y']
jw_labels = np.arange(orb_label+1)
elif orb_op == 'D':
jw_ops = ['Z']
jw_labels = [orb_label]
else:
raise ValueError('Invalid operator {} in OperatorString.'.format(orb_op))
jw_string = OperatorString(jw_ops, jw_labels, 'Pauli')
(coeff, result) = product(result, jw_string)
total_coeff *= coeff
else:
raise ValueError('Cannot perform Jordan-Wigner transformation on OperatorString of op_type: {}'.format(op_type))
return (total_coeff, result)
def _fermion_string_from_cdag_c_labels(prefactor, c_dag_labels, c_labels):
# Construct a fermion string operator from the labels of the creation and
# annihilation (c^\dagger and c) operators.
c_labels_reversed = np.copy(c_labels)
c_labels_reversed = c_labels_reversed[::-1]
orbital_operators = ['CDag']*len(c_dag_labels) + ['C']*len(c_labels)
orbital_labels = np.concatenate((c_dag_labels, c_labels_reversed))
return OperatorString(orbital_operators, orbital_labels, prefactor=prefactor, op_type='Fermion')
def _convert_majorana_string(op_string, include_identity=False):
# Converts a Majorana string to an Operator
# that is a linear combination of Fermion strings.
if op_string.op_type != 'Majorana':
raise ValueError('Trying to convert a Majorana string to a Fermion string but given an OperatorString of type {}'.format(op_string.op_type))
ops = op_string.orbital_operators
labels = op_string.orbital_labels
# The identity operator.
if len(ops) == 0:
if include_identity:
return Operator(np.array([1.0]), [OperatorString([], [], 'Fermion')])
else:
return Operator([], [], 'Fermion')
# Used to make sure that the fermion labels end up normal ordered.
# I add this large number to the annihilation operators' c_i labels
# so that they end up last in the list of operators sorted by labels.
large_number = 4*np.maximum(1,np.max(labels)) + 4
[op1, op2, op3] = MAJORANA_OPS
num_ops = len(ops)
coeffs_fermion = []
op_strings_fermion = []
# Perform the Majorana to Fermion string basis conversion.
# Expand a_i = c_i + c_i^\dagger, b_i = -i c_i + i c_i^\dagger, d_i = - 2 c_i^\dagger c_i + I
# A "term choice" of 0 (1) corresponds to the first (second) term, e.g.,
# 1 for a_i is c_i^\dagger, 0 for d_i is -2 c_i^\dagger c_i.
possible_term_choices = list(it.product([0,1], repeat=num_ops))
for term_choices in possible_term_choices:
coeff = op_string.prefactor
fermion_labels = []
num_cdags = 0
num_cs = 0
for ind_op in range(num_ops):
label = labels[ind_op]
if ops[ind_op] == op1: # a_j = c_j + c_j^\dagger
if term_choices[ind_op] == 0: # c_j
fermion_labels.append(-label + large_number)
num_cs += 1
elif term_choices[ind_op] == 1: # c_j^\dagger
fermion_labels.append(label)
num_cdags += 1
elif ops[ind_op] == op2: # b_j = -i c_j + i c_j^\dagger
if term_choices[ind_op] == 0: # -i c_j
coeff *= -1j
fermion_labels.append(-label + large_number)
num_cs += 1
elif term_choices[ind_op] == 1: # i c_j^\dagger
coeff *= 1j
fermion_labels.append(label)
num_cdags += 1
elif ops[ind_op] == op3: # d_j = - 2 c_j^\dagger c_j + I
if term_choices[ind_op] == 0: # -2 c_j^\dagger c_j
coeff *= -2.0
fermion_labels.append(label)
fermion_labels.append(-label + large_number)
num_cdags += 1
num_cs += 1
elif term_choices[ind_op] == 1: # I
coeff *= 1.0
# Resulting operator is identity I.
if len(fermion_labels) == 0 and not include_identity:
continue
(sorted_fermion_labels, sign) = sort_sign(fermion_labels)
coeff *= sign
# The i_1,\ldots,i_m labels
cdag_labels = sorted_fermion_labels[0:num_cdags]
# The j_1,\ldots,j_m labels
c_labels = large_number - sorted_fermion_labels[num_cdags:]
c_labels = c_labels[::-1]
lex_order = compare(cdag_labels, c_labels)
# Resulting operator is not lexicographically sorted. Ignore it.
if lex_order < 0:
continue
# Resulting operator is of type 1: c^\dagger_{i_1} \cdots c^\dagger_{i_m} c_{i_m} \cdots c_{i_1}.
elif lex_order == 0:
coeffs_fermion.append(coeff)
op_strings_fermion.append(_fermion_string_from_cdag_c_labels(1.0, cdag_labels, c_labels))
# Resulting operator is of type 2: c^\dagger_{i_1} \cdots c^\dagger_{i_m} c_{j_l} \cdots c_{j_1} + H.c.
elif lex_order > 0 and np.abs(np.imag(coeff)) < np.finfo(float).eps:
coeffs_fermion.append(np.real(coeff))
op_strings_fermion.append(_fermion_string_from_cdag_c_labels(1.0, cdag_labels, c_labels))
# Resulting operator is of type 3: ic^\dagger_{i_1} \cdots c^\dagger_{i_m} c_{j_l} \cdots c_{j_1} + H.c.
elif lex_order > 0 and np.abs(np.real(coeff)) < np.finfo(float).eps:
coeffs_fermion.append(np.real(coeff/(1j)))
op_strings_fermion.append(_fermion_string_from_cdag_c_labels(1j, cdag_labels, c_labels))
else:
raise ValueError('Invalid lex_order = {} and coeff = {}'.format(lex_order, coeff))
return Operator(np.array(coeffs_fermion), op_strings_fermion)
def _convert_fermion_string(op_string, include_identity=False):
# Converts a Fermion string into an Operator
# that is a linear combination of Majorana strings.
# Obtain the labels of the CDag and C operators (in ascending order).
cdag_labels = [o_lab for (o_lab, o_op) in zip(op_string.orbital_labels, op_string.orbital_operators) if o_op == 'CDag']
c_labels = [o_lab for (o_lab, o_op) in zip(op_string.orbital_labels, op_string.orbital_operators) if o_op == 'C']
c_labels = c_labels[::-1]
# Store the operator type (C, CDag, CDagC) of every label.
label_types = dict()
for cdag_label in cdag_labels:
label_types[cdag_label] = 'CDag'
for c_label in c_labels:
if c_label in label_types:
label_types[c_label] = 'CDagC'
else:
label_types[c_label] = 'C'
# Put all the labels together and reorder them so that the resulting
# Majorana operator labels are in the correct order. Keep track of the
# sign due to reordering when you do this.
fermion_labels = cdag_labels + c_labels[::-1]
(sorted_fermion_labels, sign) = sort_sign(fermion_labels)
# Collect the information about the CDag, C, CDagC fermion operators and their labels into
# the ops = [(orbital operator, orbital operator label), ...] list, which has the operators ordered correctly.
ops = []
num_ops = 0
for f_label in sorted_fermion_labels:
f_op = label_types[f_label]
if not (f_op, f_label) in ops:
ops.append((f_op, f_label))
num_ops += 1
coeffs_majorana = []
op_strings_majorana = []
# Perform the Fermion to Majorana string basis conversion.
# Expand c_j = 1/2(a_j + ib_j), c^\dagger_j = 1/2(a_j - i b_j^\dagger), c^\dagger_j c_j = 1/2 (-d_j + I)
# A "term choice" of 0 (1) corresponds to the first (second) term, e.g.,
# 1 for c_j is i/2 b_j, 0 for c^\dagger_j c_j is -1/2 d_j.
possible_term_choices = list(it.product([0,1], repeat=num_ops))
for term_choice in possible_term_choices:
coeffM = 1.0 #op_string.prefactor * sign
opNameM = ''
orbital_operators = []
orbital_labels = []
for ind_op in range(num_ops):
(op, op_label) = ops[ind_op]
if op == 'CDag' and term_choice[ind_op] == 0:
coeffM *= 0.5
orbital_operators.append('A')
orbital_labels.append(op_label)
elif op == 'CDag' and term_choice[ind_op] == 1:
coeffM *= -0.5j
orbital_operators.append('B')
orbital_labels.append(op_label)
elif op == 'C' and term_choice[ind_op] == 0:
coeffM *= 0.5
orbital_operators.append('A')
orbital_labels.append(op_label)
elif op == 'C' and term_choice[ind_op] == 1:
coeffM *= 0.5j
orbital_operators.append('B')
orbital_labels.append(op_label)
elif op == 'CDagC' and term_choice[ind_op] == 0:
coeffM *= -0.5
orbital_operators.append('D')
orbital_labels.append(op_label)
elif op == 'CDagC' and term_choice[ind_op] == 1:
coeffM *= 0.5
else:
raise ValueError('Invalid op and term_choice: {} {}'.format(op, term_choice[ind_op]))
# Ignore the identity operator.
if len(orbital_operators) == 0 and not include_identity:
continue
op_string_M = OperatorString(orbital_operators, orbital_labels, 'Majorana')
coeffM /= op_string_M.prefactor
coeffM *= sign
coeffM *= op_string.prefactor
# The type 2 and 3 fermion strings have a Hermitian conjugate: (CDag ... C ...) + H.c.,
# that ensures that the resulting operators are Hermitian. In our conversion,
# if an operator ends up being anti-Hermitian, then it cancels with the Hermitian conjugate.
# Otherwise, its coefficient doubles because it equals the Hermitian conjugate.
if cdag_labels != c_labels:
if np.abs(np.imag(coeffM)) > np.finfo(float).eps: #1e-16:
continue
else:
coeffM *= 2.0
coeffs_majorana.append(coeffM)
op_strings_majorana.append(op_string_M)
return Operator(np.array(coeffs_majorana), op_strings_majorana)
def _convert_operator_string(op_string, to_op_type, include_identity=False):
# Converts an OperatorString to a linear combination of
# OperatorStrings of the given op_type.
if op_string.op_type == to_op_type:
return Operator(
|
np.array([1.0])
|
numpy.array
|
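A minimal single-site numeric check (occupation basis |0>, |1> assumed) of the operator expansions quoted in the comments above: a = c + c^dagger behaves like X, b = -i c + i c^dagger like Y, and d = -2 c^dagger c + I like Z:
import numpy as np

c = np.array([[0., 1.], [0., 0.]])               # single-site annihilation operator
X = np.array([[0., 1.], [1., 0.]])
Y = np.array([[0., -1j], [1j, 0.]])
Z = np.array([[1., 0.], [0., -1.]])
assert np.allclose(c + c.conj().T, X)            # a = c + c^dagger
assert np.allclose(-1j * c + 1j * c.conj().T, Y) # b = -i c + i c^dagger
assert np.allclose(-2 * c.conj().T @ c + np.eye(2), Z)  # d = -2 c^dagger c + I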
import math
from aerocalc3 import std_atm as ISA # type: ignore
import numpy as np # type: ignore
import matplotlib # type: ignore
import matplotlib.pylab as pylab # type: ignore
import matplotlib.pyplot as plt # type: ignore
from intersection import get_intersection_index
from typing import Any
class AndrasConstraint:
def __init__(self):
self.VariableName_unit: int = 0
# Variable names ending in an underscore are non-dimensional:
self.VariableName_: int = 0
self.DesignGrossWeight_kg: int = 15
self.PropEff: float = 0.6
# Take-off performance
self.GroundRun_feet: int = 197
self.TakeOffSpeed_KCAS: int = 31
self.TakeOffElevation_feet: int = 0
# Cruise
# The cruising altitude may be viewed in two fundamental ways. First, it may be a constraint – for
# example, due to regulatory requirements the aircraft may have to cruise at, say, 350 feet. It can
# also be viewed as a design variable, in which case you may wish to return to this point in the
# document and revise it as part of an iterative process of optimization / refinement.
self.CruisingAlt_feet: int = 400
self.CruisingSpeed_KTAS: float = 58.3
# Climb Performance
# The climb performance of an aircraft and its variation with altitude is the result of
# a complex web of interactions between the aerodynamics of lift generation and the
# response of its powerplant to varying atmospheric conditions and airspeed. Typically a
# range of design points have to be considered, representing a variety of conditions, but
# at this early stage in the design process it is best to keep the number of these design
# points at a more manageable level. Here we use 80% of the cruise speed for the climb
# constraint.
self.RateOfClimb_fpm: int = 591
self.ClimbSpeed_KCAS = self.CruisingSpeed_KTAS * 0.8
# The rate of climb constraint will be evaluated at this altitude:
self.ROCAlt_feet: int = 0
# Turn Performance
# We define steady, level turn performance in terms of the load factor n (which represents the
# ratio of lift and weight). n = 1 / cos(theta), where theta is the bank angle (so n = 1.41 corresponds to
# 45°, n = 2 corresponds to 60°, etc.).
self.n_cvt_: float = 1.41
# Service Ceiling
self.ServiceCeiling_feet: int = 500
# Approach and Landing
self.ApproachSpeed_KTAS: float = 29.5
# We define the margin by which the aircraft operates above its stall speed on final approach
# (e.g., a reserve factor of 1.2 – typical of manned military aircraft – means flying 20% above
# stall, a reserve factor of 1.3 – typical of civil aircraft, means 30% above stall; for small UAVs,
# lower values may be considered).
self.StallReserveFactor: float = 1.1
self.StallSpeedinApproachConf_KTAS = (
self.ApproachSpeed_KTAS / self.StallReserveFactor
)
print(
f"Stall speed in approach configuration: {self.StallSpeedinApproachConf_KTAS} KTAS"
)
# Stall speed in approach configuration: 26.8 KTAS
# Maximum lift coefficient in landing configuration:
self.CLmax_approach: float = 1.3
# We also define the highest altitude AMSL where we would expect the aircraft to be established
# on a stable final approach in landing configuration:
self.TopOfFinalApp_feet: int = 100
# Unit Conversions
# All constraint analysis calculations in this document are performed in SI units. However, it is
# more common to specify some elements of the design brief in the mix of SI and Imperial units
# traditionally used in aviation – here we perform the appropriate conversions.
self.CruisingAlt_m = self.CruisingAlt_feet * 0.3048
print(f"Cruising altitude: {self.CruisingAlt_m}")
# Cruising altitude: 122 m
self.TopOfFinalApp_m = self.TopOfFinalApp_feet * 0.3048
print(f"Top of final approach: {self.TopOfFinalApp_m} m")
# Top of final approach: 30 m
self.TakeOffElevation_m = self.TakeOffElevation_feet * 0.3048
print(f"Take-off runway elevation: {self.TakeOffElevation_m} m")
# Take-off runway elevation: 0 m
self.ServiceCeiling_m = self.ServiceCeiling_feet * 0.3048
print(f"Service ceiling: {self.ServiceCeiling_m} m")
# Service ceiling: 152 m
self.CruisingSpeed_mpsTAS = self.CruisingSpeed_KTAS * 0.5144444444
print(f"Cruising speed: {self.CruisingSpeed_mpsTAS} m/s TAS")
# Cruising speed: 30.0 m/s TAS
self.ClimbSpeed_mpsCAS = self.ClimbSpeed_KCAS * 0.5144444444
print(f"Climb speed: {self.ClimbSpeed_mpsCAS} m/s CAS")
# Climb speed: 24.0 m/s CAS
self.ApproachSpeed_mpsTAS = self.ApproachSpeed_KTAS * 0.5144444444
print(f"Approach speed: {self.ApproachSpeed_mpsTAS} m/s TAS")
# Approach speed: 15.2 m/s TAS
self.StallSpeedinApproachConf_mpsTAS = (
self.StallSpeedinApproachConf_KTAS * 0.51444444444
)
print(
f"Stall speed in approach configuration: {self.StallSpeedinApproachConf_mpsTAS} m/s TAS"
)
# Stall speed in approach configuration: 13.8 m/s TAS
self.RateOfClimb_mps = self.RateOfClimb_fpm * 0.00508
print(f"Rate of climb: {self.RateOfClimb_mps} m/s")
# Rate of climb: 3.0 m/s
self.TakeOffSpeed_mpsCAS = self.TakeOffSpeed_KCAS * 0.5144444444
print(f"Take-off speed: {self.TakeOffSpeed_mpsCAS} m/s CAS")
# Take-off speed: 15.9 m/s CAS
self.GroundRun_m = self.GroundRun_feet * 0.3048
print(f"Ground run: {self.GroundRun_m} m")
# Ground run: 60 m
# Basic Geometry and Initial Guesses
# Almost by definition, the early part of the conceptual design process is the only part of the
# product development where we do not yet have a geometry model to refer to. Thus, some of
# the all-important aerodynamic figures have to be guessed at this point, largely on the basis of
# high level geometrical parameters like the aspect ratio.
self.AspectRatio_: float = 9.0
self.CDmin: float = 0.0418
self.WSmax_kgm2: float = 20
self.TWmax: float = 0.6
self.Pmax_kW: float = 4
# Estimated take-off parameters
self.CLTO: float = 0.97
self.CDTO: float = 0.0898
self.muTO: float = 0.17
self.Resolution = 2000
self.Start_Pa = 0.1
# Preamble
# Some of the computations and visualizations performed in this document may require additional
# Python modules; these need to be loaded first as follows:
# get_ipython().run_line_magic("matplotlib", "inline")
# Preliminary Calculations
# The Operating Environment
# The environment in which the aircraft is expected to operate plays a very important role in
# many of the conceptual design calculations to follow. The conditions corresponding to the
# current design brief are computed as follows:
self.SeaLevelDens_kgm3 = ISA.alt2density(
0, alt_units="ft", density_units="kg/m**3"
)
print(f" ISA density at Sea level elevation: {self.SeaLevelDens_kgm3} kg/m^3")
# ISA density at Sea level elevation: 1.225 kg/m^3
self.TakeOffDens_kgm3 = ISA.alt2density(
self.TakeOffElevation_feet, alt_units="ft", density_units="kg/m**3"
)
print(f" ISA density at take-off elevation: {self.TakeOffDens_kgm3} kg/m^3")
# ISA density at take-off elevation: 1.225 kg/m^3
self.ClimbAltDens_kgm3 = ISA.alt2density(
self.ROCAlt_feet, alt_units="ft", density_units="kg/m**3"
)
print(
f" ISA density at the climb constraint altitude: {self.ClimbAltDens_kgm3} kg/m^3"
)
# ISA density at the climb constraint altitude: 1.225 kg/m^3
self.CruisingAltDens_kgm3 = ISA.alt2density(
self.CruisingAlt_feet, alt_units="ft", density_units="kg/m**3"
)
print(f" ISA density at cruising altitude: {self.CruisingAltDens_kgm3} kg/m^3")
# ISA density at cruising altitude: 1.211 kg/m^3
# Concept Design: Initial Constraint Analysis 153
self.TopOfFinalAppDens_kgm3 = ISA.alt2density(
self.TopOfFinalApp_feet, alt_units="ft", density_units="kg/m**3"
)
print(
f" ISA density at the top of the final approach: {self.TopOfFinalAppDens_kgm3} kg/m^3"
)
# ISA density at the top of the final approach: 1.221 kg/m^3
# Basic Aerodynamic Performance Calculations
# In the absence of a geometry, at this stage any aerodynamic performance estimates will either
# be based on very basic physics or simple, empirical equations.
# We begin with a very rough estimate of the Oswald span efficiency, only suitable for moderate
# aspect ratios and sweep angles below 30° (equation due to Raymer):
self.e0 = 1.78 * (1 - 0.045 * self.AspectRatio_ ** 0.68) - 0.64
print(f"{self.e0} ")
# 0.783
# Lift induced drag factor self.k (Cd = Cd0 + k * Cl^2):
self.k = 1.0 / (math.pi * self.AspectRatio_ * self.e0)
print(f"{self.k}")
# 0.045
# Dynamic pressure at cruise
self.q_cruise_Pa = (
0.5 * self.CruisingAltDens_kgm3 * (self.CruisingSpeed_mpsTAS ** 2)
)
print(f"{self.q_cruise_Pa} Pa")
# 544.5 Pa
# Dynamic pressure in the climb
self.q_climb_Pa = 0.5 * self.ClimbAltDens_kgm3 * (self.ClimbSpeed_mpsCAS ** 2)
print(f"{self.q_climb_Pa} Pa")
# 352.6 Pa
# Dynamic pressure at take-off conditions – for the purposes of this simple approximation we
# assume the acceleration during the take-off run to decrease linearly with V^2, so for the
# V^2 term we'll use half of the square of the liftoff velocity (i.e., V = V_TO / sqrt(2)):
self.q_TO_Pa = (
0.5 * self.TakeOffDens_kgm3 * (self.TakeOffSpeed_mpsCAS / math.sqrt(2)) ** 2
)
print(f"{self.q_TO_Pa} Pa")
# 77.9 Pa
self.q_APP_Pa = (
0.5
* self.TopOfFinalAppDens_kgm3
* self.StallSpeedinApproachConf_mpsTAS ** 2
)
print(f"{self.q_APP_Pa} Pa")
# 116.2 Pa
def ConstraintPoly(
self, WS_Array: list, TW_Array: list, color: str, color_alfa: float
) -> Any:
WS_Array.append(WS_Array[-1])
TW_Array.append(0)
WS_Array.append(WS_Array[0])
TW_Array.append(0)
WS_Array.append(0)
TW_Array.append(TW_Array[-2])
zp = zip(WS_Array, TW_Array)
print(zp, "zp")
print(type(zp), " type of zp")
# print(list(zp), " list of zp")
pa = matplotlib.patches.Polygon(
list(zp), closed=True, color=color, alpha=color_alfa
)
return pa
# Next, we define a method for setting the appropriate bounds on each constraint diagram:
def PlotSetUp(self, Xmin, Xmax, Ymin, Ymax, Xlabel, Ylabel):
pylab.ylim([Ymin, Ymax])
pylab.xlim([Xmin, Xmax])
pylab.ylabel(Ylabel)
pylab.xlabel(Xlabel)
# Constraints
# With the basic numbers of the current conceptual design iteration in place, we now draw up
# the boundaries of the wing loading W∕S versus thrust to weight ratio T∕W design domain.
# These boundaries are representations of the basic constraints that enforce the adherence of the
# design to the numbers specified in the design brief.
# Constraint 1: Level, Constant Velocity Turn
def constant_velocity_turn_constraint(self):
# First, we compute the thrust to weight ratio required to maintain a specific load factor n in a
# level turn at the cruise altitude
WSlistCVT_Pa = np.linspace(self.Start_Pa, 8500, self.Resolution)
TWlistCVT = []
i = 0
for WS in WSlistCVT_Pa:
TW = self.q_cruise_Pa * (
self.CDmin / WSlistCVT_Pa[i]
+ WSlistCVT_Pa[i] * self.k * (self.n_cvt_ / self.q_cruise_Pa) ** 2
)
TWlistCVT.append(TW)
i = i + 1
WSlistCVT_kgm2 = [x * 0.101971621 for x in WSlistCVT_Pa]
print(WSlistCVT_kgm2[:10])
print(TWlistCVT[:10])
        # The load factor n is the inverse of the cosine of the bank angle (denoted here by theta),
        # so the latter can be calculated as theta = arccos(1 / n); theta, in degrees, equals:
theta_deg = math.acos(1 / self.n_cvt_) * 180 / math.pi
print(f"{theta_deg}\xb0")
# 45∘
ConstVeloTurnPoly = self.ConstraintPoly(
WSlistCVT_kgm2, TWlistCVT, "magenta", 0.1
)
figCVT = plt.figure()
self.PlotSetUp(
0, self.WSmax_kgm2, 0, self.TWmax, "$W/S\,[\,kg/m^2]$", "$T/W\,[\,\,]$"
)
axCVT = figCVT.add_subplot(111)
axCVT.add_patch(ConstVeloTurnPoly)
return {"combined_data": (WSlistCVT_kgm2, TWlistCVT, "magenta", 0.1)}
# Constraint 2: Rate of Climb
def rate_of_climb_constraint(self):
WSlistROC_Pa = np.linspace(self.Start_Pa, 8500, self.Resolution)
TWlistROC = []
i = 0
for WS in WSlistROC_Pa:
TW = (
self.RateOfClimb_mps / self.ClimbSpeed_mpsCAS
+ self.CDmin * self.q_climb_Pa / WSlistROC_Pa[i]
+ self.k * WSlistROC_Pa[i] / self.q_climb_Pa
)
TWlistROC.append(TW)
i = i + 1
WSlistROC_kgm2 = [x * 0.101971621 for x in WSlistROC_Pa]
RateOfClimbPoly = self.ConstraintPoly(WSlistROC_kgm2, TWlistROC, "blue", 0.1)
figROC = plt.figure()
self.PlotSetUp(
0, self.WSmax_kgm2, 0, self.TWmax, "$W/S\,[\,kg/m^2]$", "$T/W\,[\,\,]$"
)
axROC = figROC.add_subplot(111)
axROC.add_patch(RateOfClimbPoly)
return {"combined_data": (WSlistROC_kgm2, TWlistROC, "blue", 0.1)}
# Constraint 3: Take-Off Ground Run Constraint
def take_off_run_constraint(self):
WSlistGR_Pa = np.linspace(self.Start_Pa, 8500, self.Resolution)
TWlistGR = []
i = 0
for WS in WSlistGR_Pa:
TW = (
(self.TakeOffSpeed_mpsCAS ** 2) / (2 * 9.81 * self.GroundRun_m)
+ self.q_TO_Pa * self.CDTO / WSlistGR_Pa[i]
+ self.muTO * (1 - self.q_TO_Pa * self.CLTO / WSlistGR_Pa[i])
)
TWlistGR.append(TW)
i = i + 1
WSlistGR_kgm2 = [x * 0.101971621 for x in WSlistGR_Pa]
TORunPoly = self.ConstraintPoly(WSlistGR_kgm2, TWlistGR, "green", 0.1)
figTOR = plt.figure()
self.PlotSetUp(
0, self.WSmax_kgm2, 0, self.TWmax, "$W/S\,[\,kg/m^2]$", "$T/W\,[\,\,]$"
)
axTOR = figTOR.add_subplot(111)
axTOR.add_patch(TORunPoly)
return {"combined_data": (WSlistGR_kgm2, TWlistGR, "green", 0.1)}
# Desired Cruise Airspeed
def cruise_airspeed_constraint(self):
WSlistCR_Pa = np.linspace(self.Start_Pa, 8500, self.Resolution)
TWlistCR = []
i = 0
for WS in WSlistCR_Pa:
TW = (
self.q_cruise_Pa * self.CDmin * (1.0 / WSlistCR_Pa[i])
+ self.k * (1 / self.q_cruise_Pa) * WSlistCR_Pa[i]
)
TWlistCR.append(TW)
i = i + 1
WSlistCR_kgm2 = [x * 0.101971621 for x in WSlistCR_Pa]
CruisePoly = self.ConstraintPoly(WSlistCR_kgm2, TWlistCR, "red", 0.1)
figCruise = plt.figure()
self.PlotSetUp(
0, self.WSmax_kgm2, 0, self.TWmax, "$W/S\,[\,kg/m^2]$", "$T/W\,[\,\,]$"
)
axCruise = figCruise.add_subplot(111)
axCruise.add_patch(CruisePoly)
return {"combined_data": (WSlistCR_kgm2, TWlistCR, "red", 0.1)}
# Constraint 5: Approach Speed
def approach_speed_constraint(self):
self.WS_APP_Pa = self.q_APP_Pa * self.CLmax_approach
self.WS_APP_kgm2 = self.WS_APP_Pa * 0.101971621
print(f"{self.WS_APP_kgm2} kg/m^2")
# 15.41 kg/m^2
WSlistAPP_kgm2 = [
self.WS_APP_kgm2,
self.WSmax_kgm2,
self.WSmax_kgm2,
self.WS_APP_kgm2,
self.WS_APP_kgm2,
]
TWlistAPP = [0, 0, self.TWmax, self.TWmax, 0]
AppStallPoly = self.ConstraintPoly(WSlistAPP_kgm2, TWlistAPP, "grey", 0.1)
figAPP = plt.figure()
self.PlotSetUp(
0, self.WSmax_kgm2, 0, self.TWmax, "$W/S\,[\,kg/m^2]$", "$T/W\,[\,\,]$"
)
axAPP = figAPP.add_subplot(111)
axAPP.add_patch(AppStallPoly)
return {"combined_data": (WSlistAPP_kgm2, TWlistAPP, "grey", 0.1)}
# Combined Constraint Diagram
def combined_constraint_diagram(self):
figCOMP = plt.figure(figsize=(10, 10))
self.PlotSetUp(
0, self.WSmax_kgm2, 0, self.TWmax, "$W/S\,[\,kg/m^2]$", "$T/W\,[\,\,]$"
)
axCOMP = figCOMP.add_subplot(111)
(
WS_Array,
TW_Array,
color,
color_alfa,
) = self.constant_velocity_turn_constraint()["combined_data"]
velocity_TW_Array = TW_Array
ConstVeloTurnPoly = self.ConstraintPoly(WS_Array, TW_Array, color, color_alfa)
axCOMP.add_patch(ConstVeloTurnPoly)
(WS_Array, TW_Array, color, color_alfa) = self.rate_of_climb_constraint()[
"combined_data"
]
RateOfClimbPoly = self.ConstraintPoly(WS_Array, TW_Array, color, color_alfa)
axCOMP.add_patch(RateOfClimbPoly)
(WS_Array, TW_Array, color, color_alfa) = self.take_off_run_constraint()[
"combined_data"
]
TORunPoly = self.ConstraintPoly(WS_Array, TW_Array, color, color_alfa)
axCOMP.add_patch(TORunPoly)
(WS_Array, TW_Array, color, color_alfa) = self.cruise_airspeed_constraint()[
"combined_data"
]
CruisePoly = self.ConstraintPoly(WS_Array, TW_Array, color, color_alfa)
axCOMP.add_patch(CruisePoly)
(WS_Array, TW_Array, color, color_alfa) = self.approach_speed_constraint()[
"combined_data"
]
AppStallPoly = self.ConstraintPoly(WS_Array, TW_Array, color, color_alfa)
axCOMP.add_patch(AppStallPoly)
axCOMP.legend(["Turn", "Climb", "T/O run", "Cruise", "App Stall"])
        textstr = "\n The feasible aeroplane lives\n in this white space"
axCOMP.text(
0.05,
0.95,
textstr,
transform=axCOMP.transAxes,
fontsize=14,
verticalalignment="top",
)
        # Since propeller and piston engine driven aircraft are normally designed in terms of engine
        # power rather than thrust, we next convert the constraint diagram from thrust to weight
        # ratio into an installed power requirement by specifying a propulsive efficiency eta = 0.6
        # (note that un-supercharged piston engine power varies with altitude, so we also allow for
        # this in the conversion using the Gagg and Ferrar model (see Gudmundsson [15]), with
        # Power_SL = Power / (1.132 * sigma - 0.132), where sigma is the air density ratio):
WSlistCVT_Pa = np.linspace(self.Start_Pa, 8500, self.Resolution)
PlistCVT_kW = []
i = 0
for WS in WSlistCVT_Pa:
TW = self.q_cruise_Pa * (
self.CDmin / WSlistCVT_Pa[i]
+ WSlistCVT_Pa[i] * self.k * (self.n_cvt_ / self.q_cruise_Pa) ** 2
)
P_kW = (
9.81
* TW
* self.DesignGrossWeight_kg
* self.CruisingSpeed_mpsTAS
/ self.PropEff
/ (1.132 * self.CruisingAltDens_kgm3 / self.SeaLevelDens_kgm3 - 0.132)
/ 1000
)
PlistCVT_kW.append(P_kW)
i = i + 1
WSlistCVT_kgm2 = [x * 0.101971621 for x in WSlistCVT_Pa]
WSlistROC_Pa =
|
np.linspace(self.Start_Pa, 8500, self.Resolution)
|
numpy.linspace
|
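# Illustrative note (not part of the dataset row above): the completion simply builds an
# evenly spaced grid of wing-loading values in Pa. A minimal, standalone sketch with
# placeholder values for Start_Pa and Resolution (both values are assumptions here):
#
#     import numpy as np
#     ws_grid_pa = np.linspace(600.0, 8500.0, 2000)   # 2000 samples from 600 Pa to 8500 Pa
#     ws_grid_kgm2 = ws_grid_pa * 0.101971621         # the same grid expressed in kg/m^2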
#
# Author: <NAME> 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_
import sys
import keyword
import re
import inspect
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, gammaln, hyp0f1,
entr, kl_div)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _EPS, _XMAX
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'parameters': """\nParameters\n---------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative density function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative density function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of sf).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = """
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis.
Default is 'mv'.
"""
_doc_default_longsummary = """\
Continuous random variables are defined from a standard form and may
require some shape parameters to complete its specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, freeze the distribution and display the frozen pdf:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'callparams': _doc_default_callparams,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'Continuous', 'Discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
docdict_discrete['example'] = _doc_default_discrete_example
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._ctor_param)
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None,
conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
if kwds:
raise ValueError("Discrete expect does not accept **kwds.")
return self.dist.expect(func, a, loc, lb, ub, conditional)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def valarray(shape, value=nan, typecode=None):
"""Return an array of all value.
"""
out = ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
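# Example of `valarray` (added for illustration; mirrors the doctest style used elsewhere
# in this module):
#
#     >>> valarray((2, 3), value=0.5)
#     array([[ 0.5,  0.5,  0.5],
#            [ 0.5,  0.5,  0.5]])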
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
out = valarray(shape(arrays[0]), value=fillvalue)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*log(x) - a*log(2) - gammaln(a)
return fac + np.nan_to_num(log(hyp0f1(a, nc * x/4.0)))
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
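# Quick cross-check (illustrative only; assumes scipy.stats can be imported in the calling
# code, which is not done inside this module): the helper should agree with the public
# noncentral chi-squared distribution, e.g.
#
#     >>> from scipy import stats
#     >>> np.allclose(_ncx2_cdf(2.5, 3.0, 1.5), stats.ncx2.cdf(2.5, 3.0, 1.5))
#     True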
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = inspect.getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments
shapes_list = []
for meth in meths_to_inspect:
shapes_args = inspect.getargspec(meth)
shapes_list.append(shapes_args.args)
# *args or **kwargs are not allowed w/automatic shapes
# (generic methods have 'self, x' only)
if len(shapes_args.args) > 2:
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
shapes = max(shapes_list, key=lambda x: len(x))
shapes = shapes[2:] # remove self, x,
# make sure the signatures are consistent
# (generic methods have 'self, x' only)
for item in shapes_list:
if len(item) > 2 and item[2:] != shapes:
raise TypeError('Shape arguments are inconsistent.')
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join(str(_) for _ in shapes_vals)
tempdict['vals'] = vals
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Central moments
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
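    # Added note: the default above is inverse-transform sampling -- if U is uniform on
    # (0, 1) and F is the CDF, then Y = F^{-1}(U) has CDF F, so any distribution with a
    # working _ppf automatically gets a (possibly slow) random variate generator.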
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default=1).
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates
If None, rely on self.random_state
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = np.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if np.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (discrete RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# I don't know when or why vecentropy got broken when numargs == 0
# 09.08.2013: is this still relevant? cf check_vecentropy test
# in tests/test_continuous_basic.py
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n'th order non-central moment of distribution.
Parameters
----------
n : int, n>=1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
kwds : keyword arguments, optional
These can include "loc" and "scale", as well as other keyword
arguments relevant for a given distribution.
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
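        # Worked illustration of the expansion above (added for clarity): for n = 2 and
        # X = loc + scale * Y,
        #     E[X**2] = loc**2 + 2 * loc * scale * E[Y] + scale**2 * E[Y**2]
        # which is exactly the k = 0, 1 terms of the loop plus the final `fac**n * val` term,
        # all multiplied by loc**n.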
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Methods
-------
``rvs(<shape(s)>, loc=0, scale=1, size=1)``
random variates
``pdf(x, <shape(s)>, loc=0, scale=1)``
probability density function
``logpdf(x, <shape(s)>, loc=0, scale=1)``
log of the probability density function
``cdf(x, <shape(s)>, loc=0, scale=1)``
cumulative density function
``logcdf(x, <shape(s)>, loc=0, scale=1)``
log of the cumulative density function
``sf(x, <shape(s)>, loc=0, scale=1)``
survival function (1-cdf --- sometimes more accurate)
``logsf(x, <shape(s)>, loc=0, scale=1)``
log of the survival function
``ppf(q, <shape(s)>, loc=0, scale=1)``
percent point function (inverse of cdf --- quantiles)
``isf(q, <shape(s)>, loc=0, scale=1)``
inverse survival function (inverse of sf)
``moment(n, <shape(s)>, loc=0, scale=1)``
non-central n-th moment of the distribution. May not work for array
arguments.
``stats(<shape(s)>, loc=0, scale=1, moments='mv')``
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
``entropy(<shape(s)>, loc=0, scale=1)``
(differential) entropy of the RV.
``fit(data, <shape(s)>, loc=0, scale=1)``
Parameter estimates for generic data
``expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
``median(<shape(s)>, loc=0, scale=1)``
Median of the distribution.
``mean(<shape(s)>, loc=0, scale=1)``
Mean of the distribution.
``std(<shape(s)>, loc=0, scale=1)``
Standard deviation of the distribution.
``var(<shape(s)>, loc=0, scale=1)``
Variance of the distribution.
``interval(alpha, <shape(s)>, loc=0, scale=1)``
Interval that with `alpha` percent probability contains a random
realization of this distribution.
``__call__(<shape(s)>, loc=0, scale=1)``
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in all
    cases when directly called.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1) which will be given clean arguments (in between
a and b) and passing the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc)
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
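# Boundary handling: q == 0 maps to the lower end of the support (a*scale + loc), q == 1 to
# the upper end (b*scale + loc); q outside [0, 1] or invalid arguments keep the badvalue fill.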
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
'''Return negative loglikelihood function
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where theta are the
parameters (including loc and scale).
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
def _penalized_nnlf(self, theta, x):
''' Return negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0)
where theta are the parameters (including loc and scale)
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
loginf = log(_XMAX)
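# loginf serves as a large-but-finite stand-in for an infinitely bad log-likelihood: instead
# of returning inf outright, out-of-support points are counted in Nbad and dropped from x
# before the negative log-likelihood is evaluated.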
if np.isneginf(self.a).all() and np.isinf(self.b).all():
Nbad = 0
else:
cond0 = (x <= self.a) | (self.b <= x)
Nbad = sum(cond0)
if Nbad > 0:
x = argsreduce(~cond0, x)[0]
N = len(x)
return self._nnlf(x, *args) + N*
|
log(scale)
|
numpy.log
|
"""
This file is part of tcg.
"""
from dataclasses import dataclass
from copy import deepcopy
from itertools import product
import numpy as np
@dataclass
class Loc:
"""Make Location object to represent location on grid.
Locations on a 3x3 grid. Locations are represented by their
x and y coordinates. For locations 1,...,9 numbered in the same
order as on a phone keypad, the coordinates are:
1: (0, 2), 2: (1, 2), 3: (2, 2),
4: (0, 1), 5: (1, 1), 6: (2, 1),
7: (0, 0), 8: (1, 0), 9: (2, 0).
Args:
x: An int. The x coordinate on the grid.
y: An int. The y coordinate on the grid.
"""
x: int
y: int
# Define the built-in + operation.
def __add__(self, move):
"""Return location that results from self + move.
Args:
move: A string or an integer.
Return: Loc object.
"""
# Use to switch between string, int, and array representation
# of move.
move_str2arr = {
"up": np.array((0, 1)), "right": np.array((1, 0)),
"down": np.array((0, -1)), "left": np.array((-1, 0))
}
move_int2arr = {
1: np.array((0, 1)), 2: np.array((1, 0)),
3:
|
np.array((0, -1))
|
numpy.array
|
from orangecontrib.recommendation.rating import Learner, Model
from orangecontrib.recommendation.utils.format_data import *
from orangecontrib.recommendation.optimizers import *
from orangecontrib.recommendation.utils.datacaching \
import cache_norms, cache_rows
from collections import defaultdict
import numpy as np
import math
import time
import warnings
__all__ = ['TrustSVDLearner']
__sparse_format__ = lil_matrix
def _compute_extra_terms(Y, W, items_u, trustees_u):
# Implicit information
norm_Iu = math.sqrt(len(items_u))
# TODO: Clean this. Hint: np.nans
y_term = 0
if norm_Iu > 0:
y_sum = np.sum(Y[items_u, :], axis=0)
y_term = y_sum / norm_Iu
# Trust information
w_term = 0
norm_Tu = math.sqrt(len(trustees_u))
if norm_Tu > 0:
w_sum = np.sum(W[trustees_u, :], axis=0)
w_term = w_sum / norm_Tu
return y_term, w_term, norm_Iu, norm_Tu
def _predict(u, j, global_avg, bu, bi, P, Q, Y, W, items_u, trustees_u):
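# TrustSVD-style rating prediction: global average plus user/item biases plus the dot product
# of the item factors Q[j] with the user factors P[u] augmented by the normalized
# implicit-feedback (Y) and trust (W) terms.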
# Compute bias
bias = global_avg + bu[u] + bi[j]
# Compute extra terms
y_term, w_term, norm_Iu, norm_Tu = \
_compute_extra_terms(Y, W, items_u, trustees_u)
# Compute base
p_enhanced = P[u, :] + (y_term + w_term)
base_pred = np.einsum('i,i', p_enhanced, Q[j, :])
# Compute prediction and return extra terms and norms
return bias + base_pred, y_term, w_term, norm_Iu, norm_Tu
def _predict_all_items(u, global_avg, bu, bi, P, Q, Y, W, items_u, trustees_u):
# Compute bias
bias = global_avg + bu[u] + bi
# Compute extra terms
y_term, w_term, _, _ = _compute_extra_terms(Y, W, items_u, trustees_u)
# Compute base
p_enhanced = P[u, :] + (y_term + w_term)
# Compute prediction
base_pred = np.dot(p_enhanced, Q.T)
return bias + base_pred
def _matrix_factorization(ratings, trust, bias, shape, shape_t, num_factors,
num_iter, learning_rate, bias_learning_rate, lmbda,
bias_lmbda, social_lmbda, optimizer, verbose=False,
random_state=None, callback=None):
# Seed the generator
if random_state is not None:
np.random.seed(random_state)
# Get featured matrices dimensions
num_users, num_items = shape
num_users = max(num_users, max(shape_t))
# Initialize low-rank matrices
P = np.random.rand(num_users, num_factors) # User-feature matrix
Q = np.random.rand(num_items, num_factors) # Item-feature matrix
Y = np.random.randn(num_items, num_factors) # Feedback-feature matrix
W = np.random.randn(num_users, num_factors) # Trust-feature matrix
# Compute bias (not needed if it is learnt)
global_avg = bias['globalAvg']
bu = bias['dUsers']
bi = bias['dItems']
# Configure optimizer
update_bu = create_opt(optimizer, bias_learning_rate).update
update_bj = create_opt(optimizer, bias_learning_rate).update
update_pu = create_opt(optimizer, learning_rate).update
update_qj = create_opt(optimizer, learning_rate).update
update_yi = create_opt(optimizer, learning_rate).update
update_wv = create_opt(optimizer, learning_rate).update
# Cache rows
# >>> From 2 days to 30s
users_cache = defaultdict(list)
trusters_cache = defaultdict(list)
# Cache norms (slower than list, but allows vectorization)
# >>> Lists: 6s; Arrays: 12s -> vectorized: 2s
norm_I = np.zeros(num_users) # norms of Iu
norm_U = np.zeros(num_items) # norms of Ui
norm_Tr = np.zeros(num_users) # norms of Tu
norm_Tc = np.zeros(num_users) # norms of Tv
# Precompute transpose (most costly operation)
ratings_T = ratings.T
trust_T = trust.T
# Print information about the verbosity level
if verbose:
print('TrustSVD factorization started.')
print('\tLevel of verbosity: ' + str(int(verbose)))
print('\t\t- Verbosity = 1\t->\t[time/iter]')
print('\t\t- Verbosity = 2\t->\t[time/iter, loss]')
print('')
# Catch warnings
with warnings.catch_warnings():
# Turn matching warnings into exceptions
warnings.filterwarnings('error')
try:
# Factorize matrix using SGD
for step in range(num_iter):
if verbose:
start = time.time()
print('- Step: %d' % (step + 1))
# Send information about the process
if callback:
callback(step + 1)
# Optimize rating prediction
for u, j in zip(*ratings.nonzero()):
# Store lists in cache
items_u = cache_rows(ratings, u, users_cache)
trustees_u = cache_rows(trust, u, trusters_cache)
# No need to cast for CV due to max(num_users, shape_t[0])
# Prediction and error
ruj_pred, y_term, w_term, norm_Iu, norm_Tu = \
_predict(u, j, global_avg, bu, bi, P, Q, Y, W, items_u,
trustees_u)
euj = ruj_pred - ratings[u, j]
# Store/Compute norms
norm_I[u] = norm_Iu
norm_Tr[u] = norm_Tu
norm_Uj = cache_norms(ratings_T, j, norm_U)
# Gradient Bu
reg_bu = (bias_lmbda/norm_Iu) * bu[u] if norm_Iu > 0 else 0
dx_bu = euj + reg_bu
# Gradient Bi
reg_bi = (bias_lmbda/norm_Uj) * bi[j] if norm_Uj > 0 else 0
dx_bi = euj + reg_bi
# Update the gradients Bu, Bi at the same time
update_bu(dx_bu, bu, u)
update_bj(dx_bi, bi, j)
# Gradient P
reg_p = (lmbda/norm_Iu) * P[u, :] if norm_Iu > 0 else 0
dx_pu = euj * Q[j, :] + reg_p
update_pu(dx_pu, P, u)
# Gradient Q
reg_q = (lmbda/norm_Uj) * Q[j, :] if norm_Uj > 0 else 0
dx_qi = euj * (P[u, :] + y_term + w_term) + reg_q
update_qj(dx_qi, Q, j)
# Gradient Y
if norm_Iu > 0:
tempY1 = (euj/norm_Iu) * Q[j, :]
norms = cache_norms(ratings_T, items_u, norm_U)
norm_b = (lmbda/np.atleast_2d(norms))
dx_yi = tempY1 +
|
np.multiply(norm_b.T, Y[items_u, :])
|
numpy.multiply
|
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import datetime
class SimpleNet(nn.Module):
def __init__(self, name=None, created_time=None):
super(SimpleNet, self).__init__()
self.created_time = created_time
self.name=name
def visualize(self, vis, epoch, acc, loss=None, eid='main', is_poisoned=False, name=None):
if name is None:
name = self.name + '_poisoned' if is_poisoned else self.name
vis.line(X=np.array([epoch]), Y=np.array([acc]), name=name, win='vacc_{0}'.format(self.created_time), env=eid,
update='append' if vis.win_exists('vacc_{0}'.format(self.created_time), env=eid) else None,
opts=dict(showlegend=True, title='Accuracy_{0}'.format(self.created_time),
width=700, height=400))
if loss is not None:
vis.line(X=np.array([epoch]), Y=
|
np.array([loss])
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 11 13:15:44 2021
@author: <NAME>, PhD Scholar, EEE Dept., IIT Guwahati
@reference:
<NAME>, <NAME>, <NAME>, and <NAME>, “A unified audio
analysis framework for movie genre classification using movie trailers,”
in Proc. of the International Conference on Emerging Smart Computing and
Informatics (ESCI). IEEE, 2021, pp. 510–515.
Implementation details:
1. 34-dimensional features from waveform:
ZCR, energy, the entropy of energy, spectral centroid, spectral spread,
spectral entropy, spectral flux, spectral roll-off, 13-MFCC, 12-chroma,
chroma deviation
2. 34-dimensional features from differenced waveform:
pyAudioAnalysis
3. 50ms/25ms frame size and shift is used
4. Total of 200×68 feature matrix is obtained from 5s non-overlapping
chunks
5. Every chunk is represented by a mean over 200 frames to obtain 1×68
dimensional feature vectors
6. First and last chunk of every trailer is ignored
7. The chunk representations are segmented using K-means clustering with
10 clusters using 400 trailers. The same trailers are not used for training
and validation
8. For every chunk, inverse of euclidian distance from each centroid is
computed
9. The inverse distances are appended to the chunk features to form
78-dimension feature vector
10. Mean over the feature vectors of all chunks in a trailer is computed
to obtain 1×78 dimension feature vector for every trailer
11. The data is normalized in the range of 0 to 1
12. Classified with AFA-net classifier
13. Training/testing split is 85:15
14. Learning rate of 0.001. Trained for 200 epochs
15. Metric: AU(PRC)
16. Genres: Action, Sci-Fi, Comedy, Horror, Romance
"""
import numpy as np
import os
import datetime
import lib.misc as misc
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense, Input, Dropout, Activation, BatchNormalization
from tensorflow.keras.models import Model
import tensorflow as tf
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau
import time
from sklearn.metrics import precision_recall_curve, auc, average_precision_score
import sys
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from lib.pyAudioAnalysis import ShortTermFeatures as aF
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances
import librosa
from tensorflow.keras.initializers import he_uniform
from tensorflow.keras.regularizers import l2
from tensorflow.keras.metrics import AUC
def start_GPU_session():
gpu_options = tf.compat.v1.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.4)
config = tf.compat.v1.ConfigProto(
device_count={'GPU': 1 , 'CPU': 1},
gpu_options=gpu_options,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1,
)
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)
def reset_TF_session():
tf.compat.v1.keras.backend.clear_session()
def AFA_net_model(output_dim):
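# 78-dim input -> Dense(256) -> Dense(256) -> Dense(64) -> Dense(32), each followed by batch
# normalization (with dropout after the 2nd and 4th blocks), ending in a sigmoid layer for
# multi-label genre prediction; compiled with Adam (lr=0.001) and a PR-AUC metric.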
# create model
input_layer = Input((78,))
x = Dense(256, input_dim=(78,))(input_layer)
x = BatchNormalization(axis=-1)(x)
x = Dense(256)(x)
x = BatchNormalization(axis=-1)(x)
x = Dropout(0.4)(x)
x = Dense(64)(x)
x = BatchNormalization(axis=-1)(x)
x = Dense(32)(x)
x = BatchNormalization(axis=-1)(x)
x = Dropout(0.2)(x)
output_layer = Dense(output_dim, activation='sigmoid')(x)
model = Model(input_layer, output_layer)
learning_rate = 0.001
opt = optimizers.Adam(lr=learning_rate)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[AUC(curve='PR')])
return model, learning_rate
def train_model(PARAMS, data_dict, model, weightFile, logFile):
csv_logger = CSVLogger(logFile, append=True)
mcp = ModelCheckpoint(weightFile, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='min', save_freq='epoch')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=0.000001, verbose=1, mode='min', min_delta=0.01)
trainingTimeTaken = 0
start = time.process_time()
train_data = data_dict['train_data']
train_label = data_dict['train_label']
val_data = data_dict['val_data']
val_label = data_dict['val_label']
print('train data: ', np.shape(train_data), np.shape(train_label))
print('genre_list: ', PARAMS['genre_list'])
History = model.fit(
x=train_data,
y=train_label,
epochs=PARAMS['epochs'],
batch_size=PARAMS['batch_size'],
verbose=1,
validation_data=(val_data, val_label),
callbacks=[csv_logger, mcp, reduce_lr],
shuffle=True,
)
trainingTimeTaken = time.process_time() - start
print('Time taken for model training: ',trainingTimeTaken)
return model, trainingTimeTaken, History
def perform_training(PARAMS, data_dict):
modelName = '@'.join(PARAMS['modelName'].split('.')[:-1]) + '.' + PARAMS['modelName'].split('.')[-1]
weightFile = modelName.split('.')[0] + '.h5'
architechtureFile = modelName.split('.')[0] + '.json'
summaryFile = modelName.split('.')[0] + '_summary.txt'
paramFile = modelName.split('.')[0] + '_params.npz'
logFile = modelName.split('.')[0] + '_log.csv'
modelName = '.'.join(modelName.split('@'))
weightFile = '.'.join(weightFile.split('@'))
architechtureFile = '.'.join(architechtureFile.split('@'))
summaryFile = '.'.join(summaryFile.split('@'))
paramFile = '.'.join(paramFile.split('@'))
logFile = '.'.join(logFile.split('@'))
input_dim = np.shape(data_dict['train_data'])[1]
print('Weight file: ', weightFile, input_dim)
if not os.path.exists(paramFile):
model, learning_rate = AFA_net_model(len(PARAMS['genre_list']))
misc.print_model_summary(summaryFile, model)
print(model.summary())
print('Architecture of Sharma et al., IEEE ESCI, 2021')
model, trainingTimeTaken, History = train_model(PARAMS, data_dict, model, weightFile, logFile)
if PARAMS['save_flag']:
with open(architechtureFile, 'w') as f:
f.write(model.to_json())
np.savez(paramFile, lr=str(learning_rate), TTT=str(trainingTimeTaken))
trainingTimeTaken = float(np.load(paramFile)['TTT'])
model, learning_rate = AFA_net_model(len(PARAMS['genre_list']))
model.load_weights(weightFile)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[AUC(curve='PR')])
print('Sharma et al. model exists! Loaded. Training time required=',trainingTimeTaken)
# # print(model.summary())
Train_Params = {
'model': model,
'trainingTimeTaken': trainingTimeTaken,
'learning_rate': learning_rate,
'paramFile': paramFile,
'architechtureFile': architechtureFile,
'weightFile': weightFile,
}
return Train_Params
def test_model(PARAMS, test_data, test_label, Train_Params):
start = time.process_time()
# loss, performance
loss, auc_measure = Train_Params['model'].evaluate(x=test_data, y=test_label)
print('evaluation: ', loss, auc_measure)
Predictions = Train_Params['model'].predict(test_data)
print('Trailer_pred: ', np.shape(Predictions), np.shape(test_label))
P_curve = {}
R_curve = {}
T = {}
AUC_values = {}
Precision = {}
Recall = {}
ConfMat = {}
F1_score = {}
Threshold = {}
macro_avg_auc = 0
macro_weighted_avg_auc = 0
for genre_i in PARAMS['genre_list'].keys():
# print(genre_i, PARAMS['genre_list'][genre_i])
pred = Predictions[:,PARAMS['genre_list'][genre_i]]
# print(genre_i, ' pred: ', np.shape(pred))
gt = test_label[:,PARAMS['genre_list'][genre_i]]
precision_curve, recall_curve, threshold = precision_recall_curve(gt, pred)
fscore_curve = np.divide(2*np.multiply(precision_curve, recall_curve), np.add(precision_curve, recall_curve)+1e-10)
Precision[genre_i] = np.round(np.mean(precision_curve),4)
Recall[genre_i] = np.round(np.mean(recall_curve),4)
F1_score[genre_i] = np.round(np.mean(fscore_curve),4)
P_curve[genre_i] = precision_curve
R_curve[genre_i] = recall_curve
AUC_values[genre_i] = np.round(auc(recall_curve, precision_curve)*100,2)
# print(genre_i, ' AUC: ', AUC_values[genre_i])
macro_avg_auc += AUC_values[genre_i]
macro_weighted_avg_auc += PARAMS['genre_freq'][genre_i]*AUC_values[genre_i]
micro_avg_precision_curve, micro_avg_recall_curve, threshold = precision_recall_curve(test_label.ravel(), Predictions.ravel())
P_curve['micro_avg'] = micro_avg_precision_curve
R_curve['micro_avg'] = micro_avg_recall_curve
AUC_values['micro_avg'] = np.round(auc(micro_avg_recall_curve, micro_avg_precision_curve)*100,2)
AUC_values['macro_avg'] = np.round(macro_avg_auc/len(PARAMS['genre_list']),2)
AUC_values['macro_avg_weighted'] = np.round(macro_weighted_avg_auc,2)
print('AUC (macro-avg): ', AUC_values['macro_avg'])
print('AUC (micro-avg): ', AUC_values['micro_avg'])
print('AUC (macro-avg weighted): ', AUC_values['macro_avg_weighted'])
AP = {}
AP['macro'] = np.round(average_precision_score(y_true=test_label, y_score=Predictions, average='macro')*100,2)
AP['micro'] = np.round(average_precision_score(y_true=test_label, y_score=Predictions, average='micro')*100,2)
AP['samples'] = np.round(average_precision_score(y_true=test_label, y_score=Predictions, average='samples')*100,2)
AP['weighted'] = np.round(average_precision_score(y_true=test_label, y_score=Predictions, average='weighted')*100,2)
print('AP (macro): ', AP['macro'])
print('AP (micro): ', AP['micro'])
print('AP (samples): ', AP['samples'])
print('AP (weighted): ', AP['weighted'])
testingTimeTaken = time.process_time() - start
Test_Params = {
'testingTimeTaken': testingTimeTaken,
'P_curve': P_curve,
'R_curve': R_curve,
'threshold': T,
'AUC': AUC_values,
'Predictions': Predictions,
'test_label': test_label,
'Precision':Precision,
'Recall':Recall,
'ConfMat':ConfMat,
'F1_score':F1_score,
'Threshold': Threshold,
'average_precision': AP,
}
return Test_Params
def get_train_test_files(PARAMS):
train_files = {}
test_files = {}
for clNum in PARAMS['classes'].keys():
class_name = PARAMS['classes'][clNum]
train_files[class_name] = []
test_files[class_name] = []
for i in range(PARAMS['CV_folds']):
files = PARAMS['cv_file_list'][class_name]['fold'+str(i)]
if PARAMS['fold']==i:
test_files[class_name].extend(files)
else:
train_files[class_name].extend(files)
return train_files, test_files
def compute_features(PARAMS, fName):
Xin, fs = librosa.load(PARAMS['audio_path']+'/'+fName.split('/')[-1], mono=True, sr=None)
duration = np.round(len(Xin) / float(fs),2)
print(fName, ' Xin: ', np.shape(Xin), ' fs=', fs, f'duration = {duration} seconds')
Xin -= np.mean(Xin)
Xin /= (np.max(Xin)-np.min(Xin))
frameSize = int(PARAMS['Tw']*fs/1000) # frame size in samples
frameShift = int(PARAMS['Ts']*fs/1000) # frame shift in samples
chunk_size = 5*fs # 5s chunk size in samples
# print(len(Xin), chunk_size)
FV = np.empty([])
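# Walk the waveform in non-overlapping 5 s chunks, skipping the first and last chunk as
# described in the module docstring; each chunk is then reduced to the mean over its
# short-time frames.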
for chunk_start in range(chunk_size,len(Xin),chunk_size): # Starting from 2nd chunk
chunk_end = np.min([chunk_start+chunk_size, len(Xin)])
if chunk_end==len(Xin): # ignoring last chunk, as described in paper
continue
Xin_chunk = Xin[chunk_start:chunk_end]
[chunk_fv, feat_names] = aF.feature_extraction(Xin_chunk, fs, frameSize, frameShift)
mean_chunk_fv =
|
np.mean(chunk_fv, axis=1)
|
numpy.mean
|
import numpy
import datetime
import math
import time
import scipy.integrate
def powerGeneration(latitude, velocity, start_time, end_time, cloudy):
cloudy = cloudy/100
deltaX = 60 #minutes, defaulted to sample every hour
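# Sample the instantaneous array power every deltaX minutes across the requested window,
# integrate the samples with Simpson's rule, then derate the total for cloud cover.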
year = start_time.timetuple()[0]
month = start_time.timetuple()[1]
day = start_time.timetuple()[2]
startHour = start_time.timetuple()[3]
startMinute = start_time.timetuple()[4]
endHour = end_time.timetuple()[3]
endMinute = end_time.timetuple()[4]
minutes = (endHour - startHour)*60 + (endMinute - startMinute)
samples = int(minutes // deltaX)
fringe = minutes % deltaX
yValues = []
xValues = list(range(samples))
for i in range(samples):
yValues.append(totalPower(latitude, datetime.datetime(year, month, day, startHour+int((i*(deltaX/60.0))), (startMinute+i*deltaX) % 60).timetuple()))
#fringe
yValues.append(totalPower(latitude, datetime.datetime(year, month, day, endHour, endMinute).timetuple()))
xValues.append(samples - (fringe/60.0))
result = scipy.integrate.simps(yValues, xValues)
result *= (1 - .65*cloudy**2) #I_effective = I_sol * (1-0.65* c^2)
return result
def totalPower(latitude, timeTuple):
global shell_normal
global shell_faceO
global shell_vertO
matrixImport()
month = timeTuple[1]
day = timeTuple[2]
hour = timeTuple[3]
heading = 85 # Moving SSE
shell_heading = heading
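# Facet azimuths come from the horizontal components of each surface normal (offset by the
# vehicle heading), tilts from the normal's z-component, and facet areas from half the
# magnitude of the cross product of two triangle edge vectors.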
shell_azimuths = 180/math.pi*numpy.arctan2(-shell_normal[:,1] ,shell_normal[:,0]) + heading
shell_tilts = 90 - 180/math.pi*numpy.arcsin(shell_normal[:,2])
a = shell_vertO[numpy.int_(shell_faceO[:,0]),:]
b = shell_vertO[numpy.int_(shell_faceO[:,1]),:]
c = shell_vertO[numpy.int_(shell_faceO[:,2]),:]
v1 = b - a
v2 = c - a
temp = numpy.cross(v1,v2)**2
temp = numpy.sum(temp, 1)
shell_Area = 0.5*temp**0.5
#shell_area = numpy.sum(shell_Area)
shell_flux = incident_radiation(month, day, hour, shell_tilts, shell_azimuths, latitude)
shell_power =
|
numpy.dot(shell_flux,shell_Area)
|
numpy.dot
|
"""
MCMC for the Cauchy distribution
--------------------------------
Figure 5.22
Markov chain monte carlo (MCMC) estimates of the posterior pdf for parameters
describing the Cauchy distribution. The data are the same as those used in
figure 5.10: the dashed curves in the top-right panel show the results of
direct computation on a regular grid from that diagram. The solid curves are
the corresponding MCMC estimates using 10,000 sample points. The left and the
bottom panels show marginalized distributions.
"""
# Author: <NAME> (adapted to PyMC3 by <NAME>)
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import cauchy
from matplotlib import pyplot as plt
from astroML.plotting.mcmc import convert_to_stdev
import pymc3 as pm
# ----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(xi, sigma, mu):
"""Equation 5.74: cauchy likelihood"""
xi = np.asarray(xi)
n = xi.size
shape = np.broadcast(sigma, mu).shape
xi = xi.reshape(xi.shape + tuple([1 for s in shape]))
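# Add trailing singleton axes to xi so that the sum over data points below broadcasts
# against a (sigma, mu) parameter grid of arbitrary shape.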
return ((n - 1) * np.log(sigma)
- np.sum(np.log(sigma ** 2 + (xi - mu) ** 2), 0))
# ----------------------------------------------------------------------
# Draw the sample from a Cauchy distribution
np.random.seed(44)
mu_0 = 0
gamma_0 = 2
xi = cauchy(mu_0, gamma_0).rvs(10)
# ----------------------------------------------------------------------
# Set up and run MCMC:
with pm.Model():
mu = pm.Uniform('mu', -5, 5)
log_gamma = pm.Uniform('log_gamma', -10, 10)
# set up our observed variable x
x = pm.Cauchy('x', mu, np.exp(log_gamma), observed=xi)
trace = pm.sample(draws=12000, tune=1000, cores=1)
# compute histogram of results to plot below
L_MCMC, mu_bins, gamma_bins = np.histogram2d(trace['mu'],
np.exp(trace['log_gamma']),
bins=(np.linspace(-5, 5, 41),
np.linspace(0, 5, 41)))
L_MCMC[L_MCMC == 0] = 1E-16 # prevents zero-division errors
# ----------------------------------------------------------------------
# Compute likelihood analytically for comparison
mu = np.linspace(-5, 5, 70)
gamma = np.linspace(0.1, 5, 70)
logL = cauchy_logL(xi, gamma[:, np.newaxis], mu)
logL -= logL.max()
p_mu = np.exp(logL).sum(0)
p_mu /= p_mu.sum() * (mu[1] - mu[0])
p_gamma = np.exp(logL).sum(1)
p_gamma /= p_gamma.sum() * (gamma[1] - gamma[0])
hist_mu, bins_mu =
|
np.histogram(trace['mu'], bins=mu_bins, density=True)
|
numpy.histogram
|
# -*- coding: UTF-8 -*-
import torch
from torch import nn
from torch.nn import Parameter
from torch.nn import functional as F
import numpy as np
from models.BaseModel import SequentialModel
class SRGNN(SequentialModel):
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--num_layers', type=int, default=1,
help='Number of GNN propagation layers.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
self.emb_size = args.emb_size
self.num_layers = args.num_layers
super().__init__(args, corpus)
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size, padding_idx=0)
self.linear1 = nn.Linear(self.emb_size, self.emb_size, bias=True)
self.linear2 = nn.Linear(self.emb_size, self.emb_size, bias=True)
self.linear3 = nn.Linear(self.emb_size, 1, bias=False)
self.linear_transform = nn.Linear(self.emb_size * 2, self.emb_size, bias=True)
self.gnn = GNN(self.emb_size, self.num_layers)
def actions_before_train(self):
std = 1.0 / np.sqrt(self.emb_size)
for weight in self.parameters():
weight.data.uniform_(-std, std)
def _get_slice(self, item_seq):
items, n_node, A, alias_inputs = [], [], [], []
max_n_node = item_seq.size(1)
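# Build a session graph per sequence: `node` collects the unique (padded) item ids, `items`
# pads that node list to max_n_node, `u_A` is the adjacency filled from consecutive
# transitions in the sequence, and `alias_inputs` is meant to map sequence positions back to
# node indices.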
item_seq = item_seq.cpu().numpy()
for u_input in item_seq:
node = np.unique(u_input)
items.append(node.tolist() + [0] * (max_n_node - len(node)))
u_A = np.zeros((max_n_node, max_n_node))
for i in np.arange(len(u_input) - 1):
if u_input[i + 1] == 0:
break
u =
|
np.where(node == u_input[i])
|
numpy.where
|
from __future__ import print_function, absolute_import
import os
import numpy as np
import math
import cv2
import torch
from matplotlib import cm
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def im_to_numpy(img):
img = to_numpy(img)
img = np.transpose(img, (1, 2, 0)) # H*W*C
return img
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) # C*H*W
img = to_torch(img).float()
return img
def resize(img, owidth, oheight):
img = im_to_numpy(img)
img = cv2.resize( img, (owidth, oheight) )
img = im_to_torch(img)
return img
def load_image(img_path):
# H x W x C => C x H x W
img = cv2.imread(img_path)
# print(img_path)
img = img.astype(np.float32)
img = img / 255.0
img = img[:,:,::-1]
img = img.copy()
return im_to_torch(img)
def color_normalize(x, mean, std):
if x.size(0) == 1:
x = x.repeat(3, 1, 1)
for t, m, s in zip(x, mean, std):
t.sub_(m)
t.div_(s)
return x
import time
######################################################################
def try_np_load(p):
try:
return np.load(p)
except:
return None
def make_lbl_set(lbls):
print(lbls.shape)
t00 = time.time()
lbl_set = [
|
np.zeros(3)
|
numpy.zeros
|
#
debug_output = False
rosDebug = False
import numpy as np
import os, sys
try:
import gwtools
import gwsurrogate as gws
print(" gwsurrogate: ", gws.__file__)
print(" gwtools: ", gwtools.__file__)
except:
print(" - no gwsurrogate - (almost everything from ROMWaveformManager will hard fail if you use it) ")
try:
import NRSur7dq2
print(" NRSur7dq2: ", NRSur7dq2.__version__, NRSur7dq2.__file__)
except:
print(" - no NRSur7dq2 - ")
import lalsimulation as lalsim
import lal
from .. import lalsimutils
try:
import LALHybrid
except:
print(" - no hybridization - ")
from scipy.interpolate import interp1d
from scipy.linalg import inv
from scipy.interpolate import splrep as _splrep
import pickle
import h5py
try:
dirBaseFiles =os.environ["GW_SURROGATE"] # surrogate base directory
except:
print( " ==> WARNING: GW_SURROGATE environment variable is not set <== ")
print( " Only surrogates with direct implementation are available (NRSur7dq2) ")
print(" ROMWaveformManager: ILE version")
#default_interpolation_kind = 'quadratic' # spline interpolation # very slow!
default_interpolation_kind = 'linear' # robust, fast
internal_ParametersAvailable ={}
# For each interesting simulation, store the definitions in a file
# Use 'execfile' to load those definitions now
MsunInSec = lal.MSUN_SI*lal.G_SI/lal.C_SI**3
#execfile(dirBaseFiles + "/"+"Sequence-GT-Aligned-UnequalMass/interface.py")
def myzero(arg):
return 0
def RangeWrap1d(bound, val,fn):
"""
RangeWrap1d: Uses np.piecewise to construct a piecewise function which is =fn inside the boundary, and 0 outside.
SHOULD be syntactic sugar, but depending on the python version the language needed to implement this changes.
"""
# # return (lambda x: fn(x) if (x>bound[0] and x<bound[1]) else val)
# # WARNING: piecewise is much faster, but will fail for numpy versions less than 1.8-ish :http://stackoverflow.com/questions/20800324/scipy-pchipinterpolator-error-array-cannot-be-safely-cast-to-required-type
# # Unfortunately that is the version LIGO uses on their clusters.
return (lambda x: np.piecewise( x, [
np.logical_and(x> bound[0], x<bound[1]),
np.logical_not(np.logical_and(x> bound[0], x<bound[1]))
], [fn, myzero]))
# return (lambda x: np.where( np.logical_and(x> bound[0], x<bound[1]), fn(x),0)) # vectorized , but does not protect the call
def ModeToString(pair):
return "l"+str(pair[0])+"_m"+str(pair[1])
def CreateCompatibleComplexOverlap(hlmf,**kwargs):
modes = list(hlmf.keys())
hbase = hlmf[modes[0]]
deltaF = hbase.deltaF
fNyq = np.max(lalsimutils.evaluate_fvals(hbase))
if debug_output:
# for key, value in kwargs.items():
# print (key, value)
print(kwargs)
print("dF, fNyq, npts = ",deltaF, fNyq, len(hbase.data.data))
IP = lalsimutils.ComplexOverlap(fNyq=fNyq, deltaF=deltaF, **kwargs)
return IP
def CreateCompatibleComplexIP(hlmf,**kwargs):
"""
Creates complex IP (no maximization)
"""
modes = list(hlmf.keys())
hbase = hlmf[modes[0]]
deltaF = hbase.deltaF
fNyq = np.max(lalsimutils.evaluate_fvals(hbase))
if debug_output:
# for key, value in kwargs.items():
# print (key, value)
print(kwargs)
print("dF, fNyq, npts = ",deltaF, fNyq, len(hbase.data.data))
IP = lalsimutils.ComplexIP(fNyq=fNyq, deltaF=deltaF, **kwargs)
return IP
class NRError(Exception):
"""Base class for this module"""
pass
class NRNoSimulation(NRError):
"""Nothing"""
def __init__(self,expr,msg):
print("No known simulation ", expr, msg)
pass
def SurrogateDimensionlessBasisFunction(sur,k):
def w(t):
return sur.amp_fit_func(k,t)*np.exp(1j*sur.phase_fit_func(k,t))
return w
def sur_identity(t,hp,hc):
return t, hp, hc
def sur_conj(t,hp,hc):
return t, hp, -hc
def ConvertWPtoSurrogateParams(P,**kwargs):
"""
Takes P, returns arguments of the form usually used in gwsurrogate.
(currently, just returns 1/q = P.m1/P.m1, the mass ratio parameter usually accepted)
"""
q = P.m2/P.m1
# return {"q":1./q}
return 1./q
def ConvertWPtoSurrogateParamsAligned(P,**kwargs):
"""
Takes P, returns arguments of the form used in gwsurrogate for a nonprecessing binary
"""
q = P.m2/P.m1
chi1 = np.array([0.0,0.0,P.s1z])
chi2 = np.array([0.0,0.0,P.s2z])
mtot=P.m1+P.m2
tidal = {'Lambda1': P.lambda1,'Lambda2': P.lambda2}
dist_mpc = P.dist/1e6/lal.PC_SI
val =[1./q, chi1, chi2, mtot, dist_mpc, tidal]
return val
def ConvertWPtoSurrogateParamsPrecessing(P,**kwargs):
"""
Takes P, returns arguments of the form usually used in gwsurrogate.
(currently, just returns 1/q = P.m1/P.m1, the mass ratio parameter usually accepted)
"""
q = P.m2/P.m1
chi1 = P.extract_param('chi1')
theta1=phi1 =0
if np.abs(chi1)>1e-5:
theta1 = np.arccos( P.s1z/chi1)
phi1 = np.arctan2(P.s1x,P.s1y)
# return {"q":1./q, "chi1": chi1,"theta1":theta1,"phi1":phi1,"chi2z":P.s2z}
val =np.array([1./q, chi1,theta1,phi1,P.s2z])
return val
def ConvertWPtoSurrogateParamsPrecessingFull(P,**kwargs):
"""
Takes P, returns arguments of the form usually used in gwsurrogate.
(currently, just returns 1/q = P.m1/P.m1, the mass ratio parameter usually accepted)
"""
q = P.m2/P.m1
val =[1./q, np.array([P.s1x,P.s1y,P.s1z]), np.array([P.s2x,P.s2y,P.s2z]) ]
return val
class WaveformModeCatalog:
"""
Class containing ROM model.
API is currently **unsafe** for precessing binaries (=ambiguous reference time)
Reference for underlying notation: Eq. (30) in http://arxiv.org/pdf/1308.3565v2
group
param
lmax # specifies modes to attempt to load. Not guaranteed to/required to find all.
strain_basis_functions_dimensionless # don't recall
mode_list_to_load # ability to constrain the mode list. Passed directly to low-level code
build_fourier_time_window # window for FT. NOT USED
reflection_symmetric # reflection symmetry used
max_nbasis_per_mode # constrain basis size
coord_names_internal # coordinate names used by the basis. FUTURE
"""
def __init__(self, group ,param, lmax=2,
strain_basis_functions_dimensionless=None,
mode_list_to_load=None,build_fourier_time_window=1000,reflection_symmetric=True,max_nbasis_per_mode=None,coord_names_internal=['q']):
self.group = group
self.param = param
self.deltaToverM =0
self.lmax =lmax
self.coord_names=coord_names_internal
self.fOrbitLower =0. # Used to clean results. Based on the phase of the 22 mode
self.fMinMode ={}
self.sur_dict = {}
self.post_dict = {}
self.post_dict_complex ={}
self.post_dict_complex_coef ={}
self.parameter_convert = {}
self.single_mode_sur = True
self.nbasis_per_mode ={} # number of basis functions
self.reflection_symmetric = reflection_symmetric
lm_list=None
lm_list = []
if rosDebug:
print(" WARNING: Using a restricted mode set requires a custom modification to gwsurrogate ")
Lmax =lmax
for l in np.arange(2,Lmax+1):
for m in np.arange(-l,l+1):
if m<0 and reflection_symmetric:
continue
lm_list.append( (l,m))
if not(mode_list_to_load is None):
lm_list = mode_list_to_load # overrride
if rosDebug:
print(" ROMWaveformManager: Loading restricted mode set ", lm_list)
my_converter = ConvertWPtoSurrogateParams
if 'NRSur4d' in param:
print(" GENERATING ROM WAVEFORM WITH SPIN PARAMETERS ")
my_converter = ConvertWPtoSurrogateParamsPrecessing
reflection_symmetric=False
if 'NRHyb' in param and 'Tidal' not in param:
print(" GENERATING hybrid ROM WAVEFORM WITH ALIGNED SPIN PARAMETERS ")
my_converter = ConvertWPtoSurrogateParamsAligned
self.single_mode_sur=False
if 'Tidal' in param:
print(" GENERATING hybrid ROM WAVEFORM WITH ALIGNED SPIN AND TIDAL PARAMETERS ")
my_converter = ConvertWPtoSurrogateParamsAligned
self.single_mode_sur=False
if 'NRSur7d' in param:
if rosDebug:
print(" GENERATING ROM WAVEFORM WITH FULL SPIN PARAMETERS ")
my_converter = ConvertWPtoSurrogateParamsPrecessingFull
self.single_mode_sur=False
reflection_symmetric=False
# PENDING: General-purpose interface, based on the coordinate string specified. SHOULD look up these names from the surrogate!
def convert_coords(P):
vals_out = np.zeros(len(coord_names_internal))
for indx in np.arange(len(coord_names_internal)):
vals_out[indx] = P.extract_param( coord_names_internal[indx])
if coord_names_internal[indx] == 'q':
vals_out[indx] = 1./vals_out[indx]
return vals_out
raw_modes =[]
if self.single_mode_sur: #(not 'NRSur7d' in param) and (not 'NRHyb' in param):
self.sur = gws.EvaluateSurrogate(dirBaseFiles +'/'+group+param,use_orbital_plane_symmetry=reflection_symmetric, ell_m=None) # lm_list) # straight up filename. MODIFY to change to use negative modes
# Modified surrogate import call to load *all* modes all the time
raw_modes = self.sur.all_model_modes()
self.modes_available=[]
elif 'NRHybSur' in param:
if 'Tidal' in param:
self.sur = gws.LoadSurrogate(dirBaseFiles +'/'+group+'/NRHybSur3dq8.h5',surrogate_name_spliced=param) # get the dimensionless surrogate file?
else:
self.sur = gws.LoadSurrogate(dirBaseFiles +'/'+group+param) # get the dimensionless surrogate file?
raw_modes = self.sur._sur_dimless.mode_list # raw modes
reflection_symmetric = True
self.modes_available=[]
# self.modes_available=[(2, 0), (2, 1), (2,-1), (2, 2),(2,-2), (3, 0), (3, 1),(3,-1), (3, 2),(3,-2), (3, 3),(3,-3), (4, 2),(4,-2), (4, 3),(4,-3), (4, 4), (4,-4),(5, 5), (5,-5)] # see sur.mode_list
t = self.sur._sur_dimless.domain
self.ToverMmin = t.min()
self.ToverMmax = t.max()
self.ToverM_peak=0 # Need to figure out where this is? Let's assume it is zero to make my life easier
# for mode in self.modes_available:
# # Not used, but populate anyways
# self.post_dict[mode] = sur_identity
# self.post_dict_complex[mode] = lambda x: x # to mode
# self.post_dict_complex_coef[mode] = lambda x:x # to coefficients.
# self.parameter_convert[mode] = my_converter # ConvertWPtoSurrogateParams # default conversion routine
# return
elif 'NRSur7dq4' in param:
print(param)
self.sur = gws.LoadSurrogate(dirBaseFiles +'/'+group+param) # get the dimensinoless surrogate file?
raw_modes = self.sur._sur_dimless.mode_list # raw modes
reflection_symmetric = False
self.modes_available=[]
print(raw_modes)
self.modes_available=raw_modes
t = self.sur._sur_dimless.t_coorb
self.ToverMmin = t.min()
self.ToverMmax = t.max()
self.ToverM_peak=0 # Need to figure out where this is? Let's assume it is zero to make my life easier
for mode in raw_modes:
# # Not used, but populate anyways
self.post_dict[mode] = sur_identity
self.post_dict_complex[mode] = lambda x: x # to mode
self.post_dict_complex_coef[mode] = lambda x:x # to coefficients.
self.parameter_convert[mode] = my_converter # ConvertWPtoSurrogateParams # default conversion routine
return
else:
self.sur = NRSur7dq2.NRSurrogate7dq2()
reflection_symmetric = False
self.modes_available = [(2, -2), (2, -1), (2, 0), (2, 1), (2, 2), (3, -3), (3, -2), (3, -1), (3, 0), (3, 1), (3, 2), (3, 3), (4, -4), (4, -3), (4, -2), (4, -1), (4, 0), (4, 1), (4, 2), (4, 3), (4, 4)];
t = self.sur.t_coorb
self.ToverMmin = t.min()
self.ToverMmax = t.max()
self.ToverM_peak=0
for mode in self.modes_available:
# Not used, but populate anyways
self.post_dict[mode] = sur_identity
self.post_dict_complex[mode] = lambda x: x # to mode
self.post_dict_complex_coef[mode] = lambda x:x # to coefficients.
self.parameter_convert[mode] = my_converter # ConvertWPtoSurrogateParams # default conversion routine
return
# Load surrogates from a mode-by-mode basis, and their conjugates
for mode in raw_modes:
if mode[0]<=self.lmax and mode in lm_list: # latter SHOULD be redundant (because of ell_m=lm_list)
print(" Loading mode ", mode)
self.modes_available.append(mode)
self.post_dict[mode] = sur_identity
self.post_dict_complex[mode] = lambda x: x # to mode
self.post_dict_complex_coef[mode] = lambda x:x # to coefficients.
self.parameter_convert[mode] = my_converter # ConvertWPtoSurrogateParams # default conversion routine
if self.single_mode_sur:
self.sur_dict[mode] = self.sur.single_mode(mode)
print(' mode ', mode, self.sur_dict[mode].B.shape)
self.nbasis_per_mode[mode] = (self.sur_dict[mode].B.shape)[1]
if max_nbasis_per_mode != None and self.sur_dict[mode].surrogate_mode_type == 'waveform_basis':
if max_nbasis_per_mode >0: # and max_nbasis_per_mode < self.nbasis_per_mode[mode]:
# See https://arxiv.org/pdf/1308.3565v2.pdf Eqs. 13 - 19
# Must truncate *orthogonal* basis.
# Works only for LINEAR basis
# B are samples of the basis on some long time, V
sur = self.sur_dict[mode]
print(" Truncating basis for mode ", mode, " to size ", max_nbasis_per_mode, " but note the number of EIM points remains the same...")
V = self.sur_dict[mode].V
n_basis = len(V)
V_inv = inv(V)
mtx_E = np.dot(self.sur_dict[mode].B,V)
# print "E ", mtx_E.shape
# Zero out the components we don't want
if max_nbasis_per_mode < n_basis:
mtx_E[:,max_nbasis_per_mode:n_basis] *=0
# Regenerate
sur.B = np.dot(mtx_E , V_inv)
sur.reB_spline_params = [_splrep(sur.times, sur.B[:,jj].real, k=3) for jj in range(sur.B.shape[1])]
sur.imB_spline_params = [_splrep(sur.times, sur.B[:,jj].imag, k=3) for jj in range(sur.B.shape[1])]
self.nbasis_per_mode[mode] = len(self.sur_dict[mode].V) # if you truncate over the orthogonal basis, you still need to use the fit at all the EIM points!
# This SHOULD update the copies inside the surrogate, so the later interpolation called by EvaluateSingleModeSurrogate will interpolate this data
if reflection_symmetric and raw_modes.count((mode[0],-mode[1]))<1:
mode_alt = (mode[0],-mode[1])
if rosDebug:
print(" Adjoining postprocessing to enable complex conjugate for reflection symmetric case", mode_alt)
# if max_nbasis_per_mode:
# self.nbasis_per_mode[mode_alt] = np.max([int(max_nbasis_per_mode),1]) # INFRASTRUCTURE PLAN: Truncate
print(" Loading mode ", mode_alt, " via reflection symmetry ")
self.modes_available.append(mode_alt)
self.post_dict[mode_alt] = sur_conj
self.post_dict_complex_coef[mode_alt] = lambda x,l=mode[0]: np.power(-1,l)*np.conj(x) # beware, do not apply this twice.
self.post_dict_complex[mode_alt] = np.conj # beware, do not apply this twice.
self.parameter_convert[mode_alt] = my_converter
if self.single_mode_sur:
self.nbasis_per_mode[mode_alt] = self.nbasis_per_mode[mode]
self.sur_dict[mode_alt] = self.sur_dict[mode]
if not self.single_mode_sur:
# return after performing all the neat reflection symmetrization setup described above, in case model is *not* a single-mode surrogate
print(" ... done setting mode symmetry requirements", self.modes_available)
# print raw_modes, self.post_dict
return
# CURRENTLY ONLY LOAD THE 22 MODE and generate the 2,-2 mode by symmetry
t = self.sur_dict[(2,2)].times # end time
self.ToverMmin = t.min()
self.ToverMmax = t.max()
P=lalsimutils.ChooseWaveformParams() # default is q=1 object
params_tmp = self.parameter_convert[(2,2)](P)
if rosDebug:
print(" Passing temporary parameters ", params_tmp, " to find the peak time default ")
# print dir(self.sur_dict[(2,2)])
# print self.sur_dict[(2,2)].__dict__.keys()
# print self.sur_dict[(2,2)].parameterization
# t, hp, hc = self.sur_dict[(2,2)]( **params_tmp ); # calculate merger time -- addresses conventions on peak time location, and uses named arguments
t, hp, hc = self.sur_dict[(2,2)]( params_tmp ); # calculate merger time -- addresses conventions on peak time location, and uses named arguments
self.ToverM_peak = t[np.argmax(np.abs(hp**2+hc**2))] # discrete maximum time. Sanity check
if rosDebug:
print(" Peak time for ROM ", self.ToverM_peak)
# BASIS MANAGEMENT: Not yet implemented
# Assume a DISTINCT BASIS SET FOR ALL MODES, which will be ANNOYING
self.strain_basis_functions_dimensionless_data ={}
self.strain_basis_functions_dimensionless = self.sur_dict[(2,2)].resample_B # We may need to add complex conjugate functions too. And a master index for basis functions associated with different modes
def print_params(self):
print(" Surrogate model ")
print(" Modes available ")
for mode in self.sur_dict:
print(" " , mode, " nbasis = ", self.nbasis_per_mode[mode])
# same arguments as hlm
def complex_hoft(self, P, force_T=False, deltaT=1./16384, time_over_M_zero=0.,sgn=-1):
hlmT = self.hlmoft(P, force_T, deltaT,time_over_M_zero)
npts = hlmT[(2,2)].data.length
wfmTS = lal.CreateCOMPLEX16TimeSeries("h", lal.LIGOTimeGPS(0.), 0., deltaT, lalsimutils.lsu_DimensionlessUnit, npts)
wfmTS.data.data[:] = 0 # SHOULD NOT BE NECESSARY, but the creation operator doesn't robustly clean memory
wfmTS.epoch = hlmT[(2,2)].epoch
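# Sum the modes weighted by spin-weight -2 spherical harmonics evaluated at (incl, -phiref),
# with an overall polarization phase factor exp(-2*sgn*1j*psi).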
for mode in hlmT.keys():
# PROBLEM: Be careful with interpretation. The incl and phiref terms are NOT tied to L.
if rosDebug:
print(mode, np.max(hlmT[mode].data.data), " running max ", np.max(np.abs(wfmTS.data.data)))
wfmTS.data.data += np.exp(-2*sgn*1j*P.psi)* hlmT[mode].data.data*lal.SpinWeightedSphericalHarmonic(P.incl,-P.phiref,-2, int(mode[0]),int(mode[1]))
return wfmTS
def complex_hoff(self,P, force_T=False):
htC = self.complex_hoft(P, force_T=force_T,deltaT= P.deltaT)
TDlen = int(1./P.deltaF * 1./P.deltaT)
assert TDlen == htC.data.length
hf = lal.CreateCOMPLEX16FrequencySeries("Template h(f)",
htC.epoch, htC.f0, 1./htC.deltaT/htC.data.length, lalsimutils.lsu_HertzUnit,
htC.data.length)
fwdplan=lal.CreateForwardCOMPLEX16FFTPlan(htC.data.length,0)
lal.COMPLEX16TimeFreqFFT(hf, htC, fwdplan)
return hf
def real_hoft(self,P,Fp=None, Fc=None):
"""
Returns the real-valued h(t) that would be produced in a single instrument.
Translates epoch as needed.
Based on 'hoft' in lalsimutils.py
"""
# Create complex timessereis
htC = self.complex_hoft(P,force_T=1./P.deltaF, deltaT= P.deltaT) # note P.tref is NOT used in the low-level code
TDlen = htC.data.length
if rosDebug:
print("Size sanity check ", TDlen, 1/(P.deltaF*P.deltaT))
print(" Raw complex magnitude , ", np.max(htC.data.data))
# Create working buffers to extract data from it -- wasteful.
hp = lal.CreateREAL8TimeSeries("h(t)", htC.epoch, 0.,
P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hc = lal.CreateREAL8TimeSeries("h(t)", htC.epoch, 0.,
P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hT = lal.CreateREAL8TimeSeries("h(t)", htC.epoch, 0.,
P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
# Copy data components over
# - note htC is hp - i hx
hp.data.data = np.real(htC.data.data)
hc.data.data = (-1) * np.imag(htC.data.data)
# transform as in lalsimutils.hoft
if Fp!=None and Fc!=None:
hp.data.data *= Fp
hc.data.data *= Fc
hp = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
elif P.radec==False:
fp = lalsimutils.Fplus(P.theta, P.phi, P.psi)
fc = lalsimutils.Fcross(P.theta, P.phi, P.psi)
hp.data.data *= fp
hc.data.data *= fc
hp = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
else:
# Note epoch must be applied FIRST, to make sure the correct event time is being used to construct the modulation functions
hp.epoch = hp.epoch + P.tref
hc.epoch = hc.epoch + P.tref
if rosDebug:
print(" Real h(t) before detector weighting, ", np.max(hp.data.data), np.max(hc.data.data))
hoft = lalsim.SimDetectorStrainREAL8TimeSeries(hp, hc, # beware, this MAY alter the series length??
P.phi, P.theta, P.psi,
lalsim.DetectorPrefixToLALDetector(str(P.detector)))
hoft = lal.CutREAL8TimeSeries(hoft, 0, hp.data.length) # force same length as before??
if rosDebug:
print("Size before and after detector weighting " , hp.data.length, hoft.data.length)
if rosDebug:
print(" Real h_{IFO}(t) generated, pre-taper : max strain =", np.max(hoft.data.data))
if P.taper != lalsimutils.lsu_TAPER_NONE: # Taper if requested
lalsim.SimInspiralREAL8WaveTaper(hoft.data, P.taper)
if P.deltaF is not None:
TDlen = int(1./P.deltaF * 1./P.deltaT)
print("Size sanity check 2 ", int(1./P.deltaF * 1./P.deltaT), hoft.data.length)
assert TDlen >= hoft.data.length
npts = hoft.data.length
hoft = lal.ResizeREAL8TimeSeries(hoft, 0, TDlen)
# Zero out the last few data elements -- NOT always reliable for all architectures; SHOULD NOT BE NECESSARY
hoft.data.data[npts:TDlen] = 0
if rosDebug:
print(" Real h_{IFO}(t) generated : max strain =", np.max(hoft.data.data))
return hoft
def non_herm_hoff(self,P):
"""
Returns the 2-sided h(f) associated with the real-valued h(t) seen in a real instrument.
Translates epoch as needed.
Based on 'non_herm_hoff' in lalsimutils.py
"""
htR = self.real_hoft(P) # Generate real-valued TD waveform, including detector response
if P.deltaF == None: # h(t) was not zero-padded, so do it now
TDlen = nextPow2(htR.data.length)
htR = lal.ResizeREAL8TimeSeries(htR, 0, TDlen)
else: # Check zero-padding was done to expected length
TDlen = int(1./P.deltaF * 1./P.deltaT)
assert TDlen == htR.data.length
fwdplan=lal.CreateForwardCOMPLEX16FFTPlan(htR.data.length,0)
htC = lal.CreateCOMPLEX16TimeSeries("hoft", htR.epoch, htR.f0,
htR.deltaT, htR.sampleUnits, htR.data.length)
# copy h(t) into a COMPLEX16 array which happens to be purely real
htC.data.data[:htR.data.length] = htR.data.data
# for i in range(htR.data.length):
# htC.data.data[i] = htR.data.data[i]
hf = lal.CreateCOMPLEX16FrequencySeries("Template h(f)",
htR.epoch, htR.f0, 1./htR.deltaT/htR.data.length, lalsimutils.lsu_HertzUnit,
htR.data.length)
lal.COMPLEX16TimeFreqFFT(hf, htC, fwdplan)
return hf
def estimateFminHz(self,P,fmin=10.):
# This SHOULD use information from the ROM
return 2*self.fMin/(MsunInSec*(P.m1+P.m2)/lal.MSUN_SI)
def estimateDurationSec(self,P,fmin=10.):
"""
estimateDuration uses fmin*M from the (2,2) mode to estimate the waveform duration from the *well-posed*
part. By default it uses the *entire* waveform duration.
CURRENTLY DOES NOT IMPLEMENT frequency-dependent duration
"""
return (self.ToverMmax - self.ToverMmin)*MsunInSec*(P.m1+P.m2)/lal.MSUN_SI
return None
def basis_oft(self, P, force_T=False, deltaT=1./16384, time_over_M_zero=0.,return_numpy=False):
m_total_s = MsunInSec*(P.m1+P.m2)/lal.MSUN_SI
# Create a suitable set of time samples. Zero pad to 2^n samples.
T_estimated = np.abs(self.sur_dict[(2,2)].tmin)*m_total_s
print(" Estimated duration ", T_estimated)
# T_estimated = 20 # FIXME. Time in seconds
npts=0
if not force_T:
npts_estimated = int(T_estimated/deltaT)
npts = lalsimutils.nextPow2(npts_estimated)
else:
npts = int(force_T/deltaT)
if rosDebug:
print(" Forcing length T=", force_T, " length ", npts)
tvals = (
|
np.arange(npts)
|
numpy.arange
|
import datetime
import numpy as np
def metadata_to_header(metadata):
"""
Parameters
----------
metadata : dict
Dict of metadata as returned by wradlib.io.read_radolan_composite
Returns
-------
header : byte string
Header byte string conforming to the definition of RADOLAN binary files
"""
if metadata['producttype'] != 'RW':
raise NotImplementedError('Currently only RADOLAN-RW is supported')
len_header_fixed_part = 82
len_header_radar_locations = len(
'<' + ','.join(metadata['radarlocations']) + '> ')
len_header = len_header_fixed_part + len_header_radar_locations
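# The RW composite header is an 82-byte fixed section followed by a variable-length 'MS'
# block listing the contributing radar sites.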
# Generate empty header with only whitespaces
header_out = np.array(['', ] * len_header, dtype='S1')
header_out[:] = ' '
# Fill header with metadata and tokens
header_out[0:2] = list(metadata['producttype'])
header_out[2:8] = list(
datetime.datetime.strftime(metadata['datetime'], '%d%H%M'))
header_out[8:13] = list(metadata['radarid'])
header_out[13:17] = list(
datetime.datetime.strftime(metadata['datetime'], '%m%y'))
header_out[17:19] = list('BY')
# Have to add one here to get correct length in header string.
# Do not know why. Maybe because of the 'etx' char
header_out[19:26] = list(str(metadata['datasize'] + len_header + 1))
header_out[26:28] = list('VS')
header_out[28:30] = list(
{
'100 km and 128 km (mixed)': ' 0',
'100 km': ' 1',
'128 km': ' 2',
'150 km': ' 3'
}.get(metadata['maxrange'])
)
header_out[30:32] = list('SW')
header_out[32:41] = list(metadata['radolanversion'].rjust(9))
header_out[41:43] = list('PR')
header_out[43:48] = list(
{
0.01: ' E-02',
0.1: ' E-01',
1: ' E-00',
}.get(metadata['precision'])
)
header_out[48:51] = list('INT')
header_out[51:55] = list(
str(int(metadata['intervalseconds'] / 60)).rjust(4))
header_out[55:57] = list('GP')
header_out[57:66] = list(
str(metadata['nrow']).rjust(4) + 'x' + str(metadata['ncol']).rjust(4))
header_out[66:68] = list('MF')
header_out[69:77] = list(str(int(metadata['moduleflag'])).zfill(8))
header_out[77:79] = list('MS')
header_out[79:82] = list(str(int(len_header_radar_locations)).rjust(3))
header_out[82:(82 + len_header_radar_locations)] = list(
'<' + ','.join(metadata['radarlocations']) + '> ')
header_out = b''.join(header_out).decode()
return header_out
def data_to_byte_array(data, metadata):
"""
Parameters
----------
data : numpy.ndarray
    RADOLAN RW data field, e.g. as returned by wradlib.io.read_radolan_composite
metadata : dict
    Dict of metadata as returned by wradlib.io.read_radolan_composite
Returns
-------
"""
if metadata['producttype'] != 'RW':
raise NotImplementedError('Currently only RADOLAN-RW is supported')
arr = (data / metadata['precision']).flatten().astype(np.uint16)
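# Each pixel becomes a uint16: the data value divided by the precision in the low bits, with
# flag bits set on top (the code below sets 0x1000 for the pixels listed under
# metadata['secondary']).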
secondary = np.zeros_like(arr, dtype=np.uint16)
secondary[metadata['secondary']] = 0x1000
nodatamask =
|
np.zeros_like(arr, dtype=np.uint16)
|
numpy.zeros_like
|
from collections import OrderedDict
from threading import Lock
from typing import Dict, List, Tuple, Union
import numpy as np
from vispy.color import BaseColormap as VispyColormap
from vispy.color import get_colormap, get_colormaps
from ..translations import trans
from .bop_colors import bopd
from .colormap import Colormap
from .vendored import cm, colorconv
ValidColormapArg = Union[
str,
VispyColormap,
Colormap,
Tuple[str, VispyColormap],
Tuple[str, Colormap],
Dict[str, VispyColormap],
Dict[str, Colormap],
Dict,
]
matplotlib_colormaps = _MATPLOTLIB_COLORMAP_NAMES = OrderedDict(
viridis=trans._p('colormap', 'viridis'),
magma=trans._p('colormap', 'magma'),
inferno=trans._p('colormap', 'inferno'),
plasma=trans._p('colormap', 'plasma'),
gray=trans._p('colormap', 'gray'),
gray_r=trans._p('colormap', 'gray r'),
hsv=trans._p('colormap', 'hsv'),
turbo=trans._p('colormap', 'turbo'),
twilight=trans._p('colormap', 'twilight'),
twilight_shifted=trans._p('colormap', 'twilight shifted'),
gist_earth=trans._p('colormap', 'gist earth'),
PiYG=trans._p('colormap', 'PiYG'),
)
_MATPLOTLIB_COLORMAP_NAMES_REVERSE = {
v: k for k, v in matplotlib_colormaps.items()
}
_VISPY_COLORMAPS_ORIGINAL = _VCO = get_colormaps()
_VISPY_COLORMAPS_TRANSLATIONS = OrderedDict(
autumn=(trans._p('colormap', 'autumn'), _VCO['autumn']),
blues=(trans._p('colormap', 'blues'), _VCO['blues']),
cool=(trans._p('colormap', 'cool'), _VCO['cool']),
greens=(trans._p('colormap', 'greens'), _VCO['greens']),
reds=(trans._p('colormap', 'reds'), _VCO['reds']),
spring=(trans._p('colormap', 'spring'), _VCO['spring']),
summer=(trans._p('colormap', 'summer'), _VCO['summer']),
fire=(trans._p('colormap', 'fire'), _VCO['fire']),
grays=(trans._p('colormap', 'grays'), _VCO['grays']),
hot=(trans._p('colormap', 'hot'), _VCO['hot']),
ice=(trans._p('colormap', 'ice'), _VCO['ice']),
winter=(trans._p('colormap', 'winter'), _VCO['winter']),
light_blues=(trans._p('colormap', 'light blues'), _VCO['light_blues']),
orange=(trans._p('colormap', 'orange'), _VCO['orange']),
viridis=(trans._p('colormap', 'viridis'), _VCO['viridis']),
coolwarm=(trans._p('colormap', 'coolwarm'), _VCO['coolwarm']),
PuGr=(trans._p('colormap', 'PuGr'), _VCO['PuGr']),
GrBu=(trans._p('colormap', 'GrBu'), _VCO['GrBu']),
GrBu_d=(trans._p('colormap', 'GrBu_d'), _VCO['GrBu_d']),
RdBu=(trans._p('colormap', 'RdBu'), _VCO['RdBu']),
cubehelix=(trans._p('colormap', 'cubehelix'), _VCO['cubehelix']),
single_hue=(trans._p('colormap', 'single hue'), _VCO['single_hue']),
hsl=(trans._p('colormap', 'hsl'), _VCO['hsl']),
husl=(trans._p('colormap', 'husl'), _VCO['husl']),
diverging=(trans._p('colormap', 'diverging'), _VCO['diverging']),
RdYeBuCy=(trans._p('colormap', 'RdYeBuCy'), _VCO['RdYeBuCy']),
)
_VISPY_COLORMAPS_TRANSLATIONS_REVERSE = {
v[0]: k for k, v in _VISPY_COLORMAPS_TRANSLATIONS.items()
}
_PRIMARY_COLORS = OrderedDict(
red=(trans._p('colormap', 'red'), [1.0, 0.0, 0.0]),
green=(trans._p('colormap', 'green'), [0.0, 1.0, 0.0]),
blue=(trans._p('colormap', 'blue'), [0.0, 0.0, 1.0]),
cyan=(trans._p('colormap', 'cyan'), [0.0, 1.0, 1.0]),
magenta=(trans._p('colormap', 'magenta'), [1.0, 0.0, 1.0]),
yellow=(trans._p('colormap', 'yellow'), [1.0, 1.0, 0.0]),
)
SIMPLE_COLORMAPS = {
name: Colormap(
name=name, display_name=display_name, colors=[[0.0, 0.0, 0.0], color]
)
for name, (display_name, color) in _PRIMARY_COLORS.items()
}
# dictionary for bop colormap objects
BOP_COLORMAPS = {
name: Colormap(value, name=name, display_name=display_name)
for name, (display_name, value) in bopd.items()
}
def _all_rgb():
"""Return all 256**3 valid rgb tuples."""
base = np.arange(256, dtype=np.uint8)
r, g, b = np.meshgrid(base, base, base, indexing='ij')
return np.stack((r, g, b), axis=-1).reshape((-1, 3))
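# Illustrative sanity check (not part of the original module): _all_rgb() enumerates
# every 8-bit RGB triple, so the returned array has shape (256 ** 3, 3).
# assert _all_rgb().shape == (16777216, 3)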
# obtained with colorconv.rgb2luv(_all_rgb().reshape((-1, 256, 3)))
LUVMIN =
|
np.array([0.0, -83.07790815, -134.09790293])
|
numpy.array
|
"""
This network uses the last n_lags observations of gwl, tide, and rain to predict the next
n_ahead values of gwl for well MMPS-175 (configured below as 116 lags and 18 forecast steps)
"""
import pandas as pd
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import keras
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Activation
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import random as rn
import os
matplotlib.rcParams.update({'font.size': 8})
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
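# Illustrative usage of series_to_supervised (hypothetical toy data, not part of the
# original workflow): a 2-variable series framed with 2 lag steps and 1 forecast step
# yields one column per variable and time offset.
# toy = series_to_supervised(np.arange(12).reshape(6, 2), n_in=2, n_out=1)
# toy.columns.tolist()
# -> ['var1(t-2)', 'var2(t-2)', 'var1(t-1)', 'var2(t-1)', 'var1(t)', 'var2(t)']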
# def create_weights(train_labels):
# obs_mean = np.mean(train_labels, axis=-1)
# obs_mean = np.reshape(obs_mean, (n_batch, 1))
# obs_mean = np.repeat(obs_mean, n_ahead, axis=1)
# weights = (train_labels + obs_mean) / (2 * obs_mean)
# return weights
#
#
# def sq_err(y_true, y_pred):
# return K.square(y_pred - y_true)
#
#
def mse(y_true, y_pred):
return K.mean(K.square(y_pred - y_true), axis=-1)
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def pw_rmse(y_true, y_pred):
# num_rows, num_cols = K.int_shape(y_true)[0], K.int_shape(y_true)[1]
# print(num_rows, num_cols)
act_mean = K.mean(y_true, axis=-1)
# print("act_mean 1 is:", act_mean)
act_mean = K.reshape(act_mean, (n_batch, 1))
# print("act_mean is: ", act_mean)
mean_repeat = K.repeat_elements(act_mean, n_ahead, axis=1)
# print("mean_repeat is:", mean_repeat)
weights = (y_true+mean_repeat)/(2*mean_repeat)
return K.sqrt(K.mean((K.square(y_pred - y_true)*weights), axis=-1))
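# Worked example of the peak weighting above (illustrative numbers): for an observed
# row y_true = [1, 2, 3] the row mean is 2, so the weights are (y_true + 2)/(2*2) =
# [0.75, 1.0, 1.25], i.e. squared errors at above-average (peak) values count more.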
# configure network
n_lags = 116
n_ahead = 18
n_features = 3
n_train = 52551
n_test = 8359
n_epochs = 500
n_neurons = 10
n_batch = 52551
# load dataset
dataset_raw = read_csv("C:/Users/<NAME>/Documents/HRSD GIS/Site Data/MMPS_175_no_blanks.csv",
index_col=None, parse_dates=True, infer_datetime_format=True)
# dataset_raw = dataset_raw[0:len(dataset_raw)-1]
# split datetime column into train and test for plots
train_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
# drop columns we don't want to predict
dataset = dataset_raw.drop(dataset_raw.columns[[0]], axis=1)
values = dataset.values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_scaled = gwl_scaler.fit_transform(gwl)
tide_scaled = tide_scaler.fit_transform(tide)
rain_scaled = rain_scaler.fit_transform(rain)
scaled = np.concatenate((gwl_scaled, tide_scaled, rain_scaled), axis=1)
# frame as supervised learning
reframed = series_to_supervised(scaled, n_lags, n_ahead)
values = reframed.values
# split into train and test sets
train, test = values[:n_train, :], values[n_train:, :]
# split into input and outputs
input_cols, label_cols = [], []
for i in range(values.shape[1]):
if i <= n_lags*n_features-1:
input_cols.append(i)
elif i % 3 != 0:
input_cols.append(i)
elif i % 3 == 0:
label_cols.append(i)
train_X, train_y = train[:, input_cols], train[:, label_cols] # [start:stop:increment, (cols to include)]
test_X, test_y = test[:, input_cols], test[:, label_cols]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
#create weights for peak weighted rmse loss function
# weights = create_weights(train_y)
# load model here if needed
# model = keras.models.load_model("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps175.h5",
# custom_objects={'pw_rmse':pw_rmse})
# set random seeds for model reproducibility as suggested in:
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# define model
model = Sequential()
model.add(LSTM(units=n_neurons, input_shape=(None, train_X.shape[2])))
# model.add(LSTM(units=n_neurons, return_sequences=True, input_shape=(None, train_X.shape[2])))
# model.add(LSTM(units=n_neurons, return_sequences=True))
# model.add(LSTM(units=n_neurons))
model.add(Dropout(.1))
model.add(Dense(input_dim=n_neurons, activation='linear', units=n_ahead))
# model.add(Activation('linear'))
model.compile(loss=pw_rmse, optimizer='adam')
tbCallBack = keras.callbacks.TensorBoard(log_dir='C:/tmp/tensorflow/keras/logs', histogram_freq=0, write_graph=True,
write_images=False)
earlystop = keras.callbacks.EarlyStopping(monitor='loss', min_delta=0.0001, patience=5, verbose=1, mode='auto')
history = model.fit(train_X, train_y, batch_size=n_batch, epochs=n_epochs, verbose=2, shuffle=False,
callbacks=[earlystop, tbCallBack])
# save model
# model.save("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps175.h5")
# plot model history
# plt.plot(history.history['loss'], label='train')
# # plt.plot(history.history['val_loss'], label='validate')
# # plt.legend()
# # ticks = np.arange(0, n_epochs, 1) # (start,stop,increment)
# # plt.xticks(ticks)
# plt.xlabel("Epochs")
# plt.ylabel("Loss")
# plt.tight_layout()
# plt.show()
# make predictions
trainPredict = model.predict(train_X)
yhat = model.predict(test_X)
inv_trainPredict = gwl_scaler.inverse_transform(trainPredict)
inv_yhat = gwl_scaler.inverse_transform(yhat)
inv_y = gwl_scaler.inverse_transform(test_y)
inv_train_y = gwl_scaler.inverse_transform(train_y)
# save test predictions and observed
inv_yhat_df = DataFrame(inv_yhat)
inv_yhat_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/predicted.csv")
inv_y_df = DataFrame(inv_y)
inv_y_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/observed.csv")
# calculate RMSE for whole test series (each forecast step)
RMSE_forecast = []
for i in np.arange(0, n_ahead, 1):
rmse = sqrt(mean_squared_error(inv_y[:, i], inv_yhat[:, i]))
RMSE_forecast.append(rmse)
RMSE_forecast = DataFrame(RMSE_forecast)
rmse_avg = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Average Test RMSE: %.3f' % rmse_avg)
RMSE_forecast.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/RMSE.csv")
# calculate RMSE for each individual time step
RMSE_timestep = []
for i in np.arange(0, inv_yhat.shape[0], 1):
rmse = sqrt(mean_squared_error(inv_y[i, :], inv_yhat[i, :]))
RMSE_timestep.append(rmse)
RMSE_timestep = DataFrame(RMSE_timestep)
# plot rmse vs forecast steps
plt.plot(RMSE_forecast, 'ko')
ticks = np.arange(0, n_ahead, 1) # (start,stop,increment)
plt.xticks(ticks)
plt.ylabel("RMSE (ft)")
plt.xlabel("Forecast Step")
plt.tight_layout()
plt.show()
# plot training predictions
plt.plot(inv_train_y[:, 0], label='actual')
plt.plot(inv_trainPredict[:, 0], label='predicted')
plt.xlabel("Timestep")
plt.ylabel("GWL (ft)")
plt.title("Training Predictions")
# ticks = np.arange(0, n_ahead, 1)
# plt.xticks(ticks)
plt.legend()
plt.tight_layout()
plt.show()
# plot test predictions for Hermine, Julia, and Matthew
dates = DataFrame(test_dates[["Datetime"]][n_lags:-n_ahead+1])
dates = dates.reset_index(inplace=False)
dates = dates.drop(columns=['index'])
dates = dates[5700:8000]
dates = dates.reset_index(inplace=False)
dates = dates.drop(columns=['index'])
dates_9 = DataFrame(test_dates[["Datetime"]][n_lags+8:-n_ahead+9])
dates_9 = dates_9.reset_index(inplace=False)
dates_9 = dates_9.drop(columns=['index'])
dates_9 = dates_9[5700:8000]
dates_9 = dates_9.reset_index(inplace=False)
dates_9 = dates_9.drop(columns=['index'])
dates_18 = DataFrame(test_dates[["Datetime"]][n_lags+17:])
dates_18 = dates_18.reset_index(inplace=False)
dates_18 = dates_18.drop(columns=['index'])
dates_18 = dates_18[5700:8000]
dates_18 = dates_18.reset_index(inplace=False)
dates_18 = dates_18.drop(columns=['index'])
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(6.5, 3))
x_ticks = np.arange(0, 2300, 168)
ax1.plot(inv_y[5700:8000, 0], 'k-', label='Obs.')
ax1.plot(inv_yhat[5700:8000, 0], 'k:', label='Pred.')
ax1.set_xticks(x_ticks)
ax1.set_xticklabels(dates['Datetime'][x_ticks].dt.strftime('%Y-%m-%d'), rotation='vertical')
ax2.plot(inv_y[5700:8000, 8], 'k-', label='Obs.')
ax2.plot(inv_yhat[5700:8000, 8], 'k:', label='Pred.')
ax2.set_xticks(x_ticks)
ax2.set_xticklabels(dates_9['Datetime'][x_ticks].dt.strftime('%Y-%m-%d'), rotation='vertical')
ax3.plot(inv_y[5700:8000, 17], 'k-', label='Obs.')
ax3.plot(inv_yhat[5700:8000, 17], 'k:', label='Pred.')
ax3.set_xticks(x_ticks)
ax3.set_xticklabels(dates_18['Datetime'][x_ticks].dt.strftime('%Y-%m-%d'), rotation='vertical')
ax1.set(ylabel="GWL (ft)", title='t+1')
ax2.set(title='t+9')
ax3.set(title='t+18')
plt.legend()
plt.tight_layout()
plt.show()
# fig.savefig('C:/Users/<NAME>/Documents/HRSD GIS/Presentation Images/Paper Figures/MMPS175_preds.tif', dpi=300)
# create dfs of timestamps, obs, and pred data to find peak values and times
obs_t1 = np.reshape(inv_y[5700:8000, 0], (2300, 1))
pred_t1 = np.reshape(inv_yhat[5700:8000, 0], (2300,1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = DataFrame(df_t1, index=None, columns=["obs", "pred"])
df_t1 = pd.concat([df_t1, dates], axis=1)
df_t1 = df_t1.set_index("Datetime")
obs_t9 = np.reshape(inv_y[5700:8000, 8], (2300, 1))
pred_t9 = np.reshape(inv_yhat[5700:8000, 8], (2300,1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = DataFrame(df_t9, index=None, columns=["obs", "pred"])
df_t9 = pd.concat([df_t9, dates_9], axis=1)
df_t9 = df_t9.set_index("Datetime")
obs_t18 = np.reshape(inv_y[5700:8000, 17], (2300, 1))
pred_t18 = np.reshape(inv_yhat[5700:8000, 17], (2300,1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
df_t18 = DataFrame(df_t18, index=None, columns=["obs", "pred"])
df_t18 = pd.concat([df_t18, dates_18], axis=1)
df_t18 = df_t18.set_index("Datetime")
HerminePeak_t1 = df_t1.loc["2016-09-02T00:00:00.000000000":"2016-09-08T00:00:00.000000000"].max()
HerminePeak_t1_time = df_t1.loc["2016-09-02T00:00:00.000000000":"2016-09-08T00:00:00.000000000"].idxmax()
JuliaPeak_t1 = df_t1.loc["2016-09-18T00:00:00.000000000":"2016-09-25T00:00:00.000000000"].max()
JuliaPeak_t1_time = df_t1.loc["2016-09-18T00:00:00.000000000":"2016-09-25T00:00:00.000000000"].idxmax()
MatthewPeak_t1 = df_t1.loc["2016-10-07T00:00:00.000000000":"2016-10-14T00:00:00.000000000"].max()
MatthewPeak_t1_time = df_t1.loc["2016-10-07T00:00:00.000000000":"2016-10-14T00:00:00.000000000"].idxmax()
HerminePeak_t9 = df_t9.loc["2016-09-02T00:00:00.000000000":"2016-09-08T00:00:00.000000000"].max()
HerminePeak_t9_time = df_t9.loc["2016-09-02T00:00:00.000000000":"2016-09-08T00:00:00.000000000"].idxmax()
JuliaPeak_t9 = df_t9.loc["2016-09-18T00:00:00.000000000":"2016-09-25T00:00:00.000000000"].max()
JuliaPeak_t9_time = df_t9.loc["2016-09-18T00:00:00.000000000":"2016-09-25T00:00:00.000000000"].idxmax()
MatthewPeak_t9 = df_t9.loc["2016-10-07T00:00:00.000000000":"2016-10-14T00:00:00.000000000"].max()
MatthewPeak_t9_time = df_t9.loc["2016-10-07T00:00:00.000000000":"2016-10-14T00:00:00.000000000"].idxmax()
HerminePeak_t18 = df_t18.loc["2016-09-02T00:00:00.000000000":"2016-09-08T00:00:00.000000000"].max()
HerminePeak_t18_time = df_t18.loc["2016-09-02T00:00:00.000000000":"2016-09-08T00:00:00.000000000"].idxmax()
JuliaPeak_t18 = df_t18.loc["2016-09-18T00:00:00.000000000":"2016-09-25T00:00:00.000000000"].max()
JuliaPeak_t18_time = df_t18.loc["2016-09-18T00:00:00.000000000":"2016-09-25T00:00:00.000000000"].idxmax()
MatthewPeak_t18 = df_t18.loc["2016-10-07T00:00:00.000000000":"2016-10-14T00:00:00.000000000"].max()
MatthewPeak_t18_time = df_t18.loc["2016-10-07T00:00:00.000000000":"2016-10-14T00:00:00.000000000"].idxmax()
peaks_values = DataFrame([HerminePeak_t1, JuliaPeak_t1, MatthewPeak_t1, HerminePeak_t9, JuliaPeak_t9, MatthewPeak_t9,
HerminePeak_t18, JuliaPeak_t18, MatthewPeak_t18])
peaks_values = peaks_values.transpose()
peaks_values.columns = ['HerminePeak_t1', 'JuliaPeak_t1', 'MatthewPeak_t1', 'HerminePeak_t9', 'JuliaPeak_t9',
'MatthewPeak_t9', 'HerminePeak_t18', 'JuliaPeak_t18', 'MatthewPeak_t18']
peak_times = DataFrame([HerminePeak_t1_time, JuliaPeak_t1_time, MatthewPeak_t1_time, HerminePeak_t9_time,
JuliaPeak_t9_time, MatthewPeak_t9_time, HerminePeak_t18_time, JuliaPeak_t18_time,
MatthewPeak_t18_time])
peak_times = peak_times.transpose()
peak_times.columns = ['HermineTime_t1', 'JuliaTime_t1', 'MatthewTime_t1', 'HermineTime_t9', 'JuliaTime_t9',
'MatthewTime_t9', 'HermineTime_t18', 'JuliaTime_t18', 'MatthewTime_t18']
peaks_df = pd.concat([peaks_values, peak_times], axis=1)
cols = ['HerminePeak_t1', 'HermineTime_t1', 'JuliaPeak_t1', 'JuliaTime_t1', 'MatthewPeak_t1', 'MatthewTime_t1',
'HerminePeak_t9', 'HermineTime_t9', 'JuliaPeak_t9', 'JuliaTime_t9', 'MatthewPeak_t9', 'MatthewTime_t9',
'HerminePeak_t18', 'HermineTime_t18', 'JuliaPeak_t18', 'JuliaTime_t18', 'MatthewPeak_t18', 'MatthewTime_t18']
peaks_df = peaks_df[cols]
peaks_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/peaks.csv")
# plot all test predictions
plt.plot(inv_y[5700:8000, 0], label='actual')
plt.plot(inv_yhat[5700:8000, 0], label='predicted')
plt.xlabel("Timestep")
plt.ylabel("GWL (ft)")
plt.title("Testing Predictions")
# ticks = np.arange(0, n_ahead, 1)
# plt.xticks(ticks)
plt.legend()
plt.tight_layout()
plt.show()
# # plot test predictions, 18 hours from specific period
# plt.plot(inv_y[6275, :], label='actual')
# plt.plot(inv_yhat[6275, :], label='predicted')
# plt.xlabel("Timestep")
# plt.ylabel("GWL (ft)")
# plt.title("Testing Predictions")
# # ticks = np.arange(0, n_ahead, 1)
# # plt.xticks(ticks)
# plt.legend()
# plt.tight_layout()
# plt.show()
# combine prediction data with observations
start_date, stop_date = "2016-09-17 00:00:00", "2016-09-25 00:00:00"
act_cols, pred_cols = [], []
for i in range(n_ahead):
gwl_name = "Actual t+{0}".format(i)
pred_name = 'Predicted t+{0}'.format(i)
act_cols.append(gwl_name)
pred_cols.append(pred_name)
df_act = DataFrame(inv_y, columns=act_cols)
df_pred = DataFrame(inv_yhat, columns=pred_cols)
df_gwl = pd.concat([df_act, df_pred], axis=1)
df = pd.concat([test_dates, df_gwl], axis=1)
df = df[:inv_y.shape[0]]
df = df.set_index('Datetime')
storm = df.loc[start_date:stop_date]
storm.reset_index(inplace=True)
# plot test predictions with observed rain and tide
ax = storm[["Tide", "Actual t+0", "Predicted t+0"]].plot(color=["k"], style=[":", '-', '-.'], legend=None)
start, end = ax.get_xlim()
ticks = np.arange(0, end, 24) # (start,stop,increment)
ax2 = ax.twinx()
# ax2.set_ylim(ymax=2.5, ymin=0)
# ax.set_ylim(ymax=4, ymin=-1.25)
ax2.invert_yaxis()
storm["Precip."].plot.bar(ax=ax2, color="k")
ax2.set_xticks([])
ax.set_xticks(ticks)
ax.set_xticklabels(storm.loc[ticks, 'Datetime'].dt.strftime('%Y-%m-%d'), rotation='vertical')
ax.set_ylabel("Hourly Avg GW/Tide Level (ft)")
ax2.set_ylabel("Total Hourly Precip. (in)")
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc=1) # location: 0=best, 9=top center
plt.tight_layout()
plt.show()
# save plot for publication
# plt.savefig('C:/Users/<NAME>/Documents/HRSD GIS/Presentation Images/Plots/Floods_GWL_comparisons/'
# '20160919_bw_averaged.png', dpi=300)
# calculate NSE for each forecast period
NSE_timestep = []
for i in np.arange(0, inv_yhat.shape[0], 1):
num_diff = np.subtract(inv_y[i, :], inv_yhat[i, :])
num_sq =
|
np.square(num_diff)
|
numpy.square
|
#!/usr/bin/env python3
import warnings
import numpy as np
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import numpy.typing as npt
def pearsons_correlation(
x: "npt.ArrayLike",
y: "npt.ArrayLike"
) -> float:
from scipy.stats import pearsonr
from scipy.stats import (
PearsonRNearConstantInputWarning,
PearsonRConstantInputWarning
)
x_ = np.array(x)
y_ = np.array(y)
if len(x_.shape) == 1:
x_ = x_.reshape(-1, 1)
if len(y_.shape) == 1:
y_ = y_.reshape(-1, 1)
assert x_.shape == y_.shape
out = np.zeros(x_.shape[1])
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=PearsonRNearConstantInputWarning,
)
warnings.filterwarnings(
"ignore",
category=PearsonRConstantInputWarning,
)
for i in range(x_.shape[1]):
cor, _ = pearsonr(x_[:, i], y_[:, i])
out[i] = cor
if np.all(np.isnan(out)):
return np.nan
else:
return np.nanmean(out)
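# Minimal usage sketch (hypothetical data): correlations are computed column-wise and
# averaged, so two perfectly linearly related 2-column arrays give 1.0.
# >>> pearsons_correlation([[1.0, 2.0], [2.0, 4.0], [3.0, 6.0]],
# ...                      [[2.0, 1.0], [4.0, 2.0], [6.0, 3.0]])
# 1.0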
def spearmans_correlation(
x: "npt.ArrayLike",
y: "npt.ArrayLike"
) -> float:
from scipy.stats import spearmanr
from scipy.stats import SpearmanRConstantInputWarning
x_ = np.array(x)
y_ = np.array(y)
if len(x_.shape) == 1:
x_ = x_.reshape(-1, 1)
if len(y_.shape) == 1:
y_ = y_.reshape(-1, 1)
assert x_.shape == y_.shape
out = np.zeros(x_.shape[1])
# Note that spearmanr does accept multi-column
# but it will then output a matrix of pairwise correlations..
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=SpearmanRConstantInputWarning
)
for i in range(x_.shape[1]):
cor, _ = spearmanr(x_[:, i], y_[:, i])
out[i] = cor
if np.all(
|
np.isnan(out)
|
numpy.isnan
|
# -------------------------------------------------------------------------------------------------------
# This python file contains the implementation of multi-view NMF algorithm
#
# Reference:
# <NAME>, <NAME>, <NAME>, <NAME>, Multi-View Clustering via Joint Nonnegative Matrix Factorization,
# SIAM International Conference on Data Mining (SDM), 2013.
# Coded by <NAME>
# Date: 2020-02-22
# All rights reserved
# -------------------------------------------------------------------------------------------------------
import numpy as np
from cala import *
def preLabel(cV):
nCls, nSam = np.shape(cV)
B, index = iMax(cV, axis=0)
labels = index + 1
return labels
def nonneg(Fea):
nFea = len(Fea)
for i in range(nFea):
tmx = Fea[i]
nRow, nCol = np.shape(tmx)
mVal = np.min(np.min(tmx))
tmx = tmx - mVal
Fea[i] = tmx
return Fea
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This function implements the multiplicative algorithm of NMF
# Reference:
# <NAME> and <NAME>, Algorithms for Non-negative Matrix Factorization,
# NIPS, 2000.
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def nmf(X, r, nIter):
xRow, xCol = np.shape(X)
W = np.random.rand(xRow, r)
W = justNorm(W)
H = np.random.rand(r, xCol)
H = justNorm(H)
for ii in range(nIter):
# +++++ Update H +++++
tmp = np.dot(np.transpose(W), X) # r * xCol
tnp = np.dot(np.transpose(W), W) # r * r
tnp = np.dot(tnp, H) # r * xCol
tm = tmp / tnp
H = H * tm # r * xCol
# +++++ Update W +++++
tmp = np.dot(X, np.transpose(H)) # xRow * r
tnp = np.dot(W, H) # xRow * xCol
tnp = np.dot(tnp, np.transpose(H)) # xRow * r
tm = tmp / tnp
W = W * tm
# +++++ Check the objective +++++
tmp = np.dot(W, H)
obj = X - tmp
obj = norm(obj, 1)
        msg = 'The %d-th iteration: %f' % (ii, obj)
        print(msg)
if obj < 1e-7:
break
return W, H
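# Minimal usage sketch for nmf() (illustrative shapes and iteration count only):
# X = np.abs(np.random.rand(20, 15))   # any nonnegative data matrix
# W, H = nmf(X, r=4, nIter=100)        # W: 20 x 4, H: 4 x 15
# np.dot(W, H) then approximates X with nonnegative factors.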
def totalObj(Fea, U, V, cV, lamda):
nFea = len(Fea)
obj = 0
for i in range(nFea):
tmx = Fea[i]
tmu = U[i]
tmv = V[i]
tml = lamda[i]
tmp = np.dot(tmu, tmv)
tmp = tmx - tmp
tm = norm(tmp, 1)
q = np.sum(tmu, axis=0)
Q = np.diag(q) # r * r
tmp = np.dot(Q, tmv) # r * nCol
tmp = tmp - cV
tn = tml * norm(tmp, 1)
tmn = tm + tn
obj = obj + tmn
return obj
def calObj(X, U, V, cV, Q, lamda):
tmp = np.dot(U, V)
tmp = X - tmp
tm = norm(tmp, 1)
tmp = np.dot(Q, V) # r * nCol
tmp = tmp - cV # r * nCol
tn = lamda * norm(tmp, 1)
obj = tm + tn
return obj
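# In symbols (matching calObj above, with norm as defined in cala): the per-view
# objective is obj = norm(X - U V) + lamda * norm(Q V - cV), where Q = diag(column
# sums of U) and cV is the consensus factor shared across views.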
def pervNMF(X, U, V, cV, lamda, maxIter):
nRow, nCol = np.shape(X)
_, r = np.shape(U)
obj = 1e7
for ii in range(maxIter):
# +++++ Update U +++++
tmp = np.dot(X, np.transpose(V)) # nRow * r
tmq = V * cV # r * nCol
tmq = np.sum(tmq, axis=1) # r * 1
tmq = repVec(tmq, nRow) # r * nRow
tmq = np.transpose(tmq) # nRow * r
tm = tmp + lamda * tmq
tnp = np.dot(U, V) # nRow * nCol
tnp = np.dot(tnp, np.transpose(V)) # nRow * r
tnq = V ** 2
tnq = np.sum(tnq, axis=1) # r * 1
tnq = repVec(tnq, nRow)
tnq = np.transpose(tnq) # nRow * r
tnq = U * tnq # nRow * r
tnq =
|
np.sum(tnq, axis=0)
|
numpy.sum
|
"""
=======================================================
:mod:`go_benchmark` -- Benchmark optimization functions
=======================================================
This module provides a set of benchmark problems for global optimization.
.. Copyright 2013 <NAME>
.. module:: go_benchmark
.. moduleauthor:: <NAME> <<EMAIL>>
.. modifiedby:: <NAME> <<EMAIL>> 2016
"""
# Array math module implemented in C / C++
import numpy
# Optimized mathematical functions
from numpy import abs, arctan2, cos, dot, exp, floor, inf, log, log10, pi, prod, sin, sqrt, sum, tan, tanh
# Array functions
from numpy import arange, asarray, atleast_1d, ones, roll, seterr, sign, where, zeros, zeros_like
from numpy.random import uniform
from math import factorial
# Tell numpy to ignore errors
seterr(all='ignore')
# -------------------------------------------------------------------------------- #
class Benchmark(object):
"""
Defines a global optimization benchmark problem.
This abstract class defines the basic structure of a global
optimization problem. Subclasses should implement the ``evaluator`` method
for a particular optimization problem.
Public Attributes:
- *dimensions* -- the number of inputs to the problem
- *fun_evals* -- stores the number of function evaluations, as some crappy
      optimization frameworks (e.g., `nlopt`) do not return this value
- *change_dimensionality* -- whether we can change the benchmark function `x`
variable length (i.e., the dimensionality of the problem)
- *custom_bounds* -- a set of lower/upper bounds for plot purposes (if needed).
- *spacing* -- the spacing to use to generate evenly spaced samples across the
lower/upper bounds on the variables, for plotting purposes
"""
def __init__(self, dimensions):
self.dimensions = dimensions
self.fun_evals = 0
self.change_dimensionality = False
self.custom_bounds = None
self.record = [] # A record of objective values per evaluations
if dimensions == 1:
self.spacing = 1001
else:
self.spacing = 201
def __str__(self):
return "%s (%i dimensions)"%(self.__class__.__name__, self.dimensions)
def __repr__(self):
return self.__class__.__name__
def generator(self):
"""The generator function for the benchmark problem."""
return [uniform(l, u) for l, u in self.bounds]
def evaluator(self, candidates):
"""The evaluator function for the benchmark problem."""
raise NotImplementedError
def set_dimensions(self, ndim):
self.dimensions = ndim
def lower_bounds_constraints(self, x):
lower = asarray([b[0] for b in self.bounds])
return asarray(x) - lower
def upper_bounds_constraints(self, x):
upper = asarray([b[1] for b in self.bounds])
return upper - asarray(x)
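# Illustrative subclass (hypothetical, not one of the benchmark problems below): the
# Benchmark docstring above asks subclasses to define ``bounds``, ``global_optimum``
# and ``fglob`` and to implement ``evaluator``; a minimal sphere function shows the
# pattern.
class _ExampleSphere(Benchmark):
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self.bounds = list(zip([-5.0] * self.dimensions,
                               [ 5.0] * self.dimensions))
        self.global_optimum = [0.0] * self.dimensions
        self.fglob = 0.0
        self.change_dimensionality = True
    def evaluator(self, x, *args):
        self.fun_evals += 1
        return sum(asarray(x)**2.0)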
#-----------------------------------------------------------------------
# SINGLE-OBJECTIVE PROBLEMS
#-----------------------------------------------------------------------
class Ackley(Benchmark):
"""
Ackley test objective function.
This class defines the Ackley global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ackley}}(\\mathbf{x}) = -20e^{-0.2 \\sqrt{\\frac{1}{n} \\sum_{i=1}^n x_i^2}} - e^{ \\frac{1}{n} \\sum_{i=1}^n \\cos(2 \\pi x_i)} + 20 + e
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-32, 32]` for :math:`i=1,...,n`.
.. figure:: figures/Ackley.png
:alt: Ackley function
:align: center
**Two-dimensional Ackley function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-32.0] * self.dimensions,
[ 32.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
a = 20.0; b = 0.2; c = 2.0*pi
return -a*exp(-b*sqrt(1./self.dimensions*sum(x**2)))-exp(1./self.dimensions*sum(cos(c*x)))+a+exp(1.)
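# Quick check of the formula (illustrative): at the global optimum x = 0 the expression
# collapses to -20*exp(0) - exp(1) + 20 + e = 0, matching fglob above.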
#-----------------------------------------------------------------------
class Adjiman(Benchmark):
"""
Adjiman test objective function.
This class defines the Adjiman global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Adjiman}}(\\mathbf{x}) = \\cos(x_1)\\sin(x_2) - \\frac{x_1}{(x_2^2 + 1)}
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-1, 2]` and :math:`x_2 \\in [-1, 1]`.
.. figure:: figures/Adjiman.png
:alt: Adjiman function
:align: center
**Two-dimensional Adjiman function**
*Global optimum*: :math:`f(x_i) = -2.02181` for :math:`\\mathbf{x} = [2, 0.10578]`
"""
    def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self.bounds = [(-1.0, 2.0), (-1.0, 1.0)]
self.global_optimum = [2.0, 0.10578]
self.fglob = -2.02180678
self.change_dimensionality = False
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return cos(x1)*sin(x2) - x1/(x2**2.0 + 1)
# -------------------------------------------------------------------------------- #
class Alpine01(Benchmark):
"""
Alpine 1 test objective function.
This class defines the Alpine 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Alpine01}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\lvert {x_i \\sin \\left( x_i \\right) + 0.1 x_i} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Alpine01.png
:alt: Alpine 1 function
:align: center
**Two-dimensional Alpine 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x*sin(x) + 0.1*x))
# -------------------------------------------------------------------------------- #
class Alpine02(Benchmark):
"""
Alpine 2 test objective function.
This class defines the Alpine 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Alpine02}}(\\mathbf{x}) = \\prod_{i=1}^{n} \\sqrt{x_i} \\sin(x_i)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Alpine02.png
:alt: Alpine 2 function
:align: center
**Two-dimensional Alpine 2 function**
*Global optimum*: :math:`f(x_i) = -6.1295` for :math:`x_i = 7.917` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [7.91705268, 4.81584232]
self.fglob = -6.12950
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return prod(sqrt(x)*sin(x))
# -------------------------------------------------------------------------------- #
class AMGM(Benchmark):
"""
AMGM test objective function.
This class defines the Arithmetic Mean - Geometric Mean Equality global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{AMGM}}(\\mathbf{x}) = \\left ( \\frac{1}{n} \\sum_{i=1}^{n} x_i - \\sqrt[n]{ \\prod_{i=1}^{n} x_i} \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/AMGM.png
:alt: AMGM function
:align: center
**Two-dimensional Arithmetic Mean - Geometric Mean Equality function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_1 = x_2 = ... = x_n` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [1, 1]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
f1 = sum(x)
f2 = prod(x)
xsum = f1
f1 = f1/n
f2 = f2**(1.0/n)
return (f1 - f2)**2
# -------------------------------------------------------------------------------- #
class BartelsConn(Benchmark):
"""
Bartels-Conn test objective function.
This class defines the Bartels-Conn global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{BartelsConn}}(\\mathbf{x}) = \\lvert {x_1^2 + x_2^2 + x_1x_2} \\rvert + \\lvert {\\sin(x_1)} \\rvert + \\lvert {\\cos(x_2)} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,...,n`.
.. figure:: figures/BartelsConn.png
:alt: Bartels-Conn function
:align: center
**Two-dimensional Bartels-Conn function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return abs(x1**2.0 + x2**2.0 + x1*x2) + abs(sin(x1)) + abs(cos(x2))
# -------------------------------------------------------------------------------- #
class Beale(Benchmark):
"""
Beale test objective function.
This class defines the Beale global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Beale}}(\\mathbf{x}) = \\left(x_1 x_2 - x_1 + 1.5\\right)^{2} + \\left(x_1 x_2^{2} - x_1 + 2.25\\right)^{2} + \\left(x_1 x_2^{3} - x_1 + 2.625\\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Beale.png
:alt: Beale function
:align: center
**Two-dimensional Beale function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [3, 0.5]`
"""
    def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-4.5] * self.dimensions,
[ 4.5] * self.dimensions))
self.global_optimum = [3.0, 0.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2
# -------------------------------------------------------------------------------- #
class Bird(Benchmark):
"""
Bird test objective function.
This class defines the Bird global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bird}}(\\mathbf{x}) = \\left(x_1 - x_2\\right)^{2} + e^{\left[1 - \\sin\\left(x_1\\right) \\right]^{2}} \\cos\\left(x_2\\right) + e^{\left[1 - \\cos\\left(x_2\\right)\\right]^{2}} \\sin\\left(x_1\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2\\pi, 2\\pi]` for :math:`i=1,2`.
.. figure:: figures/Bird.png
:alt: Bird function
:align: center
**Two-dimensional Bird function**
*Global optimum*: :math:`f(x_i) = -106.7645367198034` for :math:`\\mathbf{x} = [4.701055751981055 , 3.152946019601391]` or
:math:`\\mathbf{x} = [-1.582142172055011, -3.130246799635430]`
"""
def __init__(self):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-2.0*pi] * self.dimensions,
[ 2.0*pi] * self.dimensions))
self.global_optimum = ([4.701055751981055 , 3.152946019601391],
[-1.582142172055011, -3.130246799635430])
self.fglob = -106.7645367198034
def evaluator(self, x, *args):
self.fun_evals += 1
return sin(x[0])*exp((1-cos(x[1]))**2) + cos(x[1])*exp((1-sin(x[0]))**2) + (x[0]-x[1])**2
# -------------------------------------------------------------------------------- #
class Bohachevsky(Benchmark):
"""
Bohachevsky test objective function.
This class defines the Bohachevsky global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bohachevsky}}(\\mathbf{x}) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2x_{i+1}^2 - 0.3\\cos(3\\pi x_i) - 0.4\\cos(4\\pi x_{i+1}) + 0.7\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-15, 15]` for :math:`i=1,...,n`.
.. figure:: figures/Bohachevsky.png
:alt: Bohachevsky function
:align: center
**Two-dimensional Bohachevsky function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-15.0] * self.dimensions,
[ 15.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x0 = x[:-1]
x1 = roll(x,-1)[:-1]
return sum(x0**2 + 2*x1**2 - 0.3 * cos(3*pi*x0) - 0.4 * cos(4*pi*x1) + 0.7)
# -------------------------------------------------------------------------------- #
class BoxBetts(Benchmark):
"""
BoxBetts test objective function.
This class defines the Box-Betts global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{BoxBetts}}(\\mathbf{x}) = \\sum_{i=1}^k g(x_i)^2
Where, in this exercise:
.. math:: g(x) = e^{-0.1(i+1)x_1} - e^{-0.1(i+1)x_2} - \\left[(e^{-0.1(i+1)}) - e^{-(i+1)}x_3\\right]
And :math:`k = 10`.
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [0.9, 1.2], x_2 \\in [9, 11.2], x_3 \\in [0.9, 1.2]`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 10, 1]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])
self.global_optimum = [1.0, 10.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(1, 11):
y += (exp(-0.1*i*x[0]) - exp(-0.1*i*x[1]) - (exp(-0.1*i) - exp(-1.0*i))*x[2])**2.0
return y
# -------------------------------------------------------------------------------- #
class Branin01(Benchmark):
"""
Branin 1 test objective function.
This class defines the Branin 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Branin01}}(\\mathbf{x}) = \\left(- 1.275 \\frac{x_1^{2}}{\pi^{2}} + 5 \\frac{x_1}{\pi} + x_2 -6\\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) + 10
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-5, 10], x_2 \\in [0, 15]`
.. figure:: figures/Branin01.png
:alt: Branin 1 function
:align: center
**Two-dimensional Branin 1 function**
*Global optimum*: :math:`f(x_i) = 0.39788735772973816` for :math:`\\mathbf{x} = [-\\pi, 12.275]` or
:math:`\\mathbf{x} = [\\pi, 2.275]` or :math:`\\mathbf{x} = [9.42478, 2.475]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-5., 10.), (0., 15.)]
self.global_optimum = [(-pi, 12.275), (pi, 2.275), (9.42478, 2.475)]
self.fglob = 0.39788735772973816
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[1]-(5.1/(4*pi**2))*x[0]**2+5*x[0]/pi-6)**2+10*(1-1/(8*pi))*cos(x[0])+10
# -------------------------------------------------------------------------------- #
class Branin02(Benchmark):
"""
Branin 2 test objective function.
This class defines the Branin 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Branin02}}(\\mathbf{x}) = \\left(- 1.275 \\frac{x_1^{2}}{\pi^{2}} + 5 \\frac{x_1}{\pi} + x_2 -6\\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) \\cos\\left(x_2\\right) + \\log(x_1^2+x_2^2 +1) + 10
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 15]` for :math:`i=1,2`.
.. figure:: figures/Branin02.png
:alt: Branin 2 function
:align: center
**Two-dimensional Branin 2 function**
*Global optimum*: :math:`f(x_i) = 5.559037` for :math:`\\mathbf{x} = [-3.2, 12.53]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-5.0, 15.0), (-5.0, 15.0)]
self.global_optimum = [-3.2, 12.53]
self.fglob = 5.559037
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[1]-(5.1/(4*pi**2))*x[0]**2+5*x[0]/pi-6)**2+10*(1-1/(8*pi))*cos(x[0])*cos(x[1])+log(x[0]**2.0+x[1]**2.0+1.0)+10
# -------------------------------------------------------------------------------- #
class Brent(Benchmark):
"""
Brent test objective function.
This class defines the Brent global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Brent}}(\\mathbf{x}) = (x_1 + 10)^2 + (x_2 + 10)^2 + e^{(-x_1^2-x_2^2)}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Brent.png
:alt: Brent function
:align: center
**Two-dimensional Brent function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, -10]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-10, 2), (-10, 2)]
self.global_optimum = [-10.0, -10.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] + 10.0)**2.0 + (x[1] + 10.0)**2.0 + exp(-x[0]**2.0 - x[1]**2.0)
# -------------------------------------------------------------------------------- #
class Brown(Benchmark):
"""
Brown test objective function.
This class defines the Brown global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Brown}}(\\mathbf{x}) = \\sum_{i=1}^{n-1}\\left[ \\left(x_i^2\\right)^{x_{i+1}^2+1} + \\left(x_{i+1}^2\\right)^{x_i^2+1} \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 4]` for :math:`i=1,...,n`.
.. figure:: figures/Brown.png
:alt: Brown function
:align: center
**Two-dimensional Brown function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 4.0] * self.dimensions))
self.custom_bounds = [(-1.0, 1.0), (-1.0, 1.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x0 = x[:-1]
x1 = x[1:]
return sum((x0**2.0)**(x1**2.0 + 1.0) + (x1**2.0)**(x0**2.0 + 1.0))
# -------------------------------------------------------------------------------- #
class Bukin02(Benchmark):
"""
Bukin 2 test objective function.
This class defines the Bukin 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin02}}(\\mathbf{x}) = 100 (x_2 - 0.01x_1^2 + 1) + 0.01(x_1 + 10)^2
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin02.png
:alt: Bukin 2 function
:align: center
**Two-dimensional Bukin 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*(x[1]**2 - 0.01*x[0]**2 + 1.0) + 0.01*(x[0] + 10.0)**2.0
# -------------------------------------------------------------------------------- #
class Bukin04(Benchmark):
"""
Bukin 4 test objective function.
This class defines the Bukin 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin04}}(\\mathbf{x}) = 100 x_2^{2} + 0.01 \\lvert{x_1 + 10} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin04.png
:alt: Bukin 4 function
:align: center
**Two-dimensional Bukin 4 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*x[1]**2 + 0.01*abs(x[0] + 10)
# -------------------------------------------------------------------------------- #
class Bukin06(Benchmark):
"""
Bukin 6 test objective function.
This class defines the Bukin 6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin06}}(\\mathbf{x}) = 100 \\sqrt{ \\lvert{x_2 - 0.01 x_1^{2}} \\rvert} + 0.01 \\lvert{x_1 + 10} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin06.png
:alt: Bukin 6 function
:align: center
**Two-dimensional Bukin 6 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*sqrt(abs(x[1] - 0.01*x[0]**2)) + 0.01*abs(x[0] + 10)
# -------------------------------------------------------------------------------- #
class CarromTable(Benchmark):
"""
CarromTable test objective function.
This class defines the CarromTable global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CarromTable}}(\\mathbf{x}) = - \\frac{1}{30} e^{2 \\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\\right|} \\cos^{2}\\left(x_{1}\\right) \\cos^{2}\\left(x_{2}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CarromTable.png
:alt: CarromTable function
:align: center
**Two-dimensional CarromTable function**
*Global optimum*: :math:`f(x_i) = -24.15681551650653` for :math:`x_i = \\pm 9.646157266348881` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(9.646157266348881 , 9.646134286497169),
(-9.646157266348881, 9.646134286497169),
(9.646157266348881 , -9.646134286497169),
(-9.646157266348881, -9.646134286497169)]
self.fglob = -24.15681551650653
def evaluator(self, x, *args):
self.fun_evals += 1
return -((cos(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi)))**2)/30
# -------------------------------------------------------------------------------- #
class Chichinadze(Benchmark):
"""
Chichinadze test objective function.
This class defines the Chichinadze global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Chichinadze}}(\\mathbf{x}) = x_{1}^{2} - 12 x_{1} + 8 \\sin\\left(\\frac{5}{2} \\pi x_{1}\\right) + 10 \\cos\\left(\\frac{1}{2} \\pi x_{1}\\right) + 11 - 0.2 \\frac{\\sqrt{5}}{e^{\\frac{1}{2} \\left(x_{2} -0.5\\right)^{2}}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-30, 30]` for :math:`i=1,2`.
.. figure:: figures/Chichinadze.png
:alt: Chichinadze function
:align: center
**Two-dimensional Chichinadze function**
*Global optimum*: :math:`f(x_i) = -42.94438701899098` for :math:`\\mathbf{x} = [6.189866586965680, 0.5]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-30.0] * self.dimensions,
[ 30.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [6.189866586965680, 0.5]
self.fglob = -42.94438701899098
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**2 - 12*x[0] + 11 + 10*cos(pi*x[0]/2) + 8*sin(5*pi*x[0]/2) - 1.0/sqrt(5)*exp(-((x[1] - 0.5)**2)/2)
# -------------------------------------------------------------------------------- #
class Cigar(Benchmark):
"""
Cigar test objective function.
This class defines the Cigar global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Cigar}}(\\mathbf{x}) = x_1^2 + 10^6\\sum_{i=2}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Cigar.png
:alt: Cigar function
:align: center
**Two-dimensional Cigar function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**2 + 1e6*sum(x[1:]**2)
# -------------------------------------------------------------------------------- #
class Cola(Benchmark):
"""
Cola test objective function.
This class defines the Cola global optimization problem. The 17-dimensional function computes
indirectly the formula :math:`f(n, u)` by setting :math:`x_0 = y_0, x_1 = u_0, x_i = u_{2(i−2)}, y_i = u_{2(i−2)+1}` :
.. math::
f_{\\text{Cola}}(\\mathbf{x}) = \\sum_{i<j}^{n} \\left (r_{i,j} - d_{i,j} \\right )^2
Where :math:`r_{i,j}` is given by:
.. math::
r_{i,j} = \\sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
And :math:`d` is a symmetric matrix given by:
.. math::
\\mathbf{d} = \\left [ d_{ij} \\right ] = \\begin{pmatrix}
1.27 & & & & & & & & \\\\
1.69 & 1.43 & & & & & & & \\\\
2.04 & 2.35 & 2.43 & & & & & & \\\\
3.09 & 3.18 & 3.26 & 2.85 & & & & & \\\\
3.20 & 3.22 & 3.27 & 2.88 & 1.55 & & & & \\\\
2.86 & 2.56 & 2.58 & 2.59 & 3.12 & 3.06 & & & \\\\
3.17 & 3.18 & 3.18 & 3.12 & 1.31 & 1.64 & 3.00 & \\\\
3.21 & 3.18 & 3.18 & 3.17 & 1.70 & 1.36 & 2.95 & 1.32 & \\\\
2.38 & 2.31 & 2.42 & 1.94 & 2.85 & 2.81 & 2.56 & 2.91 & 2.97
\\end{pmatrix}
This function has bounds :math:`0 \\leq x_0 \\leq 4` and :math:`-4 \\leq x_i \\leq 4` for :math:`i = 1,...,n-1`. It
has a global minimum of 11.7464.
"""
def __init__(self, dimensions=17):
Benchmark.__init__(self, dimensions)
self.bounds = [[0.0, 4.0]] + \
list(zip([-4.0] * (self.dimensions-1),
[ 4.0] * (self.dimensions-1)))
self.global_optimum = [0.651906, 1.30194, 0.099242, -0.883791,
-0.8796, 0.204651, -3.28414, 0.851188,
-3.46245, 2.53245, -0.895246, 1.40992,
-3.07367, 1.96257, -2.97872, -0.807849,
-1.68978]
self.fglob = 11.7464
def evaluator(self, x, *args):
self.fun_evals += 1
d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.27, 0, 0, 0, 0, 0, 0, 0, 0],
[1.69, 1.43, 0, 0, 0, 0, 0, 0, 0],
[2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0],
[3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0],
[3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0],
[2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0],
[3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0],
[3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0],
[2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97]])
# WARNING: This doesn't seem to follow guidelines above...
x1 = asarray([0.0, x[0]] + list(x[1::2]))
x2 = asarray([0.0, 0.0] + list(x[2::2]))
y = 0.0
for i in range(1, len(x1)):
y += sum((sqrt((x1[i] - x1[0:i])**2.0 +
(x2[i] - x2[0:i])**2.0)
- d[i, 0:i])**2.0)
return y
# -------------------------------------------------------------------------------- #
class Colville(Benchmark):
"""
Colville test objective function.
This class defines the Colville global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Colville}}(\\mathbf{x}) = \\left(x_{1} -1\\right)^{2} + 100 \\left(x_{1}^{2} - x_{2}\\right)^{2} + 10.1 \\left(x_{2} -1\\right)^{2} + \\left(x_{3} -1\\right)^{2} + 90 \\left(x_{3}^{2} - x_{4}\\right)^{2} + 10.1 \\left(x_{4} -1\\right)^{2} + 19.8 \\frac{x_{4} -1}{x_{2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*(x[0]**2-x[1])**2+(x[0]-1)**2+(x[2]-1)**2+90*(x[2]**2-x[3])**2+ 10.1*((x[1]-1)**2+(x[3]-1)**2)+19.8*(1/x[1])*(x[3]-1)
# -------------------------------------------------------------------------------- #
class Corana(Benchmark):
"""
Corana test objective function.
This class defines the Corana global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Corana}}(\\mathbf{x}) = \\begin{cases} \\sum_{i=1}^n 0.15 d_i [z_i - 0.05\\textrm{sgn}(z_i)]^2 & \\textrm{if}|x_i-z_i| < 0.05 \\\\
d_ix_i^2 & \\textrm{otherwise}\\end{cases}
Where, in this exercise:
.. math::
z_i = 0.2 \\lfloor |x_i/s_i|+0.49999\\rfloor\\textrm{sgn}(x_i), d_i=(1,1000,10,100, ...)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
d = [1., 1000., 10., 100.]
r = 0
for j in range(4):
zj = floor(abs(x[j]/0.2) + 0.49999)*sign(x[j]) * 0.2
if abs(x[j]-zj) < 0.05:
r += 0.15 * ((zj - 0.05*sign(zj))**2) * d[j]
else:
r += d[j] * x[j] * x[j]
return r
# -------------------------------------------------------------------------------- #
class CosineMixture(Benchmark):
"""
Cosine Mixture test objective function.
This class defines the Cosine Mixture global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CosineMixture}}(\\mathbf{x}) = -0.1 \\sum_{i=1}^n \\cos(5 \\pi x_i) - \\sum_{i=1}^n x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,N`.
.. figure:: figures/CosineMixture.png
:alt: Cosine Mixture function
:align: center
**Two-dimensional Cosine Mixture function**
*Global optimum*: :math:`f(x_i) = -0.1N` for :math:`x_i = 0` for :math:`i=1,...,N`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -0.1*self.dimensions
def evaluator(self, x, *args):
self.fun_evals += 1
return -0.1*sum(cos(5.0*pi*x)) - sum(x**2.0)
# -------------------------------------------------------------------------------- #
class CrossInTray(Benchmark):
"""
Cross-in-Tray test objective function.
This class defines the Cross-in-Tray global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrossInTray}}(\\mathbf{x}) = - 0.0001 \\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-15, 15]` for :math:`i=1,2`.
.. figure:: figures/CrossInTray.png
:alt: Cross-in-Tray function
:align: center
**Two-dimensional Cross-in-Tray function**
*Global optimum*: :math:`f(x_i) = -2.062611870822739` for :math:`x_i = \\pm 1.349406608602084` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(1.349406685353340 , 1.349406608602084),
(-1.349406685353340, 1.349406608602084),
(1.349406685353340, -1.349406608602084),
(-1.349406685353340, -1.349406608602084)]
self.fglob = -2.062611870822739
def evaluator(self, x, *args):
self.fun_evals += 1
return -0.0001*(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(0.1)
# -------------------------------------------------------------------------------- #
class CrossLegTable(Benchmark):
"""
Cross-Leg-Table test objective function.
This class defines the Cross-Leg-Table global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrossLegTable}}(\\mathbf{x}) = - \\frac{1}{\\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CrossLegTable.png
:alt: Cross-Leg-Table function
:align: center
**Two-dimensional Cross-Leg-Table function**
*Global optimum*: :math:`f(x_i) = -1`. The global minimum is found on the planes :math:`x_1 = 0` and :math:`x_2 = 0`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
# WARNING: There was an error here, I added the global optimum
self.global_optimum = [0.0, 2.0]
self.fglob = -1.0
def evaluator(self, x, *args):
self.fun_evals += 1
return -(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(-0.1)
# -------------------------------------------------------------------------------- #
class CrownedCross(Benchmark):
"""
Crowned Cross test objective function.
This class defines the Crowned Cross global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrownedCross}}(\\mathbf{x}) = 0.0001 \\left(\\left|{e^{\\left|{100- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CrownedCross.png
:alt: Crowned Cross function
:align: center
**Two-dimensional Crowned Cross function**
*Global optimum*: :math:`f(x_i) = 0.0001`. The global minimum is found on the planes :math:`x_1 = 0` and :math:`x_2 = 0`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0, 0]
self.fglob = 0.0001
def evaluator(self, x, *args):
self.fun_evals += 1
return 0.0001*(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(0.1)
# -------------------------------------------------------------------------------- #
class Csendes(Benchmark):
"""
Csendes test objective function.
This class defines the Csendes global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Csendes}}(\\mathbf{x}) = \\sum_{i=1}^n x_i^6 \\left[ 2 + \\sin \\left( \\frac{1}{x_i} \\right ) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,N`.
.. figure:: figures/Csendes.png
:alt: Csendes function
:align: center
**Two-dimensional Csendes function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x_i = 0` for :math:`i=1,...,N`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((x**6.0)*(2.0 + sin(1.0/x)))
# -------------------------------------------------------------------------------- #
class Cube(Benchmark):
"""
Cube test objective function.
This class defines the Cube global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Cube}}(\\mathbf{x}) = 100(x_2 - x_1^3)^2 + (1 - x_1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,N`.
.. figure:: figures/Cube.png
:alt: Cube function
:align: center
**Two-dimensional Cube function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`\\mathbf{x} = [1, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100.0*(x[1] - x[0]**3.0)**2.0 + (1.0 - x[0])**2.0
# -------------------------------------------------------------------------------- #
class Damavandi(Benchmark):
"""
Damavandi test objective function.
This class defines the Damavandi global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Damavandi}}(\\mathbf{x}) = \\left[ 1 - \\lvert{\\frac{\\sin[\\pi(x_1-2)]\\sin[\\pi(x_2-2)]}{\\pi^2(x_1-2)(x_2-2)}} \\rvert^5 \\right] \\left[2 + (x_1-7)^2 + 2(x_2-7)^2 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 14]` for :math:`i=1,...,n`.
.. figure:: figures/Damavandi.png
:alt: Damavandi function
:align: center
**Two-dimensional Damavandi function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x_i = 2` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[14.0] * self.dimensions))
self.global_optimum = [2.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
numerator = sin(pi*(x1 - 2.0))*sin(pi*(x2 - 2.0))
denumerator = (pi**2)*(x1 - 2.0)*(x2 - 2.0)
factor1 = 1.0 - (abs(numerator / denumerator))**5.0
factor2 = 2 + (x1 - 7.0)**2.0 + 2*(x2 - 7.0)**2.0
return factor1*factor2
# -------------------------------------------------------------------------------- #
class Deb01(Benchmark):
"""
Deb 1 test objective function.
This class defines the Deb 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deb01}}(\\mathbf{x}) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6(5 \\pi x_i)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deb01.png
:alt: Deb 1 function
:align: center
**Two-dimensional Deb 1 function**
*Global optimum*: :math:`f(x_i) = -1.0`. There are :math:`5^n` global minima, evenly spaced
in the function landscape, where :math:`n` represents the dimension of the problem.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.3, -0.3]
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -(1.0/self.dimensions)*sum(sin(5*pi*x)**6.0)
# -------------------------------------------------------------------------------- #
class Deb02(Benchmark):
"""
Deb 2 test objective function.
This class defines the Deb 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deb02}}(\\mathbf{x}) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6 \\left[ 5 \\pi \\left ( x_i^{3/4} - 0.05 \\right) \\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deb02.png
:alt: Deb 2 function
:align: center
**Two-dimensional Deb 2 function**
*Global optimum*: :math:`f(x_i) = -1.0`. There are :math:`5^n` global minima, evenly spaced
in the function landscape, where :math:`n` represents the dimension of the problem.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.93388314, 0.68141781]
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -(1.0/self.dimensions)*sum(sin(5*pi*(x**0.75 - 0.05))**6.0)
# -------------------------------------------------------------------------------- #
class Decanomial(Benchmark):
"""
Decanomial test objective function.
This class defines the Decanomial function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Decanomial}}(\\mathbf{x}) = 0.001 \\left(\\lvert{x_{2}^{4} + 12 x_{2}^{3} + 54 x_{2}^{2} + 108 x_{2} + 81.0}\\rvert + \\lvert{x_{1}^{10} - 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6} - 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2} - 5120 x_{1} + 2624.0}\\rvert\\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Decanomial.png
:alt: Decanomial function
:align: center
**Two-dimensional Decanomial function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [2, -3]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(0, 2.5), (-2, -4)]
self.global_optimum = [2.0, -3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
F1 = abs(x[0]**10 - 20*x[0]**9 + 180*x[0]**8 - 960*x[0]**7 + 3360*x[0]**6 - 8064*x[0]**5 + \
13340*x[0]**4 - 15360*x[0]**3 + 11520*x[0]**2 - 5120*x[0] + 2624.0)
F2 = abs(x[1]**4 + 12*x[1]**3 + 54*x[1]**2 + 108*x[1] + 81.0)
return 0.001*(F1 + F2)**2
# -------------------------------------------------------------------------------- #
class Deceptive(Benchmark):
"""
Deceptive test objective function.
This class defines the Deceptive global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deceptive}}(\\mathbf{x}) = - \\left [\\frac{1}{n} \\sum_{i=1}^{n} g_i(x_i) \\right ]^{\\beta}
Where :math:`\\beta` is a fixed non-linearity factor; in this exercise, :math:`\\beta = 2`. The function :math:`g_i(x_i)`
is given by:
.. math::
g_i(x_i) = \\begin{cases} - \\frac{x}{\\alpha_i} + \\frac{4}{5} & \\textrm{if} \\hspace{5pt} 0 \\leq x_i \\leq \\frac{4}{5} \\alpha_i \\\\
\\frac{5x}{\\alpha_i} -4 & \\textrm{if} \\hspace{5pt} \\frac{4}{5} \\alpha_i \\le x_i \\leq \\alpha_i \\\\
\\frac{5(x - \\alpha_i)}{\\alpha_i-1} + 1 & \\textrm{if} \\hspace{5pt} \\alpha_i \\le x_i \\leq \\frac{1 + 4\\alpha_i}{5} \\\\
\\frac{x - 1}{1 - \\alpha_i} + \\frac{4}{5} & \\textrm{if} \\hspace{5pt} \\frac{1 + 4\\alpha_i}{5} \\le x_i \\leq 1 \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deceptive.png
:alt: Deceptive function
:align: center
**Two-dimensional Deceptive function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = \\alpha_i` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
n = self.dimensions
self.global_optimum = numpy.arange(1.0, n + 1.0)/(n + 1.0)
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
alpha = numpy.arange(1.0, n + 1.0)/(n + 1.0)
beta = 2.0
g = zeros((n, ))
for i in range(n):
if x[i] <= 0.0:
g[i] = x[i]
elif x[i] < 0.8*alpha[i]:
g[i] = -x[i]/alpha[i] + 0.8
elif x[i] < alpha[i]:
g[i] = 5.0*x[i]/alpha[i] - 4.0
elif x[i] < (1.0 + 4*alpha[i])/5.0:
g[i] = 5.0*(x[i] - alpha[i])/(alpha[i] - 1.0) + 1.0
elif x[i] <= 1.0:
g[i] = (x[i] - 1.0)/(1.0 - alpha[i]) + 4.0/5.0
else:
g[i] = x[i] - 1.0
return -((1.0/n)*sum(g))**beta
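# Illustrative check (a sketch, not part of the original suite): at x = alpha each
# g_i falls into the third branch and evaluates to 1, so f = -((1/n)*n)**beta = -1.
_deceptive_check = Deceptive()
print(_deceptive_check.evaluator(_deceptive_check.global_optimum))  # ~ -1.0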
# -------------------------------------------------------------------------------- #
class DeckkersAarts(Benchmark):
"""
Deckkers-Aarts test objective function.
This class defines the Deckkers-Aarts global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeckkersAarts}}(\\mathbf{x}) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2 + 10^{-5}(x_1^2 + x_2^2)^4
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-20, 20]` for :math:`i=1,2`.
.. figure:: figures/DeckkersAarts.png
:alt: DeckkersAarts function
:align: center
**Two-dimensional Deckkers-Aarts function**
*Global optimum*: :math:`f(x_i) = -24777` for :math:`\\mathbf{x} = [0, \\pm 15]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-20.0] * self.dimensions,
[ 20.0] * self.dimensions))
# WARNING: Custom bounds was a tuple of lists..
self.custom_bounds = [(-1, 1), (14, 16)]
self.global_optimum = [0.0, 15.0]
self.fglob = -24776.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 1e5*x1**2.0 + x2**2.0 - (x1**2.0 + x2**2.0)**2.0 + 1e-5*(x1**2.0 + x2**2.0)**4.0
# -------------------------------------------------------------------------------- #
class DeflectedCorrugatedSpring(Benchmark):
"""
DeflectedCorrugatedSpring test objective function.
This class defines the Deflected Corrugated Spring function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeflectedCorrugatedSpring}}(\\mathbf{x}) = 0.1\\sum_{i=1}^n (x_i - \\alpha)^2 - \\cos \\left( K \\sqrt {\\sum_{i=1}^n (x_i - \\alpha)^2} \\right )
Where, in this exercise, :math:`K = 5` and :math:`\\alpha = 5`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 2\\alpha]` for :math:`i=1,...,n`.
.. figure:: figures/DeflectedCorrugatedSpring.png
:alt: Deflected Corrugated Spring function
:align: center
**Two-dimensional Deflected Corrugated Spring function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = \\alpha` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
alpha = 5.0
self.bounds = list(zip([0] * self.dimensions,
[2*alpha] * self.dimensions))
self.global_optimum = [alpha] * self.dimensions
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
K, alpha = 5.0, 5.0
return -cos(K*sqrt(sum((x - alpha)**2))) + 0.1*sum((x - alpha)**2)
# -------------------------------------------------------------------------------- #
class DeVilliersGlasser01(Benchmark):
"""
DeVilliers-Glasser 1 test objective function.
This class defines the DeVilliers-Glasser 1 function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeVilliersGlasser01}}(\\mathbf{x}) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i} \\sin(x_3t_i + x_4) - y_i \\right ]^2
Where, in this exercise, :math:`t_i = 0.1(i-1)` and :math:`y_i = 60.137(1.371^{t_i}) \\sin(3.112t_i + 1.761)`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [1, 100]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [60.137, 1.371, 3.112, 1.761]`.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 1.0] * self.dimensions,
[100.0] * self.dimensions))
self.global_optimum = [60.137, 1.371, 3.112, 1.761]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
t_i = 0.1*numpy.arange(24)
y_i = 60.137*(1.371**t_i)*sin(3.112*t_i + 1.761)
x1, x2, x3, x4 = x
return sum((x1*(x2**t_i)*sin(x3*t_i + x4) - y_i)**2.0)
# -------------------------------------------------------------------------------- #
class DeVilliersGlasser02(Benchmark):
"""
DeVilliers-Glasser 2 test objective function.
This class defines the DeVilliers-Glasser 2 function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeVilliersGlasser02}}(\\mathbf{x}) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i} \\tanh \\left [x_3t_i + \\sin(x_4t_i) \\right] \\cos(t_ie^{x_5}) - y_i \\right ]^2
Where, in this exercise, :math:`t_i = 0.1(i-1)` and :math:`y_i = 53.81(1.27^{t_i}) \\tanh (3.012t_i + \\sin(2.13t_i)) \\cos(e^{0.507}t_i)`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [1, 60]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [53.81, 1.27, 3.012, 2.13, 0.507]`.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 1.0] * self.dimensions,
[60.0] * self.dimensions))
self.global_optimum = [53.81, 1.27, 3.012, 2.13, 0.507]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
t_i = 0.1*numpy.arange(16)
y_i = 53.81*1.27**t_i*tanh(3.012*t_i + sin(2.13*t_i))*cos(exp(0.507)*t_i)
x1, x2, x3, x4, x5 = x
return sum((x1*(x2**t_i)*tanh(x3*t_i + sin(x4*t_i))*cos(t_i*exp(x5)) - y_i)**2.0)
# -------------------------------------------------------------------------------- #
class DixonPrice(Benchmark):
"""
Dixon and Price test objective function.
This class defines the Dixon and Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DixonPrice}}(\\mathbf{x}) = (x_1 - 1)^2 + \\sum_{i=2}^n i(2x_i^2 - x_{i-1})^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/DixonPrice.png
:alt: Dixon and Price function
:align: center
**Two-dimensional Dixon and Price function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 2^{- \\frac{(2^i-2)}{2^i}}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2, 3), (-2, 3)]
self.global_optimum = [2.0**(-(2.0**i-2.0)/2.0**i)
for i in range(1, self.dimensions+1)]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
s = 0.0
for i in range(1, self.dimensions):
s += i*(2.0*x[i]**2.0 - x[i-1])**2.0
y = s + (x[0] - 1.0)**2.0
return y
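# Illustrative check (sketch only; variable names are just for the demo): for n = 2 the
# closed-form optimum is x = [1, 2**-0.5], which zeroes both (x_1 - 1)**2 and 2*(2*x_2**2 - x_1)**2.
_dixonprice_check = DixonPrice()
print(_dixonprice_check.evaluator(numpy.asarray(_dixonprice_check.global_optimum)))  # ~ 0.0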
# -------------------------------------------------------------------------------- #
class Dolan(Benchmark):
"""
Dolan test objective function.
This class defines the Dolan global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Dolan}}(\\mathbf{x}) = \\lvert (x_1 + 1.7x_2)\\sin(x_1) - 1.5x_3 - 0.1x_4\\cos(x_4 + x_5 - x_1) + 0.2x_5^2 - x_2 - 1 \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,5`.
*Global optimum*: :math:`f(x_i) = 10^{-5}` for :math:`\\mathbf{x} = [8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806]`
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [8.39045925, 4.81424707, 7.34574133,
68.88246895, 3.85470806]
self.fglob = 1e-5
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3, x4, x5 = x
return abs((x1 + 1.7*x2)*sin(x1) - 1.5*x3 - 0.1*x4*cos(x4 + x5 - x1) + 0.2*x5**2.0 - x2 - 1.0)
# -------------------------------------------------------------------------------- #
class DropWave(Benchmark):
"""
DropWave test objective function.
This class defines the DropWave global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DropWave}}(\\mathbf{x}) = - \\frac{1 + \\cos\\left(12 \\sqrt{\\sum_{i=1}^{n} x_i^{2}}\\right)}{2 + 0.5 \\sum_{i=1}^{n} x_i^{2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,2`.
.. figure:: figures/DropWave.png
:alt: DropWave function
:align: center
**Two-dimensional DropWave function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.0
def evaluator(self, x, *args):
self.fun_evals += 1
norm_x = sum(x**2)
return -(1+cos(12 * sqrt(norm_x)))/(0.5 * norm_x + 2)
# -------------------------------------------------------------------------------- #
class Easom(Benchmark):
"""
Easom test objective function.
This class defines the Easom global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Easom}}(\\mathbf{x}) = a - \\frac{a}{e^{b \\sqrt{\\frac{\\sum_{i=1}^{n} x_i^{2}}{n}}}} + e - e^{\\frac{\\sum_{i=1}^{n} \\cos\\left(c x_i\\right)}{n}}
Where, in this exercise, :math:`a = 20, b = 0.2` and :math:`c = 2\\pi`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Easom.png
:alt: Easom function
:align: center
**Two-dimensional Easom function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
a = 20.0
b = 0.2
c = 2*pi
n = self.dimensions
return -a * exp(-b * sqrt(sum(x**2) / n)) - exp(sum(cos(c * x)) / n) + a + exp(1)
# -------------------------------------------------------------------------------- #
class EggCrate(Benchmark):
"""
Egg Crate test objective function.
This class defines the Egg Crate global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{EggCrate}}(\\mathbf{x}) = x_1^2 + x_2^2 + 25 \\left[ \\sin^2(x_1) + \\sin^2(x_2) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/EggCrate.png
:alt: Egg Crate function
:align: center
**Two-dimensional Egg Crate function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return x1**2.0 + x2**2.0 + 25.0*(sin(x1)**2.0 + sin(x2)**2.0)
# -------------------------------------------------------------------------------- #
class EggHolder(Benchmark):
"""
Egg Holder test objective function.
This class defines the Egg Holder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{EggHolder}}(\\mathbf{x}) = - x_{1} \\sin\\left(\\sqrt{\\lvert{x_{1} - x_{2} -47}\\rvert}\\right) - \\left(x_{2} + 47\\right) \\sin\\left(\\sqrt{\\left|{\\frac{1}{2} x_{1} + x_{2} + 47}\\right|}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-512, 512]` for :math:`i=1,2`.
.. figure:: figures/EggHolder.png
:alt: Egg Holder function
:align: center
**Two-dimensional Egg Holder function**
*Global optimum*: :math:`f(x_i) = -959.640662711` for :math:`\\mathbf{x} = [512, 404.2319]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-512.0] * self.dimensions,
[ 512.0] * self.dimensions))
self.global_optimum = [512.0, 404.2319]
self.fglob = -959.640662711
def evaluator(self, x, *args):
self.fun_evals += 1
return -(x[1]+47)*sin(sqrt(abs(x[1]+x[0]/2+47)))-x[0]*sin(sqrt(abs(x[0]-(x[1]+47))))
# -------------------------------------------------------------------------------- #
class ElAttarVidyasagarDutta(Benchmark):
"""
El-Attar-Vidyasagar-Dutta test objective function.
This class defines the El-Attar-Vidyasagar-Dutta function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{ElAttarVidyasagarDutta}}(\\mathbf{x}) = (x_1^2 + x_2 - 10)^2 + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/ElAttarVidyasagarDutta.png
:alt: El-Attar-Vidyasagar-Dutta function
:align: center
**Two-dimensional El-Attar-Vidyasagar-Dutta function**
*Global optimum*: :math:`f(x_i) = 1.712780354` for :math:`\\mathbf{x} = [3.40918683, -2.17143304]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-4, 4), (-4, 4)]
self.global_optimum = [3.40918683, -2.17143304]
self.fglob = 1.712780354
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (x1**2.0 + x2 - 10)**2.0 + (x1 + x2**2.0 - 7)**2.0 + (x1**2.0 + x2**3.0 - 1)**2.0
# -------------------------------------------------------------------------------- #
class Exp2(Benchmark):
"""
Exp2 test objective function.
This class defines the Exp2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Exp2}}(\\mathbf{x}) = \\sum_{i=0}^9 \\left ( e^{-ix_1/10} - 5e^{-10ix_2} -e^{-i/10} + 5e^{-i} \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 20]` for :math:`i=1,2`.
.. figure:: figures/Exp2.png
:alt: Exp2 function
:align: center
**Two-dimensional Exp2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = [1, 0.1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[20.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 0.1]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(10):
y += (exp(-i*x[0]/10.0) - 5*exp(-i*x[1]*10) - exp(-i/10.0) + 5*exp(-i))**2.0
return y
# -------------------------------------------------------------------------------- #
class Exponential(Benchmark):
"""
Exponential test objective function.
This class defines the Exponential global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Exponential}}(\\mathbf{x}) = -e^{-0.5 \\sum_{i=1}^n x_i^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Exponential.png
:alt: Exponential function
:align: center
**Two-dimensional Exponential function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -exp(-0.5*sum(x**2.0))
# -------------------------------------------------------------------------------- #
class FreudensteinRoth(Benchmark):
"""
FreudensteinRoth test objective function.
This class defines the Freudenstein & Roth global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{FreudensteinRoth}}(\\mathbf{x}) = \\left\{x_1 - 13 + \\left[(5 - x_2)x_2 - 2 \\right] x_2 \\right\}^2 + \\left \{x_1 - 29 + \\left[(x_2 + 1)x_2 - 14 \\right] x_2 \\right\}^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/FreudensteinRoth.png
:alt: FreudensteinRoth function
:align: center
**Two-dimensional FreudensteinRoth function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [5, 4]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-3, 3), (-5, 5)]
self.global_optimum = [5.0, 4.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = (-13.0 + x[0] + ((5.0 - x[1])*x[1] - 2.0)*x[1])**2
f2 = (-29.0 + x[0] + ((x[1] + 1.0)*x[1] - 14.0)*x[1])**2
return f1 + f2
# -------------------------------------------------------------------------------- #
class Gear(Benchmark):
"""
Gear test objective function.
This class defines the Gear global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Gear}}(\\mathbf{x}) = \\left \\{ \\frac{1.0}{6.931} - \\frac{\\lfloor x_1\\rfloor \\lfloor x_2 \\rfloor } {\\lfloor x_3 \\rfloor \\lfloor x_4 \\rfloor } \\right\\}^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [12, 60]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 2.7 \\cdot 10^{-12}` for :math:`\\mathbf{x} = [16, 19, 43, 49]`, where the various
:math:`x_i` may be permuted.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([12.0] * self.dimensions,
[60.0] * self.dimensions))
self.global_optimum = [16, 19, 43, 49]
self.fglob = 2.7e-12
def evaluator(self, x, *args):
self.fun_evals += 1
return (1.0/6.931 - floor(x[0])*floor(x[1])/(floor(x[2])*floor(x[3])))**2
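# Illustrative check of the quoted optimum (sketch only): 1/6.931 ~ 0.1442793 while the
# floor-based ratio 16*19/(43*49) = 304/2107 ~ 0.1442810, so the squared gap is ~ 2.7e-12.
_gear_check = Gear()
print(_gear_check.evaluator(numpy.asarray([16.0, 19.0, 43.0, 49.0])))  # ~ 2.7e-12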
# -------------------------------------------------------------------------------- #
class Giunta(Benchmark):
"""
Giunta test objective function.
This class defines the Giunta global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Giunta}}(\\mathbf{x}) = 0.6 + \\sum_{i=1}^{n} \\left[\\sin^{2}\\left(1 - \\frac{16}{15} x_i\\right) - \\frac{1}{50} \\sin\\left(4 - \\frac{64}{15} x_i\\right) - \\sin\\left(1 - \\frac{16}{15} x_i\\right)\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,2`.
.. figure:: figures/Giunta.png
:alt: Giunta function
:align: center
**Two-dimensional Giunta function**
*Global optimum*: :math:`f(x_i) = 0.06447042053690566` for :math:`\\mathbf{x} = [0.4673200277395354, 0.4673200169591304]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.4673200277395354, 0.4673200169591304]
self.fglob = 0.06447042053690566
def evaluator(self, x, *args):
self.fun_evals += 1
arg = 16*x/15.0 - 1
return 0.6 + sum(sin(arg) + sin(arg)**2 + sin(4*arg)/50)
# -------------------------------------------------------------------------------- #
class GoldsteinPrice(Benchmark):
"""
Goldstein-Price test objective function.
This class defines the Goldstein-Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{GoldsteinPrice}}(\\mathbf{x}) = \\left[ 1+(x_1+x_2+1)^2(19-14x_1+3x_1^2-14x_2+6x_1x_2+3x_2^2) \\right] \\left[ 30+(2x_1-3x_2)^2(18-32x_1+12x_1^2+48x_2-36x_1x_2+27x_2^2) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2, 2]` for :math:`i=1,2`.
.. figure:: figures/GoldsteinPrice.png
:alt: Goldstein-Price function
:align: center
**Two-dimensional Goldstein-Price function**
*Global optimum*: :math:`f(x_i) = 3` for :math:`\\mathbf{x} = [0, -1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-2.0] * self.dimensions,
[ 2.0] * self.dimensions))
self.global_optimum = [0., -1.]
self.fglob = 3.0
def evaluator(self, x, *args):
self.fun_evals += 1
a = 1+(x[0]+x[1]+1)**2*(19-14*x[0]+3*x[0]**2-14*x[1]+6*x[0]*x[1]+3*x[1]**2)
b = 30+(2*x[0]-3*x[1])**2*(18-32*x[0]+12*x[0]**2+48*x[1]-36*x[0]*x[1]+27*x[1]**2)
return a*b
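# Illustrative check at the quoted optimum x = [0, -1] (sketch only):
# a = 1 + 0**2*(...) = 1 and b = 30 + (0 + 3)**2*(18 - 48 + 27) = 3, hence f = 3.
_goldstein_check = GoldsteinPrice()
print(_goldstein_check.evaluator(numpy.asarray([0.0, -1.0])))  # 3.0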
# -------------------------------------------------------------------------------- #
class Griewank(Benchmark):
"""
Griewank test objective function.
This class defines the Griewank global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Griewank}}(\\mathbf{x}) = \\frac{1}{4000}\\sum_{i=1}^n x_i^2 - \\prod_{i=1}^n\\cos\\left(\\frac{x_i}{\\sqrt{i}}\\right) + 1
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-600, 600]` for :math:`i=1,...,n`.
.. figure:: figures/Griewank.png
:alt: Griewank function
:align: center
**Two-dimensional Griewank function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-600.0] * self.dimensions,
[ 600.0] * self.dimensions))
self.custom_bounds = [(-50, 50), (-50, 50)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**2)/4000.0 - prod(cos(x/sqrt(1.0+arange(len(x))))) + 1.0
# -------------------------------------------------------------------------------- #
class Gulf(Benchmark):
"""
Gulf test objective function.
This class defines the Gulf global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Gulf}}(\\mathbf{x}) = \\sum_{i=1}^m \\left( e^{-\\frac{\\lvert y_i - x_2 \\rvert^{x_3}}{x_1} } - t_i \\right)^2
Where, in this exercise:
.. math::
t_i = i/100 \\\\
y_i = 25 + [-50 \\log(t_i)]^{2/3}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 50]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [50, 25, 1.5]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[50.0] * self.dimensions))
self.global_optimum = [50.0, 25.0, 1.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3 = x
y = 0.0
for i in range(1, 31):
ti = i*0.01
yi = 25.0 + (-50*log(ti))**(2.0/3.0)
ai = yi - x2
y += (exp(-((abs(ai)**x3)/x1)) - ti)**2.0
return y
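# Illustrative check at the quoted optimum x = [50, 25, 1.5] (sketch only): with t_i = i/100
# and y_i = 25 + (-50*log(t_i))**(2/3), each residual exp(-|y_i - 25|**1.5/50) - t_i is exactly 0.
_gulf_check = Gulf()
print(_gulf_check.evaluator(numpy.asarray([50.0, 25.0, 1.5])))  # ~ 0.0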
# -------------------------------------------------------------------------------- #
class Hansen(Benchmark):
"""
Hansen test objective function.
This class defines the Hansen global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hansen}}(\\mathbf{x}) = \\left[ \\sum_{i=0}^4(i+1)\\cos(ix_1+i+1)\\right ] \\left[\\sum_{j=0}^4(j+1)\\cos[(j+2)x_2+j+1])\\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Hansen.png
:alt: Hansen function
:align: center
**Two-dimensional Hansen function**
*Global optimum*: :math:`f(x_i) = -176.54` for :math:`\\mathbf{x} = [-7.58989583, -7.70831466]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-7.58989583, -7.70831466]
self.fglob = -176.54
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = f2 = 0.0
for i in range(5):
f1 += (i+1)*cos(i*x[0] + i + 1)
f2 += (i+1)*cos((i+2)*x[1] + i + 1)
return f1*f2
# -------------------------------------------------------------------------------- #
class Hartmann3(Benchmark):
"""
Hartmann3 test objective function.
This class defines the Hartmann3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hartmann3}}(\\mathbf{x}) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\\begin{array}{l|ccc|c|ccr}
\\hline
i & & a_{ij}& & c_i & & p_{ij} & \\\\
\\hline
1 & 3.0 & 10.0 & 30.0 & 1.0 & 0.3689 & 0.1170 & 0.2673 \\\\
2 & 0.1 & 10.0 & 35.0 & 1.2 & 0.4699 & 0.4387 & 0.7470 \\\\
3 & 3.0 & 10.0 & 30.0 & 3.0 & 0.1091 & 0.8732 & 0.5547 \\\\
4 & 0.1 & 10.0 & 35.0 & 3.2 & 0.0381 & 0.5743 & 0.8828 \\\\
\\hline
\\end{array}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = -3.86278214782076` for :math:`\\mathbf{x} = [0.1, 0.55592003, 0.85218259]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.1, 0.55592003, 0.85218259]
self.fglob = -3.86278214782076
def evaluator(self, x, *args):
self.fun_evals += 1
a = asarray([[3.0, 0.1, 3.0, 0.1],
[10.0, 10.0, 10.0, 10.0],
[30.0, 35.0, 30.0, 35.0]])
p = asarray([[0.36890, 0.46990, 0.10910, 0.03815],
[0.11700, 0.43870, 0.87320, 0.57430],
[0.26730, 0.74700, 0.55470, 0.88280]])
c = asarray([1.0, 1.2, 3.0, 3.2])
d = zeros_like(c)
for i in range(4):
d[i] = sum(a[:, i]*(x - p[:, i])**2)
return -sum(c*exp(-d))
# -------------------------------------------------------------------------------- #
class Hartmann6(Benchmark):
"""
Hartmann6 test objective function.
This class defines the Hartmann6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hartmann6}}(\\mathbf{x}) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\\begin{array}{l|cccccc|r}
\\hline
i & & & a_{ij} & & & & c_i \\\\
\\hline
1 & 10.0 & 3.0 & 17.0 & 3.50 & 1.70 & 8.00 & 1.0 \\\\
2 & 0.05 & 10.0 & 17.0 & 0.10 & 8.00 & 14.00 & 1.2 \\\\
3 & 3.00 & 3.50 & 1.70 & 10.0 & 17.00 & 8.00 & 3.0 \\\\
4 & 17.00 & 8.00 & 0.05 & 10.00 & 0.10 & 14.00 & 3.2 \\\\
\\hline
\\end{array}
\\newline
\\\\
\\newline
\\begin{array}{l|cccccr}
\\hline
i & & & p_{ij} & & & \\\\
\\hline
1 & 0.1312 & 0.1696 & 0.5569 & 0.0124 & 0.8283 & 0.5886 \\\\
2 & 0.2329 & 0.4135 & 0.8307 & 0.3736 & 0.1004 & 0.9991 \\\\
3 & 0.2348 & 0.1451 & 0.3522 & 0.2883 & 0.3047 & 0.6650 \\\\
4 & 0.4047 & 0.8828 & 0.8732 & 0.5743 & 0.1091 & 0.0381 \\\\
\\hline
\\end{array}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,6`.
*Global optimum*: :math:`f(x_i) = -3.32236801141551` for :math:`\\mathbf{x} = [0.20168952, 0.15001069, 0.47687398, 0.27533243, 0.31165162, 0.65730054]`
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.20168952, 0.15001069, 0.47687398,
0.27533243, 0.31165162, 0.65730054]
self.fglob = -3.32236801141551
def evaluator(self, x, *args):
self.fun_evals += 1
a = asarray([[10.00, 0.05, 3.00, 17.00],
[3.00, 10.00, 3.50, 8.00],
[17.00, 17.00, 1.70, 0.05],
[3.50, 0.10, 10.00, 10.00],
[1.70, 8.00, 17.00, 0.10],
[8.00, 14.00, 8.00, 14.00]])
p = asarray([[0.1312, 0.2329, 0.2348, 0.4047],
[0.1696, 0.4135, 0.1451, 0.8828],
[0.5569, 0.8307, 0.3522, 0.8732],
[0.0124, 0.3736, 0.2883, 0.5743],
[0.8283, 0.1004, 0.3047, 0.1091],
[0.5886, 0.9991, 0.6650, 0.0381]])
c = asarray([1.0, 1.2, 3.0, 3.2])
d = zeros_like(c)
for i in range(4):
d[i] = sum(a[:, i]*(x - p[:, i])**2)
return -sum(c*exp(-d))
# -------------------------------------------------------------------------------- #
class HelicalValley(Benchmark):
"""
HelicalValley test objective function.
This class defines the HelicalValley global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HelicalValley}}(\\mathbf{x}) = 100\\left[(x_3 - 10\\Psi(x_1, x_2))^2 + \\left(\\sqrt{x_1^2 + x_2^2} - 1\\right)^2\\right] + x_3^2
Where, in this exercise:
.. math::
2\\pi\\Psi(x,y) = \\begin{cases} \\arctan(y/x) & \\textrm{for} x > 0 \\\\
\\pi + \\arctan(y/x) & \\textrm{for} x < 0 \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-\\infty, \\infty]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 0, 0]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100] * self.dimensions))
self.global_optimum = [1.0, 0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*((x[2] - 10*arctan2(x[1], x[0])/2/pi)**2 + (sqrt(x[0]**2 + x[1]**2) - 1)**2) + x[2]**2
# -------------------------------------------------------------------------------- #
class HimmelBlau(Benchmark):
"""
HimmelBlau test objective function.
This class defines the HimmelBlau global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HimmelBlau}}(\\mathbf{x}) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 -7)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-6, 6]` for :math:`i=1,2`.
.. figure:: figures/HimmelBlau.png
:alt: HimmelBlau function
:align: center
**Two-dimensional HimmelBlau function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [3, 2]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-6] * self.dimensions,
[ 6] * self.dimensions))
self.global_optimum = [3.0, 2.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] * x[0] + x[1] - 11)**2 + (x[0] + x[1] * x[1] - 7)**2
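# Illustrative check (sketch only): Himmelblau's function has four global minima with f = 0;
# besides (3, 2) they lie near (-2.805118, 3.131312), (-3.779310, -3.283186) and
# (3.584428, -1.848126).
_himmelblau_check = HimmelBlau()
print(_himmelblau_check.evaluator(numpy.asarray([3.0, 2.0])))  # 0.0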
# -------------------------------------------------------------------------------- #
class HolderTable(Benchmark):
"""
HolderTable test objective function.
This class defines the HolderTable global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HolderTable}}(\\mathbf{x}) = - \\left|{e^{\\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} }\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right|
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/HolderTable.png
:alt: HolderTable function
:align: center
**Two-dimensional HolderTable function**
*Global optimum*: :math:`f(x_i) = -19.20850256788675` for :math:`x_i = \\pm 9.664590028909654` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(8.055023472141116 , 9.664590028909654),
(-8.055023472141116, 9.664590028909654),
(8.055023472141116 , -9.664590028909654),
(-8.055023472141116, -9.664590028909654)]
self.fglob = -19.20850256788675
def evaluator(self, x, *args):
self.fun_evals += 1
return -abs(sin(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi)))
# -------------------------------------------------------------------------------- #
class Holzman(Benchmark):
"""
Holzman test objective function.
This class defines the Holzman global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Holzman}}(\\mathbf{x}) = \\sum_{i=0}^{99} \\left [ e^{\\frac{1}{x_1} (u_i-x_2)^{x_3}} -0.1(i+1) \\right ]
Where, in this exercise:
.. math::
u_i = 25 + (-50 \\log{[0.01(i+1)]})^{2/3}
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [0, 100], x_2 \\in [0, 25.6], x_3 \\in [0, 5]`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [50, 25, 1.5]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = ([0.0, 100.0], [0.0, 25.6], [0.0, 5.0])
self.global_optimum = [50.0, 25.0, 1.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(100):
ui = 25.0 + (-50.0*log(0.01*(i+1)))**(2.0/3.0)
y += -0.1*(i+1) + exp(1.0/x[0]*(ui-x[1])**x[2])
return y
# -------------------------------------------------------------------------------- #
class Hosaki(Benchmark):
"""
Hosaki test objective function.
This class defines the Hosaki global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hosaki}}(\\mathbf{x}) = \\left ( 1 - 8x_1 + 7x_1^2 - \\frac{7}{3}x_1^3 + \\frac{1}{4}x_1^4 \\right )x_2^2e^{-x_2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Hosaki.png
:alt: Hosaki function
:align: center
**Two-dimensional Hosaki function**
*Global optimum*: :math:`f(x_i) = -2.3458` for :math:`\\mathbf{x} = [4, 2]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0, 5), (0, 5)]
self.global_optimum = [4, 2]
self.fglob = -2.3458
def evaluator(self, x, *args):
self.fun_evals += 1
return (1 + x[0]*(-8 + x[0]*(7 + x[0]*(-7.0/3.0 + x[0] *1.0/4.0))))*x[1]*x[1] * exp(-x[1])
# -------------------------------------------------------------------------------- #
class Infinity(Benchmark):
"""
Infinity test objective function.
This class defines the Infinity global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Infinity}}(\\mathbf{x}) = \\sum_{i=1}^{n} x_i^{6} \\left [ \\sin\\left ( \\frac{1}{x_i} \\right )+2 \\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Infinity.png
:alt: Infinity function
:align: center
**Two-dimensional Infinity function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [1e-16] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**6.0*(sin(1.0/x) + 2.0))
# -------------------------------------------------------------------------------- #
class JennrichSampson(Benchmark):
"""
Jennrich-Sampson test objective function.
This class defines the Jennrich-Sampson global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{JennrichSampson}}(\\mathbf{x}) = \\sum_{i=1}^{10} \\left [2 + 2i - (e^{ix_1} + e^{ix_2}) \\right ]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,2`.
.. figure:: figures/JennrichSampson.png
:alt: Jennrich-Sampson function
:align: center
**Two-dimensional Jennrich-Sampson function**
*Global optimum*: :math:`f(x_i) = 124.3621824` for :math:`\\mathbf{x} = [0.257825, 0.257825]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
self.global_optimum = [0.257825, 0.257825]
self.fglob = 124.3621824
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
rng = numpy.arange(1.0, 11.0)
return sum((2.0 + 2.0*rng - (exp(rng*x1) + exp(rng*x2)))**2.0)
# -------------------------------------------------------------------------------- #
class Judge(Benchmark):
"""
Judge test objective function.
This class defines the Judge global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Judge}}(\\mathbf{x}) = \\sum_{i=1}^{20} \\left [ \\left (x_1 + B_i x_2 + C_i x_2^2 \\right ) - A_i \\right ]^2
Where, in this exercise:
.. math::
\\begin{cases} A = [4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145, 3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179, 2.858, 1.388, 1.651, 1.593, 1.046, 2.152] \\\\
B = [0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957, 0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828, 0.399, 0.617, 0.939, 0.784, 0.072, 0.889] \\\\
C = [0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259, 0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180, 0.842, 0.039, 0.103, 0.620, 0.158, 0.704] \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Judge.png
:alt: Judge function
:align: center
**Two-dimensional Judge function**
*Global optimum*: :math:`f(x_i) = 16.0817307` for :math:`\\mathbf{x} = [0.86479, 1.2357]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [0.86479, 1.2357]
self.fglob = 16.0817307
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
Y = asarray([4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145,
3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179,
2.858, 1.388, 1.651, 1.593, 1.046, 2.152])
X2 = asarray([0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957,
0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828,
0.399, 0.617, 0.939, 0.784, 0.072, 0.889])
X3 = asarray([0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259,
0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180,
0.842, 0.039, 0.103, 0.620, 0.158, 0.704])
return sum(((x1 + x2*X2 + (x2**2.0)*X3) - Y)**2.0)
# -------------------------------------------------------------------------------- #
class Katsuura(Benchmark):
"""
Katsuura test objective function.
This class defines the Katsuura global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Katsuura}}(\\mathbf{x}) = \\prod_{i=0}^{n-1} \\left [ 1 + (i+1) \\sum_{k=1}^{d} \\lfloor (2^k x_i) \\rfloor 2^{-k} \\right ]
Where, in this exercise, :math:`d = 32`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Katsuura.png
:alt: Katsuura function
:align: center
**Two-dimensional Katsuura function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0, 1), (0, 1)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
d = 32
prod = 1.0
for i in range(self.dimensions):
s = 0.0
for k in range(1, d+1):
pow2 = 2.0**k
s += round(pow2*x[i])/pow2
prod = prod*(1.0 + (i+1.0)*s)
return prod
# -------------------------------------------------------------------------------- #
class Keane(Benchmark):
"""
Keane test objective function.
This class defines the Keane global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Keane}}(\\mathbf{x}) = \\frac{\\sin^2(x_1 - x_2)\\sin^2(x_1 + x_2)}{\\sqrt{x_1^2 + x_2^2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Keane.png
:alt: Keane function
:align: center
**Two-dimensional Keane function**
*Global optimum*: :math:`f(x_i) = 0.673668` for :math:`\\mathbf{x} = [0.0, 1.39325]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
self.global_optimum = [0.0, 1.39325]
self.fglob = 0.673668
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (sin(x1 - x2)**2.0*sin(x1 + x2)**2.0)/
|
sqrt(x1**2.0 + x2**2.0)
|
numpy.sqrt
|
import numpy as np
import pytest
import probnum.statespace as pnss
from probnum import randvars
from probnum.problems.zoo.linalg import random_spd_matrix
from .test_sde import TestLTISDE
@pytest.fixture
def some_ordint(test_ndim):
return test_ndim - 1
class TestIntegrator:
"""An integrator should be usable as is, but its tests are also useful for IBM,
IOUP, etc."""
# Replacement for an __init__ in the pytest language. See:
# https://stackoverflow.com/questions/21430900/py-test-skips-test-class-if-constructor-is-defined
@pytest.fixture(autouse=True)
def _setup(self, some_ordint):
self.some_ordint = some_ordint
self.integrator = pnss.Integrator(ordint=self.some_ordint, spatialdim=1)
def test_proj2coord(self):
base = np.zeros(self.some_ordint + 1)
base[0] = 1
e_0_expected = np.kron(np.eye(1), base)
e_0 = self.integrator.proj2coord(coord=0)
np.testing.assert_allclose(e_0, e_0_expected)
base = np.zeros(self.some_ordint + 1)
base[-1] = 1
e_q_expected = np.kron(np.eye(1), base)
e_q = self.integrator.proj2coord(coord=self.some_ordint)
|
np.testing.assert_allclose(e_q, e_q_expected)
|
numpy.testing.assert_allclose
|
#coding=utf-8
from __future__ import division
from gaussian_filter import gaussianfilter
from numpy import array, zeros, abs, sqrt, arctan2, arctan, pi, real
from numpy.fft import fft2, ifft2
# 2.2 Function for Finding gradients
# The Canny algorithm basically finds edges where the grayscale intensity of the image changes the most.
# These areas are found by determining gradients of the image.
# Gradients at each pixel in the smoothed image are determined by applying
# what is known as the Sobel-operator.
def findinggradient(im):
# Given Sobel operator kernels
op1 = array([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])
op2 = array([[-1, -2, -1],
[ 0, 0, 0],
[ 1, 2, 1]])
kernel1 = zeros(im.shape)
kernel1[:op1.shape[0], :op1.shape[1]] = op1
kernel1 = fft2(kernel1)
kernel2 =
|
zeros(im.shape)
|
numpy.zeros
|
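# Minimal standalone sketch of the gradient step described above (illustrative only; the
# dummy 8x8 step-edge image and the variable names are assumptions, while the kernel
# padding and FFT-based filtering mirror findinggradient):
im_demo = zeros((8, 8)); im_demo[:, 4:] = 1.0
kx = zeros(im_demo.shape); kx[:3, :3] = array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
ky = zeros(im_demo.shape); ky[:3, :3] = array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
Gx = real(ifft2(fft2(im_demo) * fft2(kx)))  # horizontal gradient response
Gy = real(ifft2(fft2(im_demo) * fft2(ky)))  # vertical gradient response
grad = sqrt(Gx**2 + Gy**2)                  # edge strength
theta = arctan2(Gy, Gx)                     # edge direction (radians)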
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.animation import FuncAnimation, PillowWriter
import matplotlib.patches as mpatches
from matplotlib.legend_handler import HandlerPatch, HandlerCircleCollection
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import os
from IPython.display import Image, display
#from model_evaluation_3D import plot_covariance_ellipsoide
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
from filterpy.common import Saver
DT= 0.01
SIGMA=0.5
class Trajectoy3DGenerattion:
def __init__(self,sigma=0.5, T=10.0, fs=100.0):
global DT,SIGMA
self.fs = fs # Sampling Frequency
self.dt = 1.0/fs
# Set Global Variables
DT = self.dt
SIGMA = sigma
self.T = T # measuremnt time
self.m = int(self.T/self.dt) # number of measurements
self.sigma = sigma
self.px= 0.0 # x Position Start
self.py= 0.0 # y Position Start
self.pz= 1.0 # z Position Start
self.vx = 10.0 # m/s Velocity at the beginning
self.vy = 0.0 # m/s Velocity
self.vz = 0.0 # m/s Velocity
c = 0.1 # Drag Resistance Coefficient
self.Xr=[]
self.Yr=[]
self.Zr=[]
self.Vx=[]
self.Vy=[]
self.Vz=[]
self.ax =[]
self.az =[]
for i in range(int(self.m)):
# Just to simulate a trajectory
accx = -c*self.vx**2
self.vx += accx*self.dt
self.px += self.vx*self.dt
accz = -9.806 + c*self.vz**2
self.vz += accz*self.dt
self.pz += self.vz*self.dt
self.Xr.append(self.px)
self.Yr.append(self.py)
self.Zr.append(self.pz)
self.Vx.append(self.vx)
self.Vy.append(self.vy)
self.Vz.append(self.vz)
self.az.append(accz)
self.ax.append(accx)
aux = self.Xr
self.Xr = self.Zr
self.Zr = aux
aux = self.Vx
self.Vx = self.Vz
self.Vz = aux
aux = self.ax
self.ax = self.az
self.az = aux
def get_velocities(self):
return self.Vx, self.Vy, self.Vz
def get_trajectory_position(self):
return np.array(self.Xr), np.array(self.Yr), np.array(self.Zr)
def get_acceleration(self):
return self.ax, self.az
def get_measurements(self):
#adding Noise
np.random.seed(25)
self.Xm = self.Xr + self.sigma * (np.random.randn(self.m))
self.Ym = self.Yr + self.sigma * (np.random.randn(self.m))
self.Zm = self.Zr + self.sigma * (np.random.randn(self.m))
return self.Xm, self.Ym, self.Zm
def plot_planets(x, y, z, ax):
ax.scatter(x[0], y[0], z[0], c='b', s=850, facecolor='b')
ax.scatter(x[-1], y[-1], z[-1], c='gray', s=350, facecolor='b')
e_txt = ax.text(x[0]-3, y[0], z[0]-10.5,"Earth", weight='bold', c="b", fontsize=10)
m_txt = ax.text(x[-1]-4, y[-1], z[-1]+4,"Moon", weight='bold', c="gray", fontsize=10)
return e_txt, m_txt
def plot_measurements_3D(traj, ax, title=""):
x,y,z = traj.get_measurements()
plot_planets(x, y, z, ax)
ax.scatter(x, y, z, c='g', alpha=0.3, facecolor=None, label="Measurements")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title(title, fontsize=15)
#ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
# Axis equal
max_range = np.array([x.max()-x.min(), y.max()-y.min(), z.max()-z.min()]).max() / 3.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.legend(loc='best',prop={'size':15})
def plot_trajectory_3D(traj, ax, title=""):
x,y,z = traj.get_trajectory_position()
plot_planets(x, y, z, ax)
ax.plot(x, y, z, c='r', lw=2, ls="--", label="Trajectory")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title(title, fontsize=15)
#ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
# Axis equal
max_range = np.array([x.max()-x.min(), y.max()-y.min(), z.max()-z.min()]).max() / 3.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.legend(loc='best',prop={'size':15})
def plot_prediction(preds,traj, ax):
global SIGMA
xt, yt, zt = preds[:,0], preds[:,1], preds[:,2]
Xr, Yr, Zr = traj.get_trajectory_position()
Xm, Ym, Zm = traj.get_measurements()
print("Xm: ", Xm.shape)
print("Ym: ", Ym.shape)
print("Zm: ", Zm.shape)
plot_planets(Xr, Yr, Zr, ax)
ax.plot(xt,yt,zt, lw=2, label='Kalman Filter Estimate')
ax.plot(Xr, Yr, Zr, lw=2, label='Real Trajectory Without Noise')
ax.scatter(Xm, Ym, Zm, edgecolor='g', alpha=0.1, lw=2, label="Measurements")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.legend(loc='best',prop={'size':15})
ax.set_title("Kalman Filter Estimate - Sigma={}".format(SIGMA), fontsize=15)
# Axis equal
max_range = np.array([Xm.max()-Xm.min(), Ym.max()-Ym.min(), Zm.max()-Zm.min()]).max() / 3.0
mean_x = Xm.mean()
mean_y = Ym.mean()
mean_z = Zm.mean()
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
def plot_x_z_2D(ax, traj, preds):
global SIGMA
xt, yt, zt = preds[:,0], preds[:,1], preds[:,2]
Xr, Yr, Zr = traj.get_trajectory_position()
Xm, Ym, Zm = traj.get_measurements()
ax.plot(xt,zt, label='Kalman Filter Estimate')
ax.scatter(Xm,Zm, label='Measurement', c='gray', s=15, alpha=0.5)
ax.plot(Xr, Zr, label='Real')
ax.set_title("Kalman Filter Estimate 2D - Sigma={}".format(SIGMA), fontsize=15)
ax.legend(loc='best',prop={'size':15})
ax.set_xlabel('X ($m$)')
ax.set_ylabel('Y ($m$)')
#-------------------------- KALMAN FUNCTIONS -------------------------------------------------
def init_kalman(traj):
global SIGMA, DT
#Transition_Matrix matrix
PHI = np.array([[1.0, 0.0, 0.0, DT, 0.0, 0.0, 1/2.0*DT**2, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, DT, 0.0, 0.0, 1/2.0*DT**2, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, DT, 0.0, 0.0, 1/2.0*DT**2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, DT, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, DT, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, DT],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
# Matrix Observation_Matrix
#We are looking for the position of the spaceship x,y,z
H = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
x, y, z = traj.get_measurements()
vx, vy, vz = traj.get_velocities()
ax, az = traj.get_acceleration()
#initial state
init_states = np.array([x[0], y[0], z[0], vx[0], vy[0], vz[0], ax[0], 0., az[0]])
P = np.eye(9)*(0.5**2)
rp = 1 # Noise of Position Measurement
R = np.eye(3)* rp
G = np.array([ [(DT**2)/2],
[(DT**2)/2],
[(DT**2)/2],
[ DT ],
[ DT ],
[ DT ],
[ 1. ],
[ 1. ],
[ 1. ]])
acc_noise = 0.1 # acceleration process noise
Q= np.dot(G, G.T)* acc_noise**2
#Q = Q_discrete_white_noise(3, dt=DT, var=50, block_size=3)
return init_states, PHI, H, Q, P, R
def Ship_tracker(traj):
global DT
init_states, PHI, H, Q, P, R = init_kalman(traj)
tracker= KalmanFilter(dim_x = 9, dim_z=3)
tracker.x = init_states
tracker.F = PHI
tracker.H = H # Measurement function
tracker.P = P # covariance matrix
tracker.R = R # state uncertainty
tracker.Q = Q # process uncertainty
return tracker
def run(tracker, traj):
x, y, z = traj.get_measurements()
zs = np.asarray([ x, y, z]).T
preds, cov = [],[]
for z in zs:
tracker.predict()
tracker.update(z=z)
preds.append(tracker.x)
cov.append(tracker.P)
return np.array(preds), np.array(cov)
def run_half_measures(tracker, traj):
x, y, z = traj.get_measurements()
zs = np.asarray([ x, y, z]).T
preds, cov = [],[]
for i,z in enumerate(zs):
tracker.predict()
if i <= len(zs)//2:
tracker.update(z=z)
preds.append(tracker.x)
cov.append(tracker.P)
return
|
np.array(preds)
|
numpy.array
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Make sure the following dependencies are installed.
#!pip install albumentations --upgrade
#!pip install timm
#!pip install iterative-stratification
__author__ = 'MPWARE: https://www.kaggle.com/mpware'
# In[ ]:
# Configure HOME and DATA_HOME according to your setup
HOME = "./"
DATA_HOME = "./data/"
TRAIN_HOME = DATA_HOME + "train/"
TRAIN_IMAGES_HOME = TRAIN_HOME + "images/"
IMAGE_SIZE = 512 # Image size for training
RESIZED_IMAGE_SIZE = 384 # For random crop
COMPOSE = None # For RGBY support
# Set to True for interactive session
PT_SCRIPT = True # True
# In[ ]:
import sys, os, random, math
import numpy as np
import h5py
import cv2
import torch
import torch.nn as nn
import operator
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
import albumentations as A
import torch.nn.functional as F
import functools
from collections import OrderedDict
from torch.optim import Adam, SGD
import timm
import iterstrat
import pandas as pd # used by prepare_data() below
from ast import literal_eval # used to parse the Label column
from PIL import Image # used by read_image() below
# In[ ]:
LABEL = "Label"
ID = "ID"
EID = "EID"
IMAGE_WIDTH = "ImageWidth"
IMAGE_HEIGHT = "ImageHeight"
META = "META"
TOTAL = "Total"
EXT = "ext"
DEFAULT = "default"
# 19 class labels. Some rare classes: Mitotic spindle (0.37%), Negative: (0.15%)
class_mapping = {
0: 'Nucleoplasm', 1: 'Nuclear membrane', 2: 'Nucleoli', 3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles', 5: 'Nuclear bodies', 6: 'Endoplasmic reticulum', 7: 'Golgi apparatus', 8: 'Intermediate filaments',
9: 'Actin filaments', 10: 'Microtubules', 11: 'Mitotic spindle', 12: 'Centrosome', 13: 'Plasma membrane', 14: 'Mitochondria',
15: 'Aggresome', 16: 'Cytosol', 17: 'Vesicles and punctate cytosolic patterns', 18: 'Negative',
}
class_mapping_inv = {v:k for k,v in class_mapping.items()}
class_labels = [str(k) for k,v in class_mapping.items()]
class_names = [str(v) for k,v in class_mapping.items()]
LABELS_OHE_START = 3
# In[ ]:
def seed_everything(s):
random.seed(s)
os.environ['PYTHONHASHSEED'] = str(s)
np.random.seed(s)
# Torch
torch.manual_seed(s)
torch.cuda.manual_seed(s)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
torch.cuda.manual_seed_all(s)
# In[ ]:
def l1_loss(A_tensors, B_tensors):
return torch.abs(A_tensors - B_tensors)
class ComboLoss(nn.Module):
def __init__(self, alpha=1.0, beta=1.0, gamma=1.0, from_logits=True, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.from_logits = from_logits
print("alpha:", self.alpha, "beta:", self.beta, "gamma:", self.gamma)
self.loss_classification = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, y_pred, y_true, features_single=None, y_pred_tiles=None, features_tiles=None, y_pred_tiled_flatten=None):
loss_ = self.alpha * self.loss_classification(y_pred, y_true).mean()
if features_tiles is not None and self.beta > 0:
logits_reconstruction = y_pred_tiles
loss_tiles_class_ = self.loss_classification(logits_reconstruction, y_true).mean()
loss_ = loss_ + self.beta * loss_tiles_class_
if features_single is not None and features_tiles is not None and self.gamma > 0:
loss_reconstruction_ = l1_loss(features_single, features_tiles).mean()
loss_ = loss_ + self.gamma * loss_reconstruction_
return loss_
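# Minimal usage sketch (hypothetical tensor names, multi-label logits of shape [B, 19]):
# criterion = ComboLoss(alpha=1.0, beta=1.0, gamma=0.5)
# loss = criterion(single_logits, targets,
#                  features_single=single_features,
#                  y_pred_tiles=tiled_logits,
#                  features_tiles=tiled_features)
# i.e. BCE on the single-image head (alpha), plus BCE on the tiled/reconstructed head (beta),
# plus an L1 consistency term between the two feature maps (gamma).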
# In[ ]:
# Main configuration
class raw_conf:
def __init__(self, factory):
super().__init__()
self.inference = False
self.compose = COMPOSE
self.normalize = False if factory == "HDF5" else True
self.norm_value = None if factory == "HDF5" else 65535.0
# Dataset
self.image_size = None if factory == "HDF5" else IMAGE_SIZE
self.denormalize = 255
# Model
self.mtype = "siamese" # "regular"
self.backbone = 'seresnext50_32x4d' # 'gluon_seresnext101_32x4d' # 'cspresnext50' 'regnety_064'
self.pretrained_weights = "imagenet"
self.INPUT_RANGE = [0, 1]
self.IMG_MEAN = [0.485, 0.456, 0.406, 0.485] if self.compose is None else [0.485, 0.456, 0.406]
self.IMG_STD = [0.229, 0.224, 0.225, 0.229] if self.compose is None else [0.229, 0.224, 0.225]
self.num_classes = 19
self.with_cam = True
self.puzzle_pieces = 4
self.hpa_classifier_weights = None
self.dropout = None
# Model output
self.post_activation = "sigmoid"
self.output_key = "logits" if self.mtype == "regular" else "single_logits" # None
self.output_key_extra = "features" if self.mtype == "regular" else "single_features" # None
self.output_key_siamese = None if self.mtype == "regular" else "tiled_logits"
self.output_key_extra_siamese = None if self.mtype == "regular" else "tiled_features"
# Loss
self.alpha = 1.0 # Single image classification loss
self.beta = 0.0 if self.mtype == "regular" else 1.0 # Reconstructed image classification loss
self.gamma = 0.0 if self.mtype == "regular" else 0.5 # 0.25
self.loss = ComboLoss(alpha=self.alpha, beta=self.beta, gamma=self.gamma)
self.sampler = "prob"
self.sampler_cap = "auto" # None
self.fp16 = True
self.finetune = False
self.optimizer = "Adam" # "SGD"
self.scheduler = None if self.finetune is True or self.optimizer != "Adam" else "ReduceLROnPlateau" # "CosineAnnealingWarmRestarts"
self.scheduler_factor = 0.3
self.scheduler_patience = 8
self.lr = 0.0003
self.min_lr = 0.00005
self.beta1 = 0.9
self.train_verbose = True
self.valid_verbose = True
# Train parameters
self.L_DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.map_location = self.L_DEVICE
self.WORKERS = 0 if PT_SCRIPT is False else 8
self.BATCH_SIZE = 36 if self.mtype == "siamese" else 48
self.ITERATIONS_LOGS = 30
self.CYCLES = 1
self.EPOCHS_PER_CYCLE = 48 # 36
self.EPOCHS = self.CYCLES * self.EPOCHS_PER_CYCLE
self.WARMUP = 0
self.FOLDS = 4
self.METRIC_ = "min" # "max"
self.pin_memory = True
# In[ ]:
# Load CSV data, drop duplicates if any
def prepare_data(filename, ext_name=None):
train_pd = pd.read_csv(DATA_HOME + filename)
train_pd[LABEL] = train_pd[LABEL].apply(literal_eval)
train_pd[LABEL] = train_pd[LABEL].apply(lambda x: [int(l) for l in x])
if EXT not in train_pd.columns:
train_pd.insert(2, EXT, DEFAULT)
if ext_name is not None:
train_pd[EXT] = ext_name
train_pd = train_pd.drop_duplicates(subset=[ID]).reset_index(drop=True)
assert(np.argwhere(train_pd.columns.values == EXT)[0][0] == 2)
return train_pd
# In[ ]:
# Use PIL to support 16 bits, normalize=True to return [0-1.0] float32 image
def read_image(filename, compose=None, normalize=False, norm_value=65535.0, images_root=TRAIN_IMAGES_HOME):
filename = images_root + filename
filename = filename + "_red.png" if "_red.png" not in filename else filename
mt_, pi_, nu_, er_ = filename, filename.replace('_red', '_green'), filename.replace('_red', '_blue'), filename.replace('_red', '_yellow')
if compose is None:
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
pi = np.asarray(Image.open(pi_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
er = np.asarray(Image.open(er_)).astype(np.uint16)
ret = np.dstack((mt, pi, nu, er))
else:
if compose == "RGB":
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
pi = np.asarray(Image.open(pi_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
ret = np.dstack((mt, pi, nu))
elif compose == "RYB":
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
er = np.asarray(Image.open(er_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
ret = np.dstack((mt, er, nu))
elif compose == "RYGYB":
mt = np.asarray(Image.open(mt_))
pi = np.asarray(Image.open(pi_))
nu = np.asarray(Image.open(nu_))
er = np.asarray(Image.open(er_))
ret =
|
np.dstack(((mt + er)/2.0, (pi + er/2)/1.5, nu))
|
numpy.dstack
|
#!/usr/bin/env python3
# Copyright: <NAME>, 2021
"""
Author: <NAME>
Date: 2021Dec17
Brief: Functions to generate SLR pulses
"""
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import sigpy
import sigpy.mri.rf as rf
# SLR pulse parameters
N = 256 # Number of time points
TBW = 9.62 # time-bandwidth product
PULSE_LEN = 1 #ms
PULSE_BW = TBW/PULSE_LEN #kHz
PBR = 0.01 # pass-band ripple
SBR = 0.01 # stop-band ripple
PULSE_TYPE = "ex" # inv, se, ex, sat, st (small-tip)
FILTER_TYPE = "min" # 'pm', 'ls', 'ms', 'min', 'max'
# Root-flipped pulse parameters
ROOT_FLIP = False
if PULSE_TYPE == "ex" or PULSE_TYPE == "sat":
ROOT_FLIP_ANGLE = 90
elif PULSE_TYPE == "inv" or PULSE_TYPE == "se":
ROOT_FLIP_ANGLE = 180
else:
ROOT_FLIP_ANGLE = 180
# Multiband pulse parameters
MULTI_BAND = False
N_BANDS = 2
PHS_TYPE = 'quad_mod' # for n_bands >= 3 only: phs_mod, amp_mod,
# for all n_bands: quad_mod, or 'None'
BAND_SEP = 6*TBW # separated by BAND_SEP slice widths
# Saving
SAVE_PULSE = False
BASE_PATH = "C:/Users/RudrakshaMajumdar/Documents/GitHub/rf-bloch-simulator/saved_rf_pulses"
NAME_PULSE = "SLR_test"
def slr_rootflip(flip=ROOT_FLIP_ANGLE):
"""
flip: Target flip angle in degrees
"""
flip_rad = np.deg2rad(flip)
# code extracted from dzrf():
[bsf, d1, d2] = rf.slr.calc_ripples(PULSE_TYPE, PBR, SBR)
b = rf.slr.dzmp(N, TBW, d1, d2)
b = b[::-1]
b = bsf*b
# root flipping the pulse, using the b polynomial
[am_rootflip, bRootFlipped] = rf.slr.root_flip(b, d1, flip_rad, TBW)
return am_rootflip, bRootFlipped
def slr_pulse(
num=N, time_bw=TBW,
ptype=PULSE_TYPE, ftype=FILTER_TYPE,
d_1=PBR, d_2=SBR,
root_flip=ROOT_FLIP,
multi_band = MULTI_BAND,
n_bands = N_BANDS,
phs_type = PHS_TYPE,
band_sep = BAND_SEP
):
"""Use Shinnar-Le Roux algorithm to generate pulse"""
if root_flip is False:
complex_pulse = rf.dzrf(n=num, tb=time_bw, ptype=ptype, ftype=ftype, d1=d_1, d2=d_2)
amp_arr = complex_pulse
else:
amp_arr, b_rootflip = slr_rootflip(ROOT_FLIP_ANGLE)
phs_arr = np.zeros(num)
for idx in range(num):
if amp_arr[idx] < 0:
phs_arr[idx] = 180
else:
phs_arr[idx] = 0
if multi_band is True:
amp_arr = rf.multiband.mb_rf(amp_arr, n_bands, band_sep, phs_type)
# prepare pulse for instrument, which takes absolute only
# cast negative values to positive
amp_arr_abs = np.abs(amp_arr)
# shift amplitude such that the lowest value is 0
amp_arr_abs = amp_arr_abs - amp_arr_abs.min()
# fold back phase when it exceeds 360
phs_arr = phs_arr % 360
freq_arr = (np.diff(phs_arr)/num)/360
return amp_arr, freq_arr, phs_arr, amp_arr_abs
def ck_to_mag(alpha, beta, se=False):
"""Convert Shinnar-Le Roux parameters to magnetization"""
if se is False:
m_z = 1-2*np.abs(beta)**2
m_xy = 2 * np.conj(alpha) * beta
else:
m_z = 1-2*np.abs(beta)**2
m_xy = 1j * (np.conj(alpha) **2 + beta**2)
return m_z, m_xy
if __name__ == "__main__":
am, fm, pm, am_abs = slr_pulse()
# simulations
if MULTI_BAND is True:
space = np.linspace(-20*TBW, 20*TBW, 2000)
else:
space = np.linspace(-2*TBW, 2*TBW, 200)
alpha, beta = sigpy.mri.rf.sim.abrm(am, space, balanced=False)
if PULSE_TYPE == "se":
m_z, m_xy = ck_to_mag(alpha, beta, se=True)
else:
m_z, m_xy = ck_to_mag(alpha, beta, se=False)
fig = plt.figure()
time_ax =
|
np.linspace(0, PULSE_LEN, N)
|
numpy.linspace
|
# Functions for step algorithms: Newton-Raphson, Rational Function Optimization,
# <NAME>.
import numpy as np
#from .optParams import Params # this will not cause changes in trust to persist
from . import optParams as op
from .displace import displace
from .intcosMisc import qShowForces
from .addIntcos import linearBendCheck
from math import sqrt, fabs
from .printTools import printArray, printMat, print_opt
from .misc import symmetrizeXYZ, isDqSymmetric
from .linearAlgebra import absMax, symmMatEig, asymmMatEig, symmMatInv, norm
from . import v3d
from .history import History
from . import optExceptions
# This function and its components:
# 1. Computes Dq, the step in internal coordinates.
# 2. Calls displace and attempts to take the step.
# 3. Updates history with results.
def Dq(Molsys, E, qForces, H, stepType=None, energy_function=None):
if len(H) == 0 or len(qForces) == 0: return np.zeros((0), float)
if not stepType:
stepType = op.Params.step_type
if stepType == 'NR':
return Dq_NR(Molsys, E, qForces, H)
elif stepType == 'RFO':
return Dq_RFO(Molsys, E, qForces, H)
elif stepType == 'SD':
return Dq_SD(Molsys, E, qForces)
elif stepType == 'BACKSTEP':
return Dq_BACKSTEP(Molsys)
elif stepType == 'P_RFO':
return Dq_P_RFO(Molsys, E, qForces, H)
elif stepType == 'LINESEARCH':
return Dq_LINESEARCH(Molsys, E, qForces, H, energy_function)
else:
raise optExceptions.OPT_FAIL('Dq: step type not yet implemented')
# Apply crude maximum step limit by scaling.
def applyIntrafragStepScaling(dq):
trust = op.Params.intrafrag_trust
if sqrt(np.dot(dq, dq)) > trust:
scale = trust / sqrt(np.dot(dq, dq))
print_opt("\tStep length exceeds trust radius of %10.5f.\n" % trust)
print_opt("\tScaling displacements by %10.5f\n" % scale)
dq *= scale
return
# Compute energy change along one dimension
def DE_projected(model, step, grad, hess):
if model == 'NR':
return (step * grad + 0.5 * step * step * hess)
elif model == 'RFO':
return (step * grad + 0.5 * step * step * hess) / (1 + step * step)
else:
raise optExceptions.OPT_FAIL("DE_projected does not recognize model.")
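# For reference, the two projected models along the unit step direction u, with
# g = -f.u (gradient component), h = u^T H u (curvature), and s the step length:
#   NR : DE ~= s*g + (1/2) s^2 h
#   RFO: DE ~= (s*g + (1/2) s^2 h) / (1 + s^2)
# The extra 1/(1 + s^2) factor is the rational-function damping of the quadratic model.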
# geometry and E are just for passing
# at present we are not storing the ACTUAL dq but the attempted
def Dq_NR(Molsys, E, fq, H):
print_opt("\tTaking NR optimization step.\n")
# Hinv fq = dq
Hinv = symmMatInv(H, redundant=True)
dq = np.dot(Hinv, fq)
# applies maximum internal coordinate change
applyIntrafragStepScaling(dq)
# get norm |q| and unit vector in the step direction
nr_dqnorm = sqrt(np.dot(dq, dq))
nr_u = dq.copy() / nr_dqnorm
print_opt("\tNorm of target step-size %15.10f\n" % nr_dqnorm)
# get gradient and hessian in step direction
nr_g = -1 * np.dot(fq, nr_u) # gradient, not force
nr_h = np.dot(nr_u, np.dot(H, nr_u))
if op.Params.print_lvl > 1:
print_opt('\t|NR target step|: %15.10f\n' % nr_dqnorm)
print_opt('\tNR_gradient : %15.10f\n' % nr_g)
print_opt('\tNR_hessian : %15.10f\n' % nr_h)
DEprojected = DE_projected('NR', nr_dqnorm, nr_g, nr_h)
print_opt(
"\tProjected energy change by quadratic approximation: %20.10lf\n" % DEprojected)
# Scale fq into aJ for printing
fq_aJ = qShowForces(Molsys.intcos, fq)
displace(Molsys._fragments[0].intcos, Molsys._fragments[0].geom, dq, fq_aJ)
dq_actual = sqrt(np.dot(dq, dq))
print_opt("\tNorm of achieved step-size %15.10f\n" % dq_actual)
# Symmetrize the geometry for next step
# symmetrize_geom()
# save values in step data
History.appendRecord(DEprojected, dq, nr_u, nr_g, nr_h)
# Can check full geometry, but returned indices will correspond then to that.
linearList = linearBendCheck(Molsys.intcos, Molsys.geom, dq)
if linearList:
raise optExceptions.ALG_FAIL("New linear angles", newLinearBends=linearList)
return dq
# Take Rational Function Optimization step
def Dq_RFO(Molsys, E, fq, H):
print_opt("\tTaking RFO optimization step.\n")
dim = len(fq)
dq = np.zeros((dim), float) # To be determined and returned.
trust = op.Params.intrafrag_trust # maximum step size
max_projected_rfo_iter = 25 # max. # of iterations to try to converge RS-RFO
rfo_follow_root = op.Params.rfo_follow_root # whether to follow root
rfo_root = op.Params.rfo_root # if following, which root to follow
# Determine the eigenvectors/eigenvalues of H.
Hevals, Hevects = symmMatEig(H)
# Build the original, unscaled RFO matrix.
RFOmat = np.zeros((dim + 1, dim + 1), float)
for i in range(dim):
for j in range(dim):
RFOmat[i, j] = H[i, j]
RFOmat[i, dim] = RFOmat[dim, i] = -fq[i]
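# The (dim+1) x (dim+1) matrix assembled above is the standard RFO augmented Hessian,
#   [[ H    -f ]
#    [ -f^T  0 ]]
# (with f the internal-coordinate forces); its chosen eigenvector, after the intermediate
# normalization below, gives the step dq, and the corresponding eigenvalue plays the role
# of the level shift.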
if op.Params.print_lvl >= 4:
print_opt("Original, unscaled RFO matrix:\n")
printMat(RFOmat)
symm_rfo_step = False
SRFOmat = np.zeros((dim + 1, dim + 1), float) # For scaled RFO matrix.
converged = False
dqtdq = 10 # square of norm of step
alpha = 1.0 # scaling factor for RS-RFO, scaling matrix is sI
last_iter_evect = np.zeros((dim), float)
if rfo_follow_root and len(History.steps) > 1:
last_iter_evect[:] = History.steps[
-2].followedUnitVector # RFO vector from previous geometry step
# Iterative sequence to find alpha
alphaIter = -1
while not converged and alphaIter < max_projected_rfo_iter:
alphaIter += 1
# If we exhaust iterations without convergence, then bail on the
# restricted-step algorithm. Set alpha=1 and apply crude scaling instead.
if alphaIter == max_projected_rfo_iter:
print_opt("\tFailed to converge alpha. Doing simple step-scaling instead.\n")
alpha = 1.0
elif op.Params.simple_step_scaling:
# Simple_step_scaling is on, not an iterative method.
# Proceed through loop with alpha == 1, and then continue
alphaIter = max_projected_rfo_iter
# Scale the RFO matrix.
for i in range(dim + 1):
for j in range(dim):
SRFOmat[j, i] = RFOmat[j, i] / alpha
SRFOmat[dim, i] = RFOmat[dim, i]
if op.Params.print_lvl >= 4:
print_opt("\nScaled RFO matrix.\n")
printMat(SRFOmat)
# Find the eigenvectors and eigenvalues of RFO matrix.
SRFOevals, SRFOevects = asymmMatEig(SRFOmat)
if op.Params.print_lvl >= 4:
print_opt("Eigenvectors of scaled RFO matrix.\n")
printMat(SRFOevects)
if op.Params.print_lvl >= 2:
print_opt("Eigenvalues of scaled RFO matrix.\n")
printArray(SRFOevals)
print_opt("First eigenvector (unnormalized) of scaled RFO matrix.\n")
printArray(SRFOevects[0])
# Do intermediate normalization. RFO paper says to scale eigenvector
# to make the last element equal to 1. Bogus evect leads can be avoided
# using root following.
for i in range(dim + 1):
# How big is dividing going to make the largest element?
# Same check occurs below for acceptability.
if fabs(SRFOevects[i][dim]) > 1.0e-10:
tval = absMax(SRFOevects[i] / SRFOevects[i][dim])
if tval < op.Params.rfo_normalization_max:
for j in range(dim + 1):
SRFOevects[i, j] /= SRFOevects[i, dim]
if op.Params.print_lvl >= 4:
print_opt("All scaled RFO eigenvectors (rows).\n")
printMat(SRFOevects)
# Use input rfo_root
# If root-following is turned off, then take the eigenvector with the rfo_root'th lowest eigenvalue.
# If it's the first iteration, then do the same. In subsequent steps, overlaps will be checked.
if not rfo_follow_root or len(History.steps) < 2:
# Determine root only once at beginning ?
if alphaIter == 0:
print_opt("\tChecking RFO solution %d.\n" % (rfo_root + 1))
for i in range(rfo_root, dim + 1):
# Check symmetry of root.
dq[:] = SRFOevects[i, 0:dim]
if not op.Params.accept_symmetry_breaking:
symm_rfo_step = isDqSymmetric(Molsys.intcos, Molsys.geom, dq)
if not symm_rfo_step: # Root is asymmetric so reject it.
print_opt("\tRejecting RFO root %d because it breaks the molecular point group.\n"\
% (rfo_root+1))
continue
# Check normalizability of root.
if fabs(SRFOevects[i][dim]) < 1.0e-10: # don't even try to divide
print_opt(
"\tRejecting RFO root %d because normalization gives large value.\n"
% (rfo_root + 1))
continue
tval = absMax(SRFOevects[i] / SRFOevects[i][dim])
if tval > op.Params.rfo_normalization_max: # matching test in code above
print_opt(
"\tRejecting RFO root %d because normalization gives large value.\n"
% (rfo_root + 1))
continue
rfo_root = i # This root is acceptable.
break
else:
rfo_root = op.Params.rfo_root
# no good one found, use the default
# Save initial root. 'Follow' during the RS-RFO iterations.
rfo_follow_root = True
else: # Do root following.
# Find maximum overlap. Dot only within H block.
dots = np.array(
[v3d.dot(SRFOevects[i], last_iter_evect, dim) for i in range(dim)], float)
bestfit = np.argmax(dots)
if bestfit != rfo_root:
print_opt("Root-following has changed rfo_root value to %d." %
(bestfit + 1))
rfo_root = bestfit
if alphaIter == 0:
print_opt("\tUsing RFO solution %d.\n" % (rfo_root + 1))
last_iter_evect[:] = SRFOevects[rfo_root][0:dim] # omit last column on right
# Print only the lowest eigenvalues/eigenvectors
if op.Params.print_lvl >= 2:
print_opt("\trfo_root is %d\n" % (rfo_root + 1))
for i in range(dim + 1):
if SRFOevals[i] < -1e-6 or i < rfo_root:
print_opt("Scaled RFO eigenvalue %d: %15.10lf (or 2*%-15.10lf)\n" %
(i + 1, SRFOevals[i], SRFOevals[i] / 2))
print_opt("eigenvector:\n")
printArray(SRFOevects[i])
dq[:] = SRFOevects[rfo_root][0:dim] # omit last column
# Project out redundancies in steps.
# Added this projection in 2014; but doesn't seem to help, as f,H are already projected.
# project_dq(dq);
# zero steps for frozen coordinates?
dqtdq = np.dot(dq, dq)
# If alpha explodes, give up on iterative scheme
if fabs(alpha) > op.Params.rsrfo_alpha_max:
converged = False
alphaIter = max_projected_rfo_iter - 1
elif sqrt(dqtdq) < (trust + 1e-5):
converged = True
if alphaIter == 0 and not op.Params.simple_step_scaling:
print_opt("\n\tDetermining step-restricting scale parameter for RS-RFO.\n")
if alphaIter == 0:
print_opt("\tMaximum step size allowed %10.5lf\n" % trust)
print_opt("\t Iter |step| alpha rfo_root \n")
print_opt("\t------------------------------------------------\n")
print_opt("\t%5d%12.5lf%14.5lf%12d\n" % (alphaIter + 1, sqrt(dqtdq), alpha,
rfo_root + 1))
elif alphaIter > 0 and not op.Params.simple_step_scaling:
print_opt("\t%5d%12.5lf%14.5lf%12d\n" % (alphaIter + 1, sqrt(dqtdq), alpha,
rfo_root + 1))
# Find the analytical derivative, d(norm step squared) / d(alpha)
Lambda = -1 * v3d.dot(fq, dq, dim)
if op.Params.print_lvl >= 2:
print_opt("dq:\n")
printArray(dq, dim)
print_opt("fq:\n")
printArray(fq, dim)
print_opt("\tLambda calculated by (dq^t).(-f) = %20.10lf\n" % Lambda)
# Calculate derivative of step size wrt alpha.
# Equation 20, Besalu and Bofill, Theor. Chem. Acc., 1999, 100:265-274
tval = 0
for i in range(dim):
tval += (pow(v3d.dot(Hevects[i], fq, dim), 2)) / (pow(
(Hevals[i] - Lambda * alpha), 3))
analyticDerivative = 2 * Lambda / (1 + alpha * dqtdq) * tval
if op.Params.print_lvl >= 2:
print_opt("\tAnalytic derivative d(norm)/d(alpha) = %20.10lf\n" %
analyticDerivative)
# Calculate new scaling alpha value.
# Equation 20, Besalu and Bofill, Theor. Chem. Acc., 1999, 100:265-274
alpha += 2 * (trust * sqrt(dqtdq) - dqtdq) / analyticDerivative
# end alpha RS-RFO iterations
print_opt("\t------------------------------------------------\n")
# Crude/old way to limit step size if the RS-RFO iterations did not converge
if not converged or op.Params.simple_step_scaling:
applyIntrafragStepScaling(dq)
if op.Params.print_lvl >= 3:
print_opt("\tFinal scaled step dq:\n")
printArray(dq)
# Get norm |dq|, unit vector, gradient and hessian in step direction
# TODO double check Hevects[i] here instead of H ? as for NR
rfo_dqnorm = sqrt(np.dot(dq, dq))
print_opt("\tNorm of target step-size %15.10f\n" % rfo_dqnorm)
rfo_u = dq.copy() / rfo_dqnorm
rfo_g = -1 * np.dot(fq, rfo_u)
rfo_h = np.dot(rfo_u, np.dot(H, rfo_u))
DEprojected = DE_projected('RFO', rfo_dqnorm, rfo_g, rfo_h)
if op.Params.print_lvl > 1:
print_opt('\t|RFO target step| : %15.10f\n' % rfo_dqnorm)
print_opt('\tRFO gradient : %15.10f\n' % rfo_g)
print_opt('\tRFO hessian : %15.10f\n' % rfo_h)
print_opt("\tProjected energy change by RFO approximation: %20.10lf\n" % DEprojected)
# Scale fq into aJ for printing
fq_aJ = qShowForces(Molsys.intcos, fq)
# this won't work for multiple fragments yet until dq and fq get cut up.
for F in Molsys._fragments:
displace(F.intcos, F.geom, dq, fq_aJ)
# For now, saving RFO unit vector and using it in projection to match C++ code,
# could use actual Dq instead.
dqnorm_actual = sqrt(np.dot(dq, dq))
print_opt("\tNorm of achieved step-size %15.10f\n" % dqnorm_actual)
# To test step sizes
#x_before = original geometry
#x_after = new geometry
#masses
#change = 0.0;
#for i in range(Natom):
# for xyz in range(3):
# change += (x_before[3*i+xyz] - x_after[3*i+xyz]) * (x_before[3*i+xyz] - x_after[3*i+xyz])
# * masses[i]
#change = sqrt(change);
#print_opt("Step-size in mass-weighted cartesian coordinates [bohr (amu)^1/2] : %20.10lf\n" % change)
#print_opt("\tSymmetrizing new geometry\n")
#geom = symmetrizeXYZ(geom)
History.appendRecord(DEprojected, dq, rfo_u, rfo_g, rfo_h)
linearList = linearBendCheck(Molsys.intcos, Molsys.geom, dq)
if linearList:
raise optExceptions.ALG_FAIL("New linear angles", newLinearBends=linearList)
# Before quitting, make sure step is reasonable. It should only be
# screwball if we are using the "First Guess" after the back-transformation failed.
if sqrt(np.dot(dq, dq)) > 10 * trust:
raise optExceptions.ALG_FAIL("opt.py: Step is far too large.")
return dq
def Dq_P_RFO(Molsys, E, fq, H):
Hdim = len(fq) # size of Hessian
trust = op.Params.intrafrag_trust # maximum step size
rfo_follow_root = op.Params.rfo_follow_root # whether to follow root
print_lvl = op.Params.print_lvl
if print_lvl > 2:
print_opt("Hessian matrix\n")
printMat(H)
# Diagonalize H (technically only have to semi-diagonalize)
hEigValues, hEigVectors = symmMatEig(H)
if print_lvl > 2:
print_opt("Eigenvalues of Hessian\n")
printArray(hEigValues)
print_opt("Eigenvectors of Hessian (rows)\n")
printMat(hEigVectors)
# Construct diagonalized Hessian with evals on diagonal
HDiag = np.zeros((Hdim, Hdim), float)
for i in range(Hdim):
HDiag[i, i] = hEigValues[i]
if print_lvl > 2:
print_opt("H diagonal\n")
printMat(HDiag)
print_opt(
"\tFor P-RFO, assuming rfo_root=1, maximizing along lowest eigenvalue of Hessian.\n"
)
print_opt("\tLarger values of rfo_root are not yet supported.\n")
rfo_root = 0
""" TODO: use rfo_root to decide which eigenvectors are moved into the max/mu space.
if not rfo_follow_root or len(History.steps) < 2:
rfo_root = op.Params.rfo_root
print_opt("\tMaximizing along %d lowest eigenvalue of Hessian.\n" % (rfo_root+1) )
else:
last_iter_evect = history[-1].Dq
dots = np.array([v3d.dot(hEigVectors[i],last_iter_evect,Hdim) for i in range(Hdim)], float)
rfo_root = np.argmax(dots)
print_opt("\tOverlaps with previous step checked for root-following.\n")
print_opt("\tMaximizing along %d lowest eigenvalue of Hessian.\n" % (rfo_root+1) )
"""
# number of degrees along which to maximize; assume 1 for now
mu = 1
print_opt("\tInternal forces in au:\n")
printArray(fq)
fqTransformed = np.dot(hEigVectors, fq) #gradient transformation
print_opt("\tInternal forces in au, in Hevect basis:\n")
printArray(fqTransformed)
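# P-RFO works in the Hessian eigenbasis: the lowest mu mode(s) go into a small RFO block
# that is maximized along (the transition-state direction), while the remaining Hdim - mu
# modes go into a second RFO block that is minimized; the two solutions are recombined and
# rotated back to the original coordinates further down.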
# Build RFO max
maximizeRFO = np.zeros((mu + 1, mu + 1), float)
for i in range(mu):
maximizeRFO[i, i] = hEigValues[i]
maximizeRFO[i, -1] = -fqTransformed[i]
maximizeRFO[-1, i] = -fqTransformed[i]
if print_lvl > 2:
print_opt("RFO max\n")
printMat(maximizeRFO)
# Build RFO min
minimizeRFO = np.zeros((Hdim - mu + 1, Hdim - mu + 1), float)
for i in range(0, Hdim - mu):
minimizeRFO[i, i] = HDiag[i + mu, i + mu]
minimizeRFO[i, -1] = -fqTransformed[i + mu]
minimizeRFO[-1, i] = -fqTransformed[i + mu]
if print_lvl > 2:
print_opt("RFO min\n")
printMat(minimizeRFO)
RFOMaxEValues, RFOMaxEVectors = symmMatEig(maximizeRFO)
RFOMinEValues, RFOMinEVectors = symmMatEig(minimizeRFO)
print_opt("RFO min eigenvalues:\n")
printArray(RFOMinEValues)
print_opt("RFO max eigenvalues:\n")
printArray(RFOMaxEValues)
if print_lvl > 2:
print_opt("RFO min eigenvectors (rows) before normalization:\n")
printMat(RFOMinEVectors)
print_opt("RFO max eigenvectors (rows) before normalization:\n")
printMat(RFOMaxEVectors)
# Normalize max and min eigenvectors
for i in range(mu + 1):
if abs(RFOMaxEVectors[i, mu]) > 1.0e-10:
tval = abs(absMax(RFOMaxEVectors[i, 0:mu]) / RFOMaxEVectors[i, mu])
if fabs(tval) < op.Params.rfo_normalization_max:
RFOMaxEVectors[i] /= RFOMaxEVectors[i, mu]
if print_lvl > 2:
print_opt("RFO max eigenvectors (rows):\n")
printMat(RFOMaxEVectors)
for i in range(Hdim - mu + 1):
if abs(RFOMinEVectors[i][Hdim - mu]) > 1.0e-10:
tval = abs(
absMax(RFOMinEVectors[i, 0:Hdim - mu]) / RFOMinEVectors[i, Hdim - mu])
if fabs(tval) < op.Params.rfo_normalization_max:
RFOMinEVectors[i] /= RFOMinEVectors[i, Hdim - mu]
if print_lvl > 2:
print_opt("RFO min eigenvectors (rows):\n")
printMat(RFOMinEVectors)
VectorP = RFOMaxEVectors[mu, 0:mu]
VectorN = RFOMinEVectors[rfo_root, 0:Hdim - mu]
print_opt("Vector P\n")
print_opt(str(VectorP) + '\n')
print_opt("Vector N\n")
print_opt(str(VectorN) + '\n')
# Combine the eigenvectors from RFO max and min
PRFOEVector = np.zeros(Hdim, float)
PRFOEVector[0:len(VectorP)] = VectorP
PRFOEVector[len(VectorP):] = VectorN
PRFOStep = np.dot(hEigVectors.transpose(), PRFOEVector)
if print_lvl > 1:
print_opt("RFO step in Hessian Eigenvector Basis\n")
printArray(PRFOEVector)
print_opt("RFO step in original Basis\n")
printArray(PRFOStep)
dq = PRFOStep
#if not converged or op.Params.simple_step_scaling:
applyIntrafragStepScaling(dq)
# Get norm |dq|, unit vector, gradient and hessian in step direction
# TODO double check Hevects[i] here instead of H ? as for NR
rfo_dqnorm = sqrt(np.dot(dq, dq))
print_opt("\tNorm of target step-size %15.10f\n" % rfo_dqnorm)
rfo_u = dq.copy() / rfo_dqnorm
rfo_g = -1 * np.dot(fq, rfo_u)
rfo_h = np.dot(rfo_u, np.dot(H, rfo_u))
DEprojected = DE_projected('RFO', rfo_dqnorm, rfo_g, rfo_h)
if op.Params.print_lvl > 1:
print_opt('\t|RFO target step| : %15.10f\n' % rfo_dqnorm)
print_opt('\tRFO gradient : %15.10f\n' % rfo_g)
print_opt('\tRFO hessian : %15.10f\n' % rfo_h)
print_opt("\tProjected Delta(E) : %15.10f\n\n" % DEprojected)
# Scale fq into aJ for printing
fq_aJ = qShowForces(Molsys.intcos, fq)
# this won't work for multiple fragments yet until dq and fq get cut up.
for F in Molsys._fragments:
displace(F.intcos, F.geom, dq, fq_aJ)
# For now, saving RFO unit vector and using it in projection to match C++ code,
# could use actual Dq instead.
dqnorm_actual = sqrt(np.dot(dq, dq))
print_opt("\tNorm of achieved step-size %15.10f\n" % dqnorm_actual)
History.appendRecord(DEprojected, dq, rfo_u, rfo_g, rfo_h)
linearList = linearBendCheck(Molsys.intcos, Molsys.geom, dq)
if linearList:
raise optExceptions.ALG_FAIL("New linear angles", newLinearBends=linearList)
# Before quitting, make sure step is reasonable. It should only be
# screwball if we are using the "First Guess" after the back-transformation failed.
if sqrt(
|
np.dot(dq, dq)
|
numpy.dot
|
import numpy as np
import tensorflow as tf
import cv2
import glob
import tensorflow.contrib.slim as slim
from collections import OrderedDict
import os
def get_variables_to_restore(scope_to_include, suffix_to_exclude):
"""to parse which var to include and which
var to exclude"""
vars_to_include = []
for scope in scope_to_include:
vars_to_include += slim.get_variables(scope)
vars_to_exclude = set()
for scope in suffix_to_exclude:
vars_to_exclude |= set(
slim.get_variables_by_suffix(scope))
return [v for v in vars_to_include if v not in vars_to_exclude]
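# Usage sketch (hypothetical scope/suffix names): restore everything under 'encoder'
# except optimizer slot variables, e.g.
#   restore_vars = get_variables_to_restore(['encoder'], ['Adam', 'Adam_1'])
#   saver = tf.train.Saver(restore_vars)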
def remove_first_scope(name):
return '/'.join(name.split('/')[1:])
def collect_vars(scope, start=None, end=None, prepend_scope=None):
vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
var_dict = OrderedDict()
if isinstance(start, str):
for i, var in enumerate(vars):
var_name = remove_first_scope(var.op.name)
if var_name.startswith(start):
start = i
break
if isinstance(end, str):
for i, var in enumerate(vars):
var_name = remove_first_scope(var.op.name)
if var_name.startswith(end):
end = i
break
for var in vars[start:end]:
var_name = remove_first_scope(var.op.name)
if prepend_scope is not None:
var_name = os.path.join(prepend_scope, var_name)
var_dict[var_name] = var
return var_dict
# Data augmentation (applied to the image batch and its background together)
def data_augmentation_together(batch, bg, img_size):
batch_size = batch.shape[0]
# left-right flip
if np.random.rand(1) > 0.5:
batch = batch[:, :, ::-1, :]
bg = bg[:, :, ::-1, :]
# up-down flip
if np.random.rand(1) > 0.5:
batch = batch[:, ::-1, :, :]
bg = bg[:, ::-1, :, :]
# rotate 90
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=1) # 90
bg[id, :, :, :] = np.rot90(bg[id, :, :, :], k=1)
# rotate 180
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=2) # 180
bg[id, :, :, :] = np.rot90(bg[id, :, :, :], k=2) # 180
# rotate 270
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=-1) # 270
bg[id, :, :, :] = np.rot90(bg[id, :, :, :], k=-1) # 270
# random crop and resize 0.5~1.0
if np.random.rand(1) > 0.5:
IMG_SIZE = batch.shape[1]
scale = np.random.rand(1) * 0.5 + 0.5
crop_height = int(scale * img_size)
crop_width = int(scale * img_size)
x_st = int((1 - scale) * np.random.rand(1) * (img_size - 1))
y_st = int((1 - scale) * np.random.rand(1) * (img_size - 1))
x_nd = x_st + crop_width
y_nd = y_st + crop_height
for id in range(batch_size):
cropped_img = batch[id, y_st:y_nd, x_st:x_nd, :]
cropped_bg = bg[id, y_st:y_nd, x_st:x_nd, :]
batch[id, :, :, :] = cv2.resize(cropped_img, dsize=(img_size, img_size))
bg[id, :, :, :] = cv2.resize(cropped_bg, dsize=(img_size, img_size))
return batch, bg
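# Usage sketch: batch and bg are arrays of shape [N, img_size, img_size, C]; the same
# random flip/rotation/crop decisions are applied to both so image/background pairs
# stay aligned:
#   batch, bg = data_augmentation_together(batch, bg, img_size)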
def data_augmentation(batch, img_size):
batch_size = batch.shape[0]
# left-right flip
if np.random.rand(1) > 0.5:
batch = batch[:, :, ::-1, :]
# up-down flip
if np.random.rand(1) > 0.5:
batch = batch[:, ::-1, :, :]
# rotate 90
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=1) # 90
# rotate 180
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=2) # 180
# rotate 270
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=-1) # 270
# random crop and resize 0.5~1.0
if np.random.rand(1) > 0.5:
IMG_SIZE = batch.shape[1]
scale = np.random.rand(1) * 0.5 + 0.5
crop_height = int(scale * img_size)
crop_width = int(scale * img_size)
x_st = int((1 - scale) * np.random.rand(1) * (img_size - 1))
y_st = int((1 - scale) *
|
np.random.rand(1)
|
numpy.random.rand
|
from matplotlib import pyplot as plt
import improc as imp
import numpy as np
import os
import scipy.io as scio
patchSize = [480, 480, 4]
patchSize = [240, 240, 4]
# patchSize = [32, 32, 4]
numPatches = 5000
numSelPtcs = 500
sortway = 'ascent'
sortway = 'descent'
sortway = None
seed = 2019
seed = None
startid = 0
noise = 'wgn'
# noise = None
SNR = 100
tranformway = 'orig'
tranformway = 'flipud'
tranformway = 'fliplr'
tranformway = 'transpose'
tranformway = 'rot90'
tranformway = 'flipud(fliplr)'
tranformway = 'fliplr(rot90)'
# tranformway = 'fliplr(transpose)'
WriteImg = False
# --------------------------------------
datasetname = 'RSSRAI2019TRAIN'
folderIN = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/train/train/'
folderOUT = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/train/samples3/'
num = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20]
# datasetname = 'RSSRAI2019VALID'
# folderIN = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/valid/valid/'
# folderOUT = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/valid/samples3/'
# num = [10, 15]
folderA = 'img_2017'
folderB = 'img_2018'
folderC = 'mask'
folderAin = os.path.join(folderIN, folderA)
folderBin = os.path.join(folderIN, folderB)
folderCin = os.path.join(folderIN, folderC)
folderAout = os.path.join(folderOUT, folderA)
folderBout = os.path.join(folderOUT, folderB)
folderCout = os.path.join(folderOUT, folderC)
os.makedirs(folderAout, exist_ok=True)
os.makedirs(folderBout, exist_ok=True)
os.makedirs(folderCout, exist_ok=True)
imageNameA = 'image_2017_960_960_'
imageNameB = 'image_2018_960_960_'
imageNameC = 'mask_2017_2018_960_960_'
imgspathA = []
imgspathB = []
imgspathC = []
for n in num:
imgspathA.append(folderAin + '/' + imageNameA + str(n) + '.tif')
imgspathB.append(folderBin + '/' + imageNameB + str(n) + '.tif')
imgspathC.append(folderCin + '/' + imageNameC + str(n) + '.tif')
print(imgspathA)
print(imgspathB)
print(imgspathC)
A = []
B = []
C = []
cc = np.zeros((960, 960, 4), dtype='uint8')
for n in range(len(num)):
A.append(imp.imreadadv(imgspathA[n]))
B.append(imp.imreadadv(imgspathB[n]))
c = imp.imreadadv(imgspathC[n])
# print(c.shape)
cc[:, :, 0] = c
cc[:, :, 1] = c
cc[:, :, 2] = c
cc[:, :, 3] = c
# print(cc.min(), cc.max())
C.append(cc.copy())
# N-H-W-C --> H-W-C-N
A = np.transpose(np.array(A), (1, 2, 3, 0))
B = np.transpose(np.array(B), (1, 2, 3, 0))
C = np.transpose(np.array(C), (1, 2, 3, 0))
plt.figure()
plt.subplot(231)
plt.imshow(A[:, :, 0:3, 0])
plt.subplot(232)
plt.imshow(B[:, :, 0:3, 0])
plt.subplot(233)
plt.imshow(C[:, :, 0:3, 0])
print("===tranformway:", tranformway)
if tranformway == 'flipud':
A = np.flipud(A)
B = np.flipud(B)
C = np.flipud(C)
if tranformway == 'fliplr':
A = np.fliplr(A)
B = np.fliplr(B)
C = np.fliplr(C)
if tranformway == 'transpose':
A = np.transpose(A, (1, 0, 2, 3))
B = np.transpose(B, (1, 0, 2, 3))
C = np.transpose(C, (1, 0, 2, 3))
if tranformway == 'rot90':
A = np.rot90(A)
B = np.rot90(B)
C = np.rot90(C)
if tranformway == 'flipud(fliplr)':
A = np.flipud(np.fliplr(A))
B = np.flipud(np.fliplr(B))
C = np.flipud(np.fliplr(C))
if tranformway == 'fliplr(rot90)':
A = np.fliplr(np.rot90(A))
B = np.fliplr(
|
np.rot90(B)
|
numpy.rot90
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Whole population model"""
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
FLAGS = flags.FLAGS
flags.DEFINE_float('lam_w', 0.0001, 'sparsitiy regularization of w')
flags.DEFINE_float('lam_a', 0.0001, 'sparsitiy regularization of a')
flags.DEFINE_integer('ratio_SU', 7, 'ratio of subunits/cells')
flags.DEFINE_float('su_grid_spacing', 3, 'grid spacing')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_float('eta_w', 1e-3, 'learning rate for optimization functions')
flags.DEFINE_float('eta_a', 1e-2, 'learning rate for optimization functions')
flags.DEFINE_float('bias_init_scale', -1, 'bias initialized at scale*std')
flags.DEFINE_string('model_id', 'relu', 'which model to learn?');
flags.DEFINE_float('step_sz', 10, 'step size for learning algorithm')
flags.DEFINE_integer('window', 3, 'size of window for each subunit in relu_window model')
flags.DEFINE_integer('stride', 3, 'stride for relu_window')
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
flags.DEFINE_integer('batchsz', 1000, 'batch size for training')
flags.DEFINE_integer('n_chunks', 216, 'number of data chunks') # should be 216
flags.DEFINE_integer('n_b_in_c', 1, 'number of batches in one chunk of data')
def hex_grid(gridx, d, n):
x_log = np.array([])
y_log = np.array([])
for i in range(n):
x_log = (np.append(x_log, (((i*d)%gridx) +
(np.floor(i*d/gridx)%2)*d/2)) +
np.random.randn(1)*0.01)
y_log = np.append(y_log, np.floor((i*d/gridx))*d/2) + np.random.randn(1)*0.01
return x_log, y_log
def gauss_su(x_log, y_log, gridx=80, gridy=40):
ns = x_log.shape[0]
wts = np.zeros((3200, ns))
for isu in range(ns):
xx = np.zeros((gridy, gridx))
if((np.round(y_log[isu]) >= gridy) |
(np.round(y_log[isu]) < 0) |
(np.round(x_log[isu]) >= gridx) | (np.round(x_log[isu]) < 0)):
continue
xx[np.round(y_log[isu]), np.round(x_log[isu])] = 1
blurred_xx = ndimage.gaussian_filter(xx, sigma=2)
wts[:,isu] = np.ndarray.flatten(blurred_xx)
return wts
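# Note: gauss_su drops a unit impulse at each (rounded) subunit centre on a gridy x gridx
# raster, Gaussian-blurs it with sigma=2, and flattens it, so each column of wts is one
# subunit's spatial weight map over the 40*80 = 3200 stimulus pixels.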
def initialize_su(n_su=107*10, gridx=80, gridy=40, spacing=5.7):
spacing = FLAGS.su_grid_spacing
x_log, y_log = hex_grid(gridx, spacing, n_su)
wts = gauss_su(x_log, y_log)
return wts
def get_test_data():
# stimulus.astype('float32')[216000-1000: 216000-1, :]
# response.astype('float32')[216000-1000: 216000-1, :]
# length
test_data_chunks = [FLAGS.n_chunks];
for ichunk in test_data_chunks:
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_part = data['maskedMovdd_part'].T
resp_part = data['Y_part'].T
test_len = stim_part.shape[0]
#logfile.write('\nReturning test data')
return stim_part, resp_part, test_len
# global stimulus variables
stim_train_part = np.array([])
resp_train_part = np.array([])
chunk_order = np.array([])
cells_choose = np.array([])
chosen_mask = np.array([])
def get_next_training_batch(iteration):
# stimulus.astype('float32')[tms[icnt: icnt+FLAGS.batchsz], :],
# response.astype('float32')[tms[icnt: icnt+FLAGS.batchsz], :]
# FLAGS.batchsz
# we will use global stimulus and response variables
global stim_train_part
global resp_train_part
global chunk_order
togo = True
while togo:
if(iteration % FLAGS.n_b_in_c == 0):
# load new chunk of data
ichunk = (iteration / FLAGS.n_b_in_c) % (FLAGS.n_chunks - 1 ) # last one chunks used for testing
if (ichunk == 0): # shuffle training chunks at start of training data
chunk_order = np.random.permutation(np.arange(FLAGS.n_chunks-1)) # remove first chunk - weird?
# if logfile != None :
# logfile.write('\nTraining chunks shuffled')
if chunk_order[ichunk] + 1 != 1:
filename = FLAGS.data_location + 'Off_par_data_' + str(chunk_order[ichunk] + 1) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = data['maskedMovdd_part']
resp_train_part = data['Y_part']
ichunk = chunk_order[ichunk] + 1
while stim_train_part.shape[1] < FLAGS.batchsz:
#print('Need to add extra chunk')
if (ichunk> FLAGS.n_chunks):
ichunk = 2
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = np.append(stim_train_part, data['maskedMovdd_part'], axis=1)
resp_train_part = np.append(resp_train_part, data['Y_part'], axis=1)
#print(np.shape(stim_train_part), np.shape(resp_train_part))
ichunk = ichunk + 1
# if logfile != None:
# logfile.write('\nNew training data chunk loaded at: '+ str(iteration) + ' chunk #: ' + str(chunk_order[ichunk]))
ibatch = iteration % FLAGS.n_b_in_c
try:
stim_train = np.array(stim_train_part[:,ibatch: ibatch + FLAGS.batchsz], dtype='float32').T
resp_train = np.array(resp_train_part[:,ibatch: ibatch + FLAGS.batchsz], dtype='float32').T
togo=False
except:
iteration = np.random.randint(1,100000)
print('Load exception iteration: ' + str(iteration) + 'chunk: ' + str(chunk_order[ichunk]) + 'batch: ' + str(ibatch) )
togo=True
return stim_train, resp_train, FLAGS.batchsz
def main(argv):
print('\nCode started')
print('Model is ' + FLAGS.model_id)
np.random.seed(FLAGS.np_randseed)
random.seed(FLAGS.randseed)
global chunk_order
chunk_order = np.random.permutation(np.arange(FLAGS.n_chunks-1))
## Load data summary
filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(filename, 'r')
data_summary = sio.loadmat(summary_file)
cells = np.squeeze(data_summary['cells'])
nCells = cells.shape[0]
stim_dim = np.squeeze(data_summary['stim_dim'])
tot_spks = np.squeeze(data_summary['tot_spks'])
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
print(np.shape(total_mask))
print('\ndataset summary loaded')
# decide the number of subunits to fit
Nsub = FLAGS.ratio_SU*nCells
with tf.Session() as sess:
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
resp = tf.placeholder(tf.float32, name='resp')
data_len = tf.placeholder(tf.float32, name='data_len')
if FLAGS.model_id == 'relu':
# lam_c(X) = sum_s(a_cs relu(k_s.x)) , a_cs>0
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'exp':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_bias_init=' + str(FLAGS.bias_init_scale) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'mel_re_pow2':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'relu_logistic':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'relu_proximal':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_eta_w=' + str(FLAGS.eta_w) + '_eta_a=' + str(FLAGS.eta_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_proximal_bg')
if FLAGS.model_id == 'relu_eg':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_eta_w=' + str(FLAGS.eta_w) + '_eta_a=' + str(FLAGS.eta_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_eg_bg')
if FLAGS.model_id == 'relu_window':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother_sfm':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother_sfm_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_a_support':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'exp_window_a_support':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
if not gfile.IsDirectory(parent_folder):
gfile.MkDir(parent_folder)
FLAGS.save_location = parent_folder +short_filename + '/'
print(gfile.IsDirectory(FLAGS.save_location))
if not gfile.IsDirectory(FLAGS.save_location):
gfile.MkDir(FLAGS.save_location)
print(FLAGS.save_location)
save_filename = FLAGS.save_location + short_filename
'''
# load previous iteration data, if available
try:
saved_filename = save_filename + '.pkl'
saved_file = gfile.Open(saved_filename,'r')
saved_data = pickle.load(saved_file)
w_load = saved_data['w']
a_load = saved_data['a']
w_init = saved_data['w_init']
a_init = saved_data['a_init']
ls_train_log = np.squeeze(saved_data['ls_train_log'])
ls_test_log = np.squeeze(saved_data['ls_test_log'])
start_iter = np.squeeze(saved_data['last_iter'])
chunk_order = np.squeeze(saved_data['chunk_order'])
print(np.shape(w_init),np.shape(a_init))
load_prev = True
except:
# w and a initialized same for all models! (maybe should be different for exp NL?)
w_init = initialize_su(n_su=Nsub) * 0.01
if FLAGS.model_id != 'exp':
a_init = np.random.rand(Nsub, nCells) * 0.01
else:
a_init = np.random.rand(nCells,1,Nsub) * 0.01
w_load = w_init
a_load = a_init
ls_train_log = np.array([])
ls_test_log = np.array([])
start_iter=0
print(np.shape(w_init),np.shape(a_init))
load_prev = False
'''
w_init = initialize_su(n_su=Nsub) * 0.01
if FLAGS.model_id != 'exp':
a_init = np.random.rand(Nsub, nCells) * 0.01
else:
a_init = np.random.rand(nCells,1,Nsub) * 0.01
w_load = w_init
a_load = a_init
ls_train_log = np.array([])
ls_test_log = np.array([])
print(np.shape(w_init),np.shape(a_init))
load_prev = False
if FLAGS.model_id == 'relu':
# LNL model with RELU nl
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), tf.nn.relu(a)) + 0.0001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam))) / data_len
loss = (loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) +
FLAGS.lam_a*tf.reduce_sum(tf.abs(a)))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a])
a_pos = tf.assign(a, (a + tf.abs(a))/2)
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
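# In words: lam_c(X) = sum_s a_cs * relu(k_s . x) with a_cs clipped to be non-negative
# after every step, and loss_inter is the (binned) Poisson negative log-likelihood
# sum(lam)/120 - sum(resp * log(lam)), normalized by data_len; L1 penalties on w and a
# are added on top.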
if FLAGS.model_id == 'exp':
# lam_c(X) = sum_s(exp(k_s.x + b_cs)) ; used in earlier models.
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.transpose(tf.reduce_sum(tf.exp(tf.matmul(stim,w) + a), 2))
loss_inter = (tf.reduce_sum(lam/tot_spks)/120. - tf.reduce_sum(resp*tf.log(lam)/tot_spks)) / data_len
loss = loss_inter
train_step = tf.train.AdamOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'mel_re_pow2':
# lam_c(X) = sum_s(relu(k_s.x + a_cs)^2); MEL approximation of log-likelihood
stimulus,_,_ = get_next_training_batch(10)
sigma = np.diag(np.diag(stimulus[1000:2000,:].T.dot(stimulus[1000:2000,: ])))
sig_tf = tf.Variable(sigma,dtype='float32')
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
lam = tf.matmul(tf.pow(tf.nn.relu(tf.matmul(stim, w)), 2), a) + 0.0001
loss_p1 = tf.reduce_sum(tf.matmul(tf.transpose(a / tot_spks),tf.expand_dims(tf.diag_part(tf.matmul(tf.transpose(w),tf.matmul(sig_tf,w))) / 2,1)))
loss_inter = (loss_p1 / 120.) - (tf.reduce_sum(resp * tf.log(lam) / tot_spks)) / data_len
loss = (loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) +
FLAGS.lam_a*tf.reduce_sum(tf.abs(a)))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_logistic':
# f(X) = sum_s(a_cs relu(k_s.x)), acs - any sign, logistic loss.
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
b_init = np.random.randn(nCells)#np.log((np.sum(response,0))/(response.shape[0]-np.sum(response,0)))
b = tf.Variable(b_init,dtype='float32')
f = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + b
loss_inter = tf.reduce_sum(tf.nn.softplus(-2 * (resp - 0.5)*f))/ data_len
loss = (loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) +
FLAGS.lam_a*tf.reduce_sum(tf.abs(a)))
sigmoid_input = -resp*f
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a, b])
a_pos = tf.assign(a, (a + tf.abs(a))/2)
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
b_summary = tf.histogram_summary('b', b)
if FLAGS.model_id == 'relu_proximal':
# lnl model with regularization, with proximal updates
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + 0.0001
loss_inter = (tf.reduce_sum(lam/tot_spks)/120. - tf.reduce_sum(resp*tf.log(lam)/tot_spks)) / data_len
loss = loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) + FLAGS.lam_a*tf.reduce_sum(tf.abs(a))
# training steps for a.
train_step_a = tf.train.AdagradOptimizer(FLAGS.eta_a).minimize(loss_inter, var_list=[a])
# as 'a' is positive, this is op soft-thresholding for L1 and projecting to feasible set
soft_th_a = tf.assign(a, tf.nn.relu(a - FLAGS.eta_a * FLAGS.lam_a))
# training steps for w
train_step_w = tf.train.AdagradOptimizer(FLAGS.eta_w).minimize(loss_inter, var_list=[w])
# do soft thresholding for 'w'
soft_th_w = tf.assign(w, tf.nn.relu(w - FLAGS.eta_w * FLAGS.lam_w) - tf.nn.relu(- w - FLAGS.eta_w * FLAGS.lam_w))
def training(inp_dict):
# gradient step for 'w'
sess.run(train_step_w, feed_dict=inp_dict)
# soft thresholding for w
sess.run(soft_th_w, feed_dict=inp_dict)
# gradient step for 'a'
sess.run(train_step_a, feed_dict=inp_dict)
# soft thresholding for a, and project in constraint set
sess.run(soft_th_a, feed_dict=inp_dict)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
if FLAGS.model_id == 'relu_eg':
a_load = a_load / np.sum(a_load, axis=0)# normalize initial a
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + 0.0001
loss_inter = (tf.reduce_sum(lam/tot_spks)/120. - tf.reduce_sum(resp*tf.log(lam)/tot_spks)) / data_len
loss = loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w))
# steps to update a
# as 'a' is positive, this is op soft-thresholding for L1 and projecting to feasible set
eta_a_tf = tf.constant(np.squeeze(FLAGS.eta_a),dtype='float32')
grads_a = tf.gradients(loss_inter, a)
exp_grad_a = tf.squeeze(tf.mul(a,tf.exp(-eta_a_tf * grads_a)))
a_update = tf.assign(a,exp_grad_a/tf.reduce_sum(exp_grad_a,0))
# steps to update w
# gradient update of 'w'..
train_step_w = tf.train.AdagradOptimizer(FLAGS.eta_w).minimize(loss_inter, var_list=[w])
# do soft thresholding for 'w'
soft_th_w = tf.assign(w, tf.nn.relu(w - FLAGS.eta_w * FLAGS.lam_w) - tf.nn.relu(- w - FLAGS.eta_w * FLAGS.lam_w))
def training(inp_dict):
# gradient step for 'a' and 'w'
sess.run(train_step_w, feed_dict=inp_dict)
# soft thresholding for w
sess.run(soft_th_w)
# update a
sess.run(a_update, feed_dict=inp_dict)
print('eg training made')
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
if FLAGS.model_id == 'relu_window':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32')) # exp 5
#w = tf.Variable(np.array(np.random.randn(dimx, dimy, n_pix),dtype='float32')) # exp 4
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_wts = tf.nn.relu(tf.reduce_sum(tf.mul(stim_masked, w), 3))
lam = tf.matmul(tf.reshape(stim_wts, [-1,dimx*dimy]),a) + 0.00001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w,a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_mother':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
# mother weight convolution
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D, w_mother, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID"),3)
#
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
su_act = tf.nn.relu(stim_del + stim_convolved)
lam = tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]),a) + 0.00001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w_mother, w_del, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_del_summary = tf.histogram_summary('w_del', w_del)
w_mother_summary = tf.histogram_summary('w_mother', w_mother)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_mother_sfm':
# softmax weights used!
# convolution weights, each layer is delta(x,y) - basically takes a window of the stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w_del = tf.Variable(np.array(0.1 + 0.05*np.random.randn(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(
|
np.random.randn(dimx*dimy, nCells)
|
numpy.random.randn
|
#
# Copyright © 2020 Intel Corporation.
#
# This software and the related documents are Intel copyrighted
# materials, and your use of them is governed by the express
# license under which they were provided to you (License). Unless
# the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the
# related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with
# no express or implied warranties, other than those that are
# expressly stated in the License.
"""Various plotting utilities for DNNs on Loihi."""
import os
from typing import TYPE_CHECKING
import numpy as np
from nxsdk_modules_ncl.dnn.src.utils import normalizeImageDim, importPlt
from matplotlib.ticker import IndexLocator, AutoMinorLocator
plt = importPlt()
if TYPE_CHECKING:
from nxsdk_modules_ncl.dnn.src.data_structures import Layer
plotproperties = {
'font.size': 12,
'axes.titlesize': 'x-large',
'axes.labelsize': 'x-large',
'xtick.labelsize': 'x-large',
'xtick.major.size': 7,
'xtick.minor.size': 5,
'ytick.labelsize': 'x-large',
'ytick.major.size': 7,
'ytick.minor.size': 5,
'legend.fontsize': 'x-large',
'lines.markersize': 6,
'figure.figsize': (7, 3),
'savefig.format': 'pdf',
'savefig.dpi': 300}
# matplotlib.rcParams.update(plotproperties)
COLORS = ['firebrick', 'forestgreen', 'gold', 'skyblue', 'maroon',
'darkblue', 'grey']
def plotMat(mat, fontSize=0, backgroundVal=0, showColorBar=False, title=None,
savepath=None):
"""Plot a matrix showing numeric values for each entry.
:param np.ndarray mat: Matrix to plot.
:param int fontSize: Fontsize for values in ``mat``.
:param int backgroundVal: Entries with this value in ``mat`` are
considered background.
:param bool showColorBar: Whether to display the color bar.
:param str title: Figure title.
:param str savepath: If given, where to save figure.
"""
fig, ax = plt.subplots()
    im = ax.matshow(mat, cmap='Blues')
if fontSize > 0:
assert mat.ndim == 2
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
val = mat[i, j]
if val != backgroundVal:
ax.text(j, i, str(val), va='center', ha='center',
fontdict={'fontsize': fontSize})
if title is not None:
ax.set_title(title)
plt.axis('equal')
plt.axis('tight')
if showColorBar:
        fig.colorbar(im)
if savepath is not None:
fig.savefig(savepath, bbox_inches='tight')
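# Example usage of plotMat (illustrative values and path only):
#
#   demo = np.array([[3, 0], [1, 5]])
#   plotMat(demo, fontSize=10, title='demo matrix', savepath='/tmp/demo_matrix')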
def plot_sweep_results(path, sweep, xlabel, shape=None):
"""Plot the results of a sweep across several network configurations.
:param str path: Where to load the data from.
:param str sweep: Name of parameter that was varied.
:param str xlabel: Label for x-axis of plots.
:param tuple | list | np.ndarray shape: Input shape, only used for labels.
"""
data_path = os.path.join(path, 'partitions')
plot_path = os.path.join(path, 'plots')
if not os.path.exists(plot_path):
os.makedirs(plot_path)
data_path_sweep = os.path.join(data_path, sweep)
plot_path_sweep = os.path.join(plot_path, sweep)
if not os.path.exists(plot_path_sweep):
os.makedirs(plot_path_sweep)
core_counts = {}
core_occupancy = {}
for scale in os.listdir(data_path_sweep):
core_counts[scale] = 0
core_occupancy[scale] = []
for layer_file in os.listdir(os.path.join(data_path_sweep, scale)):
data = np.load(os.path.join(data_path_sweep, scale, layer_file))
core_counts[scale] += data['numCores']
core_occupancy[scale] += list(data['coreOccupancy'].flatten())
scales = [eval(k) for k in core_counts.keys()]
if shape is not None:
xtick_labels = ['{}'.format(int(scale * shape[0]))
for scale in scales]
scales = np.array(scales) ** 2
xtick_locs = scales
# Scale factor reduces network size quadratically
xtick_kwargs = {'fontsize': 11}
else:
xtick_locs = scales
xtick_labels = scales
xtick_kwargs = {}
plt.figure()
plt.scatter(scales, core_counts.values(), label='measured scaling')
num_cores_per_chip = 128
form_factors = {'Loihi': 1, 'KapohoBay': 2, 'WolfMountain': 4,
'Nahuku8': 8, 'Nahuku32': 32}
for label, form_factor in form_factors.items():
plt.hlines(form_factor * num_cores_per_chip, -0.01, 1.05, colors='k',
linewidth=0.5)
plt.text(0.8, (0.6 * form_factor) * num_cores_per_chip, label,
fontsize=11)
# plt.plot([0, 1], [0, core_counts['1']], color='orange',
# label='linear scaling')
# plt.title("Cost vs network size for ResNet")
plt.xlabel(xlabel)
plt.ylabel("Num cores")
plt.yscale('log', basey=2)
plt.xticks(xtick_locs, xtick_labels, **xtick_kwargs)
plt.yticks(2**np.arange(7) * num_cores_per_chip,
2**np.arange(7) * num_cores_per_chip)
# plt.grid()
# plt.legend()
plt.xlim(-0.01, 1.05)
plt.ylim(32, None)
plt.savefig(os.path.join(plot_path_sweep, 'num_cores'),
bbox_inches='tight')
plt.figure()
plt.boxplot([100 * np.array(data) / 1024
for data in core_occupancy.values()],
positions=scales, widths=0.02, meanline=True,
manage_xticks=False, showmeans=True, showcaps=False)
plt.xlabel(xlabel)
plt.ylabel("Core occupancy [%]")
plt.xticks(xtick_locs, xtick_labels, **xtick_kwargs)
plt.xlim(0, None)
plt.ylim(-5, 105)
plt.savefig(os.path.join(plot_path_sweep, 'core_occupancy'),
bbox_inches='tight')
plt.figure()
for scale, data in zip(scales, core_occupancy.values()):
plt.scatter(np.repeat(scale, len(data)),
100 * np.array(data) / 1024, color='steelblue')
plt.xlabel(xlabel)
plt.ylabel("Core occupancy [%]")
plt.xticks(xtick_locs, xtick_labels, **xtick_kwargs)
plt.grid()
plt.xlim(0, None)
plt.ylim(0, 100)
plt.savefig(os.path.join(plot_path_sweep, 'core_occupancy2'),
bbox_inches='tight')
def plot_core_utilization(layers, path):
"""Plot how efficiently the resources of cores are used.
For each layer, draw the distribution of each resource type as a box plot.
Resource types include compartments, synaptic memory, input and output axon
config entries.
:param list[Layer] layers: List of partitioned layers.
:param str path: Where to write the output to.
"""
compartments = []
input_axons = []
output_axons = []
synapses = []
for layer in layers:
compartments.append([])
input_axons.append([])
output_axons.append([])
synapses.append([])
for partition in layer.partitions:
compartments[-1].append(len(partition.compartmentGroup.cxIds)
* 100 / 1024)
input_axons[-1].append(partition.inputAxonCost * 100)
output_axons[-1].append(partition.outputAxonCost * 100)
synapses[-1].append(partition.synapseCost * 100)
labels = ['cx', 'inAx', 'outAx', 'syn']
num_cost_terms = len(labels)
num_layers = len(layers)
xticks = np.arange(num_layers)
jitter = (np.arange(num_cost_terms) - (num_cost_terms - 1) / 2) / 10
# colors = plt.cm.get_cmap('Set1', num_cost_terms).colors
colors = COLORS
fig, ax = plt.subplots()
use_boxplot = True
for i, data in enumerate([compartments, input_axons,
output_axons, synapses]):
color = colors[i]
if use_boxplot:
bp = ax.boxplot(data, positions=xticks+jitter[i], widths=0.09,
meanline=True, manage_xticks=False, showmeans=True,
showcaps=False, patch_artist=True,
medianprops={'linewidth': 10, 'linestyle': ':'},
meanprops={'linewidth': 10},
flierprops={'markerfacecolor': color,
'marker': '.',
'markeredgecolor': color})
# Color
for element in ['boxes', 'whiskers', 'fliers', 'medians', 'caps',
'means']:
plt.setp(bp[element], color=color)
for patch in bp['boxes']:
patch.set(facecolor='white')
# Legend
ax.text(num_layers - 0.4, 100 - i * 5, labels[i], color=color)
else:
label = labels[i]
for j, column in enumerate(data):
plt.scatter(np.repeat(j + jitter[i], len(column)), column,
color=color, label=label)
label = None
ax.legend()
ax.set_xlabel("Layer number")
ax.set_ylabel("Core utilization [%]")
ax.xaxis.set_ticks_position('none')
ax.set_xticks(np.arange(0, num_layers, num_layers // 5 + 1))
ax.set_xlim(-0.5, num_layers - 0.5)
ax.set_ylim(-5, 105)
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
ax.grid(b=True, axis='x', which='minor')
fig.savefig(os.path.join(path, 'core_utilization'), bbox_inches='tight')
np.savez_compressed(os.path.join(path, 'core_utilization'),
cx=compartments, in_ax=input_axons,
out_ax=output_axons, syn=synapses)
def plot_multiplicity(m, path, name):
"""Plot multiplicityMap.
:param np.array m: multiplicityMap.
:param str path: Where to save figure.
:param str name: Name of partition.
"""
m = normalizeImageDim(m)
fig, ax = plt.subplots()
im = ax.imshow(m, cmap='Blues', vmin=0)
vals = np.unique(m)
fig.colorbar(im, ticks=[0] + list(vals[::(len(vals) // 10) + 1]),
fraction=0.02, pad=0.04)
ax.set_xticks(np.arange(0, m.shape[1], m.shape[1] // 5 + 1))
ax.set_yticks(np.arange(0, m.shape[0], m.shape[0] // 5 + 1))
ax.set_title('Multiplicity map of input to {}'.format(name))
ax.tick_params(which='both', left=False, bottom=False)
ax.xaxis.set_minor_locator(IndexLocator(1, 1))
ax.yaxis.set_minor_locator(IndexLocator(1, 1))
ax.grid(which='minor')
fig.savefig(os.path.join(path, 'multiplicityMap_{}'.format(name)),
bbox_inches='tight')
def plot_coreIdMap(m, path, name):
"""Plot coreIdMap.
:param np.array m: coreIdMap.
:param str path: Where to save figure.
:param str name: Name of partition.
"""
yy = normalizeImageDim(m)
shape = yy.shape
fig, ax = plt.subplots()
im = ax.imshow(yy, cmap='Blues', vmin=0)
vals = np.unique(yy)
fig.colorbar(im, ticks=vals[::len(vals) // 10 + 1],
fraction=0.02, pad=0.04)
ax.set_xticks(np.arange(0, shape[1], shape[1] // 5 + 1))
ax.set_yticks(np.arange(0, shape[0], shape[0] // 5 + 1))
ax.tick_params(which='both', left=False, bottom=False)
ax.xaxis.set_minor_locator(IndexLocator(1, 1))
ax.yaxis.set_minor_locator(IndexLocator(1, 1))
ax.grid(which='minor')
if m.ndim == 3:
num_depth_partitions = len(np.unique(m[0, 0, :]))
num_channels = m.shape[-1]
s = '' if num_depth_partitions == 1 else 's'
ss = '' if num_channels == 1 else 's'
ax.set_title('Core ID map of layer {}\n({} partition{} along '
'{} channel{}.)'.format(name, num_depth_partitions, s,
num_channels, ss))
else:
ax.set_title('Core ID map of layer {}'.format(name))
fig.savefig(os.path.join(path, 'partition_{}'.format(name)),
bbox_inches='tight')
def plot_core_occupancy(occ, path, name):
"""Plot number of compartments per core of a layer.
:param np.ndarray occ: Core occupancy to plot. Shape is equal to the number
of cores per axis.
:param str path: Where to save figure.
:param str name: Name of partition.
"""
occ = normalizeImageDim(occ)
fig, ax = plt.subplots()
im = ax.imshow(occ, cmap='Blues', vmin=0, vmax=1024)
vals = np.unique([0] + list(np.ravel(occ)))
if 1024 - np.max(vals) > 100:
vals = np.concatenate([vals, [1024]])
fig.colorbar(im, ticks=vals[::(len(vals) // 10) + 1],
fraction=0.02, pad=0.04)
ax.set_xticks(np.arange(0, occ.shape[1], occ.shape[1] // 10 + 1))
ax.set_yticks(np.arange(0, occ.shape[0], occ.shape[0] // 10 + 1))
ax.tick_params(which='both', left=False, bottom=False)
ax.xaxis.set_minor_locator(IndexLocator(1, 1))
ax.yaxis.set_minor_locator(IndexLocator(1, 1))
ax.grid(which='minor')
ax.set_title('Core occupancy of layer {}'.format(name))
fig.savefig(os.path.join(path, 'coreOccupancy_{}'.format(name)),
bbox_inches='tight')
def visualize_partitions(path):
"""Visualize the partition result of a layer.
:param str path: Where to load the data from and save figures.
"""
data_path = os.path.join(path, 'model_dumps', 'partitions')
for layer_name in os.listdir(data_path):
data = np.load(os.path.join(data_path, layer_name))
name = data['id']
y = data['multiplicityMap']
if y.size:
plot_multiplicity(y, path, name)
plot_coreIdMap(data['coreIdMap'], path, name)
plot_core_occupancy(data['coreOccupancy'], path, name)
def plot_cost_graph(path):
"""Visualize the cost of a number of layer partitions.
:param str path: Where to load the data from.
"""
filepath = os.path.join(path, 'candidate_costs.npz')
if not os.path.exists(filepath):
print("Plotting cost graph failed: Could not load {}.".format(
filepath))
return
data = np.load(filepath)
all_costs = data['all_costs']
num_candidates, num_layers = all_costs.shape
num_optimal_candidates = int(np.sqrt(num_candidates))
# Candidates are already sorted by cost.
optimal_costs = all_costs[:num_optimal_candidates]
fig, ax = plt.subplots()
# Plot the cost of each candidate of each layer as data points, together
# with the mean and variance across the candidates of a layer.
colormap = plt.cm.get_cmap('prism', num_candidates)
colors = np.array([colormap(i) for i in np.repeat(np.arange(
num_optimal_candidates), num_optimal_candidates)])
for layer_num, partition_costs in enumerate(all_costs.T):
ax.errorbar(layer_num, np.mean(partition_costs),
2 * np.sqrt(
|
np.var(partition_costs)
|
numpy.var
|
"""Functions for propagation through free space
Propagation class initialization options:
kernel_type: 'fraunhofer' (alias 'fourier'), 'fresnel', 'fresnel_conv',
        'asm' (alias 'angular_spectrum'), or 'kirchhoff'. The transfer
function approaches may be more accurate
fraunhofer: far-field diffraction, purely a Fourier transform
fresnel: near-field diffraction with Fresnel approximation, implemented
as a multiplication with a transfer function in Fourier domain
fresnel_conv: same as fresnel, but implemented as a convolution with a
spatial kernel, via FFT conv for speed
asm: near-field diffraction with the Angular Spectrum Method,
implemented as a transfer function. Note that this may have a 1px
shift relative to the others due to the source paper padding the
input by an extra pixel (for linear convolution) for derivations
    kirchhoff: near-field diffraction with the Kirchhoff equations,
implemented with a spatial kernel
propagation_distances: distance or distances from SLM to image plane.
Accepts scalars or lists.
slm_resolution: number of pixels on SLM
slm_pixel_pitch: size of pixels on SLM.
image_resolution: number of sampling locations at image plane (optional,
default matches SLM resolution)
wavelength: laser wavelength, (optional, default 532e-9).
propagation_parameters: override parameters for kernel/transfer function
construction. Optional. Possible parameters, with
defaults given:
# for all methods
'padding_type', 'zero': pad complex field with 'median' or 'zero'.
Using median may have less ringing, but zero
is probably more accurate
# for the spatial kernel convolution methods
'circular_prop_mask', True: circular mask for propagation kernels, for
bandlimiting the phase function
'apodize_kernel', True: smooth the circular mask
'apodization_width', 50: width of cosine dropoff at edge, in pixels
'prop_mask_fraction', 1: artificially reduces the size of propagation
mask (e.g., 2 will use half the radius)
'normalize_output', True: forces output field to have the same average
amplitudes as the input when True. Only valid
when using a single propagation distance
# for the transfer function multiplication methods
'circular_padding', False: doesn't pad the field when True, resulting in
implicit circular padding in the Fourier
domain for the input field. May reduce
ringing at the edges
'normalize_output', False: same as for the spatial kernel methods, but
defaults to False because the transfer
functions do a better job at energy
preservation by default
# only for the Angular Spectrum Method
'extra_pixel', True: when not using circular_padding, i.e., for a linear
convolution, pad one extra pixel more than required
(i.e., linear conv to length a + b instead of the
minimum valid a + b - 1). The derivation from
Matsushima and Shimobaba (2009) has an extra pixel,
may not be correct without it, but set if the pixel
shift is important
# only for Fraunhofer
'fraunhofer_crop_image', True: when resolution changes, crop image
plane instead of SLM plane, details in
__init__ for FraunhoferPropagation
# only for Fraunhofer with multiple distances
'focal_length', no default: required to determine plane for Fourier
relationship (e.g., lens focal length)
relative to which the other distances are
propagated.
device: torch parameter for the device to place the convolution kernel on.
If not given, will default to the device of the input_field.
Propagation.forward and Propagation.backward:
    input_field: complex field at starting plane (e.g. SLM for forward)
Returns: output_field at the ending plane matching the specified resolution
(for single distance) or output_fields, a dictionary of fields at
each propagation distance (keys are distances)
All units are in meters and radians unless explicitly stated as otherwise.
Terms for resolution are in ij (matrix) order, not xy (cartesian) order.
input_field should be a torch Tensor, everything else can be either numpy or
native python types. input_field is assumed to be a stack of [real, imag] for
input to the fft (see the torch.fft implementation for details). The
output_field follows the same convention.
Example: Propagate some input_field by 10cm with Fresnel approx, 5um pixel pitch
on the SLM, with a 1080p SLM and image size equal to it
prop = Propagation('fresnel', 10e-2, [1080, 1920], [5e-6, 5e-6])
output_field = prop.forward(input_field)
output_field = prop.backward(input_field)
Example: Propagate some input_field by to multiple distances, using Kirchhoff
propagation.
prop = Propagation('kirchhoff', [10e-2, 20e-2, 30e-2], [1080, 1920],
[5e-6, 5e-6])
Example: Setting non-default parameters, e.g. wavelength of 632nm, image
resolution of 720p, image sampling of 8um, some of the extra propagation
parameters, or device to gpu 0
propagation_parameters = {'circular_prop_mask': True,
'apodize_kernel': True}
prop = Propagation('fresnel', 10e-2, [1080, 1920], [5e-6, 5e-6],
[720, 1280], [8e-6, 8e-6], 632e-9,
propagation_parameters, torch.device('cuda:0'))
# or with named parameters
prop = Propagation(kernel_type='fresnel',
propagation_distances=10e-2,
slm_resolution=[1080, 1920],
slm_pixel_pitch=[5e-6, 5e-6],
image_resolution=[720, 1280],
wavelength=632e-9,
propagation_parameters=propagation_parameters,
device=torch.device('cuda:0'))
Example: Other propagation kernels, alternate ways to define it
prop = Propagation('Fresnel', ...) # not case sensitive
prop = Propagation('fraunhofer', ...) # Fraunhofer
prop = Propagation('asm', ...) # Angular Spectrum Method
Author: <NAME>
"""
import numpy as np
from scipy.signal import fftconvolve
import torch
import torch.nn as nn
import warnings
import utils
class Propagation:
"""Convenience class for using different propagation kernels and sets of
propagation distances"""
def __new__(cls, kernel_type, propagation_distances, slm_resolution,
slm_pixel_pitch, image_resolution=None, wavelength=532e-9,
propagation_parameters=None, device=None):
# process input types for propagation distances
if isinstance(propagation_distances, (np.ndarray, torch.Tensor)):
propagation_distances = propagation_distances.flatten().tolist()
# singleton lists should be made into scalars
if (isinstance(propagation_distances, (tuple, list))
and len(propagation_distances) == 1):
propagation_distances = propagation_distances[0]
# scalar means this is a single distance propagation
if not isinstance(propagation_distances, (tuple, list)):
cls_out = {'fresnel': FresnelPropagation,
'fresnel_conv': FresnelConvPropagation,
'asm': AngularSpectrumPropagation,
'angular_spectrum': AngularSpectrumPropagation,
'kirchhoff': KirchhoffPropagation,
'fraunhofer': FraunhoferPropagation,
'fourier': FraunhoferPropagation}[kernel_type.lower()]
return cls_out(propagation_distances, slm_resolution,
slm_pixel_pitch, image_resolution, wavelength,
propagation_parameters, device)
else:
return MultiDistancePropagation(
kernel_type, propagation_distances, slm_resolution,
slm_pixel_pitch, image_resolution, wavelength,
propagation_parameters, device)
class PropagationBase(nn.Module):
    """Interface for propagation functions, with some shared functions"""
    image_native_pitch = None
def __init__(self, propagation_distance, slm_resolution, slm_pixel_pitch,
image_resolution=None, wavelength=532e-9,
propagation_parameters=None, device=None):
super().__init__()
self.slm_resolution = np.array(slm_resolution)
self.slm_pixel_pitch = np.array(slm_pixel_pitch)
self.propagation_distance = propagation_distance
self.wavelength = wavelength
self.dev = device
# default image dimensions to slm dimensions
if image_resolution is None:
self.image_resolution = self.slm_resolution
else:
self.image_resolution = np.array(image_resolution)
# native image sampling matches slm pitch, unless overridden by a
# deriving class (e.g. FraunhoferPropagation)
if self.image_native_pitch is None:
self.image_native_pitch = self.slm_pixel_pitch
# set image pixel pitch to native image sampling
self.image_pixel_pitch = self.image_native_pitch
# physical size of planes in meters
self.slm_size = self.slm_pixel_pitch * self.slm_resolution
self.image_size = self.image_pixel_pitch * self.image_resolution
# dictionary for extra parameters particular to base class
self.propagation_parameters = propagation_parameters
if self.propagation_parameters is None:
self.propagation_parameters = {}
# set default for padding type when convolving
try:
self.padding_type = self.propagation_parameters.pop('padding_type')
except KeyError:
self.padding_type = 'zero'
def forward(self, input_field):
"""Returns output_field, which is input_field propagated by
propagation_distance, from slm_resolution to image_resolution"""
raise NotImplementedError('Must implement in derived class')
def backward(self, input_field):
"""Returns output_field, which is input_field propagated by
-propagation_distance, from image_resolution to slm_resolution"""
raise NotImplementedError('Must implement in derived class')
def to(self, *args, **kwargs):
"""Moves non-parameter tensors needed for propagation to device
Also updates the internal self.dev added to this class
"""
slf = super().to(*args, **kwargs)
device_arg = torch._C._nn._parse_to(*args, **kwargs)[0]
if device_arg is not None:
slf.dev = device_arg
return slf
def pad_smaller_dims(self, field, target_shape, pytorch=True, padval=None):
if padval is None:
padval = self.get_pad_value(field, pytorch)
return utils.pad_smaller_dims(field, target_shape, pytorch,
padval=padval)
def crop_larger_dims(self, field, target_shape, pytorch=True):
return utils.crop_larger_dims(field, target_shape, pytorch)
def get_pad_value(self, field, pytorch=True):
if self.padding_type == 'zero':
return 0
elif self.padding_type == 'median':
if pytorch:
return torch.median(stacked_abs(field))
else:
return np.median(np.abs(field))
else:
raise ValueError('Unknown padding type')
class NearFieldConvPropagationBase(PropagationBase):
"""Defines functions shared across propagation near field approximations
based on convolving a kernel
"""
def __init__(self, propagation_distance, slm_resolution, slm_pixel_pitch,
image_resolution=None, wavelength=532e-9,
propagation_parameters=None, device=None):
super().__init__(propagation_distance, slm_resolution, slm_pixel_pitch,
image_resolution, wavelength, propagation_parameters,
device)
# diffraction pattern calculations
self.max_diffraction_angle = np.arcsin(wavelength
/ self.slm_pixel_pitch / 2)
self.prop_mask_radius = (propagation_distance
* np.tan(self.max_diffraction_angle))
# limit zone plate to maximum usable size
slm_diagonal = np.sqrt((self.slm_size**2).sum())
image_diagonal = np.sqrt((self.image_size**2).sum())
max_usable_distance = slm_diagonal / 2 + image_diagonal / 2
self.prop_mask_radius = np.minimum(self.prop_mask_radius,
max_usable_distance)
# force input and output of forward/backward
# operations to have the same absolute sum
try:
self.normalize_output = self.propagation_parameters.pop(
'normalize_output')
except KeyError:
self.normalize_output = True
# sets self.foward_kernel and self.backward_kernel
self.compute_conv_kernels(**self.propagation_parameters)
if self.dev is not None:
self.forward_kernel = self.forward_kernel.to(self.dev)
self.backward_kernel = self.backward_kernel.to(self.dev)
def compute_conv_kernels(self, *, circular_prop_mask=True,
apodize_kernel=True, apodization_width=50,
prop_mask_fraction=1., **kwargs):
# sampling positions along the x and y dims
coords_x = np.arange(self.slm_pixel_pitch[1],
self.prop_mask_radius[1] / prop_mask_fraction,
self.slm_pixel_pitch[1])
coords_x =
|
np.concatenate((-coords_x[::-1], [0], coords_x))
|
numpy.concatenate
|
#!/usr/local/bin/python
#
# sound-card APRS decoder
#
# <NAME>, AB1HL
#
import numpy
import wave
import weakaudio
import weakutil
import time
import scipy
import sys
import os
import math
from scipy.signal import lfilter, filtfilt
import numpy.lib.stride_tricks
# optimizable tuning parameters.
smoothwindow = 2.0 # symbols, 1.0 0.8 0.7 1.7(for hamming smoother)
slicewindow = 25.0 # symbols, 20 30 20
tonegain = 2.0 # 2.0 (useful for track 02)
advance = 8.0 # symbols 1.0 8.0
# http://gordoncluster.wordpress.com/2014/02/13/python-numpy-how-to-generate-moving-averages-efficiently-part-2/
def smooth(values, window):
#weights = numpy.repeat(1.0, window)/window
weights = numpy.hamming(window)
sma = numpy.convolve(values, weights, 'valid')
sma = sma[0:len(values)]
return sma
# https://github.com/tcort/va2epr-tnc/blob/master/firmware/aprs.c
# <NAME> <<EMAIL>>
# update a CRC with one new byte.
# initial crc should be 0xffff.
def crciter(crc, byte):
byte &= 0xff
crc ^= byte
for i in range(0, 8):
if crc & 1:
crc = (crc >> 1) ^ 0x8408
else:
crc >>= 1
return crc
def crc16(bytes):
crc = 0xffff
for b in bytes:
crc = crciter(crc, b)
return crc
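# Illustrative check (made-up bytes, not part of the decoder): the frame check
# sequence of an AX.25 packet is the CRC over the frame bytes, starting from 0xffff.
#
#   fcs = crc16([0x82, 0xa0, 0xa4, 0xa6, 0x40, 0x40, 0x60])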
# https://witestlab.poly.edu/blog/capture-and-decode-fm-radio/
def deemphasize(samples, rate):
d = rate * 750e-6 # Calculate the # of samples to hit the -3dB point
x = numpy.exp(-1/d) # Calculate the decay between each sample
b = [1-x] # Create the filter coefficients
a = [1,-x]
out = scipy.signal.lfilter(b,a,samples)
return out
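# Note: this is a single-pole IIR low-pass, y[n] = (1-x)*s[n] + x*y[n-1], where
# x = exp(-1/(rate*750e-6)) places the -3dB point of a 750 microsecond
# de-emphasis network.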
class APRSRecv:
def __init__(self):
self.rate = None
# Bell-202
self.baud = 1200 # bits per second
self.mark = 1200
self.space = 2200
self.off = 0
self.raw = numpy.array([0])
self.flagpat = None
def openwav(self, filename):
self.wav = wave.open(filename)
self.wav_channels = self.wav.getnchannels()
self.wav_width = self.wav.getsampwidth()
self.rate = self.wav.getframerate()
if False:
sys.stdout.write("file=%s chans=%d width=%d rate=%d\n" % (filename,
self.wav_channels,
self.wav_width,
self.rate))
def readwav(self):
z = self.wav.readframes(4096)
if self.wav_width == 1:
zz = numpy.fromstring(z, numpy.int8)
elif self.wav_width == 2:
zz = numpy.fromstring(z, numpy.int16)
else:
sys.stderr.write("oops wave_width %d" % (self.wav_width))
sys.exit(1)
if self.wav_channels == 1:
return zz
elif self.wav_channels == 2:
return zz[0::2] # left
else:
sys.stderr.write("oops wav_channels %d" % (self.wav_channels))
sys.exit(1)
def gotsamples(self, buf):
self.raw = numpy.append(self.raw, buf)
# slice one tone, yielding a running <0 for low and >0 for high.
# slicing level is a running midway between local min and max.
# don't use average since mark and space aren't equally popular.
# already filtered/smoothed so no point in using percentiles.
def sliceone(self, smoothed):
global slicewindow
bitsamples = int(self.rate / float(self.baud))
win = int(slicewindow * bitsamples)
# average just to the right at start of packet,
# and just to the left towards end of packet.
# by inserting sliceshift samples from the packet.
# to avoid averaging in a lot of non-packet noise.
# this is important for packets that end with a
# single flag and that are followed by lots of noise
# (happens a lot in track 02).
sliceshift = int(win/2 + bitsamples)
z = numpy.concatenate((smoothed[0:win],
smoothed[win:win+sliceshift],
smoothed[win:win+64*bitsamples],
smoothed[win+64*bitsamples:win+64*bitsamples+sliceshift],
smoothed[win+64*bitsamples:]))
if (len(z) % win) != 0:
# trim z so it's a multiple of win long.
z = z[0:-(len(z)%win)]
        zsplit = numpy.split(z, len(z)//win) # split into win-size pieces
maxes = numpy.amax(zsplit, axis=1) # max of each piece
mins = numpy.amin(zsplit, axis=1)
ii = numpy.arange(0, len(z), 1)
        ii = ii // win
maxv = maxes[ii]
minv = mins[ii]
if len(maxv) < len(smoothed):
maxv = numpy.append(maxv, maxv[0:(len(smoothed)-len(maxv))])
minv = numpy.append(minv, minv[0:(len(smoothed)-len(minv))])
elif len(maxv) > len(smoothed):
maxv = maxv[0:len(smoothed)]
minv = minv[0:len(smoothed)]
if False:
# agc -- normalize so that min..max is -0.5..0.5
# XXX this does not help.
sliced = numpy.subtract(smoothed, minv)
sliced = numpy.divide(sliced, maxv - minv)
sliced = sliced - 0.5
return sliced
else:
midv = (maxv + minv) / 2.0
sliced = numpy.subtract(smoothed, midv)
return sliced
# correlate against a tone (1200 or 2200 Hz).
# idea from <NAME>'s Jul/Aug 2012 QEX article.
# (used to use butterworth bandpass filters of order 3
# and width 1100 hz, but the following is slightly better).
# combining the cos and sin compensates for the fact that
# the phase of the received tone isn't known.
def corr(self, samples, tone):
global smoothwindow
win = int(smoothwindow * self.rate / self.baud)
xsin = weakutil.sintone(self.rate, tone, len(samples))
xcos = weakutil.costone(self.rate, tone, len(samples))
c = numpy.sqrt(numpy.add(numpy.square(smooth(xsin * samples, win)),
numpy.square(smooth(xcos * samples, win))))
return c
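# In other words, corr() computes a smoothed quadrature envelope for the tone,
#   env = sqrt(smooth(x*sin(2*pi*f*t))**2 + smooth(x*cos(2*pi*f*t))**2),
# which is insensitive to the unknown phase of the received tone.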
# correlate, slice, generate +/- for each sample.
# also returns raw correlations, for SNR.
def slice(self, samples):
markcorr = self.corr(samples, self.mark)
spacecorr = self.corr(samples, self.space)
m1 = self.sliceone(markcorr)
s1 = self.sliceone(spacecorr)
sliced = numpy.subtract(m1, s1)
return [ sliced, markcorr, spacecorr ]
def process(self, eof):
global tonegain, advance
bitsamples = self.rate / float(self.baud)
flagsamples = bitsamples * 9 # HDLC 01111110 flag (9 b/c NRZI)
        maxpacket = 340 * 8 * bitsamples # guess at the number of samples in the longest possible packet
if self.raw.size < maxpacket and eof == False:
return
# set up to try multiple emphasis setups.
sliced = [ ]
# no change in emphasis; best when receiver doesn't de-emph,
# but not right for senders that pre-emph.
[ sl, markcorr, spacecorr ] = self.slice(self.raw)
sliced.append( sl )
# de-emphasize, for a receiver that doesn't de-emph,
# but senders that do.
# doesn't seem to help for track 01...
# [ sl, markcorr, spacecorr ] = self.slice(deemphasize(self.raw, self.rate))
# sliced.append(sl)
while self.raw.size >= maxpacket or (eof and self.raw.size > 20*bitsamples):
# sliced[0] is a candidate for start of packet,
# i.e. first sample of first flag.
bestok = 0
bestmsg = None
bestnsymbols = 0
beststart = 0
bestsnr = 0
for sl in sliced:
[ ok, msg, nsymbols, start, snr ] = self.process1(sl, markcorr, spacecorr)
if ok > bestok:
bestok = ok
bestmsg = msg
bestnsymbols = nsymbols
beststart = self.off + start
bestsnr = snr
if bestok > 0 and self.callback:
# compute space-to-mark tone strength ratio, to help understand emphasis.
# space is 2200 hz, mark is 1200 hz.
start = beststart - self.off
#indices = numpy.arange(0, bestnsymbols*bitsamples, bitsamples)
#indices = indices + (start + 0.5*bitsamples)
#indices = numpy.rint(indices).astype(int)
#rawsymbols = sliced[0][indices]
#rawmark = markcorr[indices]
#rawspace = spacecorr[indices]
#meanmark = numpy.mean(numpy.where(rawsymbols > 0, rawmark, 0))
#meanspace = numpy.mean(numpy.where(rawsymbols <= 0, rawspace, 0))
#ratio = meanspace / meanmark
ratio = 1.0
self.callback(bestok, bestmsg, beststart, ratio, snr)
sys.stdout.flush()
if bestok == 2:
trim = int(bestnsymbols * bitsamples) # skip packet
else:
trim = int(advance * bitsamples) # skip a few symbols
self.off += trim
self.raw = self.raw[trim:]
markcorr = markcorr[trim:] # for SNR
spacecorr = spacecorr[trim:] # for SNR
for i in range(0, len(sliced)):
sliced[i] = sliced[i][trim:]
# does a packet start at sliced[0:] ?
# sliced[] likely has far more samples than needed.
# returns [ ok, msg, nsymbols, flagstart, snr ]
# flag starts at sliced[flagstart].
def process1(self, sliced, markcorr, spacecorr):
global advance
bitsamples = self.rate / float(self.baud)
flagsamples = bitsamples * 9 # HDLC 01111110 flag (9 b/c NRZI)
ff = self.findflag(sliced[0:int(round(flagsamples+advance*bitsamples+2))])
if ff != None:
indices = numpy.arange(0, len(sliced) - (ff+2*bitsamples), bitsamples)
indices = indices + (ff + 0.5*bitsamples)
indices = numpy.rint(indices).astype(int)
rawsymbols = sliced[indices]
symbols = numpy.where(rawsymbols > 0, 1, -1)
[ ok, msg, nsymbols ] = self.finishframe(symbols[8:])
if ok >= 1:
# SNR
sigsum = 0.0
sigcount = 0
noisesum = 0.0
noisecount = 0
# indices into sliced/markcorr/spacecorr for the center of each
# symbol in the packet, including starting flag.
indices1 = indices[0:nsymbols+8]
# indices into markcorr/spacecorr for mark symbols.
marki = indices1[
|
numpy.nonzero(sliced[indices1] > 0)
|
numpy.nonzero
|
#!/usr/bin/env python
import argparse
import logging
import os
import cv2
import mxnet as mx
import numpy as np
from lightened_moon import lightened_moon_feature
ctx = mx.gpu(0)
def main():
_, model_args, model_auxs = mx.model.load_checkpoint(args.model_prefix, args.epoch)
symbol = lightened_moon_feature()
cnt = correct_cnt = 0
with open(args.test_list, 'r') as f:
label = np.ones(40)
pred =
|
np.ones(40)
|
numpy.ones
|
import os
import subprocess
import multiprocessing
import json
import numpy as np
import random
num_gpus = 1
#COD CIFS from materials science journals without H (Richard specified no H)
CIFS_DIR = r"\\flexo.ads.warwick.ac.uk\shared41\Microscopy\Jeffrey-Ede\crystal_structures\standardized_inorganic_no_H"
cif_filepaths = [r"Z:\Jeffrey-Ede\crystal_structures\standardized_inorganic_no_H\666.cif"]
PARENT_DIR = r"\\flexo.ads.warwick.ac.uk\shared41\Microscopy\Jeffrey-Ede\models\wavefunctions"
default_json_filepath = os.path.join(PARENT_DIR, "default.json")
failed_file_filepath = os.path.join(PARENT_DIR, "failed_files.txt")
EXE_DIR = r"\\flexo.ads.warwick.ac.uk\shared41\Microscopy\Jeffrey-Ede\models\wavefunctions\clTEM_files"
exe_filepath = os.path.join(EXE_DIR, "clTEM_cmd.exe")
OUTPUT_DIR = os.path.join(PARENT_DIR, "output_single")
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
NUM_REPEATS = 5000
CONFIG_DIR = os.path.join(PARENT_DIR, "temp_single")
config_filepath = os.path.join(CONFIG_DIR, "current_config_single.json")
if not os.path.exists(CONFIG_DIR):
os.makedirs(CONFIG_DIR)
with open(default_json_filepath, "r") as f:
default_config = json.load(f)
failed_paths = []
with open(failed_file_filepath, 'r') as ff:
lines = ff.readlines()
for l in lines:
failed_paths.append(l.rstrip())
# print(default_config)
def random_config():
"""Change default configuration to random configuration."""
config = default_config.copy()
# Things to randomise
# Voltage (use some presets)
# aperture size
# convergence
# defocus spread
voltages = [300, 200, 80]
config["microscope"]["voltage"] = random.choice(voltages)
config["microscope"]["aperture"] = np.random.uniform(5, 30)
config["microscope"]["delta"] = np.random.uniform(0, 20)
config["microscope"]["alpha"] = np.random.uniform(0.1, 2)
# aberrations
config["microscope"]["aberrations"]["C10"]["val"] = np.random.uniform(-30, 30)
config["microscope"]["aberrations"]["C12"]["mag"] = np.random.uniform(-50, 50)
config["microscope"]["aberrations"]["C12"]["ang"] = np.random.uniform(0, 180)
config["microscope"]["aberrations"]["C21"]["mag"] = np.random.uniform(-1000, 1000)
config["microscope"]["aberrations"]["C21"]["ang"] = np.random.uniform(0, 180)
config["microscope"]["aberrations"]["C23"]["mag"] = np.random.uniform(-1000, 1000)
config["microscope"]["aberrations"]["C23"]["ang"] = np.random.uniform(0, 180)
config["microscope"]["aberrations"]["C30"]["val"] = np.random.uniform(-500, 500)
return config
def do_sim(cif_filepath):
#
# This is a real bodge to match the device to the thread....
#
device = 1 #int(multiprocessing.current_process().name[-1]) - 1
device_string = "0:%s" % device
if cif_filepath in failed_paths:
return
out_paths = []
for repetition in range(NUM_REPEATS):
cif_name = os.path.splitext(os.path.basename(cif_filepath))[0]
out_filepath = os.path.join(OUTPUT_DIR, cif_name)
out_repeat_filepath = os.path.join(out_filepath, str(repetition))
if os.path.exists(os.path.join(out_repeat_filepath, 'Image.tif')):
continue # get out this loop as we already have data here
out_paths.append(out_repeat_filepath)
if len(out_paths) == 0:
return
print("\n\nSimulating on device:" + device_string + " using file: " + cif_filepath)
#print("\n\n\n")
# for repetition in range(NUM_REPEATS): # Number of times to go through CIFs
# #
# # Create output folder
# #
# # make a folder for each cif
# cif_name = os.path.splitext(os.path.basename(cif_filepath))[0]
# out_filepath = os.path.join(OUTPUT_DIR, cif_name)
# # make a folder for each repetition
# out_repeat_filepath = os.path.join(out_filepath, str(repetition))
# # while os.path.exists(out_repeat_filepath):
# # counter += 1
# # out_repeat_filepath = os.path.join(out_filepath, str(counter))
# if os.path.exists(os.path.join(out_repeat_filepath, 'Image.tif')):
# continue # get out this loop as we already have data here
for out_path in out_paths:
try:
if not os.path.exists(out_path):
os.makedirs(out_path)
#
# Randomise the simulation parameters
#
# Save random configuration
config = random_config()
with open(config_filepath, "w") as f:
json.dump(config, f)
#
# Randomise the structure inputs
#
# randomise the cell depth (between 5 nm and 100 nm)
cell_depth = np.random.uniform(50, 1000)
cell_widths = np.random.uniform(50, 100)
cell_string = "%s,%s,%s" % (cell_widths, cell_widths, cell_depth)
# randomise the zone axis (only up to 2)
zone_h =
|
np.random.randint(0, 3)
|
numpy.random.randint
|
import itertools
import numpy as np
from copy import deepcopy
from dcptree.analysis import to_group_data, groups_to_group_data
from dcptree.data import check_data
from dcptree.group_helper import check_groups
from scipy.stats import binom
def exact_mcn_test(y, yhat1, yhat2, two_sided = False):
"""
    :param y: true labels
    :param yhat1: predictions of the first classifier
    :param yhat2: predictions of the second classifier
    :param two_sided: if True, use the two-sided version of the test
    :return: (p-value, test statistic) of the exact McNemar test
"""
f1_correct = np.equal(y, yhat1)
f2_correct = np.equal(y, yhat2)
table = np.zeros(shape = (2, 2))
for i in range(2):
for j in range(2):
table[i, j] = np.sum((f1_correct == i) & (f2_correct == j))
b = table[0, 1] #f1 wrong and f2 right
c = table[1, 0] #f1 right and f2 wrong
n = b + c
# envy-freeness requires that
# f1 is correct more often than f2 <=> b < c
#
# We test
#
# H0: error(f1) = error(f2)
# H1: error(f1) > error(f2)
#
    # Under H0 this assumes b ~ Bin(n = b + c, p = 0.5)
if two_sided:
test_statistic = min(b, c)
p = 2.0 * binom.cdf(k = min(b, c), n = b + c, p = 0.5)
else:
test_statistic = c
p = binom.cdf(k = test_statistic, n = n, p = 0.5)
return p, test_statistic
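# Illustrative use of exact_mcn_test (toy labels, not from any dataset): compare
# two prediction vectors against the same ground truth; a small p-value favors
# H1: error(f1) > error(f2).
#
#   y = np.array([1, 0, 1, 1, 0, 1])
#   yhat1 = np.array([1, 0, 1, 0, 0, 1])
#   yhat2 = np.array([1, 1, 1, 0, 0, 0])
#   p, stat = exact_mcn_test(y, yhat1, yhat2)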
def check_model_assignment(groups_to_models, groups, models):
assert isinstance(groups_to_models, dict)
assert isinstance(models, list)
splits = [[(k, l) for l in v['labels']] for k, v in groups.items()]
splits = set(itertools.product(*splits))
    assert set(groups_to_models.keys()).issubset(splits), 'mapper should map every group in the data'
model_indices = list(range(len(models)))
assignment_indices = np.array(list(groups_to_models.values()))
assert np.array_equal(np.unique(assignment_indices), model_indices), 'every model should cover at least one group'
return True
def build_model_assignment_map(p):
groups_to_models = {}
group_labels, group_values = groups_to_group_data(p.groups, stat_field = 'train')
split_values = np.unique(group_values, axis = 0)
for vals in split_values:
s = tuple([(g, z) for g, z in zip(group_labels, vals)])
vals = vals[:, None].transpose()
n_matches = 0
for i, l in enumerate(p.leaves):
if l.contains(group_labels, vals):
groups_to_models[s] = i
n_matches += 1
assert n_matches == 1
assert check_model_assignment(groups_to_models, p.groups, models = p.predictors)
return groups_to_models
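# The resulting mapping is keyed by tuples of (group attribute, label) pairs and
# stores an index into p.predictors, e.g. (hypothetical attributes and labels):
#
#   {(('sex', 'female'), ('age', 'old')): 0,
#    (('sex', 'female'), ('age', 'young')): 1,
#    ...}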
class DecoupledClassifierSet(object):
def __init__(self, data, groups, pooled_model, decoupled_models, groups_to_models):
# check inputs
assert check_data(data, ready_for_training = True)
assert check_groups(groups, data)
# initialize data
self._data = {
'X': np.array(data['X']),
'Y': np.array(data['Y']),
'variable_names': list(data['variable_names'])
}
self._groups = deepcopy(groups)
self._pooled_model = pooled_model
self._decoupled_models = decoupled_models
group_names, group_values = groups_to_group_data(groups)
training_values = np.unique(group_values, axis = 0).tolist()
training_splits = [tuple(zip(group_names, v)) for v in training_values]
assert isinstance(groups_to_models, dict)
        assert set(training_splits) == set(groups_to_models.keys()), 'mapper should map every group in the training data'
assignment_idx = np.array(list(groups_to_models.values()))
assert np.array_equal(np.unique(assignment_idx), np.arange(len(self))), 'every model should cover at least one group'
models_to_groups = {k:[] for k in range(len(self))}
for group_tuple, model_index in groups_to_models.items():
group_value = [s[1] for s in group_tuple]
assert len(group_value) == len(group_names)
models_to_groups[model_index].append(group_value)
self._splits = training_splits
self.groups_to_models = groups_to_models
self.models_to_groups = models_to_groups
def __len__(self):
return len(self._decoupled_models)
def __repr__(self):
info = [
'DecoupledClassifierSet',
'# group attributes: %d' % len(self._groups),
'# groups: %d' % len(self.groups_to_models),
'# models: %d' % len(self._decoupled_models),
]
info = info + [', '.join(s) for s in self.split_names]
return '\n'.join(info)
@property
def data(self):
return self._data
@property
def groups(self):
return self._groups
@property
def split_names(self):
return [['%s = %s' % (a, b) for (a, b) in s] for s in self._splits]
@property
def pooled_model(self):
return self._pooled_model
@pooled_model.setter
def pooled_model(self, clf):
assert callable(clf)
self._pooled_model = clf
@property
def decoupled_models(self):
return [clf.predict for clf in self._decoupled_models]
@decoupled_models.setter
def decoupled_models(self, clf_set):
assert len(clf_set) >= 2
assert all([callable(clf) for clf in clf_set])
self._decoupled_models = clf_set
def assigned_indices(self, model_index, group_names, group_values):
assignment_idx = np.repeat(False, group_values.shape[0])
for s in self.models_to_groups[model_index]:
assignment_idx = np.logical_or(assignment_idx, np.all(group_values == s, axis = 1))
return assignment_idx
def _parse_data_args(self, **kwargs):
"""
helper function to parse X, y, group_names, and group_values
from keyword inputs to different methods
:param kwargs:
:return:
"""
if len(kwargs) == 0:
return to_group_data(data = self.data, groups = self.groups, stat_field = 'train')
elif set(['X', 'y', 'group_names', 'group_values']).issubset(kwargs):
return kwargs['X'], kwargs['y'], kwargs['group_names'], kwargs['group_values']
elif set(['data', 'groups']).issubset(kwargs):
return to_group_data(data = kwargs['data'], groups = kwargs['groups'], stat_field = kwargs.get('stat_field') or 'train')
else:
            raise ValueError('unsupported input arguments')
def _drop_missing_splits(self, splits, group_values):
new = []
for s in splits:
group_labels = [t[1] for t in s]
if np.all(group_values == group_labels, axis = 1).any():
new.append(s)
return new
def predict(self, X, group_names, group_values):
"""
        predict by dispatching each sample to the decoupled model assigned to its group
:param X:
:param group_names:
:param group_values:
:return:
"""
yhat =
|
np.repeat(np.nan, X.shape[0])
|
numpy.repeat
|
# coding: utf8
""" Unit tests:
- :class:`TestMultivariateJacobiOPE` check correct implementation of the corresponding class.
"""
import unittest
import numpy as np
from scipy.integrate import quad
from scipy.special import eval_jacobi
import sys
sys.path.append('..')
from dppy.multivariate_jacobi_ope import (MultivariateJacobiOPE,
compute_ordering_BaHa16,
compute_Gautschi_bounds)
from dppy.utils import inner1d, check_random_state
class TestMultivariateJacobiOPE(unittest.TestCase):
"""
"""
seed = 0
def test_ordering(self):
"""Make sure the ordering of multi-indices respects the one prescirbed by :cite:`BaHa16` Section 2.1.3
"""
ord_d2_N16 = [(0, 0),
(0, 1), (1, 0), (1, 1),
(0, 2), (1, 2), (2, 0), (2, 1), (2, 2),
(0, 3), (1, 3), (2, 3), (3, 0), (3, 1), (3, 2), (3, 3)]
ord_d3_N27 = [(0, 0, 0),
(0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1),
(0, 0, 2), (0, 1, 2), (0, 2, 0), (0, 2, 1), (0, 2, 2), (1, 0, 2), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)]
orderings = [ord_d2_N16, ord_d3_N27]
for idx, ord_to_check in enumerate(orderings):
N, d = len(ord_to_check), len(ord_to_check[0])
            self.assertEqual(list(map(tuple, compute_ordering_BaHa16(N, d))), ord_to_check)
def test_square_norms(self):
N = 100
dims = np.arange(2, 5)
max_deg = 50 # to avoid quad warning in dimension 1
for d in dims:
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
pol_2_eval = dpp.poly_1D_degrees[:max_deg]
quad_square_norms =\
[[quad(lambda x:
(1-x)**a * (1+x)**b * eval_jacobi(n, a, b, x)**2,
-1, 1)[0]
for n, a, b in zip(deg,
dpp.jacobi_params[:, 0],
dpp.jacobi_params[:, 1])]
for deg in pol_2_eval]
self.assertTrue(np.allclose(
dpp.poly_1D_square_norms[pol_2_eval,
range(dpp.dim)],
quad_square_norms))
def test_Gautschi_bounds(self):
"""Test if bounds computed w/wo log scale coincide"""
N = 100
dims = np.arange(2, 5)
for d in dims:
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
with_log_scale = compute_Gautschi_bounds(dpp.jacobi_params,
dpp.ordering,
log_scale=True)
without_log_scale = compute_Gautschi_bounds(dpp.jacobi_params,
dpp.ordering,
log_scale=False)
self.assertTrue(np.allclose(with_log_scale, without_log_scale))
def test_kernel_symmetry(self):
"""
K(x) == K(x, x)
K(x, y) == K(y, x)
K(x, Y) == K(Y, x) = [K(x, y) for y in Y]
K(X) == [K(x, x) for x in X]
K(X, Y) == [K(x, y) for x, y in zip(X, Y)]
"""
N = 100
dims = np.arange(2, 5)
for d in dims:
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
x, y = np.random.rand(d), np.random.rand(d)
X, Y = np.random.rand(5, d),
|
np.random.rand(5, d)
|
numpy.random.rand
|
# Examine the effect of each PC after removing one while keeping the others
import numpy as np
from scipy import stats
from ATT.iofunc import iofiles
from cnntools import cnntools
from torchvision import models
from torch import nn
import torch
import pandas as pd
from ATT.algorithm import tools
from sklearn.decomposition import PCA
cnn_model = models.alexnet(pretrained=False)
cnn_model.features = torch.nn.DataParallel(cnn_model.features)
cnn_model.cuda()
checkpoint = torch.load('/home/user/working_dir/liulab_server_bnuold/models/DNNmodel_param/alexnet_shapebiased.pth.tar')
cnn_model.load_state_dict(checkpoint["state_dict"])
sizerank_pd = pd.read_csv('/home/user/working_dir/liulab_server_bnuold/data/PhysicalSize/Real_SizeRanks8.csv')
sizerank_pd = sizerank_pd.sort_values('name')
ranklabel = sizerank_pd['real_sizerank'].unique()
ranklabel.sort()
imgnames, actval = cnntools.extract_activation(cnn_model, '/home/user/working_dir/liulab_server_bnuold/data/PhysicalSize/ObjectSize/SizeDataset_2021/Object100_origin', layer_loc=('features', 'module', '8'), batch_size=1, isgpu=True, keeporig=True)
actval = actval.reshape(*actval.shape[:2], -1).mean(axis=-1)
actval = actval/np.tile(np.linalg.norm(actval,axis=-1), (actval.shape[-1],1)).T
iopkl = iofiles.make_ioinstance('/home/user/working_dir/liulab_server_bnuold/models/pca_imgnetval_conv4_alexnetshape.pkl')
pcamodel = iopkl.load()
pcacomp = np.dot(actval, np.linalg.inv(pcamodel.components_))
# Template
real_temp = np.zeros((8,8))
for i in range(8):
for j in range(8):
real_temp[i,j] = 1-np.abs(i-j)/8
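# real_temp is a similarity template that falls off linearly with the
# difference between size ranks: 1 on the diagonal, 1 - |i - j|/8 off it.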
avg_ranksize = []
for lbl in ranklabel:
avg_ranksize.append(actval[sizerank_pd['real_sizerank']==lbl].mean(axis=0))
avg_ranksize = np.array(avg_ranksize)
r_obj, _ = tools.pearsonr(avg_ranksize, avg_ranksize)
r_realsize_baseline, _ = stats.pearsonr(r_obj[
|
np.triu_indices(8,1)
|
numpy.triu_indices
|
__doc__ = """Tests for rod initialisation module"""
import numpy as np
from numpy.testing import assert_allclose
from elastica.utils import MaxDimension, Tolerance
import pytest
import sys
from elastica.rod.data_structures import _RodSymplecticStepperMixin
from elastica.rod.factory_function import allocate
class MockRodForTest(_RodSymplecticStepperMixin):
def __init__(
self,
n_elements,
_vector_states,
_matrix_states,
radius,
mass_second_moment_of_inertia,
inv_mass_second_moment_of_inertia,
shear_matrix,
bend_matrix,
density,
volume,
mass,
dissipation_constant_for_forces,
dissipation_constant_for_torques,
internal_forces,
internal_torques,
external_forces,
external_torques,
lengths,
rest_lengths,
tangents,
dilatation,
dilatation_rate,
voronoi_dilatation,
rest_voronoi_lengths,
sigma,
kappa,
rest_sigma,
rest_kappa,
internal_stress,
internal_couple,
damping_forces,
damping_torques,
):
self.n_elems = n_elements
self._vector_states = _vector_states
self._matrix_states = _matrix_states
self.radius = radius
self.mass_second_moment_of_inertia = mass_second_moment_of_inertia
self.inv_mass_second_moment_of_inertia = inv_mass_second_moment_of_inertia
self.shear_matrix = shear_matrix
self.bend_matrix = bend_matrix
self.density = density
self.volume = volume
self.mass = mass
self.dissipation_constant_for_forces = dissipation_constant_for_forces
self.dissipation_constant_for_torques = dissipation_constant_for_torques
self.internal_forces = internal_forces
self.internal_torques = internal_torques
self.external_forces = external_forces
self.external_torques = external_torques
self.lengths = lengths
self.rest_lengths = rest_lengths
self.tangents = tangents
self.dilatation = dilatation
self.dilatation_rate = dilatation_rate
self.voronoi_dilatation = voronoi_dilatation
self.rest_voronoi_lengths = rest_voronoi_lengths
self.sigma = sigma
self.kappa = kappa
self.rest_sigma = rest_sigma
self.rest_kappa = rest_kappa
self.internal_stress = internal_stress
self.internal_couple = internal_couple
self.damping_forces = damping_forces
self.damping_torques = damping_torques
_RodSymplecticStepperMixin.__init__(self)
@classmethod
def straight_rod(
cls,
n_elements,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
# poisson_ratio,
*args,
**kwargs
):
(
n_elements,
_vector_states,
_matrix_states,
radius,
mass_second_moment_of_inertia,
inv_mass_second_moment_of_inertia,
shear_matrix,
bend_matrix,
density,
volume,
mass,
dissipation_constant_for_forces,
dissipation_constant_for_torques,
internal_forces,
internal_torques,
external_forces,
external_torques,
lengths,
rest_lengths,
tangents,
dilatation,
dilatation_rate,
voronoi_dilatation,
rest_voronoi_lengths,
sigma,
kappa,
rest_sigma,
rest_kappa,
internal_stress,
internal_couple,
damping_forces,
damping_torques,
) = allocate(
n_elements,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
# poisson_ratio,
alpha_c=4.0 / 3.0,
*args,
**kwargs
)
return cls(
n_elements,
_vector_states,
_matrix_states,
radius,
mass_second_moment_of_inertia,
inv_mass_second_moment_of_inertia,
shear_matrix,
bend_matrix,
density,
volume,
mass,
dissipation_constant_for_forces,
dissipation_constant_for_torques,
internal_forces,
internal_torques,
external_forces,
external_torques,
lengths,
rest_lengths,
tangents,
dilatation,
dilatation_rate,
voronoi_dilatation,
rest_voronoi_lengths,
sigma,
kappa,
rest_sigma,
rest_kappa,
internal_stress,
internal_couple,
damping_forces,
damping_torques,
)
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_input_and_output_position_array(n_elems):
"""
    This test checks that, when the input position array is valid,
    allocate sets the input position as the rod position array.
Parameters
----------
n_elems
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
# Check if the input position vector and output position vector are valid and same
correct_position = np.zeros((3, n_elems + 1))
correct_position[0] = np.random.randn(n_elems + 1)
correct_position[1] = np.random.randn(n_elems + 1)
correct_position[..., 0] = start
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
position=correct_position,
)
test_position = mockrod.position_collection
assert_allclose(correct_position, test_position, atol=Tolerance.atol())
@pytest.mark.xfail(raises=AssertionError)
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_input_and_position_array_for_different_start(n_elems):
"""
    This test checks the failure case in which the first element of the
    input position array is not the user-defined start position.
Parameters
----------
n_elems
Returns
-------
"""
start = np.random.randn(3)
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check if the input position vector start position is different than the user defined start position
correct_position = np.random.randn(3, n_elems + 1)
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
position=correct_position,
)
test_position = mockrod.position_collection
assert_allclose(correct_position, test_position, atol=Tolerance.atol())
def test_compute_position_array_using_user_inputs():
"""
    This test checks that the allocate function correctly computes the
    position vector from the start, direction and base length inputs.
Returns
-------
"""
n_elems = 4
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check if without input position vector, output position vector is valid
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
)
correct_position = np.zeros((3, n_elems + 1))
correct_position[0, :] = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
test_position = mockrod.position_collection
assert_allclose(correct_position, test_position, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_compute_directors_matrix_using_user_inputs(n_elems):
"""
This test checks the director array created by the allocate function. For this
test case we use the user-defined direction and normal to compute the directors.
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check directors: if we don't input any directors, the computed ones should be valid
correct_directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elems))
binormal = np.cross(direction, normal)
tangent_collection = np.repeat(direction[:, np.newaxis], n_elems, axis=1)
normal_collection = np.repeat(normal[:, np.newaxis], n_elems, axis=1)
binormal_collection = np.repeat(binormal[:, np.newaxis], n_elems, axis=1)
correct_directors[0, ...] = normal_collection
correct_directors[1, ...] = binormal_collection
correct_directors[2, ...] = tangent_collection
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
)
test_directors = mockrod.director_collection
assert_allclose(correct_directors, test_directors, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_directors_using_input_position_array(n_elems):
"""
This test covers the case in which the directors are computed
using the input position array and the user-defined normal.
Parameters
----------
n_elems
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check directors: give position as input and let the allocate function compute the directors.
input_position = np.zeros((3, n_elems + 1))
input_position[0, :] = np.linspace(start[0], start[0] + base_length, n_elems + 1)
correct_directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elems))
binormal = np.cross(direction, normal)
tangent_collection = np.repeat(direction[:, np.newaxis], n_elems, axis=1)
normal_collection = np.repeat(normal[:, np.newaxis], n_elems, axis=1)
binormal_collection = np.repeat(binormal[:, np.newaxis], n_elems, axis=1)
correct_directors[0, ...] = normal_collection
correct_directors[1, ...] = binormal_collection
correct_directors[2, ...] = tangent_collection
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
position=input_position,
)
test_directors = mockrod.director_collection
assert_allclose(correct_directors, test_directors, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_directors_using_input_directory_array(n_elems):
"""
This test is testing the case for which directors are given as user input.
Parameters
----------
n_elems
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
angle = np.random.uniform(0, 2 * np.pi)
normal = np.array([0.0, np.cos(angle), np.sin(angle)])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check directors: give position and directors as input and verify the directors are used as-is.
input_position = np.zeros((3, n_elems + 1))
input_position[0, :] = np.linspace(start[0], start[0] + base_length, n_elems + 1)
correct_directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elems))
binormal = np.cross(direction, normal)
tangent_collection = np.repeat(direction[:, np.newaxis], n_elems, axis=1)
normal_collection = np.repeat(normal[:, np.newaxis], n_elems, axis=1)
binormal_collection = np.repeat(binormal[:, np.newaxis], n_elems, axis=1)
correct_directors[0, ...] = normal_collection
correct_directors[1, ...] = binormal_collection
correct_directors[2, ...] = tangent_collection
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
position=input_position,
directors=correct_directors,
)
test_directors = mockrod.director_collection
assert_allclose(correct_directors, test_directors, atol=Tolerance.atol())
@pytest.mark.xfail(raises=AssertionError)
def test_director_if_d3_cross_d2_notequal_to_d1():
"""
This test checks the case in which d3 x d2 is not equal
to d1, which should raise an AssertionError.
Returns
-------
"""
n_elems = 10
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check directors, give directors as input and check their validity.
# Let the assertion fail by setting d3=d2 for the input director
input_directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elems))
binormal = np.cross(direction, normal)
normal_collection = np.repeat(normal[:, np.newaxis], n_elems, axis=1)
binormal_collection = np.repeat(binormal[:, np.newaxis], n_elems, axis=1)
input_directors[0, ...] = normal_collection
input_directors[1, ...] = binormal_collection
input_directors[2, ...] = binormal_collection
MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
directors=input_directors,
)
@pytest.mark.xfail(raises=AssertionError)
def test_director_if_tangent_and_d3_are_not_same():
"""
This test checks the case in which the tangent and the d3 of the directors
are not equal to each other.
Returns
-------
"""
n_elems = 10
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
position = np.zeros((3, n_elems + 1))
end = start + direction * base_length
for i in range(0, 3):
position[i, ...] = np.linspace(start[i], end[i], n_elems + 1)
# Set the directors such that the tangent and d3 are not the same.
input_directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elems))
binormal = np.cross(direction, normal)
normal_collection = np.repeat(binormal[:, np.newaxis], n_elems, axis=1)
binormal_collection = np.repeat(normal[:, np.newaxis], n_elems, axis=1)
new_direction = np.cross(binormal, normal)
direction_collection = np.repeat(new_direction[:, np.newaxis], n_elems, axis=1)
input_directors[0, ...] = normal_collection
input_directors[1, ...] = binormal_collection
input_directors[2, ...] = direction_collection
MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
position=position,
directors=input_directors,
)
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_compute_radius_using_base_radius(n_elems):
"""
This test checks that the user-defined base radius
is used to generate the radius array.
Parameters
----------
n_elems
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
)
correct_radius = base_radius * np.ones((n_elems))
test_radius = mockrod.radius
assert_allclose(correct_radius, test_radius, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_radius_using_user_defined_radius(n_elems):
"""
This test checks that a user-defined radius array is validated
and allocated to the rod's radius array correctly.
Parameters
----------
n_elems
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction =
|
np.array([1.0, 0.0, 0.0])
|
numpy.array
|
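The tests in the prompt above repeatedly assemble the expected director frame from a direction and a normal. A minimal standalone sketch of that construction, assuming the d1 = normal, d2 = binormal, d3 = tangent ordering used there (plain NumPy only; MockRodForTest and the allocate internals are not reproduced):

import numpy as np

direction = np.array([1.0, 0.0, 0.0])   # d3 / tangent
normal = np.array([0.0, 0.0, 1.0])      # d1
binormal = np.cross(direction, normal)  # d2

n_elems = 4
directors = np.zeros((3, 3, n_elems))
directors[0, ...] = normal[:, np.newaxis]     # broadcast the same frame over all elements
directors[1, ...] = binormal[:, np.newaxis]
directors[2, ...] = direction[:, np.newaxis]

# The frame must be right-handed in this ordering: d1 x d2 == d3
assert np.allclose(np.cross(directors[0, :, 0], directors[1, :, 0]), directors[2, :, 0])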
"""Control system for tracking objects with a telescope.
The classes in this module implement the core control system algorithms.
"""
from datetime import datetime
import time
from enum import Flag, auto
from typing import Callable, NamedTuple, Tuple, Optional, Union
import numpy as np
from scipy.optimize import minimize
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle, UnitSphericalRepresentation
from astropy.time import Time, TimeDelta
from influxdb_client import Point
from track.model import MountModel
from track.mounts import TelescopeMount, MountEncoderPositions
from track.targets import Target
from track.telem import TelemLogger
def separation(sc1: SkyCoord, sc2: SkyCoord) -> Angle:
"""Calculate the on-sky separation angle between two coordinates.
This is equivalent to `SkyCoord.separation()` but is much faster because it uses the haversine
formula rather than the iterative Vincenty formula. We don't need to handle edge cases like
antipodal points on the sphere but we do need fast execution. This approach was profiled as
~70 times faster than `SkyCoord.separation()`.
Formula reference: https://en.wikipedia.org/wiki/Great-circle_distance
Args:
sc1: One of the coordinates.
sc2: The other coordinate.
Returns:
The separation between sc1 and sc2.
"""
us1 = sc1.represent_as(UnitSphericalRepresentation)
us2 = sc2.represent_as(UnitSphericalRepresentation)
lat_diff = us1.lat.rad - us2.lat.rad
lon_diff = us1.lon.rad - us2.lon.rad
return Angle(
2 * np.arcsin(np.sqrt(
np.sin(lat_diff / 2)**2
+ np.cos(us1.lat.rad)*
|
np.cos(us2.lat.rad)
|
numpy.cos
|
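The separation() prompt above relies on the haversine great-circle formula; a minimal scalar sketch in plain NumPy, without the astropy coordinate objects:

import numpy as np

def haversine_separation(lat1, lon1, lat2, lon2):
    """Great-circle separation in radians between two points given in radians."""
    dlat = lat1 - lat2
    dlon = lon1 - lon2
    return 2.0 * np.arcsin(np.sqrt(
        np.sin(dlat / 2.0) ** 2
        + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2
    ))

# Two points 90 degrees apart along the equator:
print(np.degrees(haversine_separation(0.0, 0.0, 0.0, np.pi / 2)))  # ~90.0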
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import itertools
import pickle
import logging
from typing import Tuple, List, Any, Iterator
import numpy as np
import boto3
from nums.core.storage.utils import Batch
class ArrayGrid(object):
# TODO (hme): Move to array module.
@classmethod
def from_meta(cls, d: dict):
return cls(**d)
def __init__(self, shape: Tuple, block_shape: Tuple, dtype: str):
self.shape = tuple(shape)
self.block_shape = tuple(np.min([shape, block_shape], axis=0))
self.dtype = dict if dtype == "dict" else getattr(np, dtype)
self.grid_shape = []
self.grid_slices = []
for i in range(len(self.shape)):
dim = self.shape[i]
block_dim = block_shape[i]
if dim == 0:
# Special case of empty array.
axis_slices = []
else:
axis_slices = Batch(dim, block_dim).batches
self.grid_slices.append(axis_slices)
self.grid_shape.append(len(axis_slices))
self.grid_shape = tuple(self.grid_shape)
def to_meta(self) -> dict:
return {
"shape": self.shape,
"block_shape": self.block_shape,
"dtype": self.dtype.__name__
}
def copy(self):
return self.from_meta(self.to_meta())
def get_entry_iterator(self) -> Iterator[Tuple]:
if 0 in self.shape:
return []
return itertools.product(*map(range, self.grid_shape))
def get_slice(self, grid_entry):
slices = []
for axis, slice_index in enumerate(grid_entry):
slices.append(slice(*self.grid_slices[axis][slice_index]))
return tuple(slices)
def get_slice_tuples(self, grid_entry: Tuple) -> List[Tuple[slice]]:
slice_tuples = []
for axis, slice_index in enumerate(grid_entry):
slice_tuples.append(tuple(self.grid_slices[axis][slice_index]))
return slice_tuples
def get_block_shape(self, grid_entry: Tuple):
slice_tuples = self.get_slice_tuples(grid_entry)
block_shape = []
for slice_tuple in slice_tuples:
block_shape.append(slice_tuple[1] - slice_tuple[0])
return tuple(block_shape)
class StoredArray(object):
# TODO (hme): This is no longer a useful abstraction.
def __init__(self, filename: str, grid: ArrayGrid):
self.filename = filename
self.dirname, self.array_name = os.path.split(self.filename)
self.grid = grid
def init_grid(self):
self.grid = self.get_grid()
def get_key(self, grid_entry: Tuple):
index_str = "_".join(map(str, grid_entry))
return "%s_%s" % (self.array_name, index_str)
def get_meta_key(self):
return "%s_meta" % self.array_name
def put(self, grid_entry: Tuple, block: np.ndarray) -> Any:
raise NotImplementedError()
def get(self, grid_entry: Tuple) -> np.ndarray:
raise NotImplementedError()
def delete(self, grid_entry: Tuple) -> Any:
raise NotImplementedError()
def get_grid(self) -> ArrayGrid:
raise NotImplementedError()
def put_grid(self, array_grid: ArrayGrid) -> Any:
raise NotImplementedError()
def delete_grid(self) -> Any:
raise NotImplementedError()
def del_array(self) -> Any:
raise NotImplementedError()
def put_array(self, arr: np.ndarray):
grid_entry_iterator = self.grid.get_entry_iterator()
for grid_entry in grid_entry_iterator:
grid_slice = self.grid.get_slice(grid_entry)
block = arr[grid_slice]
self.put(grid_entry, block)
def get_array(self):
grid_shape = self.grid.grid_shape
result = np.zeros(shape=self.grid.shape)
iterator = list(itertools.product(*map(range, grid_shape)))
block_shape = np.array(self.grid.block_shape, dtype=int)  # np.int is deprecated; use the builtin int
for grid_entry in iterator:
start = block_shape * grid_entry
entry_shape = np.array(self.grid.get_block_shape(grid_entry), dtype=int)
end = start + entry_shape
slices = tuple(map(lambda item: slice(*item), zip(*(start, end))))
result[slices] = self.get(grid_entry)
return result
class StoredArrayS3(StoredArray):
def __init__(self, filename: str, grid: ArrayGrid = None):
self.client = boto3.client('s3')
super(StoredArrayS3, self).__init__(filename, grid)
if self.filename[0] == "/":
raise Exception("Leading / in s3 filename: %s" % filename)
fileparts = self.filename.split("/")
self.container_name = fileparts[0]
self.array_name = "/".join(fileparts[1:])
def put(self, grid_entry: Tuple, block: np.ndarray) -> Any:
block_bytes = block.tobytes()
response = self.client.put_object(
Bucket=self.container_name,
Key=self.get_key(grid_entry),
Body=block_bytes,
)
return response
def get(self, grid_entry: Tuple) -> np.ndarray:
try:
response = self.client.get_object(
Bucket=self.container_name,
Key=self.get_key(grid_entry),
)
except Exception as e:
logging.getLogger().error("[Error] StoredArrayS3: Failed to get %s %s",
self.container_name,
self.get_key(grid_entry))
raise e
block_bytes = response['Body'].read()
dtype = self.grid.dtype
shape = self.grid.get_block_shape(grid_entry)
try:
block = np.frombuffer(block_bytes, dtype=dtype).reshape(shape)
except Exception as e:
logging.getLogger().error("[Error] StoredArrayS3: Failed to read from buffer %s %s",
self.container_name,
self.get_key(grid_entry))
raise e
return block
def delete(self, grid_entry: Tuple) -> Any:
objects = [{"Key": self.get_key(grid_entry)}]
response = self.client.delete_objects(
Bucket=self.container_name,
Delete={
'Objects': objects,
},
)
return response
def delete_grid(self) -> Any:
objects = [{"Key": self.get_meta_key()}]
response = self.client.delete_objects(
Bucket=self.container_name,
Delete={
'Objects': objects,
},
)
return response
def put_grid(self, array_grid: ArrayGrid) -> Any:
self.grid = array_grid
body = pickle.dumps(self.grid.to_meta())
response = self.client.put_object(
Bucket=self.container_name,
Key=self.get_meta_key(),
Body=body,
)
return response
def get_grid(self) -> ArrayGrid:
try:
response = self.client.get_object(Bucket=self.container_name,
Key=self.get_meta_key())
meta_dict = pickle.loads(response['Body'].read())
return ArrayGrid.from_meta(meta_dict)
except Exception as _:
return None
def del_array(self):
objects = []
grid_entry_iterator = self.grid.get_entry_iterator()
for grid_entry in grid_entry_iterator:
objects.append({"Key": self.get_key(grid_entry)})
response = self.client.delete_objects(
Bucket=self.container_name,
Delete={
'Objects': objects,
},
)
return response
class BimodalGaussian(object):
@classmethod
def get_dataset(cls, n, d, p=0.9, seed=1, dtype=np.float64, theta=None):
return cls(10, 2, 30, 4, dim=d, seed=seed, dtype=dtype).sample(n, p=p, theta=theta)
def __init__(self, mu1, sigma1, mu2, sigma2, dim=2, seed=1337, dtype=np.float64):
self.dtype = dtype
self.seed = seed
self.rs = np.random.RandomState(self.seed)
self.dim = dim
self.mu1 = self.to_arr(mu1, 1)
self.sigma1 = self.to_arr(sigma1, 1)
self.mu2 = self.to_arr(mu2, 1)
self.sigma2 = self.to_arr(sigma2, 1)
def to_arr(self, sigma, num_axes):
assert num_axes == 1 or num_axes == 2
sigma_arr = sigma
if not isinstance(sigma, np.ndarray):
# Assume it's not diag.
sigma_arr = np.empty(self.dim, dtype=self.dtype)
sigma_arr[:] = sigma
if num_axes == 2:
if len(sigma_arr.shape) == 1:
sigma_arr = np.diag(sigma_arr).astype(self.dtype)
assert len(sigma_arr.shape) == 2
assert sigma_arr.shape[0] == sigma_arr.shape[1]
else:
assert len(sigma_arr.shape) == num_axes
return sigma_arr
def sample(self, n, p=0.9, theta=None):
# Larger p => more samples of first Gaussian.
# Pass theta to sample for regression.
n1 = int(n * p)
n2 = n - n1
X1 = self.rs.randn(n1, self.dim).astype(self.dtype) * self.sigma1.T + self.mu1.T
X2 = self.rs.randn(n2, self.dim).astype(self.dtype) * self.sigma2.T + self.mu2.T
if theta is None:
y1 =
|
np.ones(n1, dtype=self.dtype)
|
numpy.ones
|
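ArrayGrid above splits each axis into block-sized slices through Batch(dim, block_dim).batches. That helper is not shown in the row, so the version below is an assumed, illustrative equivalent rather than the actual nums implementation:

def axis_batches(dim, block_dim):
    # [(start, stop), ...] pairs covering 0..dim in chunks of block_dim
    edges = list(range(0, dim, block_dim)) + [dim]
    return [(edges[i], edges[i + 1]) for i in range(len(edges) - 1)]

shape, block_shape = (10, 7), (4, 3)
grid_slices = [axis_batches(d, b) for d, b in zip(shape, block_shape)]
grid_shape = tuple(len(s) for s in grid_slices)
print(grid_shape)        # (3, 3)
print(grid_slices[0])    # [(0, 4), (4, 8), (8, 10)]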
import os
import pytest
from concurrent import futures
import copy
import numpy as np
import numpy.testing as npt
from paramiko.ssh_exception import NoValidConnectionsError
from batman.tasks.sample_cache import SampleCache
from batman.tasks import (ProviderFunction, ProviderFile, ProviderJob)
from batman.input_output import formater
@pytest.fixture(scope='module')
def sample_spec():
return {
'plabels': ['X1', 'X2'],
'psizes': [1, 1],
'flabels': ['F1', 'F2', 'F3'],
'fsizes': [1, 1, 2],
'space_fname': 'sample-space.json',
'space_format': 'json',
'data_fname': 'sample-data.csv',
'data_format': 'csv',
}
def test_samplecache(tmp, sample_spec):
space_fmt = formater(sample_spec['space_format'])
data_fmt = formater(sample_spec['data_format'])
savedir = os.path.join(tmp, 'snapshots')
datadir = os.path.join(os.path.dirname(__file__), 'data', 'snapshots')
cache = SampleCache(savedir=savedir, **sample_spec)
# test init --> is empty with proper labels
assert len(cache) == 0
# test discover --> load every existing snapshots
cache.discover(os.path.join(datadir, '*'))
assert len(cache) == 9
space_file = sample_spec['space_fname']
plabels = sample_spec['plabels']
result_space = np.concatenate([
space_fmt.read(os.path.join(datadir, '1', space_file), plabels),
space_fmt.read(os.path.join(datadir, '3', space_file), plabels),
space_fmt.read(os.path.join(datadir, '5', space_file), plabels),
])
data_file = sample_spec['data_fname']
flabels = sample_spec['flabels']
result_data = np.concatenate([
data_fmt.read(os.path.join(datadir, '1', data_file), flabels),
data_fmt.read(os.path.join(datadir, '3', data_file), flabels),
data_fmt.read(os.path.join(datadir, '5', data_file), flabels),
])
npt.assert_array_equal(result_space, cache.space)
npt.assert_array_equal(result_data, cache.data)
# test save --> write to file (and reload)
cache.save()
assert os.path.isfile(os.path.join(savedir, space_file))
assert os.path.isfile(os.path.join(savedir, data_file))
result_space = space_fmt.read(os.path.join(savedir, space_file), plabels)
result_data = data_fmt.read(os.path.join(savedir, data_file), flabels)
npt.assert_array_equal(cache.space, result_space)
npt.assert_array_equal(cache.data, result_data)
cache.save(tmp)
assert os.path.isfile(os.path.join(tmp, space_file))
assert os.path.isfile(os.path.join(tmp, data_file))
# test locate --> return proper location for existing and new points
points = cache.space[:4] * np.reshape([1, -1, -1, 1], (-1, 1))
index = cache.locate(points)
npt.assert_array_equal([0, 9, 10, 3], index)
def test_provider_function(tmp, sample_spec):
space_fmt = formater(sample_spec['space_format'])
data_fmt = formater(sample_spec['data_format'])
space_file = sample_spec['space_fname']
plabels = sample_spec['plabels']
data_file = sample_spec['data_fname']
flabels = sample_spec['flabels']
datadir = os.path.join(os.path.dirname(__file__), 'data', 'snapshots')
provider = ProviderFunction(module='tests.plugins', function='f_snapshot',
discover_pattern=os.path.join(datadir, '*'),
**sample_spec)
# test return existing
points = space_fmt.read(os.path.join(datadir, '3', space_file), plabels)
data = data_fmt.read(os.path.join(datadir, '3', data_file), flabels)
sample = provider.require_data(points)
npt.assert_array_equal(points, sample.space)
npt.assert_array_equal(data, sample.data)
# test return new
points *= -1
data = np.tile([42, 87, 74, 74], (len(points), 1))
sample = provider.require_data(points)
npt.assert_array_equal(points, sample.space)
|
npt.assert_array_equal(data, sample.data)
|
numpy.testing.assert_array_equal
|
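The locate check in the test above produces a mix of known and new sample points by flipping the sign of selected rows with a broadcast multiply; the trick in isolation:

import numpy as np

space = np.arange(8.0).reshape(4, 2)          # 4 points, 2 parameters each
signs = np.reshape([1, -1, -1, 1], (-1, 1))   # one sign per row, as a column vector
points = space * signs                        # rows 1 and 2 are negated, rows 0 and 3 unchanged
print(points)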
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 12:56:14 2020
Finds latitudes and longitudes (out_lat, out_lon, min_idx) of unmasked grid
cells in the grid of the input CMIP6 data array (da) nearest (min_dists) to
query latitude/longitude points (qlats, qlons).
@author: thermans
"""
import numpy as np
import xarray as xr
def angdist(lats,lons,qlats,qlons): #calculate angular distance
lat0,lat = np.meshgrid(np.radians(lats),np.radians(qlats))
lon0,lon = np.meshgrid(np.radians(lons),np.radians(qlons))
temp = np.arctan2(np.sqrt((np.cos(lat)*np.sin(lon-lon0))**2 + (np.cos(lat0)*
|
np.sin(lat)
|
numpy.sin
|
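The angdist prompt above is cut off mid-expression, but it appears to follow the standard arctan2 (Vincenty-style) form of the great-circle distance. The sketch below is that standard formula, not a reconstruction of the hidden lines:

import numpy as np

def angdist_scalar(lat0, lon0, lat, lon):
    """Great-circle angular distance (radians) via the numerically stable arctan2 form."""
    dlon = lon - lon0
    num = np.sqrt((np.cos(lat) * np.sin(dlon)) ** 2
                  + (np.cos(lat0) * np.sin(lat)
                     - np.sin(lat0) * np.cos(lat) * np.cos(dlon)) ** 2)
    den = np.sin(lat0) * np.sin(lat) + np.cos(lat0) * np.cos(lat) * np.cos(dlon)
    return np.arctan2(num, den)

print(np.degrees(angdist_scalar(0.0, 0.0, 0.0, np.pi / 2)))  # ~90.0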
import cv2
import os
import numpy as np
import network
import preprocessor
def main():
print('OpenCV version {} '.format(cv2.__version__))
current_dir = os.path.dirname(__file__)
author = '021'
training_folder = os.path.join(current_dir, 'data/training/', author)
test_folder = os.path.join(current_dir, 'data/test/', author)
training_data = []
for filename in os.listdir(training_folder):
img = cv2.imread(os.path.join(training_folder, filename), 0)
if img is not None:
data = np.array(preprocessor.prepare(img))
data = np.reshape(data, (901, 1))
result = [[0], [1]] if "genuine" in filename else [[1], [0]]
result = np.array(result)
result = np.reshape(result, (2, 1))
training_data.append((data, result))
test_data = []
for filename in os.listdir(test_folder):
img = cv2.imread(os.path.join(test_folder, filename), 0)
if img is not None:
data = np.array(preprocessor.prepare(img))
data =
|
np.reshape(data, (901, 1))
|
numpy.reshape
|
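The training loop above stores each signature sample as a (901, 1) feature column paired with a (2, 1) genuine/forged target. The shape handling in isolation, with a random stand-in for preprocessor.prepare(img):

import numpy as np

features = np.random.rand(901)                 # stand-in for preprocessor.prepare(img)
data = np.reshape(features, (901, 1))          # column vector expected by the network
is_genuine = True
result = np.array([[0], [1]] if is_genuine else [[1], [0]])  # (2, 1) target
print(data.shape, result.shape)                # (901, 1) (2, 1)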
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for normals."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow_graphics.geometry.representation.mesh import normals
from tensorflow_graphics.util import test_case
class MeshTest(test_case.TestCase):
@parameterized.parameters(
(((None, 3), (None, 3)), (tf.float32, tf.int32)),
(((3, 6, 3), (3, 5, 4)), (tf.float32, tf.int32)),
)
def test_gather_faces_exception_not_raised(self, shapes, dtypes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(normals.gather_faces, shapes, dtypes)
@parameterized.parameters(
("Not all batch dimensions are identical", (3, 5, 4, 4), (1, 2, 4, 4)),
("Not all batch dimensions are identical", (5, 4, 4), (1, 2, 4, 4)),
("Not all batch dimensions are identical", (3, 5, 4, 4), (2, 4, 4)),
("vertices must have a rank greater than 1", (4,), (1, 2, 4, 4)),
("indices must have a rank greater than 1", (3, 5, 4, 4), (4,)),
)
def test_gather_faces_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(normals.gather_faces, error_msg, shapes)
def test_gather_faces_jacobian_random(self):
"""Test the Jacobian of the face extraction function."""
tensor_size = np.random.randint(2, 5)
tensor_shape = np.random.randint(1, 5, size=tensor_size).tolist()
vertex_init = np.random.random(size=tensor_shape)
indices_init = np.random.randint(0, tensor_shape[-2], size=tensor_shape)
indices_tensor = tf.convert_to_tensor(value=indices_init)
def gather_faces(vertex_tensor):
return normals.gather_faces(vertex_tensor, indices_tensor)
self.assert_jacobian_is_correct_fn(gather_faces, [vertex_init])
@parameterized.parameters(
((((0.,), (1.,)), ((1, 0),)), ((((1.,), (0.,)),),)),
((((0., 1.), (2., 3.)), ((1, 0),)), ((((2., 3.), (0., 1.)),),)),
((((0., 1., 2.), (3., 4., 5.)), ((1, 0),)), ((((3., 4., 5.),
(0., 1., 2.)),),)),
)
def test_gather_faces_preset(self, test_inputs, test_outputs):
"""Tests the extraction of mesh faces."""
self.assert_output_is_correct(
normals.gather_faces, test_inputs, test_outputs, tile=False)
def test_gather_faces_random(self):
"""Tests the extraction of mesh faces."""
tensor_size = np.random.randint(3, 5)
tensor_shape = np.random.randint(1, 5, size=tensor_size).tolist()
vertices = np.random.random(size=tensor_shape)
indices = np.arange(tensor_shape[-2])
indices = indices.reshape([1] * (tensor_size - 1) + [-1])
indices = np.tile(indices, tensor_shape[:-2] + [1, 1])
expected =
|
np.expand_dims(vertices, -3)
|
numpy.expand_dims
|
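In the random gather_faces test above, the indices are an identity permutation of the vertices, so the expected output is just the vertex tensor with one extra axis. A simplified 3-D analogue of the expand_dims/tile bookkeeping:

import numpy as np

batch, n_vertices, n_dims = 2, 4, 3
vertices = np.random.random((batch, n_vertices, n_dims))

indices = np.arange(n_vertices).reshape(1, -1)   # one identity "face" listing every vertex
indices = np.tile(indices, (batch, 1, 1))        # shape (batch, 1, n_vertices)

expected = np.expand_dims(vertices, -3)          # shape (batch, 1, n_vertices, n_dims)
print(indices.shape, expected.shape)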
import pandas as pd
import numpy as np
import os
from transformers import AutoTokenizer
import re
from prcs import digit_place_embed
from functools import partial
import tqdm
def pad_list(lst : list):
return np.pad(lst, (0, 150-len(lst)), 'constant', constant_values=(0))
def numpy_array(lst :list):
# Convert the list to a NumPy array and back to a plain Python list
return np.array(lst).tolist()
def word2index(word_list, vocabs):
return vocabs[word_list]
def re_sub(x):
return re.sub(r'[,|!?"\':;~()\[\]]', '', x)
def null_fill(df, value_mode):
def _fillNA(seq, rp_value):
return [rp_value if x!=x else x for x in seq ]
if value_mode =='VC':
df['value'] = df['value'].map(lambda x : _fillNA(x, 0.0))
df['uom'] = df['uom'].map(lambda x : _fillNA(x, ' '))
else:
df['value'] = df['value'].map(lambda x : _fillNA(x, ' '))
df['uom'] = df['uom'].map(lambda x : _fillNA(x, ' '))
return df
def agg_col(df, value_mode):
def _agg(a, b):
return [str(x) + str(y) for x,y in zip(a, b)]
def _value_split(x):
# value seq list
seq = [' '.join(str(y)) for y in x ]
return seq
def _round(seq):
return [round(x, 6) if type(x)==float else x for x in seq ]
# NV => code_name
# VA => code_name + value + uom
# DSVA => code_name + value(split) + uom
# VC => code_name + uom / value
if value_mode == 'NV':
df['code_name'] = pd.Series([list(map(str, a)) for a in df['code_name']])
elif value_mode =='VA':
df['value'] = df['value'].map(lambda x : _round(x))
df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['value'])])
df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['uom'])])
elif value_mode =='DSVA':
df['value'] = df['value'].map(lambda x : _round(x))
df['value'] = df['value'].map(lambda x : _value_split(x))
df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['value'])])
df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['uom'])])
elif value_mode =='VC':
df['value'] = df['value'].map(lambda x : _round(x))
df['code_name'] = pd.Series([_agg(a,b) for a, b in zip(df['code_name'], df['uom'])])
return df
def making_vocab(df):
vocab_dict = {}
vocab_dict['[PAD]'] = 0
vocab_dict['[CLS]'] = 1
vocab_dict['[MASK]'] = 2
df['merge_code_set'] = df['code_name'].apply(lambda x : list(set(x)))
vocab_set = []
for codeset in df['merge_code_set']:
vocab_set.extend(codeset)
vocab_set = list(set(vocab_set))
for idx, vocab in enumerate(vocab_set):
vocab_dict[vocab] = idx+3
return vocab_dict
def _tokenized_max_length(vocab, tokenizer):
tokenized_vocab= tokenizer(list(vocab.keys()))
max_word_len = max(list(map(len, tokenized_vocab['input_ids'])))
return max_word_len
def _organize(seq):
return re.sub(r'[,|!?"\':;~()\[\]]', '', seq)
def tokenize_seq(seq, word_max_len, tokenizer):
seq = list(map(_organize, seq))
seq = ['[PAD]' if x=='0.0' else x for x in seq]
tokenized_seq= tokenizer(seq, padding = 'max_length', return_tensors='pt', max_length=word_max_len)
return tokenized_seq
def convert2numpy(input_path, output_path):
value_mode_list = ['NV', 'DSVA', 'VC']
sources = ['mimic','eicu']
tokenizer= AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
for src in sources:
save_path = f'{output_path}/input/{src}'
filename = '{}_df.pkl'.format(src)
df = pd.read_pickle(os.path.join(input_path, filename))
print('{} input files load !'.format(src))
for value_mode in value_mode_list:
print(value_mode)
save_name = f'{src}_input_index_{value_mode}'
print('save_name', save_name)
df = null_fill(df, value_mode)
df = agg_col(df, value_mode)
vocab = making_vocab(df)
vocab['0.0'] = 0
src2index= partial(word2index, vocabs=vocab)
# input_index
index =[list(map(src2index, icu)) for icu in df['code_name']]
array = np.array(index)
np.save(os.path.join(save_path, save_name), array)
print('tokenization start!')
# tokenized
word_max_len = _tokenized_max_length(vocab, tokenizer)
token_tmp = [tokenize_seq(seq, word_max_len, tokenizer) for seq in tqdm.tqdm(df['code_name'])]
df['input_ids'] =pd.Series([token['input_ids'] for token in token_tmp])
df['token_type_ids'] =pd.Series([token['token_type_ids'] for token in token_tmp])
df['attention_mask'] =pd.Series([token['attention_mask'] for token in token_tmp])
#tokenized save
np.save(os.path.join(save_path, f'input_ids_{value_mode}.npy'), np.array(df['input_ids']))
np.save(os.path.join(save_path, f'token_type_ids_{value_mode}.npy'), np.array(df['token_type_ids']))
np.save(os.path.join(save_path, f'attention_mask_{value_mode}.npy'), np.array(df['attention_mask']))
if value_mode == 'NV':
#value
value =
|
np.array([df['value']])
|
numpy.array
|
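agg_col above builds the token stream differently per value mode by concatenating code names with values and units of measure. The core aggregation step on a toy row (the labels below are made up purely for illustration):

code_name = ['hr', 'sbp']
value = [72.0, 118.0]
uom = ['bpm', 'mmHg']

# VA-style aggregation: code_name + value, then + uom
merged = [str(c) + str(v) for c, v in zip(code_name, value)]
merged = [str(m) + str(u) for m, u in zip(merged, uom)]
print(merged)  # ['hr72.0bpm', 'sbp118.0mmHg']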
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
from matplotlib.colors import LogNorm #for log color scales
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
from src.functions_compute_RVs import *
##### To load the underlying and observed populations:
savefigures = False
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/Conditional_P8_12d_R1p5_2_transiting/' #'Conditional_Venus/' #'Conditional_P8_12d_R1p5_2_transiting/'
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_med/Systems_conditional/Conditional_P8_12d_R1p5_2_transiting/' #'Conditional_Venus/' #'Conditional_P8_12d_R1p5_2_transiting/'
run_number = ''
model_name = 'Maximum_AMD_model' + run_number
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys, sssp = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
P_cond_bounds, Rp_cond_bounds, Mp_cond_bounds = [8.,11.3137], [1.5874,2.0], [0.,np.inf]
#P_cond_bounds, Rp_cond_bounds, Mp_cond_bounds = [8.,12.], [1.8,2.0], [0.,np.inf]
#P_cond_bounds, Rp_cond_bounds, Mp_cond_bounds = [8.,12.], [0.9,1.1], [0.,np.inf]
#P_cond_bounds, Rp_cond_bounds, Mp_cond_bounds = [8.,12.], [3.,4.], [0.,np.inf]
#P_cond_bounds, Rp_cond_bounds, Mp_cond_bounds = [215.,235.], [0.9,1.0], [0.77,0.86] # Venus
det = True # set False for Venus
conds = conditionals_dict(P_cond_bounds=P_cond_bounds, Rp_cond_bounds=Rp_cond_bounds, Mp_cond_bounds=Mp_cond_bounds, det=det)
n_per_sys = sssp_per_sys['Mtot_all']
bools_cond_per_sys = condition_planets_bools_per_sys(sssp_per_sys, conds)
i_cond = condition_systems_indices(sssp_per_sys, conds)
n_sys_cond = len(i_cond)
P_all_cond = sssp_per_sys['P_all'][i_cond]
Rp_all_cond = sssp_per_sys['radii_all'][i_cond]
det_all_cond = sssp_per_sys['det_all'][i_cond]
bools_cond_all_cond = bools_cond_per_sys[i_cond]
# To also load in a full simulated catalog for normalizing the relative occurrence of planets in each bin:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/'
run_number = ''
param_vals_all_full = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys_full, sssp_full = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
n_sys_full = len(sssp_per_sys_full['Mtot_all'])
P_all_full = sssp_per_sys_full['P_all']
Rp_all_full = sssp_per_sys_full['radii_all']
##### To plot period-radius diagrams:
afs = 20 # axes labels font size
tfs = 20 # text labels font size
lfs = 16 # legend labels font size
mfs = 12 # main numbers font size
sfs = 12 # secondary numbers font size
bins = 100
##### Scatter plot of period vs radius for the conditioned systems:
#P_max = 256. # if want to further truncate periods for plot
n_sys_plot = 1000
P_all_cond_plot = P_all_cond[:n_sys_plot]
Rp_all_cond_plot = Rp_all_cond[:n_sys_plot]
det_all_cond_plot = det_all_cond[:n_sys_plot]
bools_cond_all_cond_plot = bools_cond_all_cond[:n_sys_plot]
fig = plt.figure(figsize=(16,8))
plot = GridSpec(6,10,left=0.1,bottom=0.1,right=0.95,top=0.95,wspace=0,hspace=0)
ax = plt.subplot(plot[1:,:9])
# Contour plot:
#cmap = cm.get_cmap('viridis', 256)
corner.hist2d(np.log10(P_all_cond[(~bools_cond_all_cond) & (P_all_cond > 0) & (P_all_cond < P_max)]), np.log10(Rp_all_cond[(~bools_cond_all_cond) & (P_all_cond > 0) & (P_all_cond < P_max)]), bins=30, plot_datapoints=False, plot_density=False, fill_contours=True, contour_kwargs={'colors': ['0.6','0.4','0.2','0']}, data_kwargs={'color': 'k'}) #{'colors': ['0.6','0.4','0.2','0']}; {'colors': [cmap(0.2), cmap(0.4), cmap(0.6), cmap(0.8)]}
# Scatter dots:
#plt.scatter(np.log10(P_all_cond_plot[bools_cond_all_cond_plot]), np.log10(Rp_all_cond_plot[bools_cond_all_cond_plot]), s=5, marker='.', c='g', label='Conditioned planets')
#plt.scatter(np.log10(P_all_cond_plot[(~bools_cond_all_cond_plot) & (P_all_cond_plot > 0) & (det_all_cond_plot == 1)]), np.log10(Rp_all_cond_plot[(~bools_cond_all_cond_plot) & (P_all_cond_plot > 0) & (det_all_cond_plot == 1)]), s=5, marker='.', c='b', label='Other planets in the conditioned systems')
# Scatter circles with/without outlines for detected/undetected planets:
sc11 = plt.scatter(np.log10(P_all_cond_plot[(bools_cond_all_cond_plot) & (det_all_cond_plot == 1)]), np.log10(Rp_all_cond_plot[(bools_cond_all_cond_plot) & (det_all_cond_plot == 1)]), s=10, marker='o', facecolors='g', edgecolors='g', label='Conditioned planets')
sc12 = plt.scatter(np.log10(P_all_cond_plot[(bools_cond_all_cond_plot) & (det_all_cond_plot == 0)]), np.log10(Rp_all_cond_plot[(bools_cond_all_cond_plot) & (det_all_cond_plot == 0)]), s=10, marker='o', facecolors='none', edgecolors='g')
sc21 = plt.scatter(np.log10(P_all_cond_plot[(~bools_cond_all_cond_plot) & (P_all_cond_plot > 0) & (det_all_cond_plot == 1)]), np.log10(Rp_all_cond_plot[(~bools_cond_all_cond_plot) & (P_all_cond_plot > 0) & (det_all_cond_plot == 1)]), s=10, marker='o', facecolors='b', edgecolors='b', label='Other planets in the conditioned systems')
sc22 = plt.scatter(np.log10(P_all_cond_plot[(~bools_cond_all_cond_plot) & (P_all_cond_plot > 0) & (det_all_cond_plot == 0)]), np.log10(Rp_all_cond_plot[(~bools_cond_all_cond_plot) & (P_all_cond_plot > 0) & (det_all_cond_plot == 0)]), s=10, marker='o', facecolors='none', edgecolors='b')
ax.tick_params(axis='both', labelsize=20)
xtick_vals = np.array([3,10,30,100,300])
ytick_vals = np.array([0.5,1,2,4,8])
plt.xticks(np.log10(xtick_vals), xtick_vals)
plt.yticks(np.log10(ytick_vals), ytick_vals)
plt.xlim([np.log10(P_min), np.log10(P_max)])
plt.ylim([np.log10(radii_min), np.log10(radii_max)])
plt.xlabel(r'Orbital period $P$ (days)', fontsize=20)
plt.ylabel(r'Planet radius $R_p$ ($R_\oplus$)', fontsize=20)
legend1 = plt.legend([sc11, sc21], ['Conditioned planets', 'Other planets in the conditioned systems'], loc='upper left', bbox_to_anchor=(0,1), ncol=1, frameon=False, fontsize=lfs) # for conditioned/other planets (colors)
#plt.legend([sc21, sc22], ['Kepler-detected', 'Kepler-undetected'], loc='upper right', bbox_to_anchor=(1,1), ncol=1, frameon=False, fontsize=lfs) # for detected/undetected planets (markers)
#plt.gca().add_artist(legend1)
ax = plt.subplot(plot[0,:9]) # top histogram
x_cond = P_all_cond[(~bools_cond_all_cond) & (P_all_cond > 0)]
plt.hist(x_cond, bins=np.logspace(np.log10(P_min), np.log10(P_max), bins+1), weights=np.ones(len(x_cond))/len(x_cond), histtype='step', color='b', ls='-', label='Other planets in the conditioned systems')
plt.hist(sssp_full['P_all'], bins=np.logspace(np.log10(P_min), np.log10(P_max), bins+1), weights=np.ones(len(sssp_full['P_all']))/len(sssp_full['P_all']), histtype='step', color='k', ls='-', label='Underlying distribution (all systems)')
plt.gca().set_xscale("log")
plt.xlim([P_min, P_max])
plt.xticks([])
plt.yticks([])
plt.legend(loc='upper left', bbox_to_anchor=(0,1), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[1:,9]) # side histogram
x_cond = Rp_all_cond[(~bools_cond_all_cond) & (P_all_cond > 0)]
plt.hist(x_cond, bins=np.logspace(np.log10(radii_min), np.log10(radii_max), bins+1), weights=np.ones(len(x_cond))/len(x_cond), histtype='step', orientation='horizontal', color='b', ls='-')
plt.hist(sssp_full['radii_all'], bins=np.logspace(np.log10(radii_min), np.log10(radii_max), bins+1), weights=np.ones(len(sssp_full['radii_all']))/len(sssp_full['radii_all']), histtype='step', orientation='horizontal', color='k', ls='-')
plt.gca().set_yscale("log")
plt.ylim([radii_min, radii_max])
plt.xticks([])
plt.yticks([])
if savefigures:
fig_name = savefigures_directory + model_name + '_P%s_%s_R%s_%s_cond_PR_loglog.pdf' % (P_cond_bounds[0], P_cond_bounds[1], Rp_cond_bounds[0], Rp_cond_bounds[1])
plt.savefig(fig_name)
plt.close()
##### Period-radius grids (custom bins):
#P_bins = np.logspace(np.log10(P_min), np.log10(P_max), 5+1)
#R_bins = np.array([0.5, 1., 1.5, 2., 3., 5., 10.])
#P_bins = np.array([4., 8., 16., 32., 64., 128., 256.])
#R_bins = np.array([0.5, 1., 1.5, 2., 3., 4., 6.])
P_bins = np.logspace(np.log10(4.), np.log10(256.), 12+1)
R_bins = np.logspace(np.log10(0.5), np.log10(8.), 12+1) #np.arange(0.5, 6.1, 0.5)
n_P_bins, n_R_bins = len(P_bins)-1, len(R_bins)-1
# Specify edges of GridSpec panels to ensure that the legend is the same size as a cell:
bgrid, tgrid = 0.1, 0.9 # bottom and top of grid
lgrid, rleg, wcb = 0.08, 0.97, 0.09 # left of grid, width of space for colorbar, and right of legend
rgrid = (rleg-lgrid-wcb)*(n_P_bins/(n_P_bins+1)) + lgrid
lleg = rgrid + wcb
bleg, tleg = tgrid - (tgrid-bgrid)/n_R_bins, tgrid
# First, plot overall occurrence rates (all systems):
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=lgrid,bottom=bgrid,right=rgrid,top=tgrid)
plt.figtext(0.5, 0.95, r'Occurrence rates', va='center', ha='center', fontsize=tfs)
ax = plt.subplot(plot[:,:])
mpps_grid = np.zeros((n_R_bins, n_P_bins))
mpps_dlnPR_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
dlnP, dlnR = np.log(P_bins[i+1]/P_bins[i]), np.log(R_bins[j+1]/R_bins[j])
pl_cell_bools_full = (P_all_full > P_bins[i]) & (P_all_full < P_bins[i+1]) & (Rp_all_full > R_bins[j]) & (Rp_all_full < R_bins[j+1])
pl_tot_cell_full = np.sum(pl_cell_bools_full)
sys_cell_bools_full = np.any(pl_cell_bools_full, axis=1)
sys_tot_cell_full = np.sum(sys_cell_bools_full)
print('(i=%s,j=%s): n_pl (all) = %s/%s' % (i,j,pl_tot_cell_full,n_sys_full))
mpps_cell_full = pl_tot_cell_full/n_sys_full # mean number of planets in bin per star, for all systems
mpps_dlnPR_cell_full = mpps_cell_full/(dlnP*dlnR)
mpps_grid[j,i] = mpps_cell_full
mpps_dlnPR_grid[j,i] = mpps_dlnPR_cell_full
plt.text(x=(i+0.95)*(1./n_P_bins), y=(j+0.95)*(1./n_R_bins), s=r'${:.3f}$'.format(np.round(mpps_cell_full, 3)), ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=(i+0.05)*(1./n_P_bins), y=(j+0.5)*(1./n_R_bins), s=r'${:.2f}$'.format(np.round(mpps_dlnPR_cell_full, 2)), ha='left', va='center', color='k', fontsize=mfs, fontweight='bold', transform=ax.transAxes)
img = plt.imshow(mpps_dlnPR_grid, cmap='coolwarm', aspect='auto', interpolation="nearest", origin='lower', extent=np.log10((P_bins[0], P_bins[-1], R_bins[0], R_bins[-1]))) #cmap='coolwarm'
ax.tick_params(axis='both', labelsize=afs)
plt.xticks(np.log10(P_bins[::2]), ['{:.1f}'.format(x) for x in P_bins[::2]])
plt.yticks(np.log10(R_bins[::3]), ['{:.1f}'.format(x) for x in R_bins[::3]])
plt.xlabel(r'Orbital period $P$ (days)', fontsize=tfs)
plt.ylabel(r'Planet radius $R_p$ ($R_\oplus$)', fontsize=tfs)
plot = GridSpec(1,1,left=rgrid+0.01,bottom=bgrid,right=rgrid+0.03,top=tgrid) # colorbar
cax = plt.subplot(plot[:,:])
cbar = plt.colorbar(img, cax=cax)
cbar.ax.tick_params(labelsize=lfs)
cbar.set_label(r'$\frac{\bar{n}_{\rm bin}}{d(\ln{R_p}) d(\ln{P})}$', rotation=270, va='bottom', fontsize=tfs)
plot = GridSpec(1,1,left=lleg,bottom=bleg,right=rleg,top=tleg) # legend
ax = plt.subplot(plot[:,:])
plt.text(x=0.95, y=0.9, s='(2)', ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=0.05, y=0.5, s='(1)', ha='left', va='center', color='k', fontsize=mfs, transform=ax.transAxes)
plt.text(x=-0.3, y=-1.5, s=r'(1) $\frac{\bar{n}_{\rm bin}}{d(\ln{R_p}) d(\ln{P})}$', ha='left', va='center', color='k', fontsize=lfs, transform=ax.transAxes)
plt.text(x=-0.3, y=-2.25, s=r'(2) $\bar{n}_{\rm bin}$', ha='left', va='center', color='b', fontsize=lfs, transform=ax.transAxes)
plt.xticks([])
plt.yticks([])
plt.xlabel('Legend', fontsize=tfs)
if savefigures:
fig_name = savefigures_directory + model_name + '_all_PR_grid_rates.pdf'
plt.savefig(fig_name)
plt.close()
##### Remake for defense talk:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=0.1,bottom=bgrid,right=0.865,top=tgrid,wspace=0,hspace=0)
plt.figtext(0.5, 0.95, r'Occurrence rates over all systems', va='center', ha='center', fontsize=tfs)
ax = plt.subplot(plot[:,:])
mpps_grid = np.zeros((n_R_bins, n_P_bins))
mpps_dlnPR_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
dlnP, dlnR = np.log(P_bins[i+1]/P_bins[i]), np.log(R_bins[j+1]/R_bins[j])
pl_cell_bools_full = (P_all_full > P_bins[i]) & (P_all_full < P_bins[i+1]) & (Rp_all_full > R_bins[j]) & (Rp_all_full < R_bins[j+1])
pl_tot_cell_full = np.sum(pl_cell_bools_full)
sys_cell_bools_full = np.any(pl_cell_bools_full, axis=1)
sys_tot_cell_full = np.sum(sys_cell_bools_full)
print('(i=%s,j=%s): n_pl (all) = %s/%s' % (i,j,pl_tot_cell_full,n_sys_full))
mpps_cell_full = pl_tot_cell_full/n_sys_full # mean number of planets in bin per star, for all systems
mpps_dlnPR_cell_full = mpps_cell_full/(dlnP*dlnR)
mpps_grid[j,i] = mpps_cell_full
mpps_dlnPR_grid[j,i] = mpps_dlnPR_cell_full
plt.text(x=(i+0.5)*(1./n_P_bins), y=(j+0.5)*(1./n_R_bins), s=r'${:.3f}$'.format(np.round(mpps_cell_full, 3)), ha='center', va='center', color='k', fontsize=16, fontweight='bold', transform=ax.transAxes)
img = plt.imshow(mpps_grid, cmap='coolwarm', aspect='auto', interpolation="nearest", vmax=0.072, origin='lower', extent=np.log10((P_bins[0], P_bins[-1], R_bins[0], R_bins[-1]))) #cmap='coolwarm'
ax.tick_params(axis='both', labelsize=afs)
plt.xticks(np.log10(P_bins[::2]), ['{:.1f}'.format(x) for x in P_bins[::2]])
plt.yticks(np.log10(R_bins[::3]), ['{:.1f}'.format(x) for x in R_bins[::3]])
plt.xlabel(r'Orbital period $P$ (days)', fontsize=tfs)
plt.ylabel(r'Planet radius $R_p$ ($R_\oplus$)', fontsize=tfs)
plot = GridSpec(1,1,left=0.89,bottom=bgrid,right=0.92,top=tgrid) # colorbar
cax = plt.subplot(plot[:,:])
cbar = plt.colorbar(img, cax=cax)
cbar.ax.tick_params(labelsize=lfs)
cbar.set_label(r'$\bar{n}_{\rm bin,all}$', rotation=270, va='bottom', fontsize=tfs)
if savefigures:
fig_name = savefigures_directory + model_name + '_all_PR_grid_rates_simple.pdf'
plt.savefig(fig_name)
plt.close()
# Occurrence rates in the conditioned systems:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=lgrid,bottom=bgrid,right=rgrid,top=tgrid)
plt.figtext(0.5, 0.95, r'Occurrence rates conditioned on a planet in $P = [{:.1f},{:.1f}]$d, $R_p = [{:.2f},{:.2f}] R_\oplus$'.format(conds['P_lower'], conds['P_upper'], conds['Rp_lower'], conds['Rp_upper']), va='center', ha='center', fontsize=tfs)
#plt.figtext(0.5, 0.95, r'Occurrence rates conditioned on a Venus-like planet', va='center', ha='center', fontsize=tfs)
ax = plt.subplot(plot[:,:])
mpps_grid = np.zeros((n_R_bins, n_P_bins))
mpps_dlnPR_grid = np.zeros((n_R_bins, n_P_bins))
fswp_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
dlnP, dlnR = np.log(P_bins[i+1]/P_bins[i]), np.log(R_bins[j+1]/R_bins[j])
pl_cell_bools = (P_all_cond > P_bins[i]) & (P_all_cond < P_bins[i+1]) & (Rp_all_cond > R_bins[j]) & (Rp_all_cond < R_bins[j+1]) & (~bools_cond_all_cond) # last condition is to NOT count the conditioned planets themselves
pl_tot_cell = np.sum(pl_cell_bools)
sys_cell_bools = np.any(pl_cell_bools, axis=1)
sys_tot_cell = np.sum(sys_cell_bools)
mpps_cell = pl_tot_cell/n_sys_cond # mean number of planets in bin per star, for conditioned systems
mpps_dlnPR_cell = mpps_cell/(dlnP*dlnR)
fswp_cell = sys_tot_cell/n_sys_cond # fraction of stars with planets in bin, for conditioned systems
mpps_grid[j,i] = mpps_cell
mpps_dlnPR_grid[j,i] = mpps_dlnPR_cell
fswp_grid[j,i] = fswp_cell
plt.text(x=(i+0.95)*(1./n_P_bins), y=(j+0.95)*(1./n_R_bins), s=r'${:.3f}$'.format(np.round(mpps_cell, 3)), ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=(i+0.05)*(1./n_P_bins), y=(j+0.5)*(1./n_R_bins), s=r'${:.2f}$'.format(np.round(mpps_dlnPR_cell, 2)), ha='left', va='center', color='k', fontsize=mfs, fontweight='bold', transform=ax.transAxes)
img = plt.imshow(mpps_dlnPR_grid, cmap='coolwarm', aspect='auto', interpolation="nearest", origin='lower', extent=np.log10((P_bins[0], P_bins[-1], R_bins[0], R_bins[-1]))) #cmap='coolwarm'
box_cond = patches.Rectangle(np.log10((conds['P_lower'], conds['Rp_lower'])), np.log10(conds['P_upper']/conds['P_lower']), np.log10(conds['Rp_upper']/conds['Rp_lower']), linewidth=2, edgecolor='g', facecolor='none')
ax.add_patch(box_cond)
ax.tick_params(axis='both', labelsize=afs)
plt.xticks(np.log10(P_bins[::2]), ['{:.1f}'.format(x) for x in P_bins[::2]])
plt.yticks(np.log10(R_bins[::3]), ['{:.1f}'.format(x) for x in R_bins[::3]])
plt.xlabel(r'Orbital period $P$ (days)', fontsize=tfs)
plt.ylabel(r'Planet radius $R_p$ ($R_\oplus$)', fontsize=tfs)
plot = GridSpec(1,1,left=rgrid+0.01,bottom=bgrid,right=rgrid+0.03,top=tgrid) # colorbar
cax = plt.subplot(plot[:,:])
cbar = plt.colorbar(img, cax=cax)
cbar.ax.tick_params(labelsize=lfs)
cbar.set_label(r'$\frac{\bar{n}_{\rm bin}}{d(\ln{R_p}) d(\ln{P})}$', rotation=270, va='bottom', fontsize=tfs)
plot = GridSpec(1,1,left=lleg,bottom=bleg,right=rleg,top=tleg) # legend
ax = plt.subplot(plot[:,:])
plt.text(x=0.95, y=0.9, s='(2)', ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=0.05, y=0.5, s='(1)', ha='left', va='center', color='k', fontsize=mfs, transform=ax.transAxes)
plt.text(x=-0.3, y=-1.5, s=r'(1) $\frac{\bar{n}_{\rm bin}}{d(\ln{R_p}) d(\ln{P})}$', ha='left', va='center', color='k', fontsize=lfs, transform=ax.transAxes)
plt.text(x=-0.3, y=-2.25, s=r'(2) $\bar{n}_{\rm bin}$', ha='left', va='center', color='b', fontsize=lfs, transform=ax.transAxes)
plt.xticks([])
plt.yticks([])
plt.xlabel('Legend', fontsize=tfs)
if savefigures:
fig_name = savefigures_directory + model_name + '_P%s_%s_R%s_%s_cond_PR_grid_rates.pdf' % (P_cond_bounds[0], P_cond_bounds[1], Rp_cond_bounds[0], Rp_cond_bounds[1])
plt.savefig(fig_name)
plt.close()
##### Remake for defense talk:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=0.1,bottom=bgrid,right=0.865,top=tgrid,wspace=0,hspace=0)
plt.figtext(0.5, 0.95, r'Occurrence rates conditioned on a planet in $P = [{:.1f},{:.1f}]$d, $R_p = [{:.2f},{:.2f}] R_\oplus$'.format(conds['P_lower'], conds['P_upper'], conds['Rp_lower'], conds['Rp_upper']), va='center', ha='center', fontsize=tfs)
#plt.figtext(0.5, 0.95, r'Occurrence rates conditioned on a Venus-like planet', va='center', ha='center', fontsize=tfs)
ax = plt.subplot(plot[:,:])
mpps_grid = np.zeros((n_R_bins, n_P_bins))
mpps_dlnPR_grid = np.zeros((n_R_bins, n_P_bins))
fswp_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
dlnP, dlnR = np.log(P_bins[i+1]/P_bins[i]), np.log(R_bins[j+1]/R_bins[j])
pl_cell_bools = (P_all_cond > P_bins[i]) & (P_all_cond < P_bins[i+1]) & (Rp_all_cond > R_bins[j]) & (Rp_all_cond < R_bins[j+1]) & (~bools_cond_all_cond) # last condition is to NOT count the conditioned planets themselves
pl_tot_cell = np.sum(pl_cell_bools)
sys_cell_bools = np.any(pl_cell_bools, axis=1)
sys_tot_cell = np.sum(sys_cell_bools)
mpps_cell = pl_tot_cell/n_sys_cond # mean number of planets in bin per star, for conditioned systems
mpps_dlnPR_cell = mpps_cell/(dlnP*dlnR)
fswp_cell = sys_tot_cell/n_sys_cond # fraction of stars with planets in bin, for conditioned systems
mpps_grid[j,i] = mpps_cell
mpps_dlnPR_grid[j,i] = mpps_dlnPR_cell
fswp_grid[j,i] = fswp_cell
plt.text(x=(i+0.5)*(1./n_P_bins), y=(j+0.5)*(1./n_R_bins), s=r'${:.3f}$'.format(np.round(mpps_cell, 3)), ha='center', va='center', color='k', fontsize=16, fontweight='bold', transform=ax.transAxes)
img = plt.imshow(mpps_grid, cmap='coolwarm', aspect='auto', interpolation="nearest", vmax=0.072, origin='lower', extent=np.log10((P_bins[0], P_bins[-1], R_bins[0], R_bins[-1]))) #cmap='coolwarm'
box_cond = patches.Rectangle(np.log10((conds['P_lower'], conds['Rp_lower'])), np.log10(conds['P_upper']/conds['P_lower']), np.log10(conds['Rp_upper']/conds['Rp_lower']), linewidth=2, edgecolor='g', facecolor='none')
ax.add_patch(box_cond)
ax.tick_params(axis='both', labelsize=afs)
plt.xticks(np.log10(P_bins[::2]), ['{:.1f}'.format(x) for x in P_bins[::2]])
plt.yticks(np.log10(R_bins[::3]), ['{:.1f}'.format(x) for x in R_bins[::3]])
plt.xlabel(r'Orbital period $P$ (days)', fontsize=tfs)
plt.ylabel(r'Planet radius $R_p$ ($R_\oplus$)', fontsize=tfs)
plot = GridSpec(1,1,left=0.89,bottom=bgrid,right=0.92,top=tgrid) # colorbar
cax = plt.subplot(plot[:,:])
cbar = plt.colorbar(img, cax=cax)
cbar.ax.tick_params(labelsize=lfs)
cbar.set_label(r'$\bar{n}_{\rm bin,cond}$', rotation=270, va='bottom', fontsize=tfs)
if savefigures:
fig_name = savefigures_directory + model_name + '_P%s_%s_R%s_%s_cond_PR_grid_rates_simple.pdf' % (P_cond_bounds[0], P_cond_bounds[1], Rp_cond_bounds[0], Rp_cond_bounds[1])
plt.savefig(fig_name)
plt.close()
# Relative occurrence rates (intrinsic mean numbers of planets in conditioned systems vs. in general) in each bin:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=lgrid,bottom=bgrid,right=rgrid,top=tgrid)
plt.figtext(0.5, 0.95, r'Relative occurrence rates conditioned on a planet in $P = [{:.1f},{:.1f}]$d, $R_p = [{:.2f},{:.2f}] R_\oplus$'.format(conds['P_lower'], conds['P_upper'], conds['Rp_lower'], conds['Rp_upper']), va='center', ha='center', fontsize=tfs)
#plt.figtext(0.5, 0.95, r'Relative occurrence rates conditioned on a Venus-like planet', va='center', ha='center', fontsize=tfs)
ax = plt.subplot(plot[:,:])
rel_mpps_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
dlnP, dlnR = np.log(P_bins[i+1]/P_bins[i]), np.log(R_bins[j+1]/R_bins[j])
pl_cell_bools = (P_all_cond > P_bins[i]) & (P_all_cond < P_bins[i+1]) & (Rp_all_cond > R_bins[j]) & (Rp_all_cond < R_bins[j+1]) & (~bools_cond_all_cond) # last condition is to NOT count the conditioned planets themselves
pl_tot_cell = np.sum(pl_cell_bools)
sys_cell_bools = np.any(pl_cell_bools, axis=1)
sys_tot_cell = np.sum(sys_cell_bools)
pl_cell_bools_full = (P_all_full > P_bins[i]) & (P_all_full < P_bins[i+1]) & (Rp_all_full > R_bins[j]) & (Rp_all_full < R_bins[j+1])
pl_tot_cell_full = np.sum(pl_cell_bools_full)
sys_cell_bools_full = np.any(pl_cell_bools_full, axis=1)
sys_tot_cell_full = np.sum(sys_cell_bools_full)
print('(i=%s,j=%s): n_pl (cond) = %s/%s, n_pl (all) = %s/%s' % (i,j,pl_tot_cell,n_sys_cond,pl_tot_cell_full,n_sys_full))
mpps_cell = pl_tot_cell/n_sys_cond # mean number of planets in bin per star, for conditioned systems
mpps_dlnPR_cell = mpps_cell/(dlnP*dlnR)
mpps_cell_full = pl_tot_cell_full/n_sys_full # mean number of planets in bin per star, for all systems
mpps_dlnPR_cell_full = mpps_cell_full/(dlnP*dlnR)
rel_mpps_grid[j,i] = mpps_cell/mpps_cell_full
plt.text(x=(i+0.95)*(1./n_P_bins), y=(j+0.7)*(1./n_R_bins), s=r'${:.2f}$'.format(np.round(mpps_dlnPR_cell, 2)), ha='right', va='center', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=(i+0.95)*(1./n_P_bins), y=(j+0.3)*(1./n_R_bins), s=r'${:.2f}$'.format(np.round(mpps_dlnPR_cell_full, 2)), ha='right', va='center', color='r', fontsize=sfs, transform=ax.transAxes)
plt.text(x=(i+0.05)*(1./n_P_bins), y=(j+0.5)*(1./n_R_bins), s=r'${:.2f}$'.format(np.round(mpps_cell/mpps_cell_full, 2)), ha='left', va='center', color='k', fontsize=mfs, fontweight='bold', transform=ax.transAxes)
img = plt.imshow(rel_mpps_grid, cmap='coolwarm', norm=MidPointLogNorm(vmin=0.1,vmax=10.,midpoint=1.), aspect='auto', interpolation="nearest", origin='lower', extent=np.log10((P_bins[0], P_bins[-1], R_bins[0], R_bins[-1]))) # log colorscale; norm=matplotlib.colors.LogNorm(vmin=0.1,vmax=10.); MidPointLogNorm(vmin=0.1,vmax=10.,midpoint=1.)
#img = plt.imshow(rel_mpps_grid, cmap='coolwarm', norm=matplotlib.colors.TwoSlopeNorm(vcenter=1.), aspect='auto', interpolation="nearest", origin='lower', extent=np.log10((P_bins[0], P_bins[-1], R_bins[0], R_bins[-1]))) # linear colorscale
box_cond = patches.Rectangle(np.log10((conds['P_lower'], conds['Rp_lower'])),
|
np.log10(conds['P_upper']/conds['P_lower'])
|
numpy.log10
|
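Every grid in the script above reduces to the same per-cell statistic: the mean number of planets per star in a (P, Rp) bin, optionally divided by the logarithmic bin area d lnP * d lnR. A stripped-down version on toy arrays:

import numpy as np

P_all = np.array([[5., 20., 0.], [12., 0., 0.]])    # periods per system (0 = no planet)
Rp_all = np.array([[1.2, 2.5, 0.], [1.8, 0., 0.]])  # radii per system
n_sys = len(P_all)

P_lo, P_hi, R_lo, R_hi = 4., 16., 1., 2.
in_cell = (P_all > P_lo) & (P_all < P_hi) & (Rp_all > R_lo) & (Rp_all < R_hi)

mpps = np.sum(in_cell) / n_sys                        # mean planets per star in the cell
dlnP, dlnR = np.log(P_hi / P_lo), np.log(R_hi / R_lo)
mpps_dlnPR = mpps / (dlnP * dlnR)                     # occurrence density per d lnP d lnR
fswp = np.sum(np.any(in_cell, axis=1)) / n_sys        # fraction of stars with a planet in the cell
print(mpps, mpps_dlnPR, fswp)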
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
def get_k_b(xy1,xy2):
k=(xy2[1]-xy1[1])/(xy2[0]-xy1[0])
b=xy1[1]-k*xy1[0]
return k,b
def moving_average(a, n):
ret=np.copy(a)
for i in range(n//2,len(a)-n//2):
ret[i]=np.mean(a[i-n//2:i+n//2+1])
return ret
def get_see_ranges(roi_xy):
roi_xy_new_0=np.append(roi_xy[:,0],roi_xy[0,0])
roi_xy_new_1=np.append(roi_xy[:,1],roi_xy[0,1])
roi_xy_new=np.vstack((roi_xy_new_0,roi_xy_new_1)).T
# print(roi_xy_new)
bum_f_axe=np.arange(np.min(np.array(roi_xy)[:,0])+1,np.max(np.array(roi_xy_new)[:,0]))
f0_arange=[]
df_arange=[]
for j in range(len(roi_xy_new)-1):
p1=roi_xy_new[j]
p2=roi_xy_new[j+1]
f0_arange.append(np.arange(np.minimum(roi_xy_new[j][0],roi_xy_new[j+1][0]),np.maximum(roi_xy_new[j][0],roi_xy_new[j+1][0])))
#~ print(j,p1,p2)
k,b=get_k_b(p1,p2)
df_arange.append((k*f0_arange[j]+b).astype(int))
#~ print("WOW")
bum_ranges=[]
for jj in range(len(bum_f_axe)):
f=bum_f_axe[jj]
dfs=[]
for j in range(len(f0_arange)):
if f in f0_arange[j]:
f_ind=np.where(f0_arange[j]==f)[0][0]
dfs.append(df_arange[j][f_ind])
bum_ranges.append((f,sorted(dfs)))
return bum_ranges
db_offset=[-8.7,0,-2.5]
spec_db_file="spm_database_100Hz.npy"
spec_file="spectrogram_A_n2500.npz"
proc_files=[
'proc_0100.npz',
'proc_0101.npz',
'proc_0120.npz',
'proc_0121.npz',
'proc_0140.npz',
'proc_0141.npz',
'proc_0160.npz',
'proc_0161.npz',
'proc_0180.npz',
'proc_0181.npz',
]
SPM_DB=np.load(spec_db_file, allow_pickle=True)
data=np.load(spec_file)
spectrogram=data['spectrogram']
f_axe=data['f_axe']
t_axe=data['t_axe']
rngs=[[600+i*750,1150+i*750] for i in range(18)]
rngs[-1][1]=13500
rngs.insert(0,[0,400])
spm_mean=np.zeros(2500)
for i in range(len(rngs)):
spm_mean+=np.mean(spectrogram[rngs[i][0]:rngs[i][1],:],axis=0)
spm_mean/=len(rngs)
# plt.figure()
temp=spm_mean[0:360]/spm_mean[350]
# plt.plot(temp)
k,b=get_k_b([124,temp[124]],[127,temp[127]]); temp[125:127]=np.array([125,126])*k+b
k,b=get_k_b([148,temp[148]],[152,temp[152]]); temp[149:152]=np.array(range(149,152))*k+b
k,b=get_k_b([167,temp[167]],[171,temp[171]]); temp[168:171]=np.array(range(168,171))*k+b
k,b=get_k_b([196,temp[196]],[200,temp[200]]); temp[197:200]=np.array(range(197,200))*k+b
k,b=get_k_b([229,temp[229]],[231,temp[231]]); temp[230:231]=np.array(range(230,231))*k+b
k,b=get_k_b([248,temp[248]],[252,temp[252]]); temp[249:252]=np.array(range(249,252))*k+b
k,b=get_k_b([267,temp[267]],[271,temp[271]]); temp[268:271]=np.array(range(268,271))*k+b
k,b=get_k_b([298,temp[298]],[302,temp[302]]); temp[299:302]=np.array(range(299,302))*k+b
k,b=get_k_b([348,temp[348]],[352,temp[352]]); temp[349:352]=np.array(range(349,352))*k+b
temp=moving_average(temp, 11)
temp=temp[0:351]
for angle_ind in range(9):
for dir_ind in range(2):
for session_ind in range(2):
for i in range(len(SPM_DB[0][angle_ind][dir_ind][session_ind][0])):
ind=np.where(SPM_DB[0][angle_ind][dir_ind][session_ind][2][i,0:2500]==1e-50)[0][-1]+1
SPM_DB[0][angle_ind][dir_ind][session_ind][2][i,ind:ind+351]=SPM_DB[0][angle_ind][dir_ind][session_ind][2][i,ind:ind+351]/temp
DM_F0, DM_INT, DM_FREQS =[],[],[]
BUM_F0, BUM_INT, BUM_FREQS =[],[],[]
BUMD_F0, BUMD_INT, BUMD_FREQS =[],[],[]
# file_ind=0
# for file_ind in range(len(proc_files)):
for file_ind in [8,9,6,7,4,5,2,3,0,1]:
# for file_ind in [7]:
# for file_ind in range(9,10):
filename=proc_files[file_ind]
proc_data=np.load(filename)
list(proc_data.keys())
temp_name=filename.split('.')[0].split('_')[-1]
site_ind=int(temp_name[0])
series_ind=int(temp_name[1])
angle_ind=int(temp_name[2])
dir_ind=int(temp_name[3])
# site_ind, series_ind, angle_ind, dir_ind
interference_mask=proc_data['interference_mask']
if dir_ind==0: interference_mask=np.flipud(interference_mask)
roi_bum_xy=proc_data['roi_bum_xy']
roi_bumd_xy=proc_data['roi_bumd_xy']
roi_dm_xy=proc_data['roi_dm_xy']
spectrogram=SPM_DB[site_ind][angle_ind][dir_ind][session_ind][2]
spec_log=10*np.log10(spectrogram)
spec_filt=spec_log*(1-interference_mask)-200*interference_mask
f0_axe=SPM_DB[site_ind][angle_ind][dir_ind][session_ind][0]
f_axe=SPM_DB[site_ind][angle_ind][dir_ind][session_ind][1]
if dir_ind==0:
spec_filt=np.flipud(spec_filt)
f0_axe=np.flip(f0_axe)
bum_ranges=get_see_ranges(roi_bum_xy)
bum_int=np.ones(len(bum_ranges))*np.nan
bum_f0=np.zeros(len(bum_ranges))
bum_freqs=np.ones(len(bum_ranges))*np.nan
for j in range(len(bum_ranges)):
f0=bum_ranges[j][0]
f0_ind=np.where(np.abs(f0_axe-f0)==np.min(np.abs(f0_axe-f0)))[0][0]
df_min=bum_ranges[j][1][0]
df_max=bum_ranges[j][1][1]
df_min_ind=np.where(np.abs(f_axe/1000-df_min)==np.min(np.abs(f_axe/1000-df_min)))[0][0]
df_max_ind=np.where(np.abs(f_axe/1000-df_max)==np.min(np.abs(f_axe/1000-df_max)))[0][0]
bum_f0[j]=f0
bum_int[j]=np.max(spec_filt[f0_ind,df_min_ind:df_max_ind+1],axis=0)
bum_freqs[j]=f_axe[df_min_ind+np.argmax(spec_filt[f0_ind,df_min_ind:df_max_ind+1],axis=0)]/1000
BUM_F0.append(bum_f0), BUM_INT.append(bum_int), BUM_FREQS.append(bum_freqs)
bumd_ranges=get_see_ranges(roi_bumd_xy)
# print(len(bumd_ranges),bumd_ranges)
bumd_int=np.ones(len(bumd_ranges))*np.nan
bumd_f0=np.zeros(len(bumd_ranges))
bumd_freqs=np.ones(len(bumd_ranges))*np.nan
for j in range(len(bumd_ranges)):
f0=bumd_ranges[j][0]
f0_ind=np.where(np.abs(f0_axe-f0)==np.min(np.abs(f0_axe-f0)))[0][0]
df_min=bumd_ranges[j][1][0]
df_max=bumd_ranges[j][1][1]
df_min_ind=np.where(np.abs(f_axe/1000-df_min)==np.min(np.abs(f_axe/1000-df_min)))[0][0]
df_max_ind=np.where(np.abs(f_axe/1000-df_max)==np.min(np.abs(f_axe/1000-df_max)))[0][0]
bumd_f0[j]=f0
bumd_int[j]=np.max(spec_filt[f0_ind,df_min_ind:df_max_ind+1],axis=0)
bumd_freqs[j]=f_axe[df_min_ind+np.argmax(spec_filt[f0_ind,df_min_ind:df_max_ind+1],axis=0)]/1000
BUMD_F0.append(bumd_f0), BUMD_INT.append(bumd_int), BUMD_FREQS.append(bumd_freqs)
dm_ranges=get_see_ranges(roi_dm_xy)
dm_int=np.ones(len(dm_ranges))*np.nan
dm_f0=np.zeros(len(dm_ranges))
dm_freqs=np.ones(len(dm_ranges))*np.nan
for j in range(len(dm_ranges)):
f0=dm_ranges[j][0]
f0_ind=np.where(np.abs(f0_axe-f0)==np.min(np.abs(f0_axe-f0)))[0][0]
df_min=dm_ranges[j][1][0]
df_max=dm_ranges[j][1][1]
df_min_ind=np.where(np.abs(f_axe/1000-df_min)==np.min(np.abs(f_axe/1000-df_min)))[0][0]
df_max_ind=np.where(np.abs(f_axe/1000-df_max)==np.min(np.abs(f_axe/1000-df_max)))[0][0]
dm_f0[j]=f0
dm_int[j]=np.max(spec_filt[f0_ind,df_min_ind:df_max_ind+1],axis=0)
dm_freqs[j]=f_axe[df_min_ind+np.argmax(spec_filt[f0_ind,df_min_ind:df_max_ind+1],axis=0)]/1000
DM_F0.append(dm_f0), DM_INT.append(dm_int), DM_FREQS.append(dm_freqs)
if site_ind==0 and series_ind==1 and angle_ind==0 and dir_ind==0:
dm_f0[np.where(dm_f0==5777.)]=np.nan
dm_f0[np.where(dm_f0==5837.)]=np.nan
dm_f0[np.where(dm_f0==5927.)]=np.nan
if site_ind==0 and series_ind==1 and angle_ind==4 and dir_ind==0:
dm_f0[np.where(dm_f0==5927.)]=np.nan
if site_ind==0 and series_ind==1 and angle_ind==8 and dir_ind==0:
dm_f0[np.where(dm_f0==5929.)]=np.nan
if site_ind==0 and series_ind==1 and angle_ind==8 and dir_ind==1:
dm_f0[np.where(dm_f0==5917.)]=np.nan
# ~ plt.figure(figsize=(9,9))
# ~ plt.subplot(211)
# ~ # plt.pcolormesh(f0_axe,f_axe/1000,10*np.log10(spectrogram.T),vmin=-120, vmax=-80)
# ~ plt.pcolormesh(f0_axe,f_axe/1000,spec_filt.T+db_offset[site_ind],vmin=-120, vmax=-80, cmap='jet')
# ~ plt.plot(np.append(roi_bum_xy[:,0],roi_bum_xy[0,0]),np.append(roi_bum_xy[:,1],roi_bum_xy[0,1]),'k',lw=2)
# ~ plt.plot(bum_f0,bum_freqs,'k',lw=2)
# ~ plt.plot(dm_f0,dm_freqs,'k',lw=2)
# ~ if angle_ind in [1,2,3]:
# ~ plt.plot(np.append(roi_bumd_xy[:,0],roi_bumd_xy[0,0]),np.append(roi_bumd_xy[:,1],roi_bumd_xy[0,1]),'k',lw=2)
# ~ plt.plot(bumd_f0,bumd_freqs,'k',lw=2)
# ~ # plt.plot(np.append(roi_dm_xy[:,0],roi_dm_xy[0,0]),np.append(roi_dm_xy[:,1],roi_dm_xy[0,1]),'k',lw=2)
# ~ plt.ylim([-50,200])
# ~ plt.xlim(5730,5930)
# ~ plt.title(temp_name)
# ~ plt.subplot(212)
# ~ plt.ylim(-120,-80)
# ~ plt.xlim(5730,5930)
# ~ plt.plot(bum_f0,bum_int+db_offset[site_ind],'m')
# ~ plt.plot(dm_f0,dm_int+db_offset[site_ind],'r')
# ~ if angle_ind in [1,2,3]: plt.plot(bumd_f0,bumd_int+db_offset[site_ind],'g')
# ~ plt.show()
bb=[
[0.05, 0.19999999999999996, 0.13042372881355932-0.05, 0.95-0.19999999999999996],
[0.1345084745762712, 0.19999999999999996, 0.2149322033898305-0.1345084745762712, 0.95-0.19999999999999996],
[0.24301694915254235, 0.19999999999999996, 0.32344067796610165-0.24301694915254235, 0.95-0.19999999999999996],
[0.3275254237288135, 0.19999999999999996, 0.4079491525423728-0.3275254237288135, 0.95-0.19999999999999996],
[0.43603389830508466, 0.19999999999999996, 0.516457627118644-0.43603389830508466, 0.95-0.19999999999999996],
[0.5205423728813559, 0.19999999999999996, 0.6009661016949152-0.5205423728813559, 0.95-0.19999999999999996],
[0.6290508474576271, 0.19999999999999996, 0.7094745762711864-0.6290508474576271, 0.95-0.19999999999999996],
[0.7135593220338983, 0.19999999999999996, 0.7939830508474576-0.7135593220338983, 0.95-0.19999999999999996],
[0.8220677966101695, 0.19999999999999996, 0.9024915254237288-0.8220677966101695, 0.95-0.19999999999999996],
[0.9065762711864407, 0.19999999999999996, 0.987-0.9065762711864407, 0.95-0.19999999999999996]
]
#DM_PROC=np.load("dm_proc.npy")
dm_proc_table_fname='dm_proc_100Hz.csv'
dm_proc_table=np.loadtxt(dm_proc_table_fname,skiprows=1,delimiter=',')
step=0
DM_PROC=[]
for site_ind in range(3):
DM_PROC.append([])
for angle_ind in range(9):
DM_PROC[site_ind].append([])
for dir_ind in range(2):
DM_PROC[site_ind][angle_ind].append([])
for series_ind in range(2):
DM_PROC[site_ind][angle_ind][dir_ind].append((dm_proc_table[step,5],dm_proc_table[step,6],dm_proc_table[step,7],dm_proc_table[step,8]))
step+=1
text_size=16
dx=0.011
fig=plt.figure(figsize=(16,15))
axs1=[]
i_sh1=0.001
i_sh2=0.001
for i in range(len(bb)):
if i in [1,3,5,7,9]:
axs1.append(plt.axes(position=[bb[i][0]-dx+0.02-i_sh1*i,bb[i][1]+0.267*2-0.02,bb[i][2]-0.008,bb[i][3]/3]))
# print(i)
else:
axs1.append(plt.axes(position=[bb[i][0]+0.0199-i_sh2*i,bb[i][1]+0.267*2-0.02,bb[i][2]-0.008,bb[i][3]/3]))
cbaxes = plt.axes([0.060000000000000005, 0.05+0.269*2+0.10-0.02, 0.9769152542372881-0.060000000000000005, 0.05/3])
angle_inds=[8,6,4,2,0]
site_ind=0
series_ind=1
for angle_ind2 in range(5):
for dir_ind in range(2):
angle_ind=angle_inds[angle_ind2]
ind=angle_ind2*2+dir_ind
f0_axe=SPM_DB[site_ind][angle_ind][dir_ind][series_ind][0]
f_axe=SPM_DB[site_ind][angle_ind][dir_ind][series_ind][1]
spectrogram=SPM_DB[site_ind][angle_ind][dir_ind][series_ind][2]
pcm=axs1[ind].pcolormesh(f0_axe, f_axe/1000,10*np.log10(spectrogram.T)+db_offset[site_ind],vmax=-80,vmin=-120,cmap='jet',rasterized=True)
axs1[ind].set_ylim([-20,150])
axs1[ind].set_xlim([5930,5730])
axs1[ind].set_xticks([5930])
if ind==0:
axs1[ind].set_ylabel(r'$\Delta f$, kHz')
if ind>0:
axs1[ind].set_yticks([])
if dir_ind==1:
axs1[ind].set_xticks([5730,5930])
axs1[ind].set_xlim([5730,5930])
cb = plt.colorbar(pcm, cax = cbaxes, orientation='horizontal')
cbaxes.set_xlabel('Intensity, dB')
axs1[7].text(5985, -12, 'DM', {'ha': 'center', 'va': 'center'}, rotation=0, fontsize=text_size, backgroundcolor='w')
axs1[7].text(5920, 70, 'BUM', {'ha': 'center', 'va': 'center'}, rotation=0, fontsize=text_size, backgroundcolor='w')
axs1[2].text(5920, 25, r'BUM$_{\mathrm{D}}$', {'ha': 'center', 'va': 'center'}, rotation=0, fontsize=text_size, backgroundcolor='w')
axs1[6].text(5930, 25, r'BUM$_{\mathrm{D}}$', {'ha': 'center', 'va': 'center'}, rotation=0, fontsize=text_size, backgroundcolor='w')
axs1[6].text(5985, 9, 'UM', {'ha': 'center', 'va': 'center'}, rotation=0, fontsize=text_size, backgroundcolor='w')
axs1[6].annotate('', xy=(5890, 9), xycoords='data', xytext=(5950, 9), textcoords='data', annotation_clip=False, arrowprops=dict(arrowstyle="->",ec="k",lw=2))
axs1[7].text(5950, 25, r'BUM$_{\mathrm{D}}$', {'ha': 'center', 'va': 'center'}, rotation=0, fontsize=text_size, backgroundcolor='w')
axs1[7].text(5775, 117, '2BUM', {'ha': 'center', 'va': 'center'}, rotation=0, fontsize=text_size, backgroundcolor='w')
axs1[7].annotate('', xy=(5890, -11), xycoords='data', xytext=(5950, -11), textcoords='data', annotation_clip=False, arrowprops=dict(arrowstyle="->",ec="k",lw=2))
axs1[7].annotate('', xy=(5775, 90), xycoords='data', xytext=(5775, 110), textcoords='data', annotation_clip=False, arrowprops=dict(arrowstyle="->",ec="k",lw=2))
axs1[7].annotate('', xy=(5790, 50), xycoords='data', xytext=(5870, 70), textcoords='data', annotation_clip=False, arrowprops=dict(arrowstyle="->",ec="k",lw=2))
axs1[2].annotate('', xy=(5790, 25), xycoords='data', xytext=(5855, 25), textcoords='data', annotation_clip=False, arrowprops=dict(arrowstyle="->",ec="k",lw=2))
axs1[7].annotate('', xy=(5830, 25), xycoords='data', xytext=(5885, 25), textcoords='data', annotation_clip=False, arrowprops=dict(arrowstyle="->",ec="k",lw=2))
axs1[6].annotate('', xy=(5800, 25), xycoords='data', xytext=(5865, 25), textcoords='data', annotation_clip=False, arrowprops=dict(arrowstyle="->",ec="k",lw=2))
fig.text(axs1[1].get_position().bounds[0],0.98,r'$\alpha=-28^\circ$',horizontalalignment='center', verticalalignment='center', fontsize=18)
fig.text(axs1[3].get_position().bounds[0],0.98,r'$\alpha=-14^\circ$',horizontalalignment='center', verticalalignment='center', fontsize=18)
fig.text(axs1[5].get_position().bounds[0],0.98,r'$\alpha=0^\circ$',horizontalalignment='center', verticalalignment='center', fontsize=18)
fig.text(axs1[7].get_position().bounds[0],0.98,r'$\alpha=14^\circ$',horizontalalignment='center', verticalalignment='center', fontsize=18)
fig.text(axs1[9].get_position().bounds[0],0.98,r'$\alpha=28^\circ$',horizontalalignment='center', verticalalignment='center', fontsize=18)
######
axs2=[]
i_sh=0.002
for i in range(0,len(bb),2):
axs2.append(plt.axes(position=[bb[i][0]+0.02-i_sh*i,bb[i][1]+0.267-0.08-0.03,bb[i][2]*2-0.008,bb[i][3]/3]))
angle_inds=[8,6,4,2,0]
site_ind=0
series_ind=1
for angle_ind2 in range(5):
angle_ind=angle_inds[angle_ind2]
ind=angle_ind2
dir_ind=0
dm_f0= DM_F0[angle_ind2*2]
dm_freq= DM_FREQS[angle_ind2*2]
dm_int= DM_INT[angle_ind2*2]
bum_f0= BUM_F0[angle_ind2*2]
bum_freq= BUM_FREQS[angle_ind2*2]
bum_int= BUM_INT[angle_ind2*2]
if angle_ind in [1,2,3]:
bumd_f0= BUMD_F0[angle_ind2*2]
bumd_freq= BUMD_FREQS[angle_ind2*2]
bumd_int= BUMD_INT[angle_ind2*2]
axs2[ind].plot(bumd_f0,bumd_int+db_offset[site_ind],'g',lw=2, label=r'BUM$_\mathrm{D}$/down')
if ind==0:
axs2[ind].plot(dm_f0,dm_int+db_offset[site_ind],'r',lw=2,label='DM/down')
axs2[ind].plot(bum_f0,bum_int+db_offset[site_ind],'m',lw=2,label='BUM/down')
else:
axs2[ind].plot(dm_f0,dm_int+db_offset[site_ind],'r',lw=2)
axs2[ind].plot(bum_f0,bum_int+db_offset[site_ind],'m',lw=2)
dir_ind=1
dm_f0= DM_F0[angle_ind2*2+1]
dm_freq= DM_FREQS[angle_ind2*2+1]
dm_int= DM_INT[angle_ind2*2+1]
bum_f0= BUM_F0[angle_ind2*2+1]
bum_freq= BUM_FREQS[angle_ind2*2+1]
bum_int= BUM_INT[angle_ind2*2+1]
if angle_ind in [1,2,3]:
bumd_f0= BUMD_F0[angle_ind2*2+1]
bumd_freq= BUMD_FREQS[angle_ind2*2+1]
bumd_int= BUMD_INT[angle_ind2*2+1]
if ind==3:
axs2[ind].plot(bumd_f0,bumd_int+db_offset[site_ind],'c',lw=2,label=r'BUM$_\mathrm{D}$/up')
else:
axs2[ind].plot(bumd_f0,bumd_int+db_offset[site_ind],'c',lw=2)
if ind==0:
axs2[ind].plot(dm_f0,dm_int+db_offset[site_ind],'b',lw=2, label= 'DM/up')
axs2[ind].plot(bum_f0,bum_int+db_offset[site_ind],'k',lw=2, label= 'BUM/up')
axs2[ind].set_ylabel("SEE intensity, dB", labelpad=0)
else:
axs2[ind].plot(dm_f0,dm_int+db_offset[site_ind],'b',lw=2)
axs2[ind].plot(bum_f0,bum_int+db_offset[site_ind],'k',lw=2)
axs2[ind].set_ylim([-120,-80])
axs2[ind].set_xlim([5700,5930])
axs2[ind].set_xticks([5730,5830,5930])
if ind>0:
axs2[ind].set_yticklabels({''})
axs2[0].legend(loc=2, handlelength=1)
axs2[3].legend(loc=3, handlelength=1)
######
axs3=[]
i_sh=0.002
for i in range(0,len(bb),2):
axs3.append(plt.axes(position=[bb[i][0]+0.02-i_sh*i,bb[i][1]-0.08-0.05,bb[i][2]*2-0.008,bb[i][3]/3]))
angle_inds=[8,6,4,2,0]
site_ind=0
series_ind=1
for angle_ind2 in range(5):
angle_ind=angle_inds[angle_ind2]
ind=angle_ind2
dir_ind=0
dm_f0= DM_F0[angle_ind2*2]
dm_freq= DM_FREQS[angle_ind2*2]
dm_int= DM_INT[angle_ind2*2]
bum_f0= BUM_F0[angle_ind2*2]
bum_freq= BUM_FREQS[angle_ind2*2]
bum_int= BUM_INT[angle_ind2*2]
if angle_ind in [1,2,3]:
bumd_f0= BUMD_F0[angle_ind2*2]
bumd_freq= BUMD_FREQS[angle_ind2*2]
bumd_int= BUMD_INT[angle_ind2*2]
axs3[ind].plot(bumd_f0,bumd_freq,'g',lw=2)
axs3[ind].plot(bum_f0,bum_freq,color=(0.75, 0.0, 0.75, .5),lw=1)
axs3[ind].plot(bum_f0,bum_freq,'m',lw=1)
dir_ind=1
dm_f0= DM_F0[angle_ind2*2+1]
dm_freq= DM_FREQS[angle_ind2*2+1]
dm_int= DM_INT[angle_ind2*2+1]
bum_f0= BUM_F0[angle_ind2*2+1]
bum_freq= BUM_FREQS[angle_ind2*2+1]
bum_int= BUM_INT[angle_ind2*2+1]
if angle_ind in [1,2,3]:
bumd_f0= BUMD_F0[angle_ind2*2+1]
bumd_freq= BUMD_FREQS[angle_ind2*2+1]
bumd_int= BUMD_INT[angle_ind2*2+1]
axs3[ind].plot(bumd_f0,bumd_freq,color=(0.0, 0.75, 0.75, 1.),lw=1)
bumd_f0_full=np.arange(5700,5850)
bumd_coefs=np.polyfit(bumd_f0[10::],bumd_freq[10::],1)
axs3[ind].plot(bumd_f0_full,bumd_f0_full*bumd_coefs[0]+bumd_coefs[1],color=(0.0, 0.75, 0.75, 1.),lw=2)
axs3[ind].plot(bum_f0,bum_freq,'k',lw=1)
bum_f0_full=np.arange(5700,5800)
bum_coefs=np.polyfit(bum_f0[10::],bum_freq[10::],1)
if ind==3:
bum_coefs=
|
np.polyfit(bum_f0[10:-10],bum_freq[10:-10],1)
|
numpy.polyfit
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""The logics of robot calligraphy
This module contains all the methods needed to convert a paths extracted from GIFs to trajectory information for UR5
The module would use data.json as the paths extracted from GIFs,
please use cv.cv to obtain data.json if you cannot find data.py
The module would store all the trajectories it ever calculated in previously_calculated_trajectory. And it would use
this information when writing the same string.
Please make sure previously_calculated_trajectory.json is existed in ..\data\
Please make sure data.json is existed in ..\data\
"""
import time
import copy
import os
import json
import math
import numpy
import easy_ur5
START_POSITION = [0.10018570816351019, -0.4535427417650308,
0.2590640572333883]
ORIENTATION = [0, math.pi, 0]
R = 0.0
STRAIGHT_HEIGHT = 0.01 * 1.3
STRAIGHT_DEVIATION = 0.01 * 0
STRAIGHT_WIDTH = 0.01 * 0
MIDDLE_HEIGHT = 0.01 * 0.83
MIDDLE_DEVIATION = 0.01 * 0.21
MIDDLE_WIDTH = 0.01 * 000.3
DEEPEST_HEIGHT = 0.01 * 0.37
DEEPEST_DEVIATION = 0.01 * 0.1
DEEPEST_WIDTH = 0.01 * 1.17
MY_PATH = os.path.abspath(os.path.dirname(__file__))
JSON_DIR = os.path.join(MY_PATH, r"..\data\previously_calculated_trajectory.json")
CHAR_LIB_DIR = os.path.join(MY_PATH, r"..\data\data.json")
def reduce_by_multiple(trajectory, integer):
"""
keep only the control points in the given trajectory whose index is a multiple of @integer
:param trajectory: array that describes the trajectory
:param integer: the multiple used for reducing
:return: the reduced trajectory
"""
reduced = trajectory[0:len(trajectory) - 1:integer]
if len(reduced) == 0:
return trajectory
if reduced[-1] != trajectory[len(trajectory) - 1]:
reduced.append(trajectory[len(trajectory) - 1])
return reduced
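# Hypothetical quick check of reduce_by_multiple (illustration only, not part of the
# original module): keeping every 3rd control point of a 7-point trajectory also
# re-appends the final point, so the reduced stroke still ends at the same place.
_example_trajectory = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]]
assert reduce_by_multiple(_example_trajectory, 3) == [[0, 0], [3, 3], [6, 6]]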
def naive_width2z(width):
# assert width <= 0.01
"""
a naive way to map width to z axis position
:param width: width of a corresponding control point
:return: the z axis position
"""
assert width >= 0
return 0.01 - width
def get_mover(
map_3d,
stroke_info,
start_position,
orientation,
scale_factor,
):
"""
get calculated trajectory of a stroke
:param map_3d: functions for mapping width to z-axis, a polymorphism design
:param stroke_info: array that describes a stroke
:param start_position: starting position for a character
:param orientation: orientation of tool
:param scale_factor: a constant scalar, used to adjust the size of the character you want to write
:return: the calculated trajectory of the given stroke
"""
three_d_trajectory = []
map_3d(stroke_info, three_d_trajectory, scale_factor,
start_position)
start_lift = copy.deepcopy(three_d_trajectory[0])
start_lift[2] = start_lift[2] + 0.02
# add tilt value
if len(three_d_trajectory) > 1:
vector_a = numpy.array(three_d_trajectory[0])
vector_b = numpy.array(three_d_trajectory[1])
vector_ba = vector_a - vector_b
dev_start = vector_ba / numpy.linalg.norm(vector_ba) * 0.009
start_lift[0] += dev_start[0]
start_lift[1] += dev_start[1]
three_d_trajectory.insert(0, start_lift)
mover = []
for i in three_d_trajectory:
real_pos = i
real_ori = orientation
mover.append(real_pos + real_ori)
time.sleep(1)
return mover
def broke_stroke(trajectory):
"""
break one stroke into one or multiple sub-strokes, to prevent errors caused by the UR5 being unable to maintain its speed
:param trajectory: the trajectory of a stroke
:return: an array of broken sub-strokes
"""
stroke_group = []
pointer1 = 0
if len(trajectory) < 11:
print(len(trajectory))
stroke_group.append(trajectory)
return stroke_group
for i in range(5, len(trajectory) - 5):
y0 = trajectory[i - 1][1]
y1 = trajectory[i][1]
y2 = trajectory[i + 1][1]
y_dif0 = y1 - y0
y_dif1 = y2 - y1
x0 = trajectory[i - 1][0]
x1 = trajectory[i][0]
x2 = trajectory[i + 1][0]
x_dif0 = x1 - x0
x_dif1 = x2 - x1
max_tolerance = 0.006
if (x_dif0 * x_dif1 < 0 and (abs(x_dif0) > max_tolerance or abs(x_dif1)
> max_tolerance)) or (y_dif0 * y_dif1 < 0 \
and (abs(y_dif0) > max_tolerance or abs(y_dif1) > max_tolerance)):
stroke_group.append([trajectory[pointer1:i + 1], True])
pointer1 = i + 1
if pointer1 == len(trajectory) - 1:
stroke_group[0].append([trajectory[len(trajectory) - 1], False])
else:
stroke_group.append([trajectory[pointer1:len(trajectory)],
False])
# mark first:
stroke_group[0].append(True)
for i in range(1, len(stroke_group)):
stroke_group[i].append(False)
return stroke_group
def double_linear3_mapping(
stroke_info,
three_d_trajectory,
scale_factor,
start_position,
):
"""
a mapping that accounts for the brush offset first increasing and then decreasing as the z-axis position decreases
:param stroke_info: array that describes the stroke information
:param three_d_trajectory: array that describes the 3D trajectory
:param scale_factor: a constant scalar, used to adjust the size of the character you want to write
:param start_position: start position of the stroke
:return: an array that describes the processed position of the tool
"""
prev_point2d = stroke_info[0][0]
for point in stroke_info:
point3d = copy.deepcopy(point[0])
direction =
|
numpy.array(point3d)
|
numpy.array
|
#!/usr/bin/env python
# coding: utf-8
# ### Latex Macros
# $\newcommand{\Re}[1]{{\mathbb{R}^{{#1}}}}
# \newcommand{\Rez}{{\mathbb{R}}}$
# In[41]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
#%tableofcontents
# In[42]:
import copy
import ipywidgets as widgets
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import sympy
import torch
from ipywidgets import fixed, interact, interact_manual, interactive
from scipy.stats import ortho_group # compute unitary matrices
import spectral_function_library as spec_lib
get_ipython().run_line_magic('matplotlib', 'inline')
# # Convolution Neural Networks
# Material is taken from [this Blog](https://www.instapaper.com/read/1477946505)
#
# Starting from an RGB image:
#
# <img src="images/rgb_image_2022-01-24_10-15-38.png" width="800">
#
# the idea is to pass this image through a series of steps in order to extract information. The filter is used for this task.
#
# <img src="images/convolution_2022-01-24_10-17-28.png" width="800">
#
# after image src.
#
# <img src="images/multihead_2022-01-24_10-19-47.png" width="800">
#
# <img src="images/step-by-step_2022-01-24_10-18-45.png" width="800">
#
#
# ## Two important points about the convolutional layer:
#
# 1. The filter is identical for each pixel. This reduces the number of parameters to calculate.
# The constant filter helps satisfy the inductive bias of *translation invariance*.
#
# 2. The convolution is local to the image pixel to which it is applied. Thus, the structure of the image is taken into account during the calculation (a small numpy sketch of both points follows this cell).
#
# A typical CNN architecture:
#
# <img src="images/cnn_2022-01-24_10-25-56.png" width="800">
# jupyter nbextension enable --py widgetsnbextension
# # Alternative view of CNN
#
# <img src="images/image_graph_2022-01-24_11-00-16.png" width="800">
#
# * An image can be considered to be a graph
# * The nodes $V$ are the centers of the pixels
# * If a filter has width 3, each node is connected to $8 * d$ adjacent nodes, where $d$ is the number of channels
# # Motivation
# Consider a set of nodes $x_i$, and associated attributes $y_i$. This can be graphed. Let us connect these nodes with edges $e_{i,i+1} = (x_i, x_{i+1})$.
# In[43]:
@interact(N=(5, 40))
def plot1d(N):
x = np.linspace(0, 10, N)
plt.plot(x, 0 * x, "-o")
plt.show()
# Add an attribute to each of these nodes. I will add random noise drawn from $N(0,\sigma)$ with $\sigma=1.5$, which is fairly large.
#
# Consider the problem of computing *embeddings* of each node with the requirement that nearby nodes with similar attributes should have similar embeddings.
#
# Without further constraints imposed on the problem (also called *inductive biases*), we will apply a local transformation to this function, specifically an averaging operation. We will replace $y_i$ by the average of its neighbors:
# $$ y_i \longrightarrow \frac12 (y_{i-1} + y_{i+1})$$
# The boundary points need special treatment. There are three main choices:
# 1. Do not move the point
# 2. Move the point in such a way as to satisfy some condition on the slope.
# 3. Develop an algorithm that figures out the proper treatment
#
# We will consider the first choice for simplicity (a minimal sketch of this averaging follows below). For future reference, we call the collection of points $V$ and the collection of edges $E$. We denote the boundary nodes by $\partial V$, and the boundary edges (edges attached to $\partial V$) by $\partial E$, which is a common notation in discrete and differential geometry.
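# A minimal sketch of the local averaging described above, using choice 1 for the
# boundary (the first and last nodes are left unchanged). The names here are
# illustrative only and are not reused by the interactive cells below.
def average_once(y):
    y_new = y.copy()
    y_new[1:-1] = 0.5 * (y[:-2] + y[2:])  # y_i <- (y_{i-1} + y_{i+1}) / 2
    return y_new

y_demo = np.sin(np.linspace(0, 2 * np.pi, 20)) + 0.3 * np.random.randn(20)
y_smoothed_demo = average_once(y_demo)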
# In[44]:
@interact(seed=(1, 100), eps=(0, 1.5), N=(5, 40))
def plot1d(seed, eps, N):
np.random.seed(seed)
x = np.linspace(0, 10, N)
noise = eps * np.random.randn(N)
y = np.sin((x / x[-1]) * 2 * np.pi * 2.5) + noise
plt.plot(x, y, "-o")
plt.show()
# More generally, each point might have multiple attributes. Thus, the node $x_i$ would have $d$ attributes $y_0, \cdots, y_{d-1}$. These attributes could be categorical or continuous, and the categorical attributes could be nominal (there is no ordering, such as 'red', 'blue', 'orange') or ordinal (bad, poor, average, good, very good, excellent).
# In[45]:
dSlider = widgets.IntSlider(min=1, max=5, value=3, description="Nb Attributes")
seedSlider = widgets.IntSlider(min=1, max=100, value=50, description="Seed")
epsSlider = widgets.FloatSlider(
min=0.0, max=1.5, value=0.30, description="Noise $\sigma$"
)
@interact(seed=seedSlider, eps=epsSlider, N=(5, 40), d=dSlider, nb_blur_iter=(0, 5))
def plot1d(seed, eps, N, d, nb_blur_iter):
np.random.seed(seed)
eps = eps * np.array([1.0, 2.0, 0.5, 3.0, 4.0])
x = np.linspace(0, 10, N)
noise = np.random.randn(d, N)
y = np.zeros([5, N])
fcts = {}
fcts[0] = np.sin((x / x[-1]) * 2 * np.pi * 2.5)
fcts[1] = 1.5 * np.cos((x / x[-1]) * 2 * np.pi * 2.5) ** 2
fcts[2] = x ** 2 / 10 * np.exp(3 - 0.5 * x)
fcts[3] = np.cos((x / x[-1]) * 2 * np.pi * 4.5)
fcts[4] = 1.5 * np.cos((x / x[-1]) * 2 * np.pi * 2.5)
for i in range(0, 5):
y[i] = fcts[i]
for i in range(0, d):
y[i] += eps[i] * noise[i]
yy = copy.copy(y)
for i in range(0, d):
for n in range(0, nb_blur_iter):
yy[i][0] = y[i][0]
yy[i][N - 1] = y[i][N - 1]
yy[i][1 : N - 1] = 0.5 * (y[i][0 : N - 2] + y[i][2 : N])  # average every interior node
y = copy.copy(yy)
for i in range(0, d):
plt.plot(x, yy[i], "-o")
plt.grid(True)
plt.ylim(-2, 5)
plt.show()
# So far, I am describing vector-valued discrete functions of $x$, which is a 1-D representation of a graph with $d$ attributes at each node $x_i$. More generally, nodes are points in *some* space, which can be 1-D, 2-D, higher-D, or more abstract, namely, a space of *points*.
#
# Now consider adding attributes $y_{Eij}$ to the edges. What kind of transformation functions should one consider?
#
# This averaging function is an example of a local filter defined in physical space. This filter takes attributes at nodes and transforms them into a new set of number defined at these same nodes. More generally, in Graph Neural networks, we will consider operators that take attributes defined at nodes, edges, and the graph, and transform them into a new set of vectors defined on these same nodes, vectors and graphs.
#
# Filters can be defined either in physical space or in spectral space. We will illustrate the concept by considering the derivative operator on continuous and discrete grids.
# ## First Derivative operator (also a filter) on 1D grid in physical space
# Consider points $x_i$, $i=0,\cdots, N-1$ connected by edges $e_{i,i+1} = (x_i, x_{i+1})$. The central difference operator of the function $f_i = f(x_i)$ is defined by
# $$
# f'_i = \frac{f_{i+1} - f_{i-1}}{x_{i+1} - x_{i-1}}
# $$ for $i=1,\cdots,N-2$, with one-sided operators defined at the boundaries (which is one of many possibilities):
# \begin{align}
# f'_0 &= \frac{f_1 - f_0}{x_1-x_0} \\
# f'_{N-1} &= \frac{f_{N-1} - f_{N-2}}{x_{N-1} - x_{N-2}}
# \end{align}
# where $f'_i$ is the approximation of $f'(x)$ evaluated at $x=x_i$. Note that the derivative can be expressed as a vector
# $f' = (f'_0,\cdots,f'_{N-1})$, and $f'_i$ is linear with respect to the values $f_j$. Therefore one can write the matrix
# expression:
# $$ f' = D f $$
# where $D \in \Re{N\times N}$ is an $N \times N$ matrix. The matrix $D$ is a derivative filter. More specifically, it is a
# *global* filter since it updates the values at all nodes at once. To the contrary, a *local* filter is defined as the matrix that updates the derivative at a single point. Thus:
# $$
# f'_i = \left(\begin{matrix}-\alpha & 0 & \alpha\end{matrix}\right)^T
# \left(\begin{matrix} f_{i-1} & f_i & f_{i+1} \end{matrix}\right)
# $$
# where a superscript $T$ denotes transpose, and $\alpha = (x_{i+1} - x_{i-1})^{-1}$. Clearly, the local
# filter is local to the point at which it applies. The new value only depends on the values of its immediate neighbors.
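# A small sketch of the *global* derivative filter D described above, on a uniform
# grid (illustrative names only; this cell is not used elsewhere in the notebook).
N_demo = 8
x_demo = np.linspace(0.0, 1.0, N_demo)
h_demo = x_demo[1] - x_demo[0]
D_demo = np.zeros((N_demo, N_demo))
for i_ in range(1, N_demo - 1):  # central differences in the interior
    D_demo[i_, i_ - 1], D_demo[i_, i_ + 1] = -1.0 / (2 * h_demo), 1.0 / (2 * h_demo)
D_demo[0, 0], D_demo[0, 1] = -1.0 / h_demo, 1.0 / h_demo  # one-sided at the boundaries
D_demo[-1, -2], D_demo[-1, -1] = -1.0 / h_demo, 1.0 / h_demo
f_demo = x_demo ** 2
fprime_demo = D_demo @ f_demo  # approximates f'(x) = 2x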
# ***
# # Spectral Analysis of graphs
# ## Continuous Fourier Transform (CFT)
# When working in the continuous domain $\Rez$, a function $f(x)\in\Rez$ has a Fourier Transform $\hat{f}(k)$ related by
# $$ \hat{f}(k) = \frac{1}{2\pi} \int_{-\infty}^\infty e^{\iota k x} f(x) \, dx $$
# Conversely, one can apply a similar operation to recover $f(x)$ from its Fourier Transform:
#
# $$ f(x) = \frac{1}{2\pi} \int_{-\infty}^\infty e^{-\iota k x} \hat{f}(k) \, dk $$
#
# Notice the sign in the exponent: positive when transforming from physical to Fourier space, and negative when returning to physical space. The sign is a convention. Different authors might use the opposite sign. So always pay attention to the conventions in any paper you read.
#
# (you should all have learned about the Fourier transform previously).
#
# Let us compute the first derivative of $f(x)$:
# $$\frac{d}{dx} f(x) = f'(x)$$
# The conventional approach would be to calculate the derivative manually, or discretize the expression in physical space. However, the alternative is to compute the derivative by first transforming the expression to Fourier (also called spectral) space:
# \begin{align}
# \frac{d}{dx} f(x) &= \frac{d}{dx} \frac{1}{2\pi} \int_{-\infty}^\infty e^{-\iota k x} \hat{f}(k) d k \\
# &= \frac{1}{2\pi} \int_{-\infty}^\infty (-\iota k) e^{-\iota k x} \hat{f}(k) dk \\
# &= \cal{F}^{-1} [-\iota k \hat{f}(k)]
# \end{align}
# where
# \begin{align}
# \cal{F}f(x) &= \hat{f}(k) \\
# \cal{F}^{-1} \hat{f}(k) &= f(x) \\
# \end{align}
# So to given a function $f(x)$, one can compute the derivative with the following three steps:
# 1. $f(x) \longrightarrow \hat{f}(k)$
# 2. $\hat{f}(k) \longrightarrow (-\iota k) \hat{f}(k)$
# 3. $(-\iota k)\hat{f}(k) \longrightarrow \cal{F}^{-1} \left[(-\iota k)\hat{f}(k)\right] = \frac{d}{dx} f(x)$
# Thus, the derivative operation is applied in Fourier space. A complex operation in physical space becomes a simple multiplication in Fourier space, *at the cost* of two Fourier Transforms.
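# A short numerical illustration of the three-step procedure above, using numpy's
# discrete FFT on a periodic grid. Note that numpy's sign convention for the forward
# transform is the opposite of the one used in the text, so the derivative factor
# here is +i*k. Illustrative names only; later cells do not depend on this.
N_fft = 256
x_fft = np.linspace(0.0, 2 * np.pi, N_fft, endpoint=False)
f_fft = np.sin(3 * x_fft)
k_fft = 2 * np.pi * np.fft.fftfreq(N_fft, d=x_fft[1] - x_fft[0])  # angular wavenumbers
f_hat_fft = np.fft.fft(f_fft)                  # step 1: transform to Fourier space
df_hat_fft = 1j * k_fft * f_hat_fft            # step 2: multiply by i*k
df_fft = np.real(np.fft.ifft(df_hat_fft))      # step 3: transform back; ~ 3*cos(3x)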
# ### Fourier Spectrum
# $\hat{f}(k)$ is called the Fourier Spectrum and is generally a complex variable.
# $P(k) = |\hat{f}(k)|^2$ is the power spectrum, and satisfies the property:
# $$
# \int_{-\infty}^\infty P(k) dk = \int_{-\infty}^\infty |\hat{f}(k)|^2 dx = \int_{-\infty}^\infty |f(x)|^2 dx
# $$
# a rule that generalizes to and holds in $\Re{n}$.
# ### Filter
# The coefficient $(-\iota k)$ above is an example of a complex operator in Fourier space. This operator tranforms a function $\hat{f}(k)$ into a "filtered" function $\hat{g}(k)$:
# $$
# \hat{g}(k) = (-\iota k) \hat{f}(k)
# $$
# and in this particular case, results in the Fourier transform of the $x$-derivative of $f(x)$. More generally, one can define an operator $\hat{H}(k)$ acting on $\hat{f}(k)$, which "shapes" the power spectrum, leading to filters with different characteristics: low-pass, band-pass, high-pass, custom.
#
# Given a function $f(x)$, the resulting filtered function $f_H(x)$ can be defined similarly to the derivative:
#
# \begin{align}
# f(x) & \longrightarrow \cal{F}(f(x)) = \hat{f}(k) \\
# \hat{f}(k) & \longrightarrow \hat{H}(k) \hat{f}(k) \\
# \hat{H}(k)\hat{f}(k) & \longrightarrow \cal{F}^{-1} (\hat{H}(k)\hat{f}(k)) = f_H(x)
# \end{align}
#
# We will often omit the argument $x$ or $k$, letting the "hat" notation indicate whether or not we are in Fourier space. Thus, we can write
# $$
# f_H = \cal{F}^{-1} [\hat{H} \; \cal{F}(f) ]
# $$ or the equivalent form (requiring the definition of product of operators):
#
# \begin{align}
# f_H &= (\cal{F}^{-1} \, \hat{H} \, \cal{F}) \; f \\
# &= H f
# \end{align}
# which defines the filter $H(x)$ in physical space, acting on $f(x)$ to produce $f_H(x)$:
# $$
# f_H(x) = H(x) * f(x)
# $$
# where $*$ denotes the convolution operator:
# $$
# H(x) * f(x) = \int_{-\infty}^\infty H(x-s) f(s) \, ds
# $$
# ## Formal proof of convolution theorem in continuous space
# We start with the relation:
# $$ H = \cal{F}^{-1} \hat{H} \cal{F} $$
# and express both sides of the equation in integral form:
# \begin{align}
# \int e^{-\iota k x} \left( \hat{H}(k)\hat{f}(k)\right) \, dk &=
# \int e^{-\iota k x}\, dk \left( \int e^{\iota k x''} H(x'')\,dx'' \int e^{\iota k x'} f(x') \, dx' \right) \\
# &= \int dk \int e^{\iota k (x'' + x' - x)} H(x'') f(x) \, dx' \, dx''
# \end{align}
# Now make use of the following integral definition of the Dirac function:
# $$
# \int e^{\iota k x} \, dk = 2\pi \delta(x)
# $$
# which leads to
# \begin{align}
# \int e^{-\iota k x} \left( \hat{H}(k)\hat{f}(k)\right) \, dk &=
# \int dk \int e^{\iota k (x'' + x' - x)} H(x'') f(x') \, dx' \, dx'' \\
# &= 2\pi \int \delta(x'' + x' - x) H(x'') f(x') \, dx' \, dx'' \\
# &= 2\pi \int H(x-x') f(x') \, dx' \\
# &= C \; H(x) * f(x) = L(x)
# \end{align}
# where $C$ is a constant of proportionality.
# I was not careful with constants in front of the integrals when taking Fourier transforms and their
# inverses.
#
# We thus find that
# $$
# \cal{F}^{-1} \left(\hat{H}(k)\hat{f}(k)\right) = H * f
# $$
# Careful calculations show that the constant $C=1$.
#
# Integrating $A(x)$ over $x$ leads to:
# $$
# \int \hat{H}(k) \hat{f}(k) \, dk = \int H(x) f(x) \, dx
# $$
# often referred to as [Plancherel's identity](https://en.wikipedia.org/wiki/Parseval%27s_identity).
#
# All integrals are taken over the domain $[-\infty, \infty]$.
# ---
# # Ideal Low-, Mid-, High-pass filters
# ## Low-pass filter
#
# \begin{align}
# H(k) &= 1, \hspace{1in} k < k_0 \\
# &= 0, \hspace{1in} k \ge k_0
# \end{align}
# ## Band-pass filter
#
# \begin{align}
# H(k) &= 1, \hspace{1in} k_0 < k < k_1, \; k_0 < k_1 \\
# &= 0 \hspace{1in} \rm{otherwise}
# \end{align}
# ## High-pass filter
#
# \begin{align}
# H(k) &= 1, \hspace{1in} k > k_0 \\
# &= 0, \hspace{1in} k \le k_0
# \end{align}
#
# #### Notes:
# * np.fft uses the discrete Fourier Transform since the grid is discrete (we skip over these details)
# * The $x$-domain is $[0,0.5]$.
# * $\sin(2\pi f_1 x)= 0$ at $x=0$ and $x=0.5$. The $x$-derivative is $2\pi f_1\cos(f_1 2\pi x)$, equal
# to $2\pi f_1$ at $x=0$ and $2\pi f_1 \cos(\pi f_1)$ at $x=0.5$, equal to $2\pi f_1$ if $f_1$ is even.
# Therefore the function is periodic over the domain, since the $f_1$ slider ranges from -40 to 40 by increments of 10.
# On the other hand, $\cos(2\pi f_3 x + 0.7)$ is not periodic over the $x$ domain (the phase is 0.7, which is not a multiple of $2\pi$). The frequencies are obtained by
# decomposing this function into a series of $\sin$ and $\cos$ at different frequencies with zero phase.
# In[46]:
grid = widgets.GridspecLayout(3, 3)
# In[47]:
freq1Slider = widgets.IntSlider(min=0, max=60, value=30)
freq2Slider = widgets.IntSlider(min=30, max=120, value=70)
freq3Slider = widgets.IntSlider(min=90, max=200, value=110)
ampl1Slider = widgets.FloatSlider(min=-15, max=15, value=5)
ampl2Slider = widgets.FloatSlider(min=-15, max=15, value=10)
ampl3Slider = widgets.FloatSlider(min=-15, max=15, value=10)
k0Slider = widgets.IntSlider(min=0, max=50, value=15)
k1Slider = widgets.IntSlider(min=5, max=150, value=100, description="k1")
# In[48]:
@interact_manual(
freq1=freq1Slider, # (-20, 60, 10),
freq2=freq2Slider, # (-90, 90, 10),
freq3=freq3Slider, # (-300, 300, 15),
ampl1=ampl1Slider, # 1,
ampl2=ampl2Slider, # 0.5,
ampl3=ampl3Slider, # 1,
k0=k0Slider, # (0, 50, 5),
k1=k1Slider, # (5, 150, 10),
)
def plotSin2(freq1, freq2, freq3, ampl1, ampl2, ampl3, k0, k1):
fig = plt.figure(figsize=(16, 7))
x = np.linspace(0, 0.5, 500)
k = np.linspace(0, 499, 500)
# NOTE: These functions are NOT periodic over the domain.
# Therefore, the spectrum is not exactly a collection of delta functions
# I could be more precise, but that is not the point of this demonstration.
s = (
ampl1 * np.sin(freq1 * 2 * np.pi * x)
+ ampl2 * np.sin(freq2 * 2 * np.pi * x)
+ ampl3 * np.cos(freq3 * 2 * np.pi * x + 0.7)
)
nrows, ncols = 3, 2
# ax1.clear() # to avoid flicker, does not work
ax = fig.add_subplot(nrows, ncols, 1)
# fig, axes = plt.subplots(nrows, ncols, figsize=(16, 5))
ax.set_ylabel("Amplitude")
ax.set_xlabel("Time [s]")
ax.plot(x, s)
fft = np.fft.fft(s)
ifft = np.fft.ifft(s)
# print("s: ", s[0:10])
# print("ifft: ", ifft[0:11])
# print("fft[0-10]: ", fft[0:11])
# print("fft[:-10,:]: ", fft[-10:])
power_spec = np.abs(fft) ** 2
# power_spec[0] = 0 # REMOVE MEAN COMPONENT (simply equal to the mean of the function)
ax2 = fig.add_subplot(nrows, ncols, 2)
ax = ax2
ax.plot(power_spec[0:250])
ax.set_ylabel("Power Spectrum")
ax.set_xlabel("k")
heaviside = np.where((k > k0) & (k < k1), 1, 0)
# Symmetrize this function with respect to $k=500/2$
for i in range(1, 250): # 250 = 500/2
heaviside[500 - i] = heaviside[i] # in Fourier space
# print(heaviside)
filtered_power_spectrum = power_spec * heaviside
# print(list(zip(power_spec, heaviside, filtered_power_spectrum)))
# print("power spec: ", power_spec[0:50])
# print("filtered_spec: ", filtered_power_spectrum[0:50])
filtered_function = np.fft.ifft(filtered_power_spectrum)
ax = fig.add_subplot(nrows, ncols, 3)
ax.plot(filtered_function)
ax.set_ylabel("Filtered $f_H(x) = H(x) f(x)$")
ax.set_xlabel("x")
ax = fig.add_subplot(nrows, ncols, 4)
ax.plot(filtered_power_spectrum[0:250])
ax.set_xlabel("k")
ax.set_ylabel("Filtered Power Spectrum")
filter_phys = np.fft.ifft(heaviside)
ax = fig.add_subplot(nrows, ncols, 5)
ax.plot(filter_phys)
ax.set_ylabel("Filter $H(x)$")
ax.set_xlabel("k")
ax = fig.add_subplot(nrows, ncols, 6)
ax.plot(heaviside[0:250])
ax.set_ylabel("Filter $\hat{H}(k)$")
ax.set_xlabel("k")
plt.tight_layout()
plt.show()
sumf2 = np.sum(s ** 2)
sump2 = np.sum(power_spec[0:250])
sump3 = np.sum(power_spec)
# print(sum2, sump2, sump2 / sumf2, sump3 / sumf2)
# print(np.sum(power_spec[0:250]), np.sum(power_spec[0:500]), power_spec.shape)
# The ratio sump2 / sumf2 = 250 (when there is no mean component)
# The k=0 component has no complex conjugate. All other components have a complex conjugate.
# These details are beyond the scope of this lecture.
# = Number of points N / 2
# sum f[i]^2 dx = sum f[i]^2 (0.5/N) = sum power_spectrum * normalizing constant
# (one must be careful with this constant)
# Alternative to @interact
# interact(plotSin2, freq1=(-40,40,10), freq2=(-90,90,10), freq3=(-300,300,15), ampl1=1, ampl2=.5, ampl3=1)
# The strong oscillations in the Filter $H(x)$ are due to the discontinuity of the filter in Fourier space.
# A property of these 1-D filters is that localization in Fourier space (the filter is nonzero for very few $k$) leads
# to non-local filters $H(x)$ in physical space, and vice-versa.
#
# The challenge is to construct filters local in both physical and Fourier space, which is the strength of wavelets (beyond the scope of these lectures). Note that the Fourier transform of a Gaussian is a Gaussian, and it is local in both spaces. (Demonstrate it for yourself as a homework exercise).
#
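# A quick numerical check of the homework remark above: the magnitude of the FFT of a
# Gaussian is again a Gaussian (bell-shaped, width ~ 1/sigma), so it stays localized in
# both physical and Fourier space. Illustrative names only.
N_g = 512
x_g = np.linspace(-10, 10, N_g)
sigma_g = 0.5
g_demo = np.exp(-x_g ** 2 / (2 * sigma_g ** 2))
g_hat_demo = np.abs(np.fft.fftshift(np.fft.fft(g_demo)))
k_g = np.fft.fftshift(np.fft.fftfreq(N_g, d=x_g[1] - x_g[0]))  # plot g_hat_demo vs k_g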
# ### Discrete 1D domain
# * A set of nodes $x_i$, $i=0,1,\cdots,N-1$, such that $x_i$ is connected to $x_{i+1}$. This graph is acyclic (there are no cycles).
# * If the first and last node are connected, we add the edge $(x_{N-1}, x_{0})$ and create a cyclic graph.
# * The adjacency matrix of the cyclic graph is as follows:
# $$
# A = \left(\begin{matrix}
# 0 & 0 & 0 & \cdots & 0 & 1 \\
# 1 & 0 & 0 & \cdots & 0 & 0 \\
# 0 & 1 & 0 & \cdots & 0 & 0 \\
# 0 & 0 & 1 & \cdots & 0 & 0 \\
# \cdots
# \end{matrix}\right)
# $$
# * A signal $s$ on a graph is defined as the sequence of $N$ elements
# $$ s = (s_0, s_1, \cdots, s_{N-1}) $$
# where each $s_i\in\Rez$.
# ### 1-D Periodic Domain
# #### Fourier Filter
# ### 1-D Non-periodic Domain
# ## Fourier Transform, Discrete (DFT)
# ### 1-D Periodic Domain
# ### 1-D Non-periodic Domain
# ## Graph Signal Processing, Discrete
# ### 1-D cyclic graph
# ### 2=D Discrete periodic
# ### Adjoint $A$
# ### Degree Matrix $D$
# ### Laplacian $L$
# ###
# In[49]:
# layout = ['circular','planar','random']
seed_slider = widgets.IntSlider(min=100, max=120, step=2, value=110)
N_slider = widgets.IntSlider(min=5, max=40, step=1, value=10)
# matrix = ['Adjacency Matrix', 'Laplacian', 'D^-1 A', 'D^-1 L', 'D^-1/2 L D^-1/2']
@interact(N=N_slider, seed=seed_slider)
def generate_graph_from_adjacency_matrix(N, seed):
"""
Arguments
N: number of nodes
seed: random seed used to generate the adjacency matrix
"""
np.random.seed(seed)
ints = np.random.randint(0, 2, N * N).reshape(N, N)
for i in range(N):
ints[i,i] = 0
# Symmetric array
ints = ints + ints.transpose()
ints = np.clip(ints, 0, 1) # the elements should be zero or 1
# Different matrices
A = ints
D = np.sum(A, axis=0)
D = np.diag(D)
L = D - A
invD = np.linalg.inv(D)
invDA = A @ invD  # matrix products; elementwise * would zero the off-diagonal terms
invDL = invD @ L
invDLinvD = np.sqrt(invD) @ L @ np.sqrt(invD)
matrix = ["A", "D", "L", "invD", "invDA", "invDL", "invDinvD"]
matrices = [A, D, L, invD, invDA, invDL, invDLinvD]
# Eigenvalues
fig, axes = plt.subplots(3, 3, figsize=(10, 8))
axes = axes.reshape(-1)
fig.suptitle("Sorted Eigenvalues of various matrices")
for i, m in enumerate(matrices):
ax = axes[i]
eigs = np.linalg.eigvals(m)
eigs = np.sort(eigs)[::-1]
ax.set_title(matrix[i])
ax.grid(True)
ax.plot(eigs, "-o")
for i in range(i + 1, axes.shape[-1]):
axes[i].axis("off")
plt.tight_layout()
plt.show()
# ### Notes
# * The eigenvalues (spectrum) of A and L are approximately related (the plots look very similar) but not equal.
# * The shape of the spectra depends very little on the seed (A is filled with random numbers in (0,1) and is symmetrized to make sure that the eigenvalues $\lambda_i \in \Rez$).
# ***
# ## Same plot as above but allowing for different types of graph types.
# * Generate the graph, compute the adjacent matrix, and call the previous function
#
#
# In[50]:
def generate_graph_from_adjacency_matrix_1(G, N, seed):
"""
Arguments
G: networkx graph whose adjacency matrix is analyzed
N: number of nodes
seed: random seed
"""
np.random.seed(seed)
# Convert to np.ndArray
A = nx.linalg.graphmatrix.adjacency_matrix(G).toarray()
# print("Adj: ", A, "\n", A.shape, "\n", type(A))
# Different matrices
D = np.sum(A, axis=0)
D = np.diag(D)
L = D - A
invD = np.linalg.inv(D)
invDA = A @ invD  # matrix products; elementwise * would zero the off-diagonal terms
invDL = invD @ L
invDLinvD = np.sqrt(invD) @ L @ np.sqrt(invD)
Ln = nx.normalized_laplacian_matrix(G)
Ln = Ln.toarray() # from sparse array to ndarray
matrix = ["A", "D", "L", "invD", "invDA", "invDL", "invDinvD", "Ln"]
matrices = [A, D, L, invD, invDA, invDL, invDLinvD, Ln]
# Eigenvalues
fig, axes = plt.subplots(3, 3, figsize=(10, 8))
axes = axes.reshape(-1)
fig.suptitle("Eigenvalues of various matrices")
for i, m in enumerate(matrices):
ax = axes[i]
eigs = np.linalg.eigvals(m)
eigs = np.sort(eigs)[::-1]
ax.set_title(matrix[i])
ax.grid(True)
ax.plot(eigs, "-o")
for i in range(i + 2, axes.shape[-1]):
axes[i].axis("off")
plt.tight_layout()
plt.show()
# In[51]:
prob_slider = widgets.FloatSlider(min=0, max=1, step=0.1, value=0.5)
node_slider = widgets.IntSlider(min=3, max=30, step=1, value=10)
nb_neigh_slider = widgets.IntSlider(min=1, max=10, step=1, value=4)
nb_edges_per_node_slider = widgets.IntSlider(min=1, max=20, step=2, value=5)
seed_slider = widgets.IntSlider(min=1, max=50, step=1, value=25)
graph_type = ["connected_watts_strogatz", "powerlaw_cluster_graph"]
@interact(
nb_nodes=node_slider,
prob=prob_slider,
nb_neigh=nb_neigh_slider,
nb_edges_per_node=nb_edges_per_node_slider,
seed=seed_slider,
graph_type=graph_type,
# directed=True,
)
def drawGraph(nb_nodes, nb_neigh, prob, seed, nb_edges_per_node, graph_type):
if graph_type == "connected_watts_strogatz":
nb_edges_per_node_slider.style.handle_color = 'red'
nb_neigh_slider.style.handle_color = 'black'
nb_tries = 20
edge_prob = prob
G = nx.connected_watts_strogatz_graph(
nb_nodes, nb_neigh, edge_prob, nb_tries, seed
)
elif graph_type == "powerlaw_cluster_graph":
nb_neigh_slider.style.handle_color = 'red'
nb_edges_per_node_slider.style.handle_color = 'black'
add_tri_prob = prob
if nb_edges_per_node >= nb_nodes:
nb_edges_per_node = nb_nodes - 1
G = nx.powerlaw_cluster_graph(nb_nodes, nb_edges_per_node, add_tri_prob, seed)
generate_graph_from_adjacency_matrix_1(G, nb_nodes, seed)
# # prob_slider = widgets.FloatSlider(
# min=0, max=1, step=0.1, value=0.5, description="Probability"
# )
# node_slider = widgets.IntSlider(min=3, max=20, step=1, value=7)
# nb_neigh_slider = widgets.IntSlider(min=1, max=10, step=1, value=4)
# nb_edges_per_node_slider = widgets.IntSlider(min=1, max=20, step=2, value=5)
# seed_slider = widgets.IntSlider(int=1, max=50, step=1, value=25)
# graph_type = ["connected_watts_strogatz", "powerlaw_cluster_graph", "circular_graph"]
#
# # Also draw the eigenfunctions for the cyclic case where the nodes are arranged in a circular layout,
# # with labels in the nodes
#
#
# @interact_manual(
# nb_nodes=node_slider,
# prob=prob_slider,
# nb_neigh=nb_neigh_slider,
# nb_edges_per_node=nb_edges_per_node_slider,
# seed=seed_slider,
# graph_type=graph_type,
# )
# def drawGraphEigenvalues(nb_nodes, nb_neigh, prob, seed, nb_edges_per_node, graph_type):
# if graph_type == "connected_watts_strogatz":
# nb_edges_per_node_slider.style.handle_color = "red"
# nb_neigh_slider.style.handle_color = "black"
# nb_tries = 20
# edge_prob = prob
# G = nx.connected_watts_strogatz_graph(
# nb_nodes, nb_neigh, edge_prob, nb_tries, seed
# )
# elif graph_type == "powerlaw_cluster_graph":
# nb_neigh_slider.style.handle_color = "red"
# nb_edges_per_node_slider.style.handle_color = "black"
# add_tri_prob = prob
# if nb_edges_per_node >= nb_nodes:
# nb_edges_per_node = nb_nodes - 1
# G = nx.powerlaw_cluster_graph(nb_nodes, nb_edges_per_node, add_tri_prob, seed)
# elif graph_type == "circular_graph":
# nb_neigh_slider.style.handle_color = "red"
# nb_edges_per_node_slider.style.handle_color = "red"
# nb_neigh_slider.style.handle_color = "red"
# prob_slider.style.handle_color = "red"
# seed_slider.style.handle_color = "red"
#
# G = nx.Graph()
# for n in range(nb_nodes):
# G.add_node(n)
# for n in range(nb_nodes):
# G.add_edge(n, n + 1)
# G.add_edge(nb_nodes - 1, 0)
#
# spec_lib.generate_eigenvectors_from_adjacency_matrix_1(G, nb_nodes, seed)
# In[52]:
# Test Eigenfunction, sorting, etc. by creating a matrix whose eigenvalues I know
N_slider = widgets.IntSlider(min=3, max=10, step=1, value=5)
seed_slider = widgets.IntSlider(min=100, max=200, step=1)
@interact(N=N_slider, seed=seed_slider)
def test_eigen(N, seed):
# generate eigenvalues
np.random.seed(seed)
# large variance for wider spread of spectrum
eigens = (20.0 + 100.0 * np.random.randn(N)) / 20
eigens = np.where(eigens < 0, -eigens, eigens)
print("eigens= ", eigens)
print("eigens[0]= ", eigens[0])
print("eigens[1]= \n", eigens[1])
# print("eigens= \n", eigens)
eigens = np.diag(eigens)
ee = np.linalg.eig(eigens)
print("ee= \n", ee)
print("ee[0]= ", ee[0], type(ee[0]))
print("ee[1]= \n", ee[1])
args = np.argsort(ee[0])
print("args:", args, type(args))
ee0 = ee[0][args]
ee1 = ee[1][:, args]
print("sorted ee")
print("ee[0]= ", ee0)
print("ee[1]= \n", ee1)
# create eigenvectors
x = ortho_group.rvs(N)
# Similarity transform (eigenvalues of A are invariant)
A = x.T @ eigens @ x
# A = x @ np.linalg.inv(x)
# print("A= \n", A)
# print("x.T= \n", x.T)
# print("inv(x)= \n", np.linalg.inv(x))
eigens = np.linalg.eig(A)
args = np.argsort(eigens[0])
print("===============================")
print("args: \n", args)
eigs = eigens[0][args]
print("unsorted eigs: \n", eigens[0])
print("sorted eigs: \n", eigs)
eigv = eigens[1][:, args]
print("unsorted x:\n ", x.T)
print("unsorted eigv: \n", eigens[1])
print("sorted x: \n", x.T[:, args])
print("sorted eigv= \n", eigv)
pass
# # Exploration of eigenvalue and eigenfunctions for the 1-D cyclic and non-cyclic cases
# As we have seen, a signal $s^1=(s_0, s_1, \cdots, s_{N-1})\in\Re{N}$, is transformed into a signal $s^2\in\Re{N}$ by a filter $H$ according to
# $$ s^2 = H s^1$$ where $H$ is a matrix in $\Re{N\times N}$. Applying this filter recursively, one finds that
# \begin{align}
# s^3 &= H s^2 \\
# s^4 &= H s^3 \\
# s^l &= H s^{l-1}
# \end{align}
# If this is done a large number of times, and if one assumes convergence of $s^l$ to a vector of finite norm, one finds in the limit:
# $$
# s^\infty = H s^\infty
# $$
# which states that $s^\infty$ is an eigenvector of the filter $H$ with a unit eigenvalue $\lambda=1$.
# ## Cyclic case, directed graph
# The adjoint matrix is
# $$
# A = \left(\begin{matrix}
# 0 & 0 & 0 & \cdots & 0 & 1 \\
# 1 & 0 & 0 & \cdots & 0 & 0 \\
# 0 & 1 & 0 & \cdots & 0 & 0 \\
# 0 & 0 & 1 & \cdots & 0 & 0 \\
# \cdots
# \end{matrix}\right)
# $$
# Recall: $A_{i,j} = 1$ means an edge goes from node $j$ to node $i$. In this case, there is an edge from node $i-1$ to node $i$
# for all nodes. There is also an edge from node $N-1$ to node $0$. This matrix is periodic.
#
# Given a signal
# $$
# s = (s_0, s_1, \cdots, s_{N-1})
# $$
# the action of $A$ on $s$ simply shifts the value $s_i$ on node $i$ to node $i+1$ (with wrap-around from node $N-1$ to node $0$):
# $$
# s^1 = A s = (s_{N-1}, s_0, s_1, \cdots, s_{N-2})
# $$
#
# In the next animation, we define a graph over a set of nodes, and a signal on this graph, and we apply the operator
# $A$ multiple times.
# In[53]:
j = -1
@interact_manual(seed=(1, 100), eps=(0, 1.5), N=(5, 40))
def plot1d(seed, eps, N=15):
global j
np.random.seed(seed)
# Define a NxN matrix
A = np.zeros([N, N])
for i in range(1, N):
A[i, i - 1] = 1
A[0, N - 1] = 1
x = np.linspace(0, 10, N)
# Signal s
noise = eps * np.random.randn(N)
s = np.sin((x / x[-1]) * 2 * np.pi * 2.5) + noise
j += 1
Aj = np.linalg.matrix_power(A, j)
new_s = Aj @ s
print(Aj)
plt.plot(x, s, "-o", color="red")
plt.plot(x, new_s, "-o")
plt.title("Press button to apply $A$")
plt.show()
# A is called the shift operator in 1-D signal processing. Application of $A$ to a time signal translates the signal by $\Delta t$. The same is true with our graph. Of course, we are working with a special kind of graph. Let us now repeat this process with an undirected cyclic graph. Since node $i$ has a bidirectional connection to each of its two neighbors, each row of $A$ has two columns with a unit value. Thus, the adjacency matrix (now symmetric) becomes:
# $$
# A = \left(\begin{matrix}
# 0 & 1 & 0 & \cdots & 0 & 1 \\
# 1 & 0 & 1 & \cdots & 0 & 0 \\
# 0 & 1 & 0 & \cdots & 0 & 0 \\
# 0 & 0 & 1 & \cdots & 0 & 0 \\
# \cdots \\
# 0 & 0 & 0 & \cdots & 0 & 1 \\
# 1 & 0 & 0 & \cdots & 1 & 0 \\
# \end{matrix}\right)
# $$
#
# In[54]:
j = -1
@interact_manual(seed=(1, 100), eps=(0, 1.5), N=(5, 40))
def plot1d(seed, eps, N=15):
global j
np.random.seed(seed)
# Define a NxN matrix
A = np.zeros([N, N])
for i in range(1, N):
A[i, i - 1] = 1
A[0, N - 1] = 1
A = A + A.T
x = np.linspace(0, 10, N)
# Signal s
noise = eps * np.random.randn(N)
s = np.sin((x / x[-1]) * 2 * np.pi * 2.5) + noise
j += 1
Aj = np.linalg.matrix_power(A, j)
new_s = Aj @ s
print(Aj)
plt.plot(x, s, "-", color="red")
plt.plot(x, new_s, "-o")
plt.title("Press button to apply $A$")
plt.show()
# The result: instability. The signal $A^n s$ goes to infinity as the number of iterations grows without bound (i.e., $n\rightarrow\infty$). Later, when working with neural networks, we want to avoid weights that converge towards infinity or zero.
#
# This justifies the use of normalized adjacency matrices. The most common normalization is to premultiply $A$ by $D^{-1}$, where $D$ is the degree matrix. For our graph, all nodes have degree 2. Let us try again. We define a left normalization:
# $$
# A^* = D^{-1} A
# $$
# Another popular normalization technique is the symmetric version of the preceding one:
# $$
# A^* = D^{-1/2} A D^{-1/2}
# $$
# In[55]:
j = -1
@interact_manual(
seed=(1, 100),
eps=(0, 1.5),
N=(5, 40),
jincr=(1, 10),
normalization=["left", "symmetric"],
)
def plot1d(seed, eps=0.1, N=15, normalization="left", jincr=1):
global j
np.random.seed(seed)
# Define a NxN matrix
A = np.zeros([N, N])
for i in range(1, N):
A[i, i - 1] = 1
A[0, N - 1] = 1
A = A + A.T
D = np.sum(A, axis=1) # works for all A
Dinv = np.diag(1.0 / D)
if normalization == "left":
Dinv = np.diag(1.0 / D)
A = Dinv @ A
print("DinvSq @ A @ DinvSq= ", A)
else:
DinvSq = np.sqrt(Dinv)
A = DinvSq @ A @ DinvSq
x = np.linspace(0, 10, N)
# Signal s
noise = eps * np.random.randn(N)
s = np.sin((x / x[-1]) * 2 * np.pi * 2.5) + noise
print("mean(s) = ", np.mean(s))
j += jincr
Aj = np.linalg.matrix_power(A, j)
new_s = Aj @ s
print("mean(new_s) = ", np.mean(new_s))
print("new_s= ", new_s)
plt.plot(x, s, "-", color="red")
plt.plot(x, new_s, "-o")
plt.title("Press button to apply $A$")
plt.show()
# One observes that after many repetitions of the normalized (left or symmetric) $A$, the signal converges to a constant equal to the mean of the original signal:
# $$
# \lim_{n\rightarrow\infty} s_{new} = \text{mean}(s) = \frac1N\sum_{i=0}^{N-1} s_i
# $$
#
# From a theoretical point of view, if $s_{new}$ converges to a constant, it means that in the limit of $n\rightarrow\infty$,
# $$
# (A^*)^n s_{new} = (A^*)^{n-1} s_{new}
# $$
# which implies that
# $$ A^* s_{new} = s_{new} $$
# In other words, $\lambda=1$ is an eigenvalue of the normalized adjacency matrix (corresponding to a bidirectional cyclic graph), either
# $A^* = D^{-1} A$ or $A^* = D^{-1/2} A D^{-1/2}$.
#
# One can easily show that if a single eigenvalue is greater than 1, $s_{new} \rightarrow \infty$. Since that does not happen, the maximum eigenvalue must be unity.
#
# We check this out by computing the eigenvalues of the normalized matrix (which must be real since the matrix is symmetric). One also notices that, since every node of this graph has the same degree, both normalizations produce the same matrix and hence the same results.
#
# Exercise: Can you prove this?
# In[56]:
@interact_manual(N=(5, 40), normalization=["left", "symmetric"])
def plot1d(N=15, normalization="left"):
# Define a NxN matrix
A = np.zeros([N, N])
# cyclic linear chain with two connections per node
for i in range(1, N):
A[i, i - 1] = 1
A[0, N - 1] = 1
A = A + A.T
D = np.sum(A, axis=1) # works for all A
Dinv = np.diag(1.0 / D)
if normalization == "left":
Dinv = np.diag(1.0 / D)
A = Dinv @ A
else:
DinvSq = np.sqrt(Dinv)
A = DinvSq @ A @ DinvSq
print("A^*= ", A)
evalue, evector =
|
np.linalg.eig(A)
|
numpy.linalg.eig
|
"""Segments detected regions in a chunked dask array.
Uses overlapping chunks during segmentation, and determines how to link segments
between neighboring chunks by examining the overlapping border regions.
Heavily based on dask_image.ndmeasure.label, which uses non-overlapping blocks
with a structuring element that links segments at the chunk boundaries.
"""
import functools
import logging
import operator
import dask
import dask.array as da
import numpy as np
class DistSegError(Exception):
"""Error in image segmentation."""
try:
from dask_image.ndmeasure._utils import _label
from sklearn import metrics as sk_metrics
except ModuleNotFoundError as e:
raise DistSegError("Install 'cellpose[distributed]' for distributed segmentation dependencies") from e
logger = logging.getLogger(__name__)
def segment(
image,
channels,
model_type,
diameter,
fast_mode=False,
use_anisotropy=True,
iou_depth=2,
iou_threshold=0.7,
):
"""Use cellpose to segment nuclei in fluorescence data.
Parameters
----------
image : array of shape (z, y, x, channel)
Image used for detection of objects
channels : array of int with size 2
See cellpose
model_type : str
"cyto" or "nuclei"
diameter : tuple of size 3
Approximate diameter (in pixels) of a segmented region, i.e. cell width
fast_mode : bool
In fast mode, network averaging, tiling, and augmentation are turned off.
use_anisotropy : bool
If true, use anisotropy parameter of cellpose
iou_depth: dask depth parameter
Number of pixels of overlap to use in intersection-over-union calculation when
linking segments across neighboring, overlapping dask chunk regions.
iou_threshold: float
Minimum intersection-over-union in neighboring, overlapping dask chunk regions
to be considered the same segment. The region for calculating IOU is given by the
iou_depth parameter.
Returns:
segments : array of int32 with same shape as input
Each segmented cell is assigned a number and all its pixels contain that value (0 is background)
"""
assert image.ndim == 4, image.ndim
assert image.shape[-1] in {1, 2}, image.shape
assert diameter[1] == diameter[2], diameter
diameter_yx = diameter[1]
anisotropy = diameter[0] / diameter[1] if use_anisotropy else None
image = da.asarray(image)
image = image.rechunk({-1: -1}) # color channel is chunked together
depth = tuple(np.ceil(diameter).astype(np.int64))
boundary = "reflect"
# No chunking in channel direction
image = da.overlap.overlap(image, depth + (0,), boundary)
block_iter = zip(
np.ndindex(*image.numblocks),
map(
functools.partial(operator.getitem, image),
da.core.slices_from_chunks(image.chunks),
),
)
labeled_blocks = np.empty(image.numblocks[:-1], dtype=object)
total = None
for index, input_block in block_iter:
labeled_block, n = dask.delayed(segment_chunk, nout=2)(
input_block,
channels,
model_type,
diameter_yx,
anisotropy,
fast_mode,
index,
)
shape = input_block.shape[:-1]
labeled_block = da.from_delayed(labeled_block, shape=shape, dtype=np.int32)
n = dask.delayed(np.int32)(n)
n = da.from_delayed(n, shape=(), dtype=np.int32)
total = n if total is None else total + n
block_label_offset = da.where(labeled_block > 0, total, np.int32(0))
labeled_block += block_label_offset
labeled_blocks[index[:-1]] = labeled_block
total += n
# Put all the blocks together
block_labeled = da.block(labeled_blocks.tolist())
depth = da.overlap.coerce_depth(len(depth), depth)
if np.prod(block_labeled.numblocks) > 1:
iou_depth = da.overlap.coerce_depth(len(depth), iou_depth)
if any(iou_depth[ax] > depth[ax] for ax in depth.keys()):
raise DistSegError("iou_depth (%s) > depth (%s)" % (iou_depth, depth))
trim_depth = {k: depth[k] - iou_depth[k] for k in depth.keys()}
block_labeled = da.overlap.trim_internal(
block_labeled, trim_depth, boundary=boundary
)
block_labeled = link_labels(
block_labeled,
total,
iou_depth,
iou_threshold=iou_threshold,
)
block_labeled = da.overlap.trim_internal(
block_labeled, iou_depth, boundary=boundary
)
else:
block_labeled = da.overlap.trim_internal(
block_labeled, depth, boundary=boundary
)
return block_labeled
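# Added usage sketch (not part of the original module): how segment() above is meant
# to be called. The image shape, chunking, and diameter below are illustrative
# assumptions only; nothing is computed until .compute() is called on the result.
def _segment_usage_example():
    """Build a lazy segmentation graph for a fake (z, y, x, channel) stack."""
    fake_image = da.random.random((32, 256, 256, 1), chunks=(16, 128, 128, 1))
    labels = segment(
        fake_image,
        channels=[0, 0],       # [0, 0] is the usual cellpose choice for grayscale input
        model_type="nuclei",
        diameter=(12, 8, 8),   # (z, y, x); the y and x diameters must match
        iou_depth=2,
        iou_threshold=0.7,
    )
    # segment() only assembles the dask graph; labels.compute() would run cellpose.
    return labels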
def segment_chunk(
chunk,
channels,
model_type,
diameter_yx,
anisotropy,
fast_mode,
index,
):
"""Perform segmentation on an individual chunk."""
# Cellpose seems to have some randomness, which is made deterministic by using the block
# details as a random seed.
np.random.seed(index)
from cellpose import models
model = models.Cellpose(gpu=True, model_type=model_type, net_avg=not fast_mode)
logger.info("Evaluating model")
segments, _, _, _ = model.eval(
chunk,
channels=channels,
z_axis=0,
channel_axis=3,
diameter=diameter_yx,
do_3D=True,
anisotropy=anisotropy,
net_avg=not fast_mode,
augment=not fast_mode,
tile=not fast_mode,
)
logger.info("Done segmenting chunk")
return segments.astype(np.int32), segments.max()
def link_labels(block_labeled, total, depth, iou_threshold=1):
"""
Build a label connectivity graph that groups labels across blocks,
use this graph to find connected components, and then relabel each
block according to those.
"""
label_groups = label_adjacency_graph(block_labeled, total, depth, iou_threshold)
new_labeling = _label.connected_components_delayed(label_groups)
return _label.relabel_blocks(block_labeled, new_labeling)
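# Added toy sketch (not part of the original module): the face-IoU idea used by
# label_adjacency_graph/_across_block_label_iou below, written in plain numpy +
# sklearn. The face values are made up for illustration.
def _demo_face_iou():
    """Link labels 3 and 7 across a chunk boundary via intersection-over-union."""
    face = np.array([[0, 3, 3, 0],     # last row of one chunk
                     [0, 7, 7, 7]],    # first row of its neighbour
                    dtype=np.int32)
    face0, face1 = np.split(face, 2, axis=0)
    inter = sk_metrics.confusion_matrix(face0.reshape(-1), face1.reshape(-1))
    union = inter.sum(axis=0, keepdims=True) + inter.sum(axis=1, keepdims=True) - inter
    with np.errstate(divide="ignore", invalid="ignore"):
        iou = np.where(inter > 0, inter / union, 0)
    # Rows/columns are indexed by the sorted unique labels [0, 3, 7]; the (3, 7)
    # entry is iou[1, 2] == 2/3, so those two labels would be grouped together.
    return iou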
def label_adjacency_graph(labels, nlabels, depth, iou_threshold):
all_mappings = [da.empty((2, 0), dtype=np.int32, chunks=1)]
slices_and_axes = get_slices_and_axes(labels.chunks, labels.shape, depth)
for face_slice, axis in slices_and_axes:
face = labels[face_slice]
mapped = _across_block_iou_delayed(face, axis, iou_threshold)
all_mappings.append(mapped)
i, j = da.concatenate(all_mappings, axis=1)
result = _label._to_csr_matrix(i, j, nlabels + 1)
return result
def _across_block_iou_delayed(face, axis, iou_threshold):
"""Delayed version of :func:`_across_block_label_grouping`."""
_across_block_label_grouping_ = dask.delayed(_across_block_label_iou)
grouped = _across_block_label_grouping_(face, axis, iou_threshold)
return da.from_delayed(grouped, shape=(2, np.nan), dtype=np.int32)
def _across_block_label_iou(face, axis, iou_threshold):
unique = np.unique(face)
face0, face1 = np.split(face, 2, axis)
intersection = sk_metrics.confusion_matrix(face0.reshape(-1), face1.reshape(-1))
sum0 = intersection.sum(axis=0, keepdims=True)
sum1 = intersection.sum(axis=1, keepdims=True)
# Note that sum0 and sum1 broadcast to square matrix size.
union = sum0 + sum1 - intersection
# Ignore errors with divide by zero, which the np.where sets to zero.
with np.errstate(divide="ignore", invalid="ignore"):
iou =
|
np.where(intersection > 0, intersection / union, 0)
|
numpy.where
|
import os
import cv2
import numpy as np
import tensorflow as tf
model = tf.keras.models.load_model('./model_Rain100H/')
path = './Rain100H/rain/'
file_list = os.listdir(path)
step = 0
for pic in file_list:
p = tf.io.read_file('./Rain100H/rain/'+pic)
pics_1 = tf.io.decode_image(p)
pics_1 = pics_1.numpy()[np.newaxis,:,:,:]
pics = pics_1/255
a = model.predict(pics)
output_img1 =
|
np.squeeze(a)
|
numpy.squeeze
|
import numbers
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.validation import check_array
from .cabess import pywrap_PCA, pywrap_RPCA
from .bess_base import bess_base
def fix_docs(cls):
# This function is to inherit the docstring from base class
# and avoid unnecessary duplications on description.
index = cls.__doc__.find("Examples\n --------\n")
if index != -1:
cls.__doc__ = cls.__doc__[:index] + \
cls.__bases__[0].__doc__ + cls.__doc__[index:]
return cls
@fix_docs
class SparsePCA(bess_base):
"""
Adaptive Best-Subset Selection(ABESS) algorithm for principal component analysis.
Parameters
----------
splicing_type: {0, 1}, optional
The type of splicing in `fit()` (in Algorithm.h).
"0" for decreasing by half, "1" for decresing by one.
Default: splicing_type = 1.
Examples
--------
>>> ### Sparsity known
>>>
>>> from abess.decomposition import SparsePCA
>>> import numpy as np
>>> np.random.seed(12345)
>>> model = SparsePCA(support_size = 10)
>>>
>>> ### X known
>>> X = np.random.randn(100, 50)
>>> model.fit(X)
>>> print(model.coef_)
>>>
>>> ### X unknown, but Sigma known
>>> model.fit(Sigma = np.cov(X.T))
>>> print(model.coef_)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
thread=1,
sparse_matrix=False,
splicing_type=1
):
super().__init__(
algorithm_type="abess", model_type="PCA", normalize_type=1, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type
)
def transform(self, X):
"""
For PCA model, apply dimensionality reduction
to given data.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
return X.dot(self.coef_)
def ratio(self, X):
"""
Give new data, and it returns the explained ratio.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
"""
X = self.new_data_check(X)
s = np.cov(X.T)
if len(self.coef_.shape) == 1:
explain = self.coef_.T.dot(s).dot(self.coef_)
else:
explain = np.sum(np.diag(self.coef_.T.dot(s).dot(self.coef_)))
if isinstance(s, (int, float)):
full = s
else:
full = np.sum(np.diag(s))
return explain / full
def fit(self, X=None, is_normal=False,
group=None, Sigma=None, number=1, n=None, A_init=None):
"""
The fit function is used to transfer the information of data and return the fit result.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Training data
is_normal : bool, optional
            whether to normalize the variables array before fitting the algorithm.
Default: is_normal=False.
weight : array-like of shape (n_samples,)
Individual weights for each sample. Only used for is_weight=True.
Default is 1 for each observation.
group : int, optional
The group index for each variable.
Default: group = \\code{numpy.ones(p)}.
Sigma : array-like of shape (n_features, n_features), optional
Sample covariance matrix.
For PCA, it can be given as input, instead of X. But if X is given, Sigma will be set to \\code{np.cov(X.T)}.
Default: Sigma = \\code{np.cov(X.T)}.
number : int, optional
Indicates the number of PCs returned.
Default: 1
n : int, optional
Sample size. If X is given, it would be X.shape[0]; if Sigma is given, it would be 1 by default.
Default: X.shape[0] or 1.
"""
# Input check
if isinstance(X, (list, np.ndarray, np.matrix, coo_matrix)):
if isinstance(X, coo_matrix):
self.sparse_matrix = True
X = check_array(X, accept_sparse=True)
n = X.shape[0]
p = X.shape[1]
X = X - X.mean(axis=0)
Sigma = np.cov(X.T)
self.n_features_in_ = p
elif isinstance(Sigma, (list, np.ndarray, np.matrix)):
if self.cv > 1:
raise ValueError("X should be given to use CV.")
Sigma = check_array(Sigma)
if (Sigma.shape[0] != Sigma.shape[1] or np.any(Sigma.T != Sigma)):
raise ValueError("Sigma should be symmetrical matrix.")
if np.any(np.linalg.eigvals(Sigma) < 0):
raise ValueError("Sigma should be semi-positive definite.")
if n is None:
n = 1
p = Sigma.shape[0]
X = np.zeros((1, p))
self.n_features_in_ = p
is_normal = False
else:
raise ValueError("X or Sigma should be given in PCA.")
# # Algorithm_type
# if self.algorithm_type == "abess":
# algorithm_type_int = 6
# else:
# raise ValueError("algorithm_type should not be " +
# str(self.algorithm_type))
# for PCA,
# model_type_int = 7
path_type_int = 1
# Ic_type
if self.ic_type == "aic":
ic_type_int = 1
elif self.ic_type == "bic":
ic_type_int = 2
elif self.ic_type == "gic":
ic_type_int = 3
elif self.ic_type == "ebic":
ic_type_int = 4
else:
raise ValueError(
"ic_type should be \"aic\", \"bic\", \"ebic\" or \"gic\"")
# cv
if (not isinstance(self.cv, int) or self.cv <= 0):
raise ValueError("cv should be an positive integer.")
if self.cv > n:
raise ValueError("cv should be smaller than n.")
# Group
if group is None:
g_index = list(range(p))
else:
group = np.array(group)
if group.ndim > 1:
raise ValueError("group should be an 1D array of integers.")
if group.size != p:
raise ValueError(
"The length of group should be equal to X.shape[1].")
g_index = []
group.sort()
group_set = list(set(group))
j = 0
for i in group_set:
while group[j] != i:
j += 1
g_index.append(j)
# path parameter (note that: path_type_int = 1)
if self.support_size is None:
support_sizes = np.ones(((int(p / 3) + 1), number))
else:
if isinstance(self.support_size, (numbers.Real, numbers.Integral)):
support_sizes =
|
np.zeros((self.support_size, 1))
|
numpy.zeros
|
'''
Created on May 15, 2018
@author: melnikov
'''
import numpy
from scipy import stats, signal, spatial
import base64
import random
import matplotlib
from matplotlib import pyplot as plt
import multiprocessing as mp
import ctypes
try:
from workflow_lib import workflow_logging
logger = workflow_logging.getLogger()
except:
import logging
logger = logging.getLogger("MeshBest")
def triangle(x0, y0, length):
x = numpy.linspace(0, 99, 100)
array = y0 - 2 * y0 * numpy.abs(x - x0) / length
array = array * (array > 0)
return array
def From64ToSpotArray(string64):
array = numpy.frombuffer(base64.b64decode(string64))
array = array.reshape((int(array.size/5), 5))
return array
def AMPDiter(array):
L = int(len(array) / 2)
matrix = numpy.zeros((L, len(array)))
for k in range(1, L + 1):
for i in range(1, len(array) + 1):
if i >= k + 2 and i < len(array) - k + 2:
# W = 2 * k
if array[i - 2] > array[i - k - 2] and array[i - 2] > array[i + k - 2]:
matrix[k - 1, i - 1] = 0
else:
matrix[k - 1, i - 1] = 1 + random.random() / 2
else:
matrix[k - 1, i - 1] = 1 + random.random() / 2
gammas = numpy.sum(matrix, axis=1)
# logger.debug(gammas)
Lambda = numpy.where(gammas == numpy.min(gammas))[0][0] + 1
# logger.debug(Lambda)
matrix = matrix[:Lambda, :]
Sigma = numpy.std(matrix, axis=0)
peaks = []
for i in range(len(Sigma)):
if Sigma[i] == 0:
if (i - 1) / float(len(array)) > 0.00:
peaks.append(i - 1)
peaks = numpy.array(peaks, dtype=int)
# logger.debug('AMPD-result_PEAKS: ', peaks)
return peaks, Lambda
def AMPD(array_orig):
fullpeaklist = numpy.array([], dtype=int)
for cycle in range(10):
M = numpy.mean(array_orig)
# SD = numpy.std(array_orig)
X = numpy.arange(0, len(array_orig))
linfit = stats.linregress(X, array_orig)
array = array_orig - (linfit[0] * X + linfit[1])
array = array * (array > 0)
MAX = numpy.max(array) - M
allpeaks = numpy.array([], dtype=int)
while True:
substract = numpy.zeros(len(array_orig))
peaks, Lambda = AMPDiter(array)
peaks = peaks[(array_orig[peaks] - M > MAX / 5)]
if len(peaks) > 0:
pass
else:
break
allpeaks = numpy.append(allpeaks, peaks)
for peak in peaks:
substract += triangle(peak, array[peak], Lambda)[:len(array_orig)]
array = array - substract
array = array * (array > 0)
if len(numpy.atleast_1d(allpeaks)) == 0:
break
allpeaks = numpy.sort(allpeaks)
allpeaks = allpeaks.astype(int)
dels = []
for i in range(len(allpeaks)):
peak = allpeaks[i]
if peak > 1 and peak < (len(array_orig) - 1):
if array_orig[peak] < array_orig[peak + 1] or array_orig[peak] < array_orig[peak - 1]:
dels.append(i)
allpeaks = numpy.delete(allpeaks, dels)
fullpeaklist = numpy.append(fullpeaklist, allpeaks)
fullpeaklist = numpy.unique(fullpeaklist)
# fig = plt.plot(array_orig)
# sc = plt.scatter(fullpeaklist, array_orig[fullpeaklist], color='red')
# plt.show()
if len(fullpeaklist)>1:
return fullpeaklist
else:
return None
def CalcSeff(array):
# N = numpy.size(array) / 5
rarray = numpy.sqrt((array[:, 1] - BeamCenter[0])**2+(array[:, 2] - BeamCenter[1])**2)
rmax = min(int(BeamCenter[0]), int(BeamCenter[1]))
HIST = numpy.histogram(rarray, bins=50, range=(0, rmax))
binsize = HIST[1][1]-HIST[1][0]
Rspace = numpy.linspace(0, rmax, 50)
density = numpy.zeros(50)
for i in range(50):
density[i] = HIST[0][i]/(2*3.14159*(binsize**2)*(i+0.5))
Sdetec = numpy.diff(Rspace**2, 1)
Sdetec = numpy.append(Sdetec, [0])
Sreal = (DetectorPixel**2)*(Sdetec*DetectorDistance/(Wavelength**2)) / (numpy.sqrt(DetectorDistance**2+(DetectorPixel*Rspace)**2))**3
Seff = numpy.sum(Sreal*(density>0))
# plt.plot(density)
# plt.show()
return Seff
def SaltRingCheck_MP(queue, BeamCenter, Buffer):
while True:
spot = queue.get()
if spot == None:
break
try:
string = spot['dozorSpotList']
array = From64ToSpotArray(string)
if len(numpy.atleast_1d(array)) > 250:
xsize = int(BeamCenter[0]) * 2 + 1
ysize = int(BeamCenter[1]) * 2 + 1
detector = numpy.zeros((ysize, xsize))
for i in range(numpy.shape(array)[0]):
detector[int(array[i, 2]), int(array[i, 1])] = 1
radius_array = numpy.sqrt((array[:, 1] - BeamCenter[0]) ** 2 + (array[:, 2] - BeamCenter[1]) ** 2)
density = numpy.zeros(numpy.size(radius_array))
for i in range(numpy.shape(array)[0]):
x0, y0 = int(array[i, 1]), int(array[i, 2])
density[i] = numpy.mean(detector[y0 - 5:y0 + 5, x0 - 5:x0 + 5])
HIST = numpy.histogram(radius_array, bins=400, range=(10, min(BeamCenter)), weights=(density))[0]
#---SALT_RING_CHECK_ANALYSIS---
M = numpy.mean(HIST)
# remove_outliers
X = numpy.arange(400)
p = numpy.polyfit(X[(HIST < 3 * M)], HIST[X[(HIST < 3 * M)]], 2)
V = numpy.poly1d(p)
based = HIST - 2 * V(X) * (V(X) > 0)
based = based * (based > 0)
M1 = numpy.mean(based[X[(HIST < 3 * M)]])
SD = numpy.std(based[X[(HIST < 3 * M)]])
peaks = X[(based > M1 + 10 * SD) * (HIST > 0.1)]
#---EXCLUDE_BAD_REGIONS---
Excluded_regions = [((peak - 2) * (min(BeamCenter) - 10) / 399 + 10, (peak + 2) * (min(BeamCenter) - 10) / 399 + 10) for peak in peaks]
r = numpy.zeros(numpy.shape(array)[0])
for ring in Excluded_regions:
r += (radius_array > ring[0]) * (radius_array < ring[1])
array = numpy.delete(array, numpy.where(r), 0)
if len(array) > 5:
newstring = base64.b64encode(array).decode()
Buffer[spot['index']] = newstring
except KeyError:
pass
def MakeHistogram_MP(queue):
limits = (0.001, 0.04)
while True:
spot = queue.get()
if spot == None:
break
if 'dozorSpotList_saltremoved' in spot.keys():
string = spot['dozorSpotList_saltremoved']
array = From64ToSpotArray(string)
elif 'dozorSpotList' in spot.keys():
string = spot['dozorSpotList']
array = From64ToSpotArray(string)
else:
string = False
result = numpy.zeros(100)
if string != False:
if array.size > 5:
RealCoords = numpy.zeros((numpy.shape(array)[0], 5))
x = (array[:, 1] - BeamCenter[0]) * DetectorPixel
y = (array[:, 2] - BeamCenter[1]) * DetectorPixel
divider = Wavelength * numpy.sqrt(x ** 2 + y ** 2 + DetectorDistance ** 2)
RealCoords[:, 0] = x / divider
RealCoords[:, 1] = y / divider
RealCoords[:, 2] = (1/Wavelength) - DetectorDistance/divider
# RealCoords[i, 3] = array[i, 0]
# RealCoords[i, 4] = float(array[i, 3]) / float(array[i, 4])
array = spatial.distance.pdist(RealCoords[:, :3], metric='euclidean')
# array = numpy.array([])
#
# for i in range(len(RealCoords[:, 0])):
# for j in range(i + 1, len(RealCoords[:, 0])):
# if numpy.abs(RealCoords[i, 3] - RealCoords[j, 3]) < 15:
# L = numpy.sqrt((RealCoords[i, 0] - RealCoords[j, 0]) ** 2 + (RealCoords[i, 1] - RealCoords[j, 1]) ** 2 + (RealCoords[i, 2] - RealCoords[j, 2]) ** 2)
# if L < limits[1] and L >= limits[0]:
# array = numpy.append(array, L)
if len(array) > 1:
result = numpy.histogram(array, bins=100, range=limits)[0]
# string = str(base64.b64encode(result))
# Buffer[spot['index']] = ' '.join(result.astype(str).tolist())
Buffer[spot['index']] = base64.b64encode(result.astype(float)).decode()
def MakeHistogram(spot):
limits = (0.001, 0.04)
if 'dozorSpotList_saltremoved' in spot.keys():
string = spot['dozorSpotList_saltremoved']
array = From64ToSpotArray(string)
elif 'dozorSpotList' in spot.keys():
string = spot['dozorSpotList']
array = From64ToSpotArray(string)
else:
string = False
if string != False:
if len(numpy.atleast_1d(array)) > 5:
RealCoords = numpy.zeros((numpy.shape(array)[0], 5))
x = (array[:, 1] - BeamCenter[0]) * DetectorPixel
y = (array[:, 2] - BeamCenter[1]) * DetectorPixel
divider = Wavelength * numpy.sqrt(x ** 2 + y ** 2 + DetectorDistance ** 2)
RealCoords[:, 0] = x / divider
RealCoords[:, 1] = y / divider
RealCoords[:, 2] = (1/Wavelength) - DetectorDistance/divider
# RealCoords[i, 3] = array[i, 0]
# RealCoords[i, 4] = float(array[i, 3]) / float(array[i, 4])
array = spatial.distance.pdist(RealCoords[:, :3], metric='euclidean')
# array = numpy.array([])
#
# for i in range(len(RealCoords[:, 0])):
# for j in range(i + 1, len(RealCoords[:, 0])):
# if numpy.abs(RealCoords[i, 3] - RealCoords[j, 3]) < 15:
# L = numpy.sqrt((RealCoords[i, 0] - RealCoords[j, 0]) ** 2 + (RealCoords[i, 1] - RealCoords[j, 1]) ** 2 + (RealCoords[i, 2] - RealCoords[j, 2]) ** 2)
# if L < limits[1] and L >= limits[0]:
# array = numpy.append(array, L)
if array.size > 1:
histogr = numpy.histogram(array, bins=100, range=limits)
return histogr[0]
def OverlapCheck_MP(queue, base_regions):
while True:
item = queue.get()
if item == None:
break
Buffer[item['index']] = 0
try:
array = From64ToSpotArray(item['dozorSpotList_saltremoved'])
except KeyError:
array = From64ToSpotArray(item['dozorSpotList'])
N = array.size / 5
Smax = CalcSeff(array)
H0 = numpy.frombuffer(base64.b64decode(item['DVHistogram']))
# H0 = numpy.array(item['DVHistogram'].split(' ')).astype(int)
H = H0[base_regions]
X = numpy.linspace(0.001, 0.04, 100)[base_regions]
if numpy.all(H==0):
k = 0
else:
k = numpy.mean(X*H)/numpy.mean(X*X)
std = numpy.sqrt(numpy.mean((k*X-H)**2))
weights = numpy.exp((N/450.0)*(k*X-H)/std)
k =
|
numpy.mean(X*H*weights)
|
numpy.mean
|
"""Tools for project."""
import os
import json
import pickle
from pathlib import Path
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
rootpath = os.path.dirname(os.path.abspath(__file__))
FIGPATH = os.path.join(rootpath, 'figures')
mpl.rcParams['font.size'] = 7
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
mpl.rcParams['font.family'] = 'arial'
def get_figname(save_path, figname=''):
    # For backward compatibility
if isinstance(save_path, str):
save_name = os.path.split(save_path)[-1]
else:
# ugly hack to get experiment name
save_name = os.path.split(os.path.split(save_path[0])[-2])[-1]
path = os.path.join(FIGPATH, save_name)
os.makedirs(path, exist_ok=True)
figname = os.path.join(path, save_name + figname)
return figname
def save_fig(save_path, figname='', dpi=300, pdf=True, show=False):
figname = get_figname(save_path, figname)
plt.savefig(os.path.join(figname + '.png'), dpi=dpi)
print('Figure saved at: ' + figname)
if pdf:
plt.savefig(os.path.join(figname + '.pdf'), transparent=True)
# plt.savefig(os.path.join(figname + '.svg'), transparent=True, format='svg')
if show:
plt.show()
# plt.close()
def save_config(config, save_path, also_save_as_text = True):
"""Save config."""
config_dict = config.__dict__
with open(os.path.join(save_path, 'config.json'), 'w') as f:
json.dump(config_dict, f)
if also_save_as_text:
with open(os.path.join(save_path, 'config.txt'), "w") as f:
for k, v in config_dict.items():
f.write(str(k) + ' >>> ' + str(v) + '\n\n')
def load_config(save_path):
"""Load config."""
import configs
with open(os.path.join(save_path, 'config.json'), 'r') as f:
config_dict = json.load(f)
model_type = config_dict.get('model', None)
if model_type == 'full':
if 'meta_lr' in config_dict:
config = configs.MetaConfig()
else:
config = configs.FullConfig()
elif model_type == 'rnn':
config = configs.RNNConfig()
else:
config = configs.BaseConfig()
for key, val in config_dict.items():
setattr(config, key, val)
try:
config.n_trueclass_ratio = config.n_trueclass / config.N_CLASS
except AttributeError:
pass
return config
def vary_config(base_config, config_ranges, mode):
"""Return configurations.
Args:
base_config: dict, a base configuration
config_ranges: a dictionary of hyperparameters values
config_ranges = {
'hp1': [hp1_val1, hp1_val2, ...],
'hp2': [hp2_val1, hp2_val2, ...],
}
mode: str, can take 'combinatorial', 'sequential', and 'control'
Return:
configs: a list of config dict [config1, config2, ...]
"""
if mode == 'combinatorial':
_vary_config = _vary_config_combinatorial
elif mode == 'sequential':
_vary_config = _vary_config_sequential
elif mode == 'control':
_vary_config = _vary_config_control
else:
raise ValueError('Unknown mode {}'.format(str(mode)))
configs, config_diffs = _vary_config(base_config, config_ranges)
# Automatic set names for configs
# configs = autoname(configs, config_diffs)
for i, config in enumerate(configs):
config.model_name = str(i).zfill(6) # default name
return configs
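# Added usage sketch (not from the original module): the config class and value
# ranges below are made-up placeholders, only to illustrate the three modes.
def _vary_config_example():
    class _ToyConfig:
        lr = 1e-3
        N_KC = 2500
        model_name = None
    ranges = {'lr': [1e-3, 1e-4], 'N_KC': [2500, 10000]}
    combinatorial = vary_config(_ToyConfig(), ranges, mode='combinatorial')  # 2*2 = 4 configs
    sequential = vary_config(_ToyConfig(), ranges, mode='sequential')        # 2 configs
    control = vary_config(_ToyConfig(), ranges, mode='control')              # base + 2 variants
    return len(combinatorial), len(sequential), len(control)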
# def autoname(configs, config_diffs):
# """Helper function for automatically naming models based on configs."""
# new_configs = list()
# for config, config_diff in zip(configs, config_diffs):
# name = 'model'
# for key, val in config_diff.items():
# name += '_' + str(key) + str(val)
# config['save_path'] = Path(config['save_path']) / name
# new_configs.append(config)
# return new_configs
def _vary_config_combinatorial(base_config, config_ranges):
"""Return combinatorial configurations.
Args:
base_config: dict, a base configuration
config_ranges: a dictionary of hyperparameters values
config_ranges = {
'hp1': [hp1_val1, hp1_val2, ...],
'hp2': [hp2_val1, hp2_val2, ...],
}
Return:
configs: a list of config dict [config1, config2, ...]
Loops over all possible combinations of hp1, hp2, ...
config_diffs: a list of config diff from base_config
"""
# Unravel the input index
keys = config_ranges.keys()
dims = [len(config_ranges[k]) for k in keys]
n_max = int(np.prod(dims))
configs, config_diffs = list(), list()
for i in range(n_max):
new_config = deepcopy(base_config)
config_diff = dict()
        indices = np.unravel_index(i, dims)
# Set up new config
for key, index in zip(keys, indices):
val = config_ranges[key][index]
setattr(new_config, key, val)
config_diff[key] = val
configs.append(new_config)
config_diffs.append(config_diff)
return configs, config_diffs
def _vary_config_sequential(base_config, config_ranges):
"""Return sequential configurations.
Args:
base_config: dict, a base configuration
config_ranges: a dictionary of hyperparameters values
config_ranges = {
'hp1': [hp1_val1, hp1_val2, ...],
'hp2': [hp2_val1, hp2_val2, ...],
}
Return:
configs: a list of config dict [config1, config2, ...]
Loops over all hyperparameters hp1, hp2 together sequentially
config_diffs: a list of config diff from base_config
"""
keys = config_ranges.keys()
dims = [len(config_ranges[k]) for k in keys]
n_max = dims[0]
configs, config_diffs = list(), list()
for i in range(n_max):
new_config = deepcopy(base_config)
config_diff = dict()
for key in keys:
val = config_ranges[key][i]
setattr(new_config, key, val)
config_diff[key] = val
configs.append(new_config)
config_diffs.append(config_diff)
return configs, config_diffs
def _vary_config_control(base_config, config_ranges):
"""Return control configurations.
Each config_range is gone through sequentially. The base_config is
trained only once.
Args:
base_config: dict, a base configuration
config_ranges: a dictionary of hyperparameters values
config_ranges = {
'hp1': [hp1_val1, hp1_val2, ...],
'hp2': [hp2_val1, hp2_val2, ...],
}
Return:
configs: a list of config dict [config1, config2, ...]
Loops over all hyperparameters hp1, hp2 independently
config_diffs: a list of config diff from base_config
"""
keys = list(config_ranges.keys())
# Remove the baseconfig value from the config_ranges
new_config_ranges = {}
for key, val in config_ranges.items():
base_config_val = getattr(base_config, key)
new_config_ranges[key] = [v for v in val if v != base_config_val]
# Unravel the input index
dims = [len(new_config_ranges[k]) for k in keys]
n_max = int(np.sum(dims))
configs, config_diffs = list(), list()
configs.append(deepcopy(base_config))
config_diffs.append({})
for i in range(n_max):
new_config = deepcopy(base_config)
index = i
for j, dim in enumerate(dims):
if index >= dim:
index -= dim
else:
break
config_diff = dict()
key = keys[j]
val = new_config_ranges[key][index]
setattr(new_config, key, val)
config_diff[key] = val
configs.append(new_config)
config_diffs.append(config_diff)
return configs, config_diffs
def _islikemodeldir(d):
"""Check if directory looks like a model directory."""
try:
files = os.listdir(d)
except NotADirectoryError:
return False
fs = ['model.ckpt', 'model.pkl', 'model.pt', 'log.pkl', 'log.npz']
for f in fs:
if f in files:
return True
return False
def _get_alldirs(dir, model, sort):
"""Return sorted model directories immediately below path.
Args:
model: bool, if True find directories containing model files
sort: bool, if True, sort directories by name
"""
dirs = os.listdir(dir)
if model:
dirs = [d for d in dirs if _islikemodeldir(os.path.join(dir, d))]
    if _islikemodeldir(dir):  # if root is a model directory, return it
return [dir]
if sort:
ixs = np.argsort([int(n) for n in dirs]) # sort by epochs
dirs = [os.path.join(dir, dirs[n]) for n in ixs]
return dirs
def select_modeldirs(modeldirs, select_dict=None, acc_min=None):
"""Select model directories.
Args:
modeldirs: list of model directories
select_dict: dict, config must match select_dict to be selected
acc_min: None or float, minimum validation acc to be included
"""
new_dirs = []
for d in modeldirs:
selected = True
if select_dict is not None:
config = load_config(d) # epoch modeldirs have no configs
for key, val in select_dict.items():
if key == 'data_dir':
# If data_dir, only compare last
if Path(config.data_dir).name != Path(val).name:
selected = False
break
else:
if getattr(config, key) != val:
selected = False
break
if acc_min is not None:
log = load_log(d)
if log['val_acc'][-1] < acc_min:
selected = False
if selected:
new_dirs.append(d)
return new_dirs
def exclude_modeldirs(modeldirs, exclude_dict=None):
"""Exclude model directories."""
new_dirs = []
for d in modeldirs:
excluded = False
if exclude_dict is not None:
config = load_config(d) # epoch modeldirs have no configs
for key, val in exclude_dict.items():
if key == 'data_dir':
# If data_dir, only compare last
if Path(config.data_dir).name == Path(val).name:
excluded = True
break
else:
if getattr(config, key) == val:
excluded = True
break
if not excluded:
new_dirs.append(d)
return new_dirs
def sort_modeldirs(modeldirs, key):
"""Sort modeldirs by value of key."""
val = []
for d in modeldirs:
config = load_config(d)
val.append(getattr(config, key))
ind_sort = np.argsort(val)
modeldirs = [modeldirs[i] for i in ind_sort]
return modeldirs
def get_modeldirs(path, select_dict=None, exclude_dict=None, acc_min=None):
dirs = _get_alldirs(path, model=True, sort=True)
dirs = select_modeldirs(dirs, select_dict=select_dict, acc_min=acc_min)
dirs = exclude_modeldirs(dirs, exclude_dict=exclude_dict)
return dirs
def get_experiment_name(model_path):
"""Get experiment name for saving."""
if _islikemodeldir(model_path):
config = load_config(model_path)
experiment_name = config.experiment_name
if experiment_name is None:
# model_path is assumed to be experiment_name/model_name
experiment_name = os.path.normpath(model_path).split(os.path.sep)[-2]
else:
# Assume this is path to experiment
experiment_name = os.path.split(model_path)[-1]
return experiment_name
def get_model_name(model_path):
"""Get model name for saving."""
if _islikemodeldir(model_path):
config = load_config(model_path)
model_name = config.model_name
if model_name is None:
# model_path is assumed to be experiment_name/model_name
model_name = os.path.split(model_path)[-1]
else:
# Assume this is path to experiment
model_name = os.path.split(model_path)[-1]
return model_name
def save_pickle(modeldir, obj, epoch=None):
"""Save model weights in numpy.
Args:
modeldir: str, model directory
obj: dictionary of numpy arrays
epoch: int or None, epoch of training
"""
if epoch is not None:
modeldir = os.path.join(modeldir, 'epoch', str(epoch).zfill(4))
os.makedirs(modeldir, exist_ok=True)
fname = os.path.join(modeldir, 'model.npz')
np.savez_compressed(fname, **obj)
def load_pickle(modeldir):
file_np = os.path.join(modeldir, 'model.npz')
file_pkl = os.path.join(modeldir, 'model.pkl')
if os.path.isfile(file_np):
var_dict = np.load(file_np)
else:
with open(file_pkl, 'rb') as f:
var_dict = pickle.load(f)
return var_dict
def load_pickles(dir, var):
"""Load pickle by epoch in sorted order."""
out = []
dirs = get_modeldirs(dir)
for i, d in enumerate(dirs):
var_dict = load_pickle(d)
try:
cur_val = var_dict[var]
out.append(cur_val)
except:
print(var + ' is not in directory:' + d)
return out
def save_log(modeldir, log):
np.savez_compressed(os.path.join(modeldir, 'log.npz'), **log)
def load_log(modeldir):
file_np = os.path.join(modeldir, 'log.npz')
file_pkl = os.path.join(modeldir, 'log.pkl')
if os.path.isfile(file_np):
log = np.load(file_np)
else:
with open(file_pkl, 'rb') as f:
log = pickle.load(f)
save_log(modeldir, log) # resave with npz
return log
def has_nobadkc(modeldir, bad_kc_threshold=0.2):
"""Check if model has too many bad KCs."""
log = load_log(modeldir)
if 'bad_KC' not in log:
return True
    # After training, the bad-KC proportion should be lower than 'bad_kc_threshold'
return log['bad_KC'][-1] < bad_kc_threshold
def filter_modeldirs_badkc(modeldirs, bad_kc_threshold=0.2):
"""Filter model dirs with too many bad KCs."""
return [d for d in modeldirs if has_nobadkc(d, bad_kc_threshold)]
def has_singlepeak(modeldir, peak_threshold=None):
"""Check if model has a single peak."""
# TODO: Use this method throughout to replace similar methods
log = load_log(modeldir)
if ('lin_bins' not in log) or ('lin_hist' not in log):
return True
config = load_config(modeldir)
if peak_threshold is None:
peak_threshold = 2./config.N_PN # heuristic
if config.kc_prune_weak_weights:
thres = config.kc_prune_threshold
else:
thres = log['thres_inferred'][-1] # last epoch
if len(log['lin_bins'].shape) == 1:
bins = log['lin_bins'][:-1]
else:
bins = log['lin_bins'][-1, :-1]
bin_size = bins[1] - bins[0]
hist = log['lin_hist'][-1] # last epoch
# log['lin_bins'] shape (nbin+1), log['lin_hist'] shape (n_epoch, nbin)
ind_thres = np.argsort(np.abs(bins - thres))[0]
    ind_grace = int(0.01 / bin_size)  # grace distance before starting to look for the peak
hist_abovethres = hist[ind_thres + ind_grace:]
ind_peak = np.argmax(hist_abovethres)
# Value at threshold and at peak
thres_value = hist_abovethres[0]
peak_value = hist_abovethres[ind_peak]
if (ind_peak + ind_grace) * bin_size <= peak_threshold or (
peak_value < 1.3 * thres_value):
# peak should be at least 'peak_threshold' away from threshold
return False
else:
return True
def filter_modeldirs_badpeak(modeldirs, peak_threshold=None):
"""Filter model dirs without a strong second peak."""
return [d for d in modeldirs if has_singlepeak(d, peak_threshold)]
def filter_modeldirs(modeldirs, exclude_badkc=False, exclude_badpeak=False):
"""Select model directories.
Args:
modeldirs: list of model directories
exclude_badkc: bool, if True, exclude models with too many bad KCs
exclude_badpeak: bool, if True, exclude models with bad peaks
Return:
modeldirs: list of filtered model directories
"""
print('Analyzing {} model directories'.format(len(modeldirs)))
if exclude_badkc:
modeldirs = filter_modeldirs_badkc(modeldirs)
print('{} remain after filtering bad kcs'.format(len(modeldirs)))
if exclude_badpeak:
modeldirs = filter_modeldirs_badpeak(modeldirs)
print('{} remain after filtering bad peaks'.format(len(modeldirs)))
return modeldirs
def load_all_results(path, select_dict=None, exclude_dict=None,
argLast=True, ix=None, exclude_early_models=False,
none_to_string=True):
"""Load results from path.
Args:
path: str or list, if str, root path of all models loading results from
if list, directories of all models
Returns:
res: dictionary of numpy arrays, containing information from all models
"""
if isinstance(path, str):
dirs = get_modeldirs(path)
else:
dirs = path
dirs = select_modeldirs(dirs, select_dict=select_dict)
dirs = exclude_modeldirs(dirs, exclude_dict=exclude_dict)
from collections import defaultdict
res = defaultdict(list)
for i, d in enumerate(dirs):
log = load_log(d)
config = load_config(d)
n_actual_epoch = len(log['val_acc'])
if exclude_early_models and n_actual_epoch < config.max_epoch:
continue
# Add logger values
for key, val in log.items():
if key == 'meta_update_lr': # special handling
key = 'meta_update_lr_trained'
if len(val) == n_actual_epoch:
if argLast:
res[key].append(val[-1]) # store last value in log
elif ix is not None:
res[key].append(val[ix])
else:
res[key].append(val)
else:
res[key].append(val)
if 'loss' in key:
res['log_' + key].append(np.log(val))
if 'kc_prune_weak_weights' in dir(config) and \
config.kc_prune_weak_weights:
k_smart_key = 'K'
else:
k_smart_key = 'K_inferred'
if k_smart_key in res.keys():
res['K_smart'].append(res[k_smart_key][-1])
# Adding configuration values
for k in dir(config):
if k == 'coding_level': # name conflict with log entry
res['coding_level_set'].append(config.coding_level)
elif k == 'data_dir':
res['data_dir'].append(Path(config.data_dir).name)
elif k[0] != '_':
v = getattr(config, k)
if v is None and none_to_string:
v = '_none'
res[k].append(v)
# Add pn2kc peak information
clean_pn2kc = has_nobadkc(d) and has_singlepeak(d)
res['clean_pn2kc'].append(clean_pn2kc)
for key, val in res.items():
try:
res[key] = np.array(val)
except ValueError:
print('Cannot turn ' + key +
' into np array, probably non-homogeneous shape')
return res
nicename_dict = {
'_none': 'None',
'ORN_NOISE_STD': 'Noise level',
'N_PN': 'Number of PNs',
'N_KC': 'Number of KCs',
'N_ORN_DUPLICATION': 'ORNs per type',
'kc_inputs': 'PN inputs per KC',
'glo_score': 'GloScore',
'or_glo_score': 'OR to ORN GloScore',
'combined_glo_score': 'OR to PN GloScore',
'train_acc': 'Training Accuracy',
'train_loss': 'Training Loss',
'log_train_loss': 'Log Training Loss',
'val_acc': 'Accuracy',
'val_loss': 'Loss',
'log_val_loss': 'Log Loss',
'epoch': 'Epoch',
'kc_dropout': 'KC Dropout Rate',
'kc_loss_alpha': r'$\alpha$',
'kc_loss_beta': r'$\beta$',
'initial_pn2kc': 'Initial PN-KC Weights',
'initializer_pn2kc': 'Initializer',
'mean_claw': 'Average Number of KC Claws',
'zero_claw': '% of KC with No Input',
'kc_out_sparse_mean': '% of Active KCs',
'coding_level': '% of Active KCs',
'N_CLASS': 'Number of Classes',
'n_glo': 'Number of ORs per PN',
'n_trueclass': 'Number of Odor Prototypes',
'n_trueclass_ratio': 'Odor Prototypes Per Class',
'n_restricted_patterns': 'N Stereotyped Patterns',
'weight_perturb': 'Weight Perturb.',
'lr': 'Learning rate',
'train_kc_bias': 'Training KC bias',
'pn_norm_pre': 'PN normalization',
'kc_norm_pre': 'KC normalization',
'kc_norm': 'KC normalization',
'batch_norm': 'Batch Norm',
'layer_norm': 'Layer Norm',
'olsen': 'Divisive Norm',
'mean_center': 'Zero Mean',
'kc_dropout_rate': 'KC dropout rate',
'pn_dropout_rate': 'PN dropout rate',
'K_inferred': 'K',
'K': 'fixed threshold K',
'lin_hist_': 'Distribution',
'lin_bins_': 'PN to KC Weight',
'lin_hist': 'Distribution',
'lin_bins': 'PN to KC Weight',
'kc_prune_threshold': 'KC prune threshold',
'n_or_per_orn': 'Number of ORs per ORN',
'K_smart': 'K',
'kc_prune_weak_weights': 'Prune PN-KC weights',
'kc_recinh': 'KC recurrent inhibition',
'kc_recinh_coeff': 'KC rec. inh. strength',
'kc_recinh_step': 'KC rec. inh. step',
'orn_corr': 'ORN correlation',
'w_orn': 'ORN-PN connectivity',
'w_or': 'OR-ORN connectivity',
'w_glo': 'PN-KC connectivity',
'w_combined': 'OR-PN effective connectivity',
'glo_in': 'PN Input',
'glo': 'PN Activity',
'kc_in': 'KC Input',
'kc': 'KC Activity',
'sign_constraint_orn2pn': 'Non-negative ORN-PN',
'meta_lr': 'Meta learning rate',
'meta_num_samples_per_class': '# Samples/Class',
'meta_update_lr': 'Initial inner learning rate',
'skip_orn2pn': 'Skip ORN-PN',
'data_dir': 'Dataset',
'fixed_activity': 'Fixed activity',
'spread_orn_activity': 'ORN activity spread',
'training_type': 'Fixed Weights',
'train_pn2kc': 'Train PN-KC weights',
}
def nicename(name, mode='dict'):
"""Return nice name for publishing."""
if mode in ['lr', 'meta_lr']:
return np.format_float_scientific(name, precision=0, exp_digits=1)
elif mode in ['N_KC', 'N_PN']:
if name >= 1000:
return '{:.1f}K'.format(name/1000)
else:
return name
elif mode == 'kc_recinh_coeff':
return '{:0.1f}'.format(name)
elif mode == 'coding_level':
return '{:0.2f}'.format(name)
elif mode == 'n_trueclass_ratio':
return '{:d}'.format(int(name))
elif mode == 'data_dir':
# Right now this is only used for pn_normalization experiment
if Path(name).name == Path(
'./datasets/proto/concentration').name:
return 'low'
elif Path(name).name == Path(
'./datasets/proto/concentration_mask_row_0').name:
return 'medium'
elif Path(name).name == Path(
'./datasets/proto/concentration_mask_row_0.6').name:
return 'high'
elif name == 'data_dir':
return 'spread'
else:
return name
elif mode == 'scaling':
name = Path(name).name
if name == 'dim':
return 'Max dimension'
elif name == 'angle':
return 'Angle robustness'
elif name == 'vary_or':
return 'Train'
elif name == 'meta_vary_or':
return 'Meta learning'
else:
return name
else:
return nicename_dict.get(name, name) # get(key, default value)
# colors from https://visme.co/blog/color-combinations/ # 14
blue = np.array([2,148,165])/255.
red = np.array([193,64,61])/255.
gray = np.array([167, 156, 147])/255.
darkblue = np.array([3, 53, 62])/255.
green = np.array([65,89,57])/255. # From # 24
def reshape_worn(w_orn, unique_orn, mode='tile'):
"""Reshape w_orn."""
n_orn, n_pn = w_orn.shape
w_orn_by_pn = w_orn
n_duplicate_orn = n_orn // unique_orn
if mode == 'repeat':
w_orn_by_pn = np.reshape(w_orn_by_pn,
(unique_orn, n_duplicate_orn, n_pn))
w_orn_by_pn = np.swapaxes(w_orn_by_pn, 0, 1)
elif mode == 'tile':
w_orn_by_pn = np.reshape(w_orn_by_pn,
(n_duplicate_orn, unique_orn, n_pn))
else:
raise ValueError('Unknown mode' + str(mode))
return w_orn_by_pn
def reshape_worn_by_wor(w_orn, w_or):
ind_max = np.argmax(w_or, axis=0)
w_orn = w_orn[ind_max,:]
return w_orn, ind_max
def compute_glo_score(w_orn, unique_ors, mode='tile', w_or = None):
"""Compute the glomeruli score in numpy.
This function returns the glomeruli score, a number between 0 and 1 that
measures how close the connectivity is to glomeruli connectivity.
For one glomeruli neuron, first we compute the average connections from
each ORN group. Then we sort the absolute connection weights by ORNs.
The glomeruli score is simply:
(Max weight - Second max weight) / (Max weight + Second max weight)
Args:
w_orn: numpy array (n_orn, n_pn). This matrix has to be organized
in the following ways:
In the mode=='repeat'
neurons from the same orn type are indexed consecutively
for example, neurons from the 0-th type would be 0, 1, 2, ...
In the mode=='tile'
neurons from the same orn type are spaced by the number of types,
for example, neurons from the 0-th type would be 0, 50, 100, ...
unique_ors: int, the number of unique ORNs
mode: the way w_orn is organized
Return:
avg_glo_score: scalar, average glomeruli score
glo_scores: numpy array (n_pn,), all glomeruli scores
"""
n_orn, n_pn = w_orn.shape
if mode == 'tile' or mode == 'repeat':
w_orn_by_pn = reshape_worn(w_orn, unique_ors, mode)
w_orn_by_pn = w_orn_by_pn.mean(axis=0)
elif mode == 'matrix':
_, ind_max = reshape_worn_by_wor(w_orn, w_or)
w_orn_by_pn = np.zeros((unique_ors, unique_ors))
for i in range(unique_ors):
out = np.mean(w_orn[ind_max == i, :], axis=0)
out[np.isnan(out)] = 0
w_orn_by_pn[i, :] = out
else:
raise ValueError('reshaping format is not recognized {}'.format(mode))
glo_scores = list()
for i in range(n_pn):
w_tmp = w_orn_by_pn[:, i] # all projections to the i-th PN neuron
indsort = np.argsort(w_tmp)[::-1]
w_max = w_tmp[indsort[0]]
w_second = w_tmp[indsort[1]]
glo_score = (w_max - w_second) / (w_max + w_second)
glo_scores.append(glo_score)
avg_glo_score = np.round(np.mean(glo_scores),4)
return avg_glo_score, glo_scores
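# Added usage sketch (not from the original module): a toy connectivity matrix in
# 'tile' order, showing that near-one-to-one connectivity scores close to 1. All
# sizes and the noise level are illustrative assumptions.
def _glo_score_example():
    rng = np.random.RandomState(0)
    unique_ors, n_dup, n_pn = 5, 3, 5
    w_ideal = np.tile(np.eye(unique_ors), (n_dup, 1))        # each PN driven by one OR type
    w_noisy = w_ideal + 0.05 * rng.rand(unique_ors * n_dup, n_pn)
    avg_score, scores = compute_glo_score(w_noisy, unique_ors, mode='tile')
    return avg_score  # close to 1 for this near-glomerular connectivity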
def compute_sim_score(w_orn, unique_orn, mode='tile'):
"""Compute the similarity score in numpy.
    This function returns the similarity score, which measures how similar the
    connection-weight vectors of neurons belonging to the same ORN type are.
    For each unique ORN type, we compute the cosine-similarity matrix between the
    weight vectors of its duplicate neurons and take its mean; the overall score
    is the average of these per-type values.
Args:
w_orn: numpy array (n_orn, n_pn). This matrix has to be organized
in the following ways:
In the mode=='repeat'
neurons from the same orn type are indexed consecutively
for example, neurons from the 0-th type would be 0, 1, 2, ...
In the mode=='tile'
neurons from the same orn type are spaced by the number of types,
for example, neurons from the 0-th type would be 0, 50, 100, ...
unique_orn: int, the number of unique ORNs
mode: the way w_orn is organized
Return:
        avg_sim_score: scalar, average similarity score
        sim_scores: numpy array (unique_orn,), similarity score per ORN type
"""
from sklearn.metrics.pairwise import cosine_similarity
n_orn, n_pn = w_orn.shape
w_orn_by_pn = reshape_worn(w_orn, unique_orn, mode)
n_duplicate_orn = n_orn // unique_orn
if n_duplicate_orn == 1:
return 0, [0]*unique_orn
sim_scores = list()
for i in range(unique_orn):
w_tmp = w_orn_by_pn[:, i, :]
sim_tmp = cosine_similarity(w_tmp)
sim_scores.append(sim_tmp.mean())
avg_sim_score = np.mean(sim_scores)
return avg_sim_score, sim_scores
# def get_colormap():
# def make_colormap(seq):
# """Return a LinearSegmentedColormap
# seq: a sequence of floats and RGB-tuples. The floats should be increasing
# and in the interval (0,1).
# """
#
# seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
# cdict = {'red': [], 'green': [], 'blue': []}
# for i, item in enumerate(seq):
# if isinstance(item, float):
# r1, g1, b1 = seq[i - 1]
# r2, g2, b2 = seq[i + 1]
# cdict['red'].append([item, r1, r2])
# cdict['green'].append([item, g1, g2])
# cdict['blue'].append([item, b1, b2])
# return colors.LinearSegmentedColormap('CustomMap', cdict, N=512)
#
# c = colors.ColorConverter().to_rgb
# a = 'tomato'
# b = 'darkred'
# cmap = make_colormap([c('white'), c(a), .5, c(a), c(b), .8, c(b)])
# return cmap
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval,
b=maxval),
cmap(
|
np.linspace(minval, maxval, n)
|
numpy.linspace
|
from ir_sim.env import env_base
from math import sqrt, pi
from gym import spaces
from gym_env.envs.rvo_inter import rvo_inter
import numpy as np
class ir_gym(env_base):
def __init__(self, world_name, neighbors_region=5, neighbors_num=10, vxmax = 1.5, vymax = 1.5, env_train=True, acceler = 0.5, **kwargs):
super(ir_gym, self).__init__(world_name=world_name, **kwargs)
# self.obs_mode = kwargs.get('obs_mode', 0) # 0 drl_rvo, 1 drl_nrvo
# self.reward_mode = kwargs.get('reward_mode', 0)
self.radius_exp = kwargs.get('radius_exp', 0.2)
self.env_train = env_train
self.nr = neighbors_region
self.nm = neighbors_num
self.rvo = rvo_inter(neighbors_region, neighbors_num, vxmax, vymax, acceler, env_train, self.radius_exp)
self.observation_space = spaces.Box(-np.inf, np.inf, shape=(5,), dtype=np.float32)
self.action_space = spaces.Box(low=np.array([-1, -1]), high=np.array([1, 1]), dtype=np.float32)
self.reward_parameter = kwargs.get('reward_parameter', (0.2, 0.1, 0.1, 0.2, 0.2, 1, -20, 20))
self.acceler = acceler
self.arrive_flag_cur = False
self.rvo_state_dim = 8
def cal_des_omni_list(self):
des_vel_list = [robot.cal_des_vel_omni() for robot in self.robot_list]
return des_vel_list
def rvo_reward_list_cal(self, action_list, **kwargs):
ts = self.components['robots'].total_states() # robot_state_list, nei_state_list, obs_circular_list, obs_line_list
rvo_reward_list = list(map(lambda robot_state, action: self.rvo_reward_cal(robot_state, ts[1], ts[2], ts[3], action, self.reward_parameter, **kwargs), ts[0], action_list))
return rvo_reward_list
def rvo_reward_cal(self, robot_state, nei_state_list, obs_cir_list, obs_line_list, action, reward_parameter=(0.2, 0.1, 0.1, 0.2, 0.2, 1, -10, 20), **kwargs):
vo_flag, min_exp_time, min_dis = self.rvo.config_vo_reward(robot_state, nei_state_list, obs_cir_list, obs_line_list, action, **kwargs)
des_vel = np.round(np.squeeze(robot_state[-2:]), 2)
p1, p2, p3, p4, p5, p6, p7, p8 = reward_parameter
dis_des = sqrt((action[0] - des_vel[0] )**2 + (action[1] - des_vel[1])**2)
max_dis_des = 3
dis_des_reward = - dis_des / max_dis_des # (0-1)
exp_time_reward = - 0.2/(min_exp_time+0.2) # (0-1)
# rvo reward
if vo_flag:
rvo_reward = p2 + p3 * dis_des_reward + p4 * exp_time_reward
if min_exp_time < 0.1:
rvo_reward = p2 + p1 * p4 * exp_time_reward
else:
rvo_reward = p5 + p6 * dis_des_reward
rvo_reward =
|
np.round(rvo_reward, 2)
|
numpy.round
|
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""Tests for utils module"""
import numpy
import os
import re
import shutil
import tempfile
import unittest
from .. import utils
try:
import h5py
except ImportError:
h5py_missing = True
else:
h5py_missing = False
from ..utils import h5ls
try:
import fabio
except ImportError:
fabio = None
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "11/01/2017"
expected_spec1 = r"""#F .*
#D .*
#S 1 Ordinate1
#D .*
#N 2
#L Abscissa Ordinate1
1 4\.00
2 5\.00
3 6\.00
"""
expected_spec2 = expected_spec1 + """
#S 2 Ordinate2
#D .*
#N 2
#L Abscissa Ordinate2
1 7\.00
2 8\.00
3 9\.00
"""
expected_csv = r"""Abscissa;Ordinate1;Ordinate2
1;4\.00;7\.00e\+00
2;5\.00;8\.00e\+00
3;6\.00;9\.00e\+00
"""
expected_csv2 = r"""x;y0;y1
1;4\.00;7\.00e\+00
2;5\.00;8\.00e\+00
3;6\.00;9\.00e\+00
"""
class TestSave(unittest.TestCase):
"""Test saving curves as SpecFile:
"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.spec_fname = os.path.join(self.tempdir, "savespec.dat")
self.csv_fname = os.path.join(self.tempdir, "savecsv.csv")
self.npy_fname = os.path.join(self.tempdir, "savenpy.npy")
self.x = [1, 2, 3]
self.xlab = "Abscissa"
self.y = [[4, 5, 6], [7, 8, 9]]
self.ylabs = ["Ordinate1", "Ordinate2"]
def tearDown(self):
if os.path.isfile(self.spec_fname):
os.unlink(self.spec_fname)
if os.path.isfile(self.csv_fname):
os.unlink(self.csv_fname)
if os.path.isfile(self.npy_fname):
os.unlink(self.npy_fname)
shutil.rmtree(self.tempdir)
def test_save_csv(self):
utils.save1D(self.csv_fname, self.x, self.y,
xlabel=self.xlab, ylabels=self.ylabs,
filetype="csv", fmt=["%d", "%.2f", "%.2e"],
csvdelim=";", autoheader=True)
csvf = open(self.csv_fname)
actual_csv = csvf.read()
csvf.close()
self.assertRegexpMatches(actual_csv, expected_csv)
def test_save_npy(self):
"""npy file is saved with numpy.save after building a numpy array
and converting it to a named record array"""
npyf = open(self.npy_fname, "wb")
utils.save1D(npyf, self.x, self.y,
xlabel=self.xlab, ylabels=self.ylabs)
npyf.close()
npy_recarray =
|
numpy.load(self.npy_fname)
|
numpy.load
|
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from astropy.modeling import models, fitting
cmap = cm.ScalarMappable(colors.Normalize(1, 5200), cm.viridis)
# Reading in all data files at once
import glob
path_normal ='/projects/p30137/ageller/testing/EBLSST/add_m5/output_files'
allFiles_normal = glob.glob(path_normal + "/*.csv")
path_fast = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/output_files'
allFiles_fast = glob.glob(path_fast + "/*.csv")
path_obsDist = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/obsDist/output_files'
allFiles_obsDist = glob.glob(path_obsDist + "/*.csv")
#will want to remove old when the updates come in from Katie
#normal =[]
#fast=[]
#obsDist = []
#normal_03 =[]
#fast_03=[]
#obsDist_03 = []
#normal_1 =[]
#fast_1 =[]
#obsDist_1 = []
#normal_10 =[]
#fast_10=[]
#obsDist_10 = []
#normal_30 =[]
#fast_30=[]
#obsDist_30 = []
#normal_100 =[]
#fast_100 =[]
#obsDist_100 = []
#normal_1000 =[]
#fast_1000 =[]
#obsDist_1000 = []
#normal_overall =[]
#fast_overall=[]
#obsDist_overall = []
#normal_overall_03 =[]
#fast_overall_03=[]
#obsDist_overall_03 = []
#normal_overall_1 =[]
#fast_overall_1 =[]
#obsDist_overall_1 = []
#normal_overall_10 =[]
#fast_overall_10=[]
#obsDist_overall_10 = []
#normal_overall_30 =[]
#fast_overall_30=[]
#obsDist_overall_30 = []
#normal_overall_100 =[]
#fast_overall_100 =[]
#obsDist_overall_100 = []
#normal_overall_1000=[]
#fast_overall_1000 =[]
#obsDist_overall_1000 = []
N_totalnormal_array = []
N_totalobservablenormal_array = []
N_totalrecoverablenormal_array = []
N_totalnormal_array_03 = []
N_totalobservablenormal_array_03 = []
N_totalrecoverablenormal_array_03 = []
N_totalnormal_array_1 = []
N_totalobservablenormal_array_1 = []
N_totalrecoverablenormal_array_1 = []
N_totalnormal_array_10 = []
N_totalobservablenormal_array_10 = []
N_totalrecoverablenormal_array_10 = []
N_totalnormal_array_30 = []
N_totalobservablenormal_array_30 = []
N_totalrecoverablenormal_array_30 = []
N_totalnormal_array_100 = []
N_totalobservablenormal_array_100 = []
N_totalrecoverablenormal_array_100 = []
N_totalnormal_array_1000 = []
N_totalobservablenormal_array_1000 = []
N_totalrecoverablenormal_array_1000 = []
N_totalnormal22_array = []
N_totalobservablenormal22_array = []
N_totalrecoverablenormal22_array = []
N_totalnormal22_array_03 = []
N_totalobservablenormal22_array_03 = []
N_totalrecoverablenormal22_array_03 = []
N_totalnormal22_array_1 = []
N_totalobservablenormal22_array_1 = []
N_totalrecoverablenormal22_array_1 = []
N_totalnormal22_array_10 = []
N_totalobservablenormal22_array_10 = []
N_totalrecoverablenormal22_array_10 = []
N_totalnormal22_array_30 = []
N_totalobservablenormal22_array_30 = []
N_totalrecoverablenormal22_array_30 = []
N_totalnormal22_array_100 = []
N_totalobservablenormal22_array_100 = []
N_totalrecoverablenormal22_array_100 = []
N_totalnormal22_array_1000 = []
N_totalobservablenormal22_array_1000 = []
N_totalrecoverablenormal22_array_1000 = []
N_totalnormal195_array = []
N_totalobservablenormal195_array = []
N_totalrecoverablenormal195_array = []
N_totalnormal195_array_03 = []
N_totalobservablenormal195_array_03 = []
N_totalrecoverablenormal195_array_03 = []
N_totalnormal195_array_1 = []
N_totalobservablenormal195_array_1 = []
N_totalrecoverablenormal195_array_1 = []
N_totalnormal195_array_10 = []
N_totalobservablenormal195_array_10 = []
N_totalrecoverablenormal195_array_10 = []
N_totalnormal195_array_30 = []
N_totalobservablenormal195_array_30 = []
N_totalrecoverablenormal195_array_30 = []
N_totalnormal195_array_100 = []
N_totalobservablenormal195_array_100 = []
N_totalrecoverablenormal195_array_100 = []
N_totalnormal195_array_1000 = []
N_totalobservablenormal195_array_1000 = []
N_totalrecoverablenormal195_array_1000 = []
N_totalfast_array = []
N_totalobservablefast_array = []
N_totalrecoverablefast_array = []
N_totalfast_array_03 = []
N_totalobservablefast_array_03 = []
N_totalrecoverablefast_array_03 = []
N_totalfast_array_1 = []
N_totalobservablefast_array_1 = []
N_totalrecoverablefast_array_1 = []
N_totalfast_array_10 = []
N_totalobservablefast_array_10 = []
N_totalrecoverablefast_array_10 = []
N_totalfast_array_30 = []
N_totalobservablefast_array_30 = []
N_totalrecoverablefast_array_30 = []
N_totalfast_array_100 = []
N_totalobservablefast_array_100 = []
N_totalrecoverablefast_array_100 = []
N_totalfast_array_1000 = []
N_totalobservablefast_array_1000 = []
N_totalrecoverablefast_array_1000 = []
N_totalfast22_array = []
N_totalobservablefast22_array = []
N_totalrecoverablefast22_array = []
N_totalfast22_array_03 = []
N_totalobservablefast22_array_03 = []
N_totalrecoverablefast22_array_03 = []
N_totalfast22_array_1 = []
N_totalobservablefast22_array_1 = []
N_totalrecoverablefast22_array_1 = []
N_totalfast22_array_10 = []
N_totalobservablefast22_array_10 = []
N_totalrecoverablefast22_array_10 = []
N_totalfast22_array_30 = []
N_totalobservablefast22_array_30 = []
N_totalrecoverablefast22_array_30 = []
N_totalfast22_array_100 = []
N_totalobservablefast22_array_100 = []
N_totalrecoverablefast22_array_100 = []
N_totalfast22_array_1000 = []
N_totalobservablefast22_array_1000 = []
N_totalrecoverablefast22_array_1000 = []
N_totalfast195_array = []
N_totalobservablefast195_array = []
N_totalrecoverablefast195_array = []
N_totalfast195_array_03 = []
N_totalobservablefast195_array_03 = []
N_totalrecoverablefast195_array_03 = []
N_totalfast195_array_1 = []
N_totalobservablefast195_array_1 = []
N_totalrecoverablefast195_array_1 = []
N_totalfast195_array_10 = []
N_totalobservablefast195_array_10 = []
N_totalrecoverablefast195_array_10 = []
N_totalfast195_array_30 = []
N_totalobservablefast195_array_30 = []
N_totalrecoverablefast195_array_30 = []
N_totalfast195_array_100 = []
N_totalobservablefast195_array_100 = []
N_totalrecoverablefast195_array_100 = []
N_totalfast195_array_1000 = []
N_totalobservablefast195_array_1000 = []
N_totalrecoverablefast195_array_1000 = []
N_totalobsDist_array = []
N_totalobservableobsDist_array = []
N_totalrecoverableobsDist_array = []
N_totalobsDist_array_03 = []
N_totalobservableobsDist_array_03 = []
N_totalrecoverableobsDist_array_03 = []
N_totalobsDist_array_1 = []
N_totalobservableobsDist_array_1 = []
N_totalrecoverableobsDist_array_1 = []
N_totalobsDist_array_10 = []
N_totalobservableobsDist_array_10 = []
N_totalrecoverableobsDist_array_10 = []
N_totalobsDist_array_30 = []
N_totalobservableobsDist_array_30 = []
N_totalrecoverableobsDist_array_30 = []
N_totalobsDist_array_100 = []
N_totalobservableobsDist_array_100 = []
N_totalrecoverableobsDist_array_100 = []
N_totalobsDist_array_1000 = []
N_totalobservableobsDist_array_1000 = []
N_totalrecoverableobsDist_array_1000 = []
N_totalobsDist22_array = []
N_totalobservableobsDist22_array = []
N_totalrecoverableobsDist22_array = []
N_totalobsDist22_array_03 = []
N_totalobservableobsDist22_array_03 = []
N_totalrecoverableobsDist22_array_03 = []
N_totalobsDist22_array_1 = []
N_totalobservableobsDist22_array_1 = []
N_totalrecoverableobsDist22_array_1 = []
N_totalobsDist22_array_10 = []
N_totalobservableobsDist22_array_10 = []
N_totalrecoverableobsDist22_array_10 = []
N_totalobsDist22_array_30 = []
N_totalobservableobsDist22_array_30 = []
N_totalrecoverableobsDist22_array_30 = []
N_totalobsDist22_array_100 = []
N_totalobservableobsDist22_array_100 = []
N_totalrecoverableobsDist22_array_100 = []
N_totalobsDist22_array_1000 = []
N_totalobservableobsDist22_array_1000 = []
N_totalrecoverableobsDist22_array_1000 = []
N_totalobsDist195_array = []
N_totalobservableobsDist195_array = []
N_totalrecoverableobsDist195_array = []
N_totalobsDist195_array_03 = []
N_totalobservableobsDist195_array_03 = []
N_totalrecoverableobsDist195_array_03 = []
N_totalobsDist195_array_1 = []
N_totalobservableobsDist195_array_1 = []
N_totalrecoverableobsDist195_array_1 = []
N_totalobsDist195_array_10 = []
N_totalobservableobsDist195_array_10 = []
N_totalrecoverableobsDist195_array_10 = []
N_totalobsDist195_array_30 = []
N_totalobservableobsDist195_array_30 = []
N_totalrecoverableobsDist195_array_30 = []
N_totalobsDist195_array_100 = []
N_totalobservableobsDist195_array_100 = []
N_totalrecoverableobsDist195_array_100 = []
N_totalobsDist195_array_1000 = []
N_totalobservableobsDist195_array_1000 = []
N_totalrecoverableobsDist195_array_1000 = []
colorvalue_normal = []
colorvalue_fast = []
colorvalue_obsDist = []
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
fbFit= fitRagfb()
mbins = np.arange(0,10, 0.1, dtype='float')
for filenormal_ in sorted(allFiles_normal):
filename1 = filenormal_[60:]
fileid1 = filename1.strip('output_file.csv')
colorvalue1 = int(fileid1)
colorvalue_normal.append(colorvalue1)
print ("I'm starting " + fileid1)
datnormal = pd.read_csv(filenormal_, sep = ',', header=2)
##########################################################
datnormal1 = pd.read_csv(filenormal_, sep = ',', header=0, nrows=1)
N_tri1 = datnormal1["NstarsTRILEGAL"][0]
print("N_tri1 = ", N_tri1)
m1hAll01, m1b1 = np.histogram(datnormal["m1"], bins=mbins)
dm11 = np.diff(m1b1)
m1val1 = m1b1[:-1] + dm11/2.
fb1 = np.sum(m1hAll01*dm11*fbFit(m1val1))
N_mult1 = N_tri1*fb1
##########################################################
PeriodIn1 = datnormal['p']
if len(PeriodIn1) == 0.:
continue
if N_tri1 == 0:
continue
else:
# input period -- 'p' in data file
print('length period in = ', len(PeriodIn1))
PeriodOut1 = datnormal['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean1 = datnormal['appMagMean'] #apparent magnitude, will use to make cuts for 24 (default), 22, and then Kepler's range (?? -- brighter than LSST can manage-- to 19) OR 19.5 (SNR = 10)
print('length period out = ', len(PeriodOut1))
observable1 = np.where(PeriodOut1 != -999)[0]
observable1_03 = np.where(PeriodIn1[observable1] <= 0.3)[0]
observable1_1 = np.where(PeriodIn1[observable1] <= 1)[0]
observable1_10 = np.where(PeriodIn1[observable1] <= 10)[0]
observable1_30 = np.where(PeriodIn1[observable1] <= 30)[0]
observable1_100 = np.where(PeriodIn1[observable1] <= 100)[0]
observable1_1000 =
|
np.where(PeriodIn1[observable1] <= 1000)
|
numpy.where
|
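A minimal, self-contained sketch of the np.where filtering pattern this record completes, using hypothetical period values rather than the survey output read above:

import numpy as np

# Hypothetical input periods and recovered periods; -999 marks a failed recovery.
period_in = np.array([0.2, 0.8, 5.0, 40.0, 250.0])
period_out = np.array([0.21, -999.0, 5.1, 39.0, -999.0])

observable = np.where(period_out != -999)[0]               # indices of recovered systems -> [0 2 3]
observable_10 = np.where(period_in[observable] <= 10)[0]   # positions within `observable` -> [0 1]
print(observable, observable_10)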
#!/usr/bin/env python3
#
from __future__ import division, print_function
import os
import sys
import pints
import numpy as np
import myokit
import argparse
if __name__=="__main__":
import platform
parallel = True
if platform.system() == 'Darwin':
import multiprocessing
multiprocessing.set_start_method('fork')
elif platform.system() == 'Windows':
parallel = False
# Check input arguments
parser = argparse.ArgumentParser(
description='Fit all the hERG models to sine wave data')
parser.add_argument('--cell', type=int, default=2, metavar='N',
help='repeat number : 1, 2, 3, 4, 5, 6')
parser.add_argument('--model', type=str, default='wang', metavar='N',
help='which model to use')
parser.add_argument('--repeats', type=int, default=25, metavar='N',
help='number of CMA-ES runs from different initial guesses')
parser.add_argument('--protocol', type=int, default=1, metavar='N',
help='which protocol is used to fit the data: 1 for staircase #1, \
2 for sine wave, 3 for complex AP')
parser.add_argument("--big_pop_size", action='store_true', default=False,
help="whether to use big population size of 100 rather than default")
args = parser.parse_args()
cell = args.cell
# Load project modules
sys.path.append(os.path.abspath(os.path.join('python')))
import priors
import cells
import transformation
import data
import model
# Get model string and params
if args.model == 'mazhari':
model_str = 'Mazhari'
x_found = np.loadtxt('cmaesfits/parameter-sets/mazhari-params.txt', unpack=True)
elif args.model == 'mazhari-reduced':
model_str = 'Maz-red'
x_found = np.loadtxt('cmaesfits/parameter-sets/mazhari-reduced-params.txt', unpack=True)
elif args.model == 'wang':
model_str = 'Wang'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-params.txt', unpack=True)
elif args.model == 'wang-r1':
model_str = 'Wang-r1'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-r1-params.txt', unpack=True)
for i in {0, 2, 4, 5, 6, 8, 13}:
x_found[i] = np.exp(x_found[i])
elif args.model == 'wang-r2':
model_str = 'Wang-r2'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-r2-params.txt', unpack=True)
for i in {0, 2, 4, 5, 6, 8, 12}:
x_found[i] = np.exp(x_found[i])
elif args.model == 'wang-r3':
model_str = 'Wang-r3'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-r3-params.txt', unpack=True)
for i in {0, 2, 4, 5, 6, 8, 11}:
x_found[i] = np.exp(x_found[i])
elif args.model == 'wang-r4':
model_str = 'Wang-r4'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-r4-params.txt', unpack=True)
for i in {0, 1, 3, 4, 5, 7, 10}:
x_found[i] = np.exp(x_found[i])
elif args.model == 'wang-r5':
model_str = 'Wang-r5'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-r5-params.txt', unpack=True)
for i in {0, 2, 3, 4, 6, 9}:
x_found[i] = np.exp(x_found[i])
elif args.model == 'wang-r6':
model_str = 'Wang-r6'
x_found =
|
np.loadtxt('cmaesfits/parameter-sets/wang-r6-params.txt', unpack=True)
|
numpy.loadtxt
|
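The completion above loads a one-column parameter file with np.loadtxt(..., unpack=True). A small illustrative sketch with a made-up file name and values:

import numpy as np

# Write a tiny stand-in parameter file, then read it back as a 1-D array.
np.savetxt('example-params.txt', [0.1, 2.5, -3.0])
x_found = np.loadtxt('example-params.txt', unpack=True)
print(x_found)  # [ 0.1  2.5 -3. ]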
"""Module to load data.
Consists of functions to load data from four different datasets (IMDb, Rotten
Tomatoes, Tweet Weather, Amazon Reviews). Each of these functions does the
following:
- Read the required fields (texts and labels).
- Do any pre-processing if required. For example, make sure all label
values are in range [0, num_classes-1].
- Split the data into training and validation sets.
- Shuffle the training data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import numpy as np
import pandas as pd
def load_imdb_sentiment_analysis_dataset(data_path, seed=123):
"""Loads the Imdb movie reviews sentiment analysis dataset.
# Arguments
data_path: string, path to the data directory.
seed: int, seed for randomizer.
# Returns
A tuple of training and validation data.
Number of training samples: 25000
Number of test samples: 25000
Number of categories: 2 (0 - negative, 1 - positive)
# References
Maas et al., http://www.aclweb.org/anthology/P11-1015
Download and uncompress archive from:
http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
"""
imdb_data_path = os.path.join(data_path, 'aclImdb')
# Load the training data
train_texts = []
train_labels = []
for category in ['pos', 'neg']:
train_path = os.path.join(imdb_data_path, 'train', category)
for fname in sorted(os.listdir(train_path)):
if fname.endswith('.txt'):
with open(os.path.join(train_path, fname)) as f:
train_texts.append(f.read())
train_labels.append(0 if category == 'neg' else 1)
# Load the validation data.
test_texts = []
test_labels = []
for category in ['pos', 'neg']:
test_path = os.path.join(imdb_data_path, 'test', category)
for fname in sorted(os.listdir(test_path)):
if fname.endswith('.txt'):
with open(os.path.join(test_path, fname)) as f:
test_texts.append(f.read())
test_labels.append(0 if category == 'neg' else 1)
# Shuffle the training data and labels.
random.seed(seed)
random.shuffle(train_texts)
random.seed(seed)
random.shuffle(train_labels)
return ((train_texts, np.array(train_labels)),
(test_texts,
|
np.array(test_labels)
|
numpy.array
|
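The completion simply wraps the accumulated label list in np.array so the loader returns NumPy label arrays alongside the raw texts. In reduced form with dummy labels:

import numpy as np

test_labels = [1, 0, 0, 1]
labels = np.array(test_labels)     # Python list -> ndarray
print(labels.dtype, labels.shape)  # e.g. int64 (4,)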
import unittest
import numpy
import sys
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../')
import common.numpy
import common.eval
import random
class TestEval(unittest.TestCase):
def distributionAt(self, label, confidence, labels=10):
probabilities = [0] * labels
probabilities[label] = confidence
for i in range(len(probabilities)):
if i == label:
continue
probabilities[i] = (1 - confidence) / (labels - 1)
self.assertAlmostEqual(1, numpy.sum(probabilities))
return probabilities
def testCleanEvaluationNotCorrectShape(self):
labels = numpy.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
probabilities = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.assertRaises(AssertionError, common.eval.CleanEvaluation, probabilities, labels)
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
probabilities = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.assertRaises(AssertionError, common.eval.CleanEvaluation, probabilities, labels)
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
probabilities = numpy.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
])
self.assertRaises(AssertionError, common.eval.CleanEvaluation, probabilities, labels)
def testCleanEvaluationNotCorrectClasses(self):
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 8])
probabilities = numpy.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
])
self.assertRaises(AssertionError, common.eval.CleanEvaluation, probabilities, labels)
def testCleanEvaluationNotProbabilities(self):
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 8])
probabilities = numpy.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.09],
[0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
])
self.assertRaises(AssertionError, common.eval.CleanEvaluation, probabilities, labels)
def testCleanEvaluationTestErrorNoValidation(self):
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
probabilities = numpy.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ok
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], # ok
[0.05, 0.05, 0.55, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05], # slightly different
[0.099, 0.099, 0.099, 0.109, 0.099, 0.099, 0.099, 0.099, 0.099, 0.099], # hard
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
])
eval = common.eval.CleanEvaluation(probabilities, labels, validation=0)
self.assertEqual(eval.test_N, eval.N)
self.assertEqual(eval.N, 10)
self.assertEqual(eval.test_error(), 0)
test_errors = [
1,
7,
99,
73,
]
for test_error in test_errors:
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels = numpy.tile(labels, 10)
probabilities = common.numpy.one_hot(labels, 10)
indices = numpy.array(random.sample(range(100), test_error))
self.assertEqual(numpy.unique(indices).shape[0], indices.shape[0])
for i in indices:
probabilities[i] = numpy.flip(probabilities[i])
eval = common.eval.CleanEvaluation(probabilities, labels, validation=0)
self.assertEqual(eval.test_error(), test_error/100)
self.assertRaises(AssertionError, eval.confidence_at_tpr, 0.1)
for threshold in numpy.linspace(0, 1, 50):
self.assertEqual(eval.test_error_at_confidence(threshold), test_error/100)
def testCleanEvaluationTestErrorValidation(self):
test_errors = [
1,
7,
89,
73,
]
for test_error in test_errors:
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels = numpy.tile(labels, 10)
self.assertTrue(labels.shape[0], 110)
probabilities = common.numpy.one_hot(labels, 10)
indices = numpy.array(random.sample(range(90), test_error))
self.assertEqual(numpy.unique(indices).shape[0], indices.shape[0])
for i in indices:
probabilities[i] = numpy.flip(probabilities[i])
eval = common.eval.CleanEvaluation(probabilities, labels, validation=0.1)
self.assertEqual(eval.N, 100)
self.assertEqual(eval.test_N, 90)
self.assertEqual(eval.validation_N, 10)
self.assertAlmostEqual(eval.test_error(), test_error/90)
for threshold in numpy.linspace(0, 1, 50):
self.assertAlmostEqual(eval.test_error_at_confidence(threshold), test_error/90)
def testCleanEvaluationTestErrorAtConfidence(self):
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels = numpy.tile(labels, 10)
probabilities = common.numpy.one_hot(labels, 10)
i = 0
for probability in numpy.linspace(0.1, 1, 101)[1:]:
probabilities_i = probabilities[i]
probabilities_i *= probability
probabilities_i[probabilities_i == 0] = (1 - probability)/9
probabilities[i] = probabilities_i
i += 1
eval = common.eval.CleanEvaluation(probabilities, labels, validation=0)
self.assertEqual(eval.test_error(), 0)
for threshold in numpy.linspace(0, 1, 100):
self.assertAlmostEqual(eval.test_error_at_confidence(threshold), 0)
test_errors = [
1,
7,
73,
99,
]
for test_error in test_errors:
labels = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels = numpy.tile(labels, 10)
probabilities = common.numpy.one_hot(labels, 10)
i = 0
self.assertEqual(numpy.linspace(0.11, 1, 101)[1:].shape[0], probabilities.shape[0])
for probability in numpy.linspace(0.11, 1, 101)[1:]:
probabilities_i = probabilities[i]
label = numpy.argmax(probabilities_i)
probabilities_i *= probability
probabilities_i[probabilities_i == 0] = (1 - probability) / 9
probabilities[i] = probabilities_i
self.assertEqual(label, labels[i])
self.assertEqual(labels[i], numpy.argmax(probabilities[i]))
i += 1
numpy.testing.assert_array_equal(labels, numpy.argmax(probabilities, axis=1))
indices = numpy.array(random.sample(range(100), test_error))
self.assertEqual(
|
numpy.unique(indices)
|
numpy.unique
|
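numpy.unique is used in these tests to assert that the sampled indices contain no duplicates: for a duplicate-free array its output has the same length as the input. A minimal standalone check:

import random
import numpy

# random.sample draws without replacement, so the indices are unique
# and numpy.unique preserves the array length.
indices = numpy.array(random.sample(range(100), 10))
assert numpy.unique(indices).shape[0] == indices.shape[0]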
"""
A module for unit tests of the logic module
Todo:
* automate the creation of particle arrays
so that it isn't hardcoded in each test func
"""
import unittest
import numpy as np
from unittest.mock import Mock
from ..sbelt import logic
ATTR_COUNT = 7 # Number of attributes associated with a Particle
# For reference:
# [0] = x-coord
# [1] = diameter,
# [2] = y-coord (elevation),
# [3] = uid,
# [4] = active (boolean)
# [5] = age counter
# [6] = loop age counter
class TestGetEventParticlesWithOneSubregion(unittest.TestCase):
"""
Test that getting event particles with one Subregion
returns a valid list of event particles.
A 'valid list' will change depending on the function.
See function docstrings for more details.
Attributes:
test_length: the length of the bed
num_particles: the number of model particles
mock_sub_list: list of Mock-type subregions
entrainment_events: number of entrainment events to request
per subregion
level_limit: random int representing level limit
"""
def setUp(self):
self.test_length = 10
self.num_particles = 3
mock_subregion = Mock()
mock_subregion.leftBoundary.return_value = 0
mock_subregion.rightBoundary.return_value = self.test_length
mock_subregion.getName.return_value = 'Mock_Subregion'
self.mock_sub_list = [mock_subregion]
self.entrainment_events = 3
self.level_limit = np.random.randint(0, np.random.randint(2, 10))
def test_all_active_returns_valid_list(self):
"""If there are N active particles in 1 subregion and N events requested
per subregion then a valid list will be a list of all particles.
"""
model_particles = np.zeros((self.num_particles, ATTR_COUNT))
model_particles[:,3] = np.arange(self.num_particles) # unique ids
model_particles[:,4] = np.ones(self.num_particles) # all active
model_particles[:,0] = np.random.randint(
self.test_length,
size=self.num_particles ) # random placement
list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
model_particles,
self.level_limit )
self.assertCountEqual(list, model_particles[:,3])
# Height dependency should not affect list results here
hp_list = list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
model_particles,
self.level_limit,
height_dependant=True )
self.assertCountEqual(hp_list, model_particles[:,3])
self.assertCountEqual(hp_list, list)
def test_not_all_active_returns_list_of_2(self):
"""If there are N particles in 1 subregion and N-1 are _active_,
and if N events are requested per subregion then a valid list will be
a list of the two active particles.
"""
mp_one_inactive = np.zeros((self.num_particles, ATTR_COUNT))
mp_one_inactive[:,3] = np.arange(self.num_particles)
mp_one_inactive[0][4] = 1
mp_one_inactive[1][4] = 1
mp_one_inactive[:,0] = np.random.randint(self.test_length, size=self.num_particles)
list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
mp_one_inactive,
self.level_limit )
self.assertEqual(len(list), self.num_particles - 1)
active_list = mp_one_inactive[mp_one_inactive[:,4] != 0]
self.assertCountEqual(list, active_list[:,3])
def test_none_active_returns_empty_list(self):
"""If there are N particles in 1 subregion and 0 are _active_
and if N events are requested per subregion, then a valid list will be
an empty list.
"""
np_none_active = np.zeros((self.num_particles, ATTR_COUNT))
np_none_active[:,3] = np.arange(self.num_particles)
np_none_active[:,0] = np.random.randint(self.test_length, size=self.num_particles)
empty_list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
np_none_active,
self.level_limit )
self.assertEqual(len(empty_list), 0)
def test_all_ghost_particles_returns_ghost_particles(self):
"""If there are N particles in 1 subregion and all N particles
are 'ghost' particles (at -1), and if N particles are requested
per subregion, then a valid list will be a list of all the
ghost particles (all the particles).
"""
np_all_ghost = np.zeros((self.num_particles, ATTR_COUNT))
np_all_ghost[:,3] = np.arange(self.num_particles)
np_all_ghost[:,0] = -1
ghost_list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
np_all_ghost,
self.level_limit )
self.assertCountEqual(ghost_list, np_all_ghost[:,3])
class TestGetEventParticlesWithNSubregions(unittest.TestCase):
"""
Test that getting event particles with N Subregions
returns a valid list of event particles.
A 'valid list' will change depending on the function.
See function docstrings for more details.
Attributes:
test_length: the length of the bed
num_particles: the number of model particles
mock_sub_list_2: list of Mock-type subregions
entrainment_events: number of entrainment events to request
per subregion
level_limit: random int representing level limit
"""
def setUp(self):
self.test_length = 20
self.num_particles = 6
mock_subregion_0 = Mock()
mock_subregion_0.leftBoundary.return_value = 0
mock_subregion_0.rightBoundary.return_value = self.test_length / 2
mock_subregion_0.getName.return_value = 'Mock_Subregion_0'
mock_subregion_1 = Mock()
mock_subregion_1.leftBoundary.return_value = self.test_length / 2
mock_subregion_1.rightBoundary.return_value = self.test_length
mock_subregion_1.getName.return_value = 'Mock_Subregion_1'
self.mock_sub_list_2 = [mock_subregion_0, mock_subregion_1]
self.entrainment_events = 3
self.level_limit = np.random.randint(0, np.random.randint(2, 10))
def test_all_active_returns_3_per_subregion(self):
"""If there are M active particles in each of the N subregions and there
are M events requested per subregion, then a valid list will be a
list of all M*N particles.
"""
model_particles = np.zeros((self.num_particles, ATTR_COUNT))
model_particles[:,3] = np.arange(self.num_particles) # unique ids
model_particles[:,4] = np.ones(self.num_particles) # all active
# Randomly place first three particles in Subregion 1
model_particles[0:3, 0] = np.random.randint(
9,
size=3 )
# Randomly place last three particles in Subregion 2
model_particles[3:6, 0] = np.random.randint(
11,
self.test_length,
size=3 )
list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list_2,
model_particles,
self.level_limit )
self.assertCountEqual(list, model_particles[:,3])
self.assertEqual(len(list), self.entrainment_events * 2)
# Height dependency should not affect list results here
hp_list = list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list_2,
model_particles,
self.level_limit,
height_dependant=True )
self.assertCountEqual(hp_list, model_particles[:,3])
self.assertCountEqual(hp_list, list)
def test_active_in_1_subregion_returns_only_active(self):
"""If there are M active particles in each 1..K subregions and 0
active in K+1...N subregions, and there are M events requested per
subregion, then a valid list will be a list of the M*K active particles.
This is simplified down to only 2 subregions.
"""
mp_half_active = np.zeros((self.num_particles, ATTR_COUNT))
mp_half_active[:,3] = np.arange(self.num_particles)
mp_half_active[0:3, 4] = np.ones(int((self.num_particles/2))) # First half active
mp_half_active[0:3, 0] =
|
np.random.randint(10,size=3 )
|
numpy.random.randint
|
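np.random.randint(high, size=n) draws n integers uniformly from [0, high); the completion uses it to scatter the active particles over the first half of the bed. A standalone sketch with the same hypothetical bounds:

import numpy as np

test_length = 20
# Three random integer x-coordinates in [0, 10), i.e. the left half of the bed.
x_coords = np.random.randint(test_length // 2, size=3)
print(x_coords)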
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import numpy as np
import scipy.misc
import itertools
from math import pow
import seaborn as sns
import matplotlib.pyplot as plt
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
class Data_Generator(object):
"""
Generate one dimensional simulated data.
Randomly sample \mu from uniform distribution.
Velocity is fixed.
Place vector is generated from a Gaussian distribution.
"""
def __init__(self, num_interval=1000, min=0, max=1, to_use_3D_map=False):
"""
Sigma is the variance in the Gaussian distribution.
"""
self.to_use_3D_map = to_use_3D_map
self.num_interval = num_interval
self.min, self.max = min, max
self.interval_length = (self.max - self.min) / (self.num_interval - 1)
def generate(self, num_data, velocity=None, num_step=1, dtype=2, test=False, visualize=False):
if self.to_use_3D_map:
if dtype == 1:
place_pair = self.generate_multi_type1(num_data, 3)
elif dtype == 2:
place_pair = self.generate_multi_type2(num_data, velocity, num_step, 3, test=test, visualize=visualize)
elif dtype == 4:
place_pair = self.generate_two_dim_multi_type4(num_data)
else:
raise NotImplementedError
else:
if dtype == 1:
place_pair = self.generate_multi_type1(num_data, 2)
elif dtype == 2:
place_pair = self.generate_multi_type2(num_data, velocity, num_step, 2, test=test, visualize=visualize)
elif dtype == 4:
place_pair = self.generate_two_dim_multi_type4(num_data)
else:
raise NotImplementedError
return place_pair
def generate_multi_type1(self, num_data, num_dim):
"""sample n-dimentional location pairs"""
mu_before = np.random.choice(self.num_interval, size=[num_data, num_dim])
mu_after = np.random.choice(self.num_interval, size=[num_data, num_dim])
vel = np.sqrt(np.sum((mu_after - mu_before) ** 2, axis=1)) * self.interval_length
place_pair = {'before': mu_before, 'after': mu_after, 'vel': vel}
return place_pair
def generate_multi_type2(self, num_data, velocity, num_step, num_dim, test=False, visualize=False):
"""sample discretized motions and corresponding place pairs"""
num_vel = len(velocity)
if not test:
# if pow(num_vel, num_step) < num_data:
# vel_list = np.asarray(list(itertools.product(np.arange(num_vel), repeat=num_step)))
# num_vel_list = len(vel_list)
#
# div, rem = num_data // num_vel_list, num_data % num_vel_list
# vel_idx = np.vstack((np.tile(vel_list, [div, 1]), vel_list[np.random.choice(num_vel_list, size=rem)]))
# np.random.shuffle(vel_idx)
# else:
vel_idx = np.random.choice(num_vel, size=[num_data, num_step])
vel_grid = np.take(velocity, vel_idx, axis=0)
vel = vel_grid * self.interval_length
vel_grid_cumsum = np.cumsum(vel_grid, axis=1)
mu_max = np.fmin(self.num_interval, np.min(self.num_interval - vel_grid_cumsum, axis=1))
mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))
mu_start = np.random.sample(size=[num_data, num_dim])
mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)
mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)
else:
if visualize:
mu_start = np.reshape([4, 4], newshape=(1, 1, 2))
vel_pool = np.where((velocity[:, 0] >= -1) & (velocity[:, 1] >= -1))
vel_idx = np.random.choice(vel_pool[0], size=[num_data * 10, num_step])
vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)
mu_seq = np.concatenate((np.tile(mu_start, [num_data * 10, 1, 1]), vel_grid_cumsum + mu_start), axis=1)
mu_seq_new, vel_idx_new = [], []
for i in range(len(mu_seq)):
mu_seq_sub = mu_seq[i]
if len(np.unique(mu_seq_sub, axis=0)) == len(mu_seq_sub):
mu_seq_new.append(mu_seq[i])
vel_idx_new.append(vel_idx[i])
mu_seq, vel_idx = np.stack(mu_seq_new, axis=0), np.stack(vel_idx_new, axis=0)
mu_seq_rs = np.reshape(mu_seq, [-1, (num_step + 1) * 2])
select_idx = np.where(np.sum(mu_seq_rs >= self.num_interval, axis=1) == 0)[0][:num_data]
vel_idx = vel_idx[select_idx]
mu_seq = mu_seq[select_idx]
vel = np.take(velocity, vel_idx, axis=0) * self.interval_length
else:
vel_idx = np.random.choice(num_vel, size=[num_data * num_dim, num_step])
vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)
mu_max = np.fmin(self.num_interval, np.min(self.num_interval - vel_grid_cumsum, axis=1))
mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))
select_idx = np.where(np.sum(mu_max < mu_min, axis=1) == 0)[0][:num_data]
vel_idx, vel_grid_cumsum = vel_idx[select_idx], vel_grid_cumsum[select_idx]
vel_grid = np.take(velocity, vel_idx, axis=0)
mu_max, mu_min = mu_max[select_idx], mu_min[select_idx]
mu_start = np.random.sample(size=[num_data, num_dim])
mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)
mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)
vel = vel_grid * self.interval_length
# sns.distplot(vel, rug=True, hist=False)
# plt.show()
place_seq = {'seq': mu_seq, 'vel': vel, 'vel_idx': vel_idx}
return place_seq
def generate_two_dim_multi_type3(self, num_data, max_vel, num_step, test=False):
"""sample discretized motions and corresponding place pairs"""
max_vel = max_vel * self.interval_length
if not test:
r = np.sqrt(np.random.random(size=[num_data, num_step])) * max_vel
theta = np.random.uniform(low=-np.pi, high=np.pi, size=[num_data, num_step])
vel = np.zeros(shape=(num_data, num_step, 2), dtype=float)
vel[:, :, 0] = r * np.cos(theta)
vel[:, :, 1] = r * np.sin(theta)
vel_cumsum = np.cumsum(vel, axis=1)
mu_max = np.fmin(1, np.min(1 - vel_cumsum, axis=1))
mu_min = np.fmax(0, np.max(- vel_cumsum, axis=1))
mu_start = np.random.random(size=(num_data, 2)) * (mu_max - mu_min) + mu_min
mu_start = np.expand_dims(mu_start, axis=1)
mu_seq = np.concatenate((mu_start, mu_start + vel_cumsum), axis=1) / self.interval_length
else:
if num_data == 1:
mu_start = np.reshape([6, 6], newshape=(1, 1, 2)) * self.interval_length
r = np.sqrt(np.random.random(size=[num_data * 10, num_step])) * max_vel
theta = np.random.uniform(low=-np.pi, high=np.pi, size=[num_data * 10, num_step])
vel = np.zeros(shape=(num_data * 10, num_step, 2), dtype=float)
vel[:, :, 0] = r * np.cos(theta)
vel[:, :, 1] = r * np.sin(theta)
vel[np.where(vel <= 0)[0]] = vel[np.where(vel <= 0)[0]] * 0.3
vel_cumsum = np.cumsum(vel, axis=1)
mu_seq = np.concatenate((mu_start, mu_start + vel_cumsum), axis=1) / self.interval_length
select_idx = np.where(
|
np.sum(mu_seq > self.num_interval - 1, axis=1)
|
numpy.sum
|
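Here np.sum with axis=1 counts, per trajectory, how many positions fall outside the grid, and np.where then keeps only the trajectories with zero violations. An illustrative reduced example with a made-up 2-D array standing in for the flattened sequences:

import numpy as np

num_interval = 8
mu_seq = np.array([[1.0, 3.0, 5.0],
                   [2.0, 7.5, 9.0]])                     # second row leaves the grid
violations = np.sum(mu_seq > num_interval - 1, axis=1)   # -> [0 2]
keep = np.where(violations == 0)[0]                      # -> [0]
print(violations, keep)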
# -*- coding: utf-8 -*-
"""
Created on 01/26/2022
@author: maxcurie
"""
import numpy as np
import csv
from mpi4py import MPI
from Dispersion import VectorFinder_auto_Extensive
from MPI_tools import task_dis
comm=MPI.COMM_WORLD
rank=comm.Get_rank()
size=comm.Get_size()
#make sure the csv exists and has the first line:
#omega_omega_n,gamma_omega_n,nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar
#************Start of user block******************
#para= [nu, zeff,eta, shat, beta, ky, mu]
para_min=[0.1, 1., 0.5, 0.001,0.0005, 0.01, 0.]
para_max=[10, 5., 5., 0.05, 0.02, 0.2, 10.]
path='.'
Output_csv=path+'0MTM_scan_CORI_np_rand.csv'
xstar=10.
ModIndex=1
#************End of user block******************
para_min=np.array(para_min)
para_max=np.array(para_max)
width=(para_max-para_min)
while 1==1:
param=para_min+width*
|
np.random.rand(7)
|
numpy.random.rand
|
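np.random.rand(7) returns seven uniform samples in [0, 1); scaling by the box width and shifting by the minimum yields one random point in the 7-dimensional parameter box. A minimal sketch of the same pattern:

import numpy as np

para_min = np.array([0.1, 1.0, 0.5, 0.001, 0.0005, 0.01, 0.0])
para_max = np.array([10., 5.0, 5.0, 0.05, 0.02, 0.2, 10.0])
width = para_max - para_min

# One uniform draw inside the box defined by para_min/para_max.
param = para_min + width * np.random.rand(7)
assert np.all((param >= para_min) & (param <= para_max))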
"""
post-process results from two-phase cellulose-only EH model (presumed
generated from c++ implementation)
"""
# <NAME>, 2021
import numpy as np
from matplotlib.pyplot import *
ion()
# hard-coded parameters
MwE = 65000.0
MwG = 162.0
Mwg = 180.0
rhol = 1000.0
rhoT = rhol
rGg = MwG/Mwg
# hard-coded initial values in the c++ file, and derived values
lmbde = 0.03
fis0 = 0.05
dilution_factor = 0.5
xG0 = 1.0
xX0 = 0.0
rhog0 = 0.0
rhox0 = 1.0*dilution_factor
rhof0 = 0.0*dilution_factor
conversion_xylan = 0.5
yF0 = 0.2 + 0.6*conversion_xylan
fG0 = xG0*fis0
fGF0 = yF0*fG0
fGR0 = (1-yF0)*fG0
fX0 = xX0*fis0
fL0 = (1 - xG0 - xX0)*fis0
fliq0 = 1 - fis0
fg0 = rhog0*fliq0/rhol
fET = lmbde*fG0
### read in the results ###
data =
|
np.loadtxt("eh_results.csv", skiprows=1, delimiter=",")
|
numpy.loadtxt
|
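This record's completion reads a comma-separated results table while skipping the header row. A self-contained sketch with a hypothetical two-column CSV (not the model output used above):

import numpy as np

# Build a small stand-in CSV with a header line, then load the numeric body.
with open("example_results.csv", "w") as f:
    f.write("t,fis\n0.0,0.05\n1.0,0.04\n")
data = np.loadtxt("example_results.csv", skiprows=1, delimiter=",")
print(data.shape)  # (2, 2)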
"""
The module ``ti_milp`` provides an implementation of the task allocation
algorithm for robotic networks with periodic connectivity.
"""
"""
Copyright 2019 by California Institute of Technology. ALL RIGHTS RESERVED.
United States Government sponsorship acknowledged. Any commercial use
must be negotiated with the Office of Technology Transfer at the
California Institute of Technology.
This software may be subject to U.S. export control laws and regulations.
By accepting this document, the user agrees to comply with all applicable
U.S. export laws and regulations. User has the responsibility to obtain
export licenses, or other export authority as may be required before
exporting such information to foreign countries or providing access to
foreign persons.
This software is a copy and may not be current. The latest version is
maintained by and may be obtained from the Mobility and Robotics System
Section (347) at the Jet Propulsion Laboratory. Suggestions and patches
are welcome and should be sent to the software's maintainer.
"""
# pylint:disable=E1101
import numpy as np
import time
try:
import cplex # This will be problematic on ARM
CPLEXAvailable = True
except:
CPLEXAvailable = False
try:
# Install pyglpk by bradforboyle. Will not work with Python-GLPK.
import glpk
GLPKAvailable = True
except:
GLPKAvailable = False
try:
import pyscipopt as scip
SCIPAvailable = True
except:
SCIPAvailable = False
import json
from mosaic_schedulers.common.utilities.contact_plan_handler import compute_time_invariant_bandwidth
class MILPTasks:
"""A class containing a representation of the tasks for the MILP scheduler.
:param OptionalTasks: a dict with Tasks as keys. OT[Task] is True iff the task
is optional.
:type OptionalTasks: dict
:param TaskReward: a dict with Tasks as keys. TR[Task] is the reward obtained
for performing the task, a float.
:type TaskReward: dict
:param ProductsBandwidth: a dict with Tasks as keys. PB[Task] is the bandwidth
required to stream the products of Task (in storage units per time
units), a float.
:type ProductsBandwidth: dict
:param ProductsDataSize: a dict with Tasks as keys. PDS[Task] is the size of the
products of Task (in storage units), a float.
:type ProductsDataSize: dict
:param DependencyList: a dict with Tasks as keys. DL[Task] is a list
of tasks whose data products are required to compute Task.
:type DependencyList: dict
:param MaxLatency: a dict with keys task1, task2. MaxLatency[T1][T2] is the maximum
latency that the data products of T2 (a dependency of T1) can tolerate
before they are ingested by task T1.
:type MaxLatency: dict
"""
def __init__(self,
OptionalTasks={},
TaskReward={},
ProductsBandwidth={},
ProductsDataSize={},
DependencyList={},
MaxLatency={},
):
self.OptionalTasks = OptionalTasks
self.TaskReward = TaskReward
self.ProductsBandwidth = ProductsBandwidth
self.ProductsDataSize = ProductsDataSize
self.DependencyList = DependencyList
self.MaxLatency = MaxLatency
self.validate()
def validate(self):
""" Ensure the Tasks structure is valid, i.e. the keys to the various
components are consistent """
assert set(self.ProductsBandwidth.keys()) == set(
self.ProductsDataSize.keys())
assert set(self.DependencyList.keys()) <= (
set(self.ProductsBandwidth.keys()))
assert set(self.DependencyList.keys()) == set(self.MaxLatency.keys())
for task in self.MaxLatency.keys():
assert set(self.MaxLatency[task].keys()) == set(
self.DependencyList[task])
if self.OptionalTasks.keys(): # If this is not empty
assert set(self.ProductsBandwidth.keys()) == set(
self.OptionalTasks.keys())
assert set(self.OptionalTasks.keys()) == set(
self.TaskReward.keys())
else:
for task in self.ProductsBandwidth.keys():
self.OptionalTasks[task] = False
self.TaskReward[task] = 0.
self.RequiringTasks = {}
for child in self.DependencyList.keys():
for parent in self.DependencyList[child]:
if parent not in self.RequiringTasks.keys():
self.RequiringTasks[parent] = []
self.RequiringTasks[parent] += [child]
class MILPAgentCapabilities:
"""A class containing a representation of the agent capabilities for the
MILP scheduler.
:param ComputationLoad: a dictionary with keys Agent, Task. The value of
ComputationLoad[Agent][Task] is the amount of Agent's computational
resources required to complete Task.
:type ComputationLoad: dict
:param EnergyCost: a dictionary with keys Agent, Task. EnergyCost[A][T] is the
energy cost when agent A computes task T.
:type EnergyCost: dict
:param MaxComputationLoad: a dict with keys Agents. MaxComputationLoad[A] is the
maximum computational resources of agent A.
:type MaxComputationLoad: dict
:param LinkComputationalLoadIn: a dict with keys Agent, Agent.
LinkComputationalLoadIn[A1][A2] is the computational load required to
decode messages on link [A1][A2] at A2.
:type LinkComputationalLoadIn: dict
:param LinkComputationalLoadOut: a dict with keys Agent, Agent.
LinkComputationalLoadOut[A1][A2] is the computational load required to
encode messages on link [A1][A2] at A1.
:type LinkComputationalLoadOut: dict
"""
def __init__(self,
ComputationLoad={},
EnergyCost={},
MaxComputationLoad={},
LinkComputationalLoadIn={},
LinkComputationalLoadOut={},
):
self.ComputationLoad = ComputationLoad
self.EnergyCost = EnergyCost
self.MaxComputationLoad = MaxComputationLoad
self.LinkComputationalLoadIn = LinkComputationalLoadIn
self.LinkComputationalLoadOut = LinkComputationalLoadOut
class CommunicationNetwork:
""" A class containing the properties of the communication network
used by the time-invariant scheduler.
:param Bandwidth: a dictionary with keys Agent, Agent. Bandwidth[A1][A2]
is the bandwidth from agent A1 to agent A2. If Bandwidth[A1][A2]
is zero, the link is not considered in the optimization problem.
:type Bandwidth: dict
:param Latency: a dictionary with keys Agent, Agent. Latency[A1][A2] is
the latency of the link. Note that the latency of a datagram should
be (but isn't at this time) computed as Latency[A1][A2] +
datagram_size/Bandwidth[A1][A2].
:type Latency: dict
:param EnergyCost: a dictionary with keys Agent, Agent. EnergyCost[A1][A2]
is the energy cost to transmit one bit on link A1-A2. The actual
energy cost is computed as EnergyCost[A1][A2]*bit_rate[A1][A2].
:type EnergyCost: dict
"""
def __init__(self,
Bandwidth={},
Latency={},
EnergyCost={},
):
self.Bandwidth = Bandwidth
self.Latency = Latency
self.EnergyCost = EnergyCost
class JSONSolver:
"""
Creates an instance of the time-invariant problem based on a problem input in the format
described in the :doc:`API`.
:param JSONProblemDescription: a JSON description of the problem. See the :doc:`API` for a detailed description.
:type JSONProblemDescription: str, optional
:param Verbose: whether to print status and debug messages. Defaults to False
:type Verbose: bool, optional
:param solver: the solver to use. Should be 'GLPK', 'CPLEX', or 'SCIP', defaults to 'GLPK'.
:type solver: str, optional
:param TimeLimit: a time limit after which to stop the optimizer. Defaults to None (no time limit).
Note that certain solvers (in particular, GLPK) may not honor the time limit.
:type TimeLimit: float, optional
:raises AssertionError: if the JSONProblemDescription is not valid.
.. warning ::
This solver does not support disjunctive prerequirements (do this task OR that task).
If a problem with disjunctive prerequirement is passed, the solver will assert.
"""
def __init__(self,
JSONProblemDescription,
Verbose=False,
solver='GLPK',
TimeLimit=None):
if solver not in ['GLPK', 'CPLEX', 'SCIP']:
if CPLEXAvailable:
print("WARNING: invalid solver. Defaulting to CPLEX")
solver = "CPLEX"
else:
print("WARNING: invalid solver. Defaulting to GLPK")
solver = "GLPK"
if solver == "CPLEX" and (CPLEXAvailable is False):
print("WARNING: CPLEX not available. Switching to GLPK")
solver = "GLPK"
if solver == "SCIP" and (SCIPAvailable is False):
print("WARNING: SCIP not available. Switching to GLPK")
solver = "GLPK"
if solver == "GLPK" and (GLPKAvailable is False):
print("WARNING: GLPK not available. Aborting.")
return
if type(JSONProblemDescription) is dict:
JSONInput = JSONProblemDescription
else: # Attempt to deserialize
JSONInput = json.loads(JSONProblemDescription)
# Time
Thor = JSONInput["Time"]["Thor"]
# Tasks
# Translate tasks to TI format. Check that disjunctive requirements are not included
DepList = {}
for task in JSONInput["Tasks"]["DependencyList"].keys():
if JSONInput["Tasks"]["DependencyList"][task]:
DepList[task] = []
for dep in JSONInput["Tasks"]["DependencyList"][task]:
if dep:
assert len(
dep) == 1, 'Disjunctive prerequirements are not supported yet'
DepList[task] += [dep[0]]
if not DepList[task]: # If the list is empty, pop it
DepList.pop(task)
# Convert products size to average bandwidth needed
PrBw = {}
for task in JSONInput["Tasks"]["ProductsSize"]:
# Average bandwidth to finish by the end of the horizon
PrBw[task] = float(JSONInput["Tasks"]
["ProductsSize"][task])/float(Thor)
# Clean up latency by removing entries corresponding to dependencies that have been removed
# This will ensure that the MILPTasks object is valid, specifically that DependencyList
# and MaxLatency have the same keys
MaxLatency = {}
for task in JSONInput["Tasks"]["MaxLatency"]:
if JSONInput["Tasks"]["MaxLatency"][task]:
MaxLatency[task] = JSONInput["Tasks"]["MaxLatency"][task]
Tasks = MILPTasks(
OptionalTasks=JSONInput["Tasks"]["OptionalTasks"],
TaskReward=JSONInput["Tasks"]["TaskReward"],
ProductsDataSize=JSONInput["Tasks"]["ProductsSize"],
ProductsBandwidth=PrBw,
DependencyList=DepList,
MaxLatency=MaxLatency,
)
# Agent capabilities
# Convert computational load from instantaneous to average
CompLoad = {}
for task in JSONInput["AgentCapabilities"]["ComputationTime"].keys():
for agent in JSONInput["AgentCapabilities"]["ComputationTime"][task].keys():
if agent not in CompLoad.keys():
CompLoad[agent] = {}
CompLoad[agent][task] = float(JSONInput["AgentCapabilities"]["ComputationLoad"][task][agent])*float(
JSONInput["AgentCapabilities"]["ComputationTime"][task][agent]) / float(Thor)
TILinkLoadIn = JSONInput["AgentCapabilities"]["LinkComputationalLoadIn"]
TILinkLoadOut = JSONInput["AgentCapabilities"]["LinkComputationalLoadOut"]
for agent1 in TILinkLoadIn.keys():
for agent2 in TILinkLoadIn[agent1].keys():
TILinkLoadIn[agent1][agent2] = float(
TILinkLoadIn[agent1][agent2])/float(Thor)
TILinkLoadOut[agent1][agent2] = float(
TILinkLoadOut[agent1][agent2])/float(Thor)
AgentCapabilities = MILPAgentCapabilities(
ComputationLoad=CompLoad,
EnergyCost=flipNestedDictionary(
JSONInput["AgentCapabilities"]["EnergyCost"]),
MaxComputationLoad=JSONInput["AgentCapabilities"]["MaxComputationLoad"],
LinkComputationalLoadIn=TILinkLoadIn,
LinkComputationalLoadOut=TILinkLoadOut
)
# Communication network
TVBandwidth = JSONInput["CommunicationNetwork"]
Bandwidth, Latency, EnergyCost = compute_time_invariant_bandwidth(
agents_list=JSONInput["AgentCapabilities"]["MaxComputationLoad"].keys(
),
TVBandwidth=TVBandwidth,
Thor=Thor)
CommNet = CommunicationNetwork(
Bandwidth=Bandwidth,
Latency=Latency,
EnergyCost=EnergyCost,
)
# Options
Options = JSONInput["Options"]
# Cost function
if 'CostFunction' in JSONInput.keys():
if 'CostFunction' in Options.keys():
disp("WARNING: overriding cost function in Options with 'CostFunction'")
Options['CostFunction'] = JSONInput['CostFunction']
# Create the problem
if solver == "GLPK":
self.Scheduler = MOSAICGLPKSolver(
AgentCapabilities=AgentCapabilities,
Tasks=Tasks,
CommunicationNetwork=CommNet,
Options=Options,
Verbose=Verbose
)
elif solver == "CPLEX":
self.Scheduler = MOSAICCPLEXSolver(
AgentCapabilities=AgentCapabilities,
Tasks=Tasks,
CommunicationNetwork=CommNet,
Options=Options,
Verbose=Verbose
)
elif solver == "SCIP":
self.Scheduler = MOSAICSCIPSolver(
AgentCapabilities=AgentCapabilities,
Tasks=Tasks,
CommunicationNetwork=CommNet,
Options=Options,
Verbose=Verbose
)
else:
print("ERROR: unsupported solver (how did you get here?)")
return
if TimeLimit is not None:
self.Scheduler.setTimeLimits(ClockTimeLimit=TimeLimit)
def schedule(self):
""" Solve the scheduling problem.
:return: A solution in the JSON format described in the :doc:`API`.
:rtype: str
"""
self.Scheduler.schedule()
return self.Scheduler.formatToBenchmarkIO()
class MOSAICMILPSolver:
"""
.. warning ::
For an easier-to-use and more consistent interface, you most likely want to call
:class:`JSONSolver` instead of this class and its subclasses.
Abstract implementation of the MILP task allocator.
Subclassed by :class:`MOSAICCPLEXSolver`, :class:`MOSAICSCIPSolver`, and :class:`MOSAICGLPKSolver`.
:param AgentCapabilities: detailing what the agents can do
:type AgentCapabilities: MOSAICTISolver.MILPAgentCapabilities
:param Tasks: detailing the tasks that must be achieved
:type Tasks: MOSAICTISolver.MILPTasks
:param CommunicationNetwork: detailing the communication network availability
:type CommunicationNetwork: MOSAICTISolver.CommunicationNetwork
:param Options: additional solver-specific options, defaults to {}
:type Options: dict, optional
:param Verbose: if True, prints status and debug messages. Defaults to False
:type Verbose: bool, optional
"""
def __init__(self,
AgentCapabilities,
Tasks,
CommunicationNetwork,
Options={},
Verbose=False):
self.AgentCapabilities = AgentCapabilities
self.Tasks = Tasks
self.CommNetwork = CommunicationNetwork
self.Options = Options
self.Verbose = Verbose
self.problemIsSetUp = False
self.problemIsSolved = False
self.problemIsFeasible = False
self.JSONIsFormed = False
self.solverSpecificInit()
self.TaskAssignment = None
self.CommSchedule = None
self.RawOutput = None
for i in self.CommNetwork.Bandwidth.keys():
if self.CommNetwork.Bandwidth[i][i] != 0:
self.CommNetwork.Bandwidth[i][i] = 0
self._verbprint(
"WARNING: removing bandwidth self-loop for agent {}".format(i))
def solverSpecificInit(self):
''' A place to define solver-specific initializations in subclasses '''
pass
def _verbprint(self, _str):
''' Helper function to only print if the verbose flag is set'''
if self.Verbose:
print(_str)
def getRawOutput(self):
""" Return raw problem output from the scheduler """
if self.problemIsSolved is False:
self.solve()
return self.TaskAssignment, self.CommSchedule, self.RawOutput
def schedule(self):
""" Set up and solve the scheduling problem """
self.setUp()
self.solve()
if self.problemIsFeasible is False:
print("Problem infeasible!")
return (None, None)
return (self.TaskAssignment, self.CommSchedule)
def setTimeLimits(self, DetTicksLimit=1e75, ClockTimeLimit=1e75):
'''
Sets a time limit for the overall process. May be ignored by certain
solvers.
'''
raise NotImplementedError
def setSolverParameters(self, Properties):
'''
Passes a dictionary of parameters to the solver
'''
raise NotImplementedError
def setUp(self):
""" Set up the optimization problem. Solver-specific. """
raise NotImplementedError
def solve(self):
""" Solve the optimization problem. Solver-specific. """
raise NotImplementedError
def setMIPStart(self, MIPStart):
""" Provide a warm start to the solver. Solver-specific. """
raise NotImplementedError
def getSolverState(self):
""" Get the solver state. Solver-specific. """
raise NotImplementedError
def getProblem(self):
""" Get the solver problem object. """
raise NotImplementedError
def getOptimizationTerminator(self):
''' Gets an object that can be called to stop the optimization process '''
raise NotImplementedError
def formatToBenchmarkIO(self):
''' Formats the scheduler output to the JSON format described in the :doc:`API` '''
tasks_output = []
if self.problemIsSolved is False:
self._verbprint("Calling solver")
self.solve()
if self.problemIsFeasible is False:
self.Timeline = None
self._verbprint("Problem infeasible!")
return None
TasksList = list(self.Tasks.OptionalTasks.keys())
# AgentsList = list(self.AgentCapabilities.ComputationLoad[TasksList[0]].keys())
# TaskSchedule = self.TaskAssignment['TaskSchedule']
TaskAgent = self.TaskAssignment['TaskAgent']
# ComputationTime = None #self.AgentCapabilities.ComputationTime
# TimeStep = self.TimeStep
CommSchedule = self.CommSchedule
for taskid in TasksList:
if (TaskAgent[taskid] is not None):
_agent = TaskAgent[taskid]
_time = None # float(TaskSchedule[taskid]) * TimeStep
# float(ComputationTime[taskid][_agent]) * TimeStep
_duration = None
_name = taskid
task_output = {
"duration": _duration,
"start_time": _time,
"id": _name,
"name": _name,
"params": {
"agent": _agent,
},
}
tasks_output.append(task_output)
for comm in CommSchedule:
_agent = comm[0]
_time = None # float(comm[3]) * TimeStep
_bandwidth = float(comm[3])
_taskname = comm[2]
_receiver = comm[1]
_duration = None # product size / _bandwidth
task_output = {
"duration": _duration,
"start_time": _time,
"id": "transfer_data",
"name": "transfer_data",
"params": {
"transmitter": _agent,
"data_type": _taskname,
"agent": _agent,
"receiver": _receiver,
"bandwidth": _bandwidth,
},
}
tasks_output.append(task_output)
def sort_by_time(val):
if val["start_time"] is None:
return float("inf")
else:
return val["start_time"]
tasks_output.sort(key=sort_by_time)
out_dict = {"tasks": tasks_output}
return json.dumps(out_dict)
if CPLEXAvailable is False:
class MOSAICCPLEXSolver(MOSAICMILPSolver):
'''
A dummy implementation of the MILP scheduler if CPLEX is not available.
'''
def solverSpecificInit(self):
print("WARNING: CPLEX not installed. This will not work.")
else:
class MOSAICCPLEXSolver(MOSAICMILPSolver):
"""
An implementation of the MILP scheduler that uses the IBM ILOG CPLEX solver.
"""
def solverSpecificInit(self):
# Create the CPLEX problem
self.cpx = cplex.Cplex()
self.cpx.objective.set_sense(self.cpx.objective.sense.minimize)
self.aborter = self.cpx.use_aborter(cplex.Aborter())
def setUp(self):
if self.problemIsSetUp:
return
SetupStartTime = time.time()
# This used to be a part of a larger function.
# By redefining these variables as local, we save a lot of typing
# and possible errors.
AgentCapabilities = self.AgentCapabilities
Tasks = self.Tasks
CommBandwidth = self.CommNetwork.Bandwidth
LinkLatency = self.CommNetwork.Latency
# Options = self.Options
TasksList = list(Tasks.ProductsBandwidth.keys())
AgentsList = list(AgentCapabilities.ComputationLoad.keys())
# We find things
ix = 0
X = {}
X_cost_energy = []
X_cost_optional = []
X_name = []
for i in AgentsList:
X[i] = {}
for m in TasksList:
X[i][m] = ix
ix += 1
X_cost_energy.append(AgentCapabilities.EnergyCost[i][m])
X_cost_optional.append(
-float(Tasks.OptionalTasks[m])*Tasks.TaskReward[m])
X_name.append('X[{},{}]'.format(i, m))
x_vars_len = ix
B = {}
B_name = []
B_cost_energy = []
B_cost_optional = []
B_upper_bound = []
for i in AgentsList:
B[i] = {}
for j in AgentsList:
if CommBandwidth[i][j] > 0:
B[i][j] = {}
for m in Tasks.RequiringTasks.keys():
B[i][j][m] = ix
B_name.append('B[{},{},{}]'.format(i, j, m))
B_cost_energy.append(
(self.CommNetwork.EnergyCost[i][j])*self.CommNetwork.Bandwidth[i][j])
B_cost_optional.append(0.)
B_upper_bound.append(
self.CommNetwork.Bandwidth[i][j])
ix += 1
b_vars_len = ix - x_vars_len
communication_cost_optional_tasks = 1e-8
C = {}
C_name = []
C_cost_energy = []
C_cost_optional = []
C_upper_bound = []
for i in AgentsList:
C[i] = {}
for j in AgentsList:
if CommBandwidth[i][j] > 0:
C[i][j] = {}
for m in Tasks.RequiringTasks.keys():
C[i][j][m] = {}
for mm in Tasks.RequiringTasks[m]:
C[i][j][m][mm] = ix
C_name.append(
'C[{},{},{},{}]'.format(i, j, m, mm))
C_cost_energy.append(0)
C_cost_optional.append(
communication_cost_optional_tasks)
C_upper_bound.append(
self.CommNetwork.Bandwidth[i][j])
ix += 1
comm_vars_length = ix - b_vars_len - x_vars_len
if "CostFunction" in self.Options.keys():
CostFunction = self.Options["CostFunction"]
else:
CostFunction = {"energy": 0.0,
"total_task_reward": 1.0, "total_time": 0.0}
print("WARNING: default cost function used.")
if CostFunction["total_time"] != 0:
print(
"WARNING: minimum-makespan optimization is not supported at this time.")
X_cost = CostFunction["total_task_reward"] * np.array(
X_cost_optional, dtype=float) + CostFunction["energy"] * np.array(X_cost_energy, dtype=float)
B_cost = CostFunction["total_task_reward"] * np.array(
B_cost_optional, dtype=float) + CostFunction["energy"] * np.array(B_cost_energy, dtype=float)
C_cost = CostFunction["total_task_reward"] * np.array(
C_cost_optional, dtype=float) + CostFunction["energy"] *
|
np.array(C_cost_energy, dtype=float)
|
numpy.array
|
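The completion converts the Python cost lists to float arrays so the energy and reward terms can be combined element-wise with the cost-function weights. In reduced form with hypothetical weights and costs:

import numpy as np

CostFunction = {"energy": 0.5, "total_task_reward": 1.0}
C_cost_optional = [0.0, 1e-8, 1e-8]
C_cost_energy = [0.2, 0.0, 0.1]

# Element-wise weighted sum of the optional-task and energy cost terms.
C_cost = (CostFunction["total_task_reward"] * np.array(C_cost_optional, dtype=float)
          + CostFunction["energy"] * np.array(C_cost_energy, dtype=float))
print(C_cost)  # approximately [0.1, 1e-08, 0.05]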
from summit.utils.dataset import DataSet
from summit.domain import *
from summit.experiment import Experiment
from summit import get_summit_config_path
from summit.utils import jsonify_dict, unjsonify_dict
import torch
import torch.nn.functional as F
from skorch import NeuralNetRegressor
from skorch.utils import to_device
from sklearn.compose import ColumnTransformer, TransformedTargetRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer
from sklearn.model_selection import (
train_test_split,
cross_validate,
GridSearchCV,
ParameterGrid,
)
from sklearn.model_selection._search import BaseSearchCV, _check_param_grid
from sklearn.base import (
BaseEstimator,
RegressorMixin,
is_classifier,
clone,
TransformerMixin,
)
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import (
_fit_and_score,
_score,
_aggregate_score_dicts,
)
from sklearn.metrics import r2_score
from sklearn.utils.validation import (
_deprecate_positional_args,
indexable,
check_is_fitted,
_check_fit_params,
)
from sklearn.utils import check_array, _safe_indexing
from sklearn.utils.fixes import delayed
from sklearn.metrics._scorer import _check_multimetric_scoring
from scipy.sparse import issparse
from tqdm.auto import tqdm
from joblib import Parallel
import pathlib
import numpy as np
from numpy.random import default_rng
import pandas as pd
from copy import deepcopy
from itertools import product
from collections import defaultdict
from copy import deepcopy
import pkg_resources
import time
import json
import types
import warnings
__all__ = [
"ExperimentalEmulator",
"ANNRegressor",
"get_bnn",
"RegressorRegistry",
"registry",
"get_pretrained_reizman_suzuki_emulator",
"get_pretrained_baumgartner_cc_emulator",
"ReizmanSuzukiEmulator",
"BaumgartnerCrossCouplingEmulator",
]
class ExperimentalEmulator(Experiment):
"""Experimental Emulator
Train a machine learning model based on experimental data.
The model acts as a benchmark for testing optimisation strategies.
Parameters
----------
model_name : str
Name of the model, ideally with no spaces
domain : :class:`~summit.domain.Domain`
The domain of the emulator
dataset : :class:`~summit.dataset.Dataset`, optional
Dataset used for training/validation
regressor : :class:`torch.nn.Module`, optional
Pytorch LightningModule class. Defaults to the ANNRegressor
output_variable_names : str or list, optional
The names of the output variables that the predictor should be trained to predict.
Defaults to all objectives in the domain.
descriptors_features : list, optional
A list of input categorical variable names that should be transformed
into their descriptors instead of using one-hot encoding.
clip : bool or list, optional
Whether to clip predictions to the limits of
the objectives in the domain. True (default) means
clipping is activated for all outputs and False means
it is not activated at all. A list of specific outputs to clip
can also be passed.
Notes
-----
By default, categorical features are pre-processed using one-hot encoding.
If descriptors are available, they can be used on a feature-by-feature basis
by specifying names of categorical variables in the descriptors_features keyword
argument.
Examples
--------
>>> from summit.benchmarks import ExperimentalEmulator, ReizmanSuzukiEmulator
>>> from summit.utils.dataset import DataSet
>>> import matplotlib.pyplot as plt
>>> import pathlib
>>> import pkg_resources
>>> # Steal domain and data from Reizman example
>>> DATA_PATH = pathlib.Path(pkg_resources.resource_filename("summit", "benchmarks/data"))
>>> model_name = f"reizman_suzuki_case_1"
>>> domain = ReizmanSuzukiEmulator.setup_domain()
>>> ds = DataSet.read_csv(DATA_PATH / f"{model_name}.csv")
>>> # Create emulator and train (bump max_epochs to 1000 to get better training)
>>> exp = ExperimentalEmulator(model_name,domain,dataset=ds)
>>> res = exp.train(max_epochs=10, cv_folds=2, random_state=100, test_size=0.2)
>>> # Plot to show the quality of the fit
>>> fig, ax = exp.parity_plot(include_test=True)
>>> plt.show()
>>> # Get scores on the test set
>>> scores = exp.test() # doctest: +SKIP
"""
def __init__(self, model_name, domain, **kwargs):
super().__init__(domain, **kwargs)
self.model_name = model_name
# Data
self.ds = kwargs.get("dataset")
self.descriptors_features = kwargs.get("descriptors_features", [])
if self.ds is not None:
self.n_features = self._caclulate_input_dimensions(
self.domain, self.descriptors_features
)
self.n_examples = self.ds.shape[0]
self.output_variable_names = kwargs.get(
"output_variable_names",
[v.name for v in self.domain.output_variables],
)
# Create the regressor
self.regressor = kwargs.get("regressor", ANNRegressor)
self.predictors = kwargs.get("predictors")
self.clip = kwargs.get("clip", True)
def _run(self, conditions, **kwargs):
input_columns = [v.name for v in self.domain.input_variables]
X = conditions[input_columns].to_numpy()
if X.shape[0] == len(input_columns):
X = X[np.newaxis, :]
X = pd.DataFrame(X, columns=input_columns)
y_pred, y_pred_std = self._predict(X)
return_std = kwargs.get("return_std", False)
for i, name in enumerate(self.output_variable_names):
if type(conditions) == pd.Series:
y = y_pred[0, i]
y_std = y_pred_std[0, i]
else:
y = y_pred[:, i]
y_std = y_pred_std[:, i]
conditions.at[(name, "DATA")] = y
if return_std:
conditions.at[(f"{name}_std", "METADATA")] = y_std
return conditions, {}
def _predict(self, X, **kwargs):
"""Get a prediction
Parameters
----------
X : pd.DataFrame
A pandas dataframe with inputs to the predictor
Returns
-------
mean, std
Numpy arrays with the average and standard deviation of the ensemble
"""
y_pred = np.array(
[estimator.predict(X, **kwargs) for estimator in self.predictors]
)
if self.clip:
for i, v in enumerate(self.domain.output_variables):
if type(self.clip) == list:
if v.name not in self.clip:
continue
y_pred[:, :, i] = np.clip(y_pred[:, :, i], v.lower_bound, v.upper_bound)
return y_pred.mean(axis=0), y_pred.std(axis=0)
def train(self, **kwargs):
"""Train the model on the dataset
This will automatically do a train-test split and then train via
cross-validation on the train set.
Parameters
---------
test_size : float, optional
The size of the test as a fraction of the total dataset. Defaults to 0.1.
cv_folds : int, optional
The number of cross validation folds. Defaults to 5.
max_epochs : int, optional
The max number of epochs for each CV fold. Defaults to 100.
scoring : str or list, optional
A list of scoring functions or names of them. Defaults to R2 and MSE.
See here for more https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
search_params : dict, optional
A dictionary with parameter values to change in a gridsearch.
regressor_kwargs : dict, optional
You can pass extra arguments to the regressor here.
callbacks : None, "disable" or list of Callbacks
Skorch callbacks passed to skorch.net. See: https://skorch.readthedocs.io/en/latest/net.html
verbose : int
0 for no logging, 1 for logging
Notes
------
If predictor was set in the initialization, it will not be overwritten.
Returns
-------
A dictionary containing the results of the training.
Examples
-------
>>> from summit import *
>>> import pkg_resources, pathlib
>>> DATA_PATH = pathlib.Path(pkg_resources.resource_filename("summit", "benchmarks/data"))
>>> model_name = f"reizman_suzuki_case_1"
>>> domain = ReizmanSuzukiEmulator.setup_domain()
>>> ds = DataSet.read_csv(DATA_PATH / f"{model_name}.csv")
>>> exp = ExperimentalEmulator(model_name, domain, dataset=ds, regressor=ANNRegressor)
>>> # Test grid search cross validation and training
>>> params = { "regressor__net__max_epochs": [1, 1000]}
>>> exp.train(cv_folds=5, random_state=100, search_params=params, verbose=0) # doctest: +SKIP
"""
if self.ds is None:
raise ValueError("Dataset is required for training.")
# Create predictor
predictor = self._create_predictor(
self.regressor,
self.domain,
self.n_features,
self.n_examples,
output_variable_names=self.output_variable_names,
descriptors_features=self.descriptors_features,
**kwargs,
)
# Get data
input_columns = [v.name for v in self.domain.input_variables]
X = self.ds[input_columns].to_numpy()
y = self.ds[self.output_variable_names].to_numpy().astype(float)
# Sklearn columntransformer expects a pandas dataframe not a dataset
X = pd.DataFrame(X, columns=input_columns)
# Train-test split
test_size = kwargs.get("test_size", 0.1)
random_state = kwargs.get("random_state")
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
X, y, test_size=test_size, random_state=random_state
)
y_train, y_test = (
torch.tensor(self.y_train).float(),
torch.tensor(self.y_test).float(),
)
# Training
scoring = kwargs.get("scoring", ["r2", "neg_root_mean_squared_error"])
folds = kwargs.get("cv_folds", 5)
search_params = kwargs.get("search_params", {})
# Run grid search if requested
if search_params:
self.logger.info("Starting grid search.")
gs = ProgressGridSearchCV(
predictor, search_params, refit="r2", cv=folds, scoring=scoring
)
gs.fit(self.X_train, y_train)
best_params = gs.best_params_
params = {}
for param in search_params.keys():
params[param] = best_params[param]
predictor.set_params(**params)
# Run final training using cross validation
initializing = kwargs.get("initializing", False)
if not initializing:
self.logger.info("Starting training.")
res = cross_validate(
predictor,
self.X_train,
y_train,
scoring=scoring,
cv=folds,
return_estimator=True,
)
self.predictors = res.pop("estimator")
# Rename from test to validation
for name in scoring:
scores = res.pop(f"test_{name}")
res[f"val_{name}"] = scores
return res
def test(self, **kwargs):
"""Get test results
This requires that train has already been called or
the ExperimentalEmulator was initialized from a pretrained model.
Parameters
----------
scoring : str or list, optional
A list of scoring functions or names of them. Defaults to R2 and MSE.
See here for more https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
X_test : np.ndarray, optional
Test X inputs
y_test : np.ndarray, optional
Corresponding test labels
Notes
------
The method loops over the predictors, so the resulting scores are averaged over all objectives for each of the predictors.
In contrast, the parity_plot code gives the scores for each objective averaged over the predictors.
Returns
------
scores_dict : dict
A dictionary of scores with test_SCORE as the key and values as an array
of scores for each of the models in the ensemble.
"""
X_test = kwargs.get("X_test", self.X_test)
y_test = kwargs.get("y_test", self.y_test)
if X_test is None:
raise ValueError("X_test is not set or passed")
if y_test is None:
raise ValueError("y_test is not set or passed")
scoring = kwargs.get("scoring", ["r2", "neg_root_mean_squared_error"])
scores_list = []
for predictor in self.predictors:
if callable(scoring):
scorers = scoring
elif scoring is None or isinstance(scoring, str):
scorers = check_scoring(predictor, scoring)
else:
scorers = _check_multimetric_scoring(predictor, scoring)
scores_list.append(_score(predictor, X_test, y_test, scorers))
scores_dict = _aggregate_score_dicts(scores_list)
for name in scoring:
scores = scores_dict.pop(name)
scores_dict[f"test_{name}"] = scores
return scores_dict
@classmethod
def _create_predictor(
cls,
regressor,
domain,
input_dimensions,
num_examples,
output_variable_names,
**kwargs,
):
# Preprocessors
output_variable_names = kwargs.get(
"output_variable_names", [v.name for v in domain.output_variables]
)
X_preprocessor = cls._create_input_preprocessor(domain, **kwargs)
y_preprocessor = cls._create_output_preprocessor(output_variable_names)
# Create network
regressor_kwargs = kwargs.get("regressor_kwargs", {})
regressor_kwargs.update(
dict(
module__input_dim=input_dimensions,
module__output_dim=len(output_variable_names),
module__n_examples=num_examples,
)
)
verbose = kwargs.get("verbose", 0)
net = NeuralNetRegressor(
regressor,
train_split=None,
max_epochs=kwargs.get("max_epochs", 100),
callbacks=kwargs.get("callbacks"),
verbose=verbose,
**regressor_kwargs,
)
# Create predictor
# TODO: also create an inverse function
ds_to_tensor = FunctionTransformer(numpy_to_tensor, check_inverse=False)
pipe = Pipeline(
steps=[
("preprocessor", X_preprocessor),
("dst", ds_to_tensor),
("net", net),
]
)
return UpdatedTransformedTargetRegressor(
regressor=pipe, transformer=StandardScaler(), check_inverse=False
)
@staticmethod
def _caclulate_input_dimensions(domain: Domain, descriptors_features):
num_dimensions = 0
for v in domain.input_variables:
if v.variable_type == "continuous":
num_dimensions += 1
elif v.variable_type == "categorical":
if v.name in descriptors_features:
if v.ds is not None:
num_dimensions += len(v.ds.data_columns)
else:
raise DomainError(
(
f"Descriptors not available for {v.name}),"
f" but it is list in descriptors_features."
"Make sure descriptors is set on the categorical variable."
)
)
else:
num_dimensions += len(v.levels)
return num_dimensions
@staticmethod
def _create_input_preprocessor(domain, **kwargs):
"""Create feature preprocessors """
transformers = []
# Numeric transforms
numeric_features = [
v.name for v in domain.input_variables if v.variable_type == "continuous"
]
if len(numeric_features) > 0:
transformers.append(("num", StandardScaler(), numeric_features))
# Categorical transforms
descriptors_features = kwargs.get("descriptors_features", [])
categorical_features = [
v.name
for v in domain.input_variables
if (v.variable_type == "categorical")
and (v.name not in descriptors_features)
]
categories = [
v.levels
for v in domain.input_variables
if (v.variable_type == "categorical")
and (v.name not in descriptors_features)
]
if len(categorical_features) > 0:
transformers.append(
("cat", OneHotEncoder(categories=categories), categorical_features)
)
if len(descriptors_features) > 0:
datasets = [
v.ds for v in domain.input_variables if v.name in descriptors_features
]
transformers.append(
(
"des",
DescriptorEncoder(datasets=datasets),
descriptors_features,
)
)
# Create preprocessor
if len(numeric_features) == 0 and len(categorical_features) > 0:
raise DomainError(
"With only categorical features, you can do a simple lookup."
)
elif (
len(numeric_features) > 0
or len(categorical_features) > 0
or len(descriptors_features) > 0
):
preprocessor = ColumnTransformer(transformers=transformers)
else:
raise DomainError(
"No continuous or categorical features were found in the dataset."
)
return preprocessor
@staticmethod
def _create_output_preprocessor(output_variable_names):
""""Create target preprocessors"""
transformers = [
("scale", StandardScaler(), output_variable_names),
("dst", FunctionTransformer(numpy_to_tensor), output_variable_names),
]
return ColumnTransformer(transformers=transformers)
def to_dict(self, **experiment_params):
"""Convert emulator parameters to dictionary
Notes
------
This does not save the weights and biases of the regressor.
You need to use save_regressor method.
"""
# Predictors
predictors = [
self._create_predictor_dict(predictor) for predictor in self.predictors
]
# Update experiment_params
experiment_params.update(
{
"model_name": self.model_name,
"regressor_name": str(self.regressor.__name__),
"n_features": self.n_features,
"n_examples": self.n_examples,
"descriptors_features": self.descriptors_features,
"output_variable_names": self.output_variable_names,
"predictors": predictors,
"clip": self.clip,
}
)
return super().to_dict(**experiment_params)
@staticmethod
def _create_predictor_dict(predictor):
num = predictor.regressor_.named_steps.preprocessor.named_transformers_.num
input_preprocessor = {
# Numerical
"num": {
"mean_": num.mean_,
"var_": num.var_,
"scale_": num.scale_,
"n_samples_seen_": num.n_samples_seen_,
}
# Categorical and descriptors is automatic from the domain / kwargs
}
out = predictor.transformer_
output_preprocessor = {
"mean_": out.mean_,
"var_": out.var_,
"scale_": out.scale_,
"n_samples_seen_": out.n_samples_seen_,
}
return jsonify_dict(
{
"input_preprocessor": input_preprocessor,
"output_preprocessor": output_preprocessor,
}
)
@classmethod
def from_dict(cls, d, **kwargs):
"""Create ExperimentalEmulator from a dictionary
Notes
-----
This does not load the regressor weights and biases.
After calling from_dict, call load_regressor to load the
weights and biases.
"""
params = d["experiment_params"]
domain = Domain.from_dict(d["domain"])
# Load regressor
regressor = registry[params["regressor_name"]]
d["experiment_params"]["regressor"] = regressor
# Load predictors
predictors_params = params["predictors"]
predictors = [
cls._create_predictor(
regressor,
domain,
params["n_features"],
params["n_examples"],
output_variable_names=params["output_variable_names"],
descriptors_features=params["descriptors_features"],
verbose=0,
)
for predictor_params in predictors_params
]
d["experiment_params"]["predictor"] = predictors
# Dataset
dataset = kwargs.get("dataset")
d["experiment_params"]["dataset"] = dataset
# Instantiate the class
exp = super().from_dict(d)
# Set runtime parameters
exp.n_features = params["n_features"]
exp.n_examples = params["n_examples"]
# One round of training to initialize all variables
if dataset is None:
exp.ds = generate_data(domain, params["n_features"] + 1)
exp.train(max_epochs=1, verbose=0, initializing=True)
if dataset is None:
exp.ds = None
exp.X_train, exp.y_train, exp.X_test, exp.y_test = None, None, None, None
# Set parameters on predictors
for predictor, predictor_params in zip(exp.predictors, predictors_params):
exp.set_predictor_params(predictor, unjsonify_dict(predictor_params))
return exp
@staticmethod
def set_predictor_params(predictor, predictor_params):
# Input transforms
num = predictor.regressor_.named_steps.preprocessor.named_transformers_.num
input_preprocessor = RecursiveNamespace(
**predictor_params["input_preprocessor"]
)
num.mean_ = input_preprocessor.num.mean_
num.var_ = input_preprocessor.num.var_
num.scale_ = input_preprocessor.num.scale_
num.n_samples_seen_ = input_preprocessor.num.n_samples_seen_
# Output transforms
out = predictor.transformer_
output_preprocessor = RecursiveNamespace(
**predictor_params["output_preprocessor"]
)
out.mean_ = output_preprocessor.mean_
out.var_ = output_preprocessor.var_
out.scale_ = output_preprocessor.scale_
out.n_samples_seen_ = output_preprocessor.n_samples_seen_
def save_regressor(self, save_dir):
"""Save the weights and biases of the regressor to disk
Parameters
----------
save_dir : str or pathlib.Path
The directory used for saving emulator files.
"""
save_dir = pathlib.Path(save_dir)
if self.predictors is None:
raise ValueError(
"No predictors available. First, run training using the train method."
)
for i, predictor in enumerate(self.predictors):
predictor.regressor_.named_steps.net.save_params(
f_params=save_dir / f"{self.model_name}_predictor_{i}.pt"
)
def load_regressor(self, save_dir):
"""Load the weights and biases of the regressor from disk
Parameters
----------
save_dir : str or pathlib.Path
The directory used for saving emulator files.
"""
save_dir = pathlib.Path(save_dir)
for i, predictor in enumerate(self.predictors):
net = predictor.regressor_.named_steps.net
net.initialize()
net.load_params(f_params=save_dir / f"{self.model_name}_predictor_{i}.pt")
predictor.regressor_.named_steps.net = net
def save(self, save_dir):
"""Save all the essential parameters of the ExperimentalEmulator to disk
Parameters
----------
save_dir : str or pathlib.Path
The directory used for saving emulator files.
Notes
------
This saves the parameters needed to reproduce results but not the associated data.
You can separately save X_test, y_test, X_train, and y_train attributes
if you want to be able to reproduce splits, test results and parity plots.
Examples
--------
>>> from summit import *
>>> import pkg_resources, pathlib
>>> DATA_PATH = pathlib.Path(pkg_resources.resource_filename("summit", "benchmarks/data"))
>>> model_name = f"reizman_suzuki_case_1"
>>> domain = ReizmanSuzukiEmulator.setup_domain()
>>> ds = DataSet.read_csv(DATA_PATH / f"{model_name}.csv")
>>> exp = ExperimentalEmulator(model_name, domain, dataset=ds, regressor=ANNRegressor)
>>> res = exp.train(max_epochs=10)
>>> exp.save("reizman_test/")
>>> #Load data for new experimental emulator
>>> exp_new = ExperimentalEmulator.load(model_name, "reizman_test/")
>>> exp_new.X_train, exp_new.y_train, exp_new.X_test, exp_new.y_test = exp.X_train, exp.y_train, exp.X_test, exp.y_test
>>> res = exp_new.test()
>>> fig, ax = exp_new.parity_plot(include_test=True)
"""
save_dir = pathlib.Path(save_dir)
save_dir.mkdir(exist_ok=True)
with open(save_dir / f"{self.model_name}.json", "w") as f:
json.dump(self.to_dict(), f)
self.save_regressor(save_dir)
@classmethod
def load(cls, model_name, save_dir, **kwargs):
"""Load all the essential parameters of the ExperimentalEmulator from disk
Parameters
----------
save_dir : str or pathlib.Path
The directory from which to load emulator files.
Notes
------
This loads the parameters needed to reproduce results but not the associated data.
You can separately load X_test, y_test, X_train, and y_train attributes
if you want to be able to reproduce splits, test results and parity plots.
Examples
--------
>>> from summit import *
>>> import pkg_resources, pathlib
>>> DATA_PATH = pathlib.Path(pkg_resources.resource_filename("summit", "benchmarks/data"))
>>> model_name = f"reizman_suzuki_case_1"
>>> domain = ReizmanSuzukiEmulator.setup_domain()
>>> ds = DataSet.read_csv(DATA_PATH / f"{model_name}.csv")
>>> exp = ExperimentalEmulator(model_name, domain, dataset=ds, regressor=ANNRegressor)
>>> res = exp.train(max_epochs=10)
>>> exp.save("reizman_test")
>>> #Load data for new experimental emulator
>>> exp_new = ExperimentalEmulator.load(model_name, "reizman_test")
>>> exp_new.X_train, exp_new.y_train, exp_new.X_test, exp_new.y_test = exp.X_train, exp.y_train, exp.X_test, exp.y_test
>>> res = exp_new.test()
>>> fig, ax = exp_new.parity_plot(include_test=True)
"""
save_dir = pathlib.Path(save_dir)
with open(save_dir / f"{model_name}.json", "r") as f:
d = json.load(f)
exp = cls.from_dict(d, **kwargs)
exp.load_regressor(save_dir)
return exp
def parity_plot(self, **kwargs):
"""Produce a parity plot based for the trained model using matplotlib
Parameters
---------
output_variable_names : str or list, optional
The output variables to plot. Defaults to all.
include_test : bool, optional
Include the performance of the model on the test set.
Defaults to False.
train_color : str, optional
Hex string for the train points. Defaults to "#6f3666"
test_color : str, optional
Hex string for the train points. Defaults to "#3c328c"
"""
import matplotlib.pyplot as plt
include_test = kwargs.get("include_test", False)
train_color = kwargs.get("train_color", "#6f3666")
test_color = kwargs.get("test_color", "#3c328c")
clip = kwargs.get("clip")
vars = kwargs.get("output_variable_names", self.output_variable_names)
if type(vars) == str:
vars = [vars]
fig, axes = plt.subplots(1, len(vars), figsize=(10, 5))
fig.subplots_adjust(wspace=0.5)
if len(vars) > 1:
fig.subplots_adjust(wspace=0.2)
if type(axes) != np.ndarray:
axes = np.array([axes])
# Do predictions
with torch.no_grad():
y_train_pred, y_train_pred_std = self._predict(self.X_train)
if include_test:
y_test_pred, y_test_pred_std = self._predict(self.X_test)
plots = 0
for i, v in enumerate(self.output_variable_names):
if v in vars:
if include_test:
kwargs = dict(
y_test=self.y_test[:, i], y_test_pred=y_test_pred[:, i]
)
else:
kwargs = {}
make_parity_plot(
self.y_train[:, i],
y_train_pred[:, i],
ax=axes[plots],
train_color=train_color,
test_color=test_color,
title=v,
**kwargs,
)
plots += 1
return fig, axes
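# --- Editor's sketch, not part of the original summit source ---
# A minimal numpy illustration of the ensemble aggregation performed in _predict above:
# stack the per-predictor outputs, optionally clip them to output bounds, then report the
# mean and standard deviation across the ensemble axis. The shapes (3 predictors, 5 samples,
# 2 outputs) and the bounds are assumptions for illustration; numpy is assumed imported as np.
def _ensemble_mean_std_sketch():
    rng = np.random.default_rng(0)
    y_pred = rng.normal(size=(3, 5, 2))  # (n_predictors, n_samples, n_outputs)
    y_pred = np.clip(y_pred, -1.0, 1.0)  # stand-in for the per-variable bounds in the domain
    return y_pred.mean(axis=0), y_pred.std(axis=0)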
def generate_data(domain, n_examples, random_state=None):
data = {}
random =
|
default_rng(random_state)
|
numpy.random.default_rng
|
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import pywt
from scipy.ndimage import uniform_filter
from scipy import ndimage as ndi
from skimage.feature import match_descriptors, ORB
from skimage.feature import hog, daisy, CENSURE
from skimage import color, exposure, transform
from skimage.transform import pyramid_gaussian
# from skimage.util.montage import montage2d
from skimage.filters import gabor_kernel
from sklearn.feature_extraction.image import extract_patches_2d
# from numba import jit
def extract_features(imgs, feature_fns, verbose=True):
"""
Given pixel data for images and several feature functions that can
operate on single images, apply all feature functions to all
images, concatenating the feature vectors for each image and
storing the features for all images in a single matrix.
Inputs:
- imgs: N x H X W X C array of pixel data for N images.
- feature_fns: List of k feature functions. The ith feature function should
take as input an H x W x D array and return a (one-dimensional) array of
length F_i.
- verbose: Boolean; if true, print progress.
Returns:
An array of shape (N, F_1 + ... + F_k) where each row is the
concatenation of all features for a single image.
"""
num_images = imgs.shape[0]
if num_images == 0:
return np.array([])
# Use the first image to determine feature dimensions
feature_dims = []
first_image_features = []
for feature_fn in feature_fns:
feats = feature_fn(imgs[0].squeeze())
assert len(feats.shape) == 1, 'Feature func. must be one-dimensional'
feature_dims.append(feats.size)
first_image_features.append(feats)
# Now that we know the dimensions of the features, we can allocate a single
# big array to store all features as columns.
total_feature_dim = sum(feature_dims)
imgs_features = np.zeros((total_feature_dim, num_images))
imgs_features[:total_feature_dim, 0] = np.hstack(first_image_features)
# Extract features for the rest of the images.
for i in range(1, num_images):
    idx = 0
    for feature_fn, feature_dim in zip(feature_fns, feature_dims):
        next_idx = idx + feature_dim
        imgs_features[idx:next_idx, i] = feature_fn(imgs[i].squeeze())
        idx = next_idx
if verbose and i % 100 == 0:
print("Done extracting features for {}/{} images"
.format(i, num_images))
return imgs_features.T
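# --- Editor's sketch, not part of the original source ---
# How extract_features is typically driven: build a batch of images plus a list of
# per-image feature functions, and get one concatenated feature vector per image back.
# The image size, batch size and bin count below are assumptions for illustration;
# hog_feature and color_histogram_hsv are defined further down in this module, so call
# this helper only after the module has been fully loaded.
def _extract_features_demo(num_images=4):
    demo_imgs = np.random.randint(0, 256, size=(num_images, 64, 64, 3)).astype(np.float64)
    feature_fns = [hog_feature, lambda im: color_histogram_hsv(im, nbin=10)]
    return extract_features(demo_imgs, feature_fns, verbose=False)  # shape (num_images, 576 + 10)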
def rgb2gray(img):
"""Convert RGB image to grayscale
Parameters:
img : RGB image
Returns:
gray : grayscale image
"""
return np.dot(img[..., :3], [0.299, 0.587, 0.114])
def padding_imgs(img, max_width=0, max_height=0):
w, h, c = img.shape
img = rgb2gray(img)
if max_width != 0 and max_height != 0:
if max_width > max_height:
img = np.pad(img, ((0, max_width - w), (max_width - h, 0)),
'constant', constant_values=0)
else:
img = np.pad(img, ((0, max_height - w), (max_height - h, 0)),
'constant', constant_values=0)
else:
if h > w:
img = np.pad(img, ((0, h-w), (0, 0)), 'constant',
constant_values=0)
else:
img = np.pad(img, ((0, 0), (w-h, 0)), 'constant',
constant_values=0)
return img
def hog_feature(im):
"""Compute Histogram of Gradient (HOG) feature for an image
Modified from skimage.feature.hog
http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog
Reference:
Histograms of Oriented Gradients for Human Detection
<NAME> and <NAME>, CVPR 2005
Parameters:
im : an input grayscale or rgb square image
Returns:
feat: Histogram of Gradient (HOG) feature
"""
# convert rgb to grayscale if needed
if im.ndim == 3:
image = rgb2gray(im)
# if im.ndim[0] > im.ndim[1] or im.ndim[1] > im.ndim[0]:
# image = padding_imgs(im)
# w, h = image.shape
# if h > w:
# image = np.pad(image, ((0, h-w), (0, 0)), 'constant',
# constant_values=0)
# else:
# image = np.pad(image, ((0, 0), (w-h, 0)), 'constant',
# constant_values=0)
else:
image = np.atleast_2d(im)
sx, sy = image.shape # image size
orientations = 9 # number of gradient bins
cx, cy = (8, 8) # pixels per cell
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90 # gradient orientation
n_cellsx = int(np.floor(sx / cx)) # number of cells in x
n_cellsy = int(np.floor(sy / cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
grad_ori, 0)
temp_ori = np.where(grad_ori >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, grad_mag, 0)
orientation_histogram[:, :, i] = uniform_filter(
temp_mag, size=(cx, cy))[cx // 2::cx, cy // 2::cy].T
return orientation_histogram.ravel()
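# --- Editor's sketch, not part of the original source ---
# The length of the HOG vector returned above is orientations * (sx // cx) * (sy // cy):
# with the defaults hard-coded in hog_feature (9 orientations, 8x8-pixel cells), a 64x64
# image yields 9 * 8 * 8 = 576 values. The helper below just makes that arithmetic explicit.
def _hog_feature_length(sx=64, sy=64, cx=8, cy=8, orientations=9):
    return orientations * (sx // cx) * (sy // cy)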
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
"""
Compute color histogram for an image using hue.
Inputs:
- im: H x W x C array of pixel data for an RGB image.
- nbin: Number of histogram bins. (default: 10)
- xmin: Minimum pixel value (default: 0)
- xmax: Maximum pixel value (default: 255)
- normalized: Whether to normalize the histogram (default: True)
Returns:
1D vector of length nbin giving the color histogram over the hue of the
input image.
"""
ndim = im.ndim
bins = np.linspace(xmin, xmax, nbin+1)
hsv = matplotlib.colors.rgb_to_hsv(im/xmax) * xmax
imhist, bin_edges = np.histogram(hsv[:, :, 0],
bins=bins,
density=normalized)
imhist = imhist * np.diff(bin_edges)
# return histogram
return imhist.ravel()
def histogram_equalization(img):
# Contrast stretching
p2 = np.percentile(img, 2)
p98 = np.percentile(img, 98)
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
return img_rescale, img_eq, img_adapteq
def plot_hog(img):
image = color.rgb2gray(img)
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualise=True)
fig, (ax1, ax2) = plt.subplots(1, 2,
figsize=(8, 4),
sharex=True,
sharey=True)
ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Input image')
ax1.set_adjustable('box-forced')
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image,
in_range=(0, 0.02))
ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
ax2.set_adjustable('box-forced')
plt.show()
def pyramid(img):
w, h, c = img.shape
pyramid = tuple(pyramid_gaussian(img, downscale=2))
composite_image = np.zeros((w, h + h // 2, 3), dtype=np.float32)
composite_image[:w, :h, :] = pyramid[0]
row_count = 0
for p in pyramid[1:]:
n_rows, n_cols = p.shape[:2]
composite_image[row_count:row_count + n_rows, h: h + n_cols] = p
row_count += n_rows
return composite_image
def daisy_feat(img):
img = color.rgb2gray(img)
descs, descs_img = daisy(img, step=180, radius=58, rings=2,
histograms=6,
orientations=8, visualize=True)
return descs.ravel()
def censure(img):
img = color.rgb2gray(img)
# tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
# translation=(150, -200))
# img_warp = tf.warp(img, tform)
detector = CENSURE()
detector.detect(img)
# return detector.keypoints, detector.scales
return detector.scales
def orb(img):
img1 = rgb2gray(img)
img2 = transform.rotate(img1, 180)
tform = transform.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
translation=(0, -200))
img3 = transform.warp(img1, tform)
descriptor_extractor = ORB(n_keypoints=200)
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img3)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors
matches1 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches2 = match_descriptors(descriptors1, descriptors3, cross_check=True)
return np.hstack((keypoints1[matches1[:, 0]].ravel(),
keypoints2[matches2[:, 1]].ravel()))
# return descriptors1, descriptors2, descriptors3
def gabor_filters(img):
"""Prepare filter-bank kernels"""
img = rgb2gray(img)
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma,
sigma_y=sigma))
kernels.append(kernel)
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(img, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return np.hstack((feats[:, 0].ravel(), feats[:, 1].ravel())).ravel()
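# --- Editor's sketch, not part of the original source ---
# gabor_filters builds 4 orientations x 2 sigmas x 2 frequencies = 16 kernels and keeps
# the mean and variance of every filter response, so its output vector has 16 * 2 = 32
# entries. The helper below spells out that count.
def _gabor_feature_length(n_thetas=4, n_sigmas=2, n_frequencies=2):
    n_kernels = n_thetas * n_sigmas * n_frequencies
    return 2 * n_kernels  # one mean and one variance per kernel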
def mean_removal(img):
img[:, :, 0] -= 104.006
img[:, :, 1] -= 116.669
img[:, :, 2] -= 122.679
return img
def remove_mean(X):
mean_img = np.mean(X, axis=0)
X -= mean_img
return X
def whitening(X, k=8):
# STEP 1a: Implement PCA to obtain the rotation matrix, U, which is
# the eigenbases sigma.
# Covariance matrix [column-wise variables]: Sigma = (X-mu)' * (X-mu) / N
Sigma =
|
np.dot(X, X.T)
|
numpy.dot
|
"""
"""
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.animation as animation
import pyfits as pf
import numpy as np
def visualiseWavelengthDependency2D(d1, d2, d3, outname, logscale=True):
"""
"""
plt.subplots(ncols=3, figsize=(18, 7))
ax1 = plt.subplot(1, 3, 1, frame_on=False)
ax2 = plt.subplot(1, 3, 2, frame_on=False)
ax3 = plt.subplot(1, 3, 3, frame_on=False)
ax1.imshow(d1, origin='lower')
ax2.imshow(d2, origin='lower')
ax3.imshow(d3, origin='lower')
if logscale:
ax1.set_title(r'$\lambda = 400$nm, logscale')
ax2.set_title(r'$\lambda = 550$nm, logscale')
ax3.set_title(r'$\lambda = 800$nm, logscale')
else:
ax1.set_title(r'$\lambda = 400$nm, linscale')
ax2.set_title(r'$\lambda = 550$nm, linscale')
ax3.set_title(r'$\lambda = 800$nm, linscale')
# turn off ticks
ax1.axes.get_xaxis().set_visible(False)
ax1.axes.get_yaxis().set_visible(False)
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax3.axes.get_xaxis().set_visible(False)
ax3.axes.get_yaxis().set_visible(False)
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.savefig(outname)
plt.close()
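# --- Editor's sketch, not part of the original source ---
# The 2D visualiser above only needs three images of identical shape; the random data
# and the output file name here are assumptions used purely for illustration.
def _demo_wavelength_dependency_2d(outname='wavelengthDependency2D_demo.pdf'):
    d1, d2, d3 = (np.random.rand(64, 64) for _ in range(3))
    visualiseWavelengthDependency2D(d1, d2, d3, outname, logscale=False)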
def visualiseWavelengthDependency3D(d1, d2, d3, outname, PSF=True):
"""
"""
stopy, stopx = d1.shape
X, Y = np.meshgrid(np.arange(0, stopx, 1),
|
np.arange(0, stopy, 1)
|
numpy.arange
|
from .groundwater import Groundwater
from .overland import OverlandFlow
import numpy as np
from landlab import Component
class CRESTPHYS(Component):
_name= "CREST-Physical"
_cite_as= '''
add later
'''
_info = {
"WM__param":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Mean Max Soil Capacity"
},
"IM__param":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units":"%",
"mapping": "node",
"doc":"impervious area ratio for generating fast runoff"
},
"manning_n__param":{
"dtype": np.float32,
"intent": "in",
"optional": True,
"units": "-",
"mapping": "node",
"doc": "manning roughness"
},
"B__param":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Exponent of VIC model"
},
"KE__param":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Evaporation factor -> from PET to AET"
},
"Ksat_groundwater__param":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units": "m/s",
"mapping": "link",
"doc": "horizontal hydraulic conductivity in groundwater"
},
"Ksat_soil__param":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units": "m/s",
"mapping": "node",
"doc": "vertical Soil saturated hydraulic conductivity in vadose zone"
},
"topographic__elevation":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Surface elevation"
},
"aquifer_base__elevation":{
"dtype": np.float32,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Base elevation of aquifer"
},
"surface_water__discharge":{
"dtype": np.float32,
"intent": "out",
"optional": False,
"units": "m^3/s",
"mapping": "node",
"doc": "Surface discharge"
},
"ground_water__discharge":{
"dtype": np.float32,
"intent": "out",
"optional": False,
"units": "m^3/s",
"mapping": "node",
"doc": "Groundwater discharge"
},
"soil_moisture__content":{
"dtype": np.float32,
"intent": "out",
"optional": False,
"units": "mm",
"mapping": "node",
"doc": "Soil Moisture Content"
},
"surface_water__elevation":{
"dtype": np.float32,
"intent": "out",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Surface water elevation"
},
"ground_water__elevation":{
"dtype": np.float32,
"intent": "inout",
"optional": True,
"units": "m",
"mapping": "node",
"doc": "Ground water table"
},
}
def __init__(self,
grid,
porosity=1,
proj=None):
super().__init__(grid)
self.initialize_output_fields()
#===store some parameters for use=========#
self._im= self._grid['node']['IM__param']/100. # convert to unitless value
self._im[self._im>1]=1
self._im[self._im<0]=0
self._ksat_soil= self._grid['node']['Ksat_soil__param']
self._ksat_gw= self._grid['link']['Ksat_groundwater__param']
self._ke= self._grid['node']['KE__param']
self._b= self._grid['node']['B__param']
self._b[self._b<0]=1
self._wm= self._grid['node']['WM__param']
self._wm[self._wm<0]= 100
self._ksat_soil[self._ksat_soil<0]=1
self._manning_n= self._grid['node']['manning_n__param']
self._manning_n_link= self._grid.map_mean_of_link_nodes_to_link(self._manning_n)
self._zsf= self._grid['node']['surface_water__elevation']
self._elev= self._grid['node']['topographic__elevation']
self._zgw= self._grid['node']['ground_water__elevation']
self._z_base= self._grid['node']['aquifer_base__elevation']
self._qsf = self._grid['node']['surface_water__discharge']
self._qgw= self._grid['node']['ground_water__discharge']
self._sm= self._grid['node']['soil_moisture__content']
self._sm[self._sm<0]=0
self._zsf[self._grid.status_at_node==self._grid.BC_NODE_IS_CLOSED]= 0
self._zgw[self._grid.status_at_node==self._grid.BC_NODE_IS_CLOSED]= 0
self._qsf[self._grid.status_at_node==self._grid.BC_NODE_IS_CLOSED]= 0
self._qgw[self._grid.status_at_node==self._grid.BC_NODE_IS_CLOSED]= 0
#===instantiate groundwater and flow accumulator component===#
self._router= OverlandFlow(
self._grid,
rainfall_intensity=0,
mannings_n= self._manning_n_link,
steep_slopes=True
)
self._gw= Groundwater(
self._grid,
hydraulic_conductivity=self._ksat_gw,
porosity=porosity,
recharge_rate=0,
regularization_f=0.01,
courant_coefficient=1)
self.lons= self._grid.x_of_node.reshape(self._grid.shape)[0,:]
self.lats= self._grid.y_of_node.reshape(self._grid.shape)[:,0]
def run_one_step(self, dt):
'''
control function to link overland flow and ground water
'''
if not hasattr(self, '_precip') or not hasattr(self, '_evap'):
msg= 'Missing precipitation or evaporation information, please check...'
raise ValueError(msg)
zsf= self._grid['node']['surface_water__elevation']
zgw= self._grid['node']['ground_water__elevation']
# here we combine surface water and precipitation to turn on reinfiltration
precip= self._precip * dt #convert to mm for convenience
evap= self._evap * dt
adjPET= evap * self._ke
# condition 1: precipitation > PET
cond= (precip>\
adjPET)
# precip[precip<adjPET]= adjPET[precip<adjPET]
# First generate fast runoff
precipSoil= np.zeros_like(precip)
precipImperv= np.zeros_like(precip)
precipSoil[cond]= (precip[cond] - adjPET[cond]) * (1-self._im[cond])
precipImperv[cond]= precip[cond] - adjPET[cond] - precipSoil[cond]
# infiltration
interflowExcess=
|
np.zeros_like(precip)
|
numpy.zeros_like
|
'''
By Real2CAD group at ETH Zurich
3DV Group 14
<NAME>, <NAME>, <NAME>, <NAME>
Edited based on the code of the JointEmbedding paper (https://github.com/xheon/JointEmbedding)
As for our contributions, please check our report
'''
import argparse
import json
from typing import List, Tuple, Dict
import os
from datetime import datetime
import random
import math
import numpy as np
import scipy.spatial
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from sklearn.manifold import TSNE
import wandb
import open3d as o3d
import data
import metrics
import utils
from models import *
from typing import List, Tuple, Any
def main(opt: argparse.Namespace):
# Configure environment
utils.set_gpu(opt.gpu)
device = torch.device("cuda")
ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") # beginning timestamp
run_name = opt.name + "_" + ts # modified to a name that is easier to index
run_path = os.path.join(opt.output_root, run_name)
if not os.path.exists(run_path):
os.mkdir(run_path)
assert os.access(run_path, os.W_OK)
print(f"Start testing {run_path}")
print(vars(opt))
# Set wandb
visualize_on = False
if opt.wandb_vis_on:
utils.setup_wandb()
wandb.init(project="ScanCADJoint", entity="real2cad", config=vars(opt), dir=run_path) # team 'real2cad'
#wandb.init(project="ScanCADJoint", config=vars(opt), dir=run_path) # your own workspace
wandb.run.name = run_name
visualize_on = True
# Model
if opt.skip_connection_sep:
separation_model: nn.Module = HourGlassMultiOutSkip(ResNetEncoderSkip(1), ResNetDecoderSkip(1))
else:
separation_model: nn.Module = HourGlassMultiOut(ResNetEncoder(1), ResNetDecoder(1))
if opt.skip_connection_com:
completion_model: nn.Module = HourGlassMultiOutSkip(ResNetEncoderSkip(1), ResNetDecoderSkip(1))
else:
completion_model: nn.Module = HourGlassMultiOut(ResNetEncoder(1),ResNetDecoder(1)) #multiout for classification
classification_model: nn.Module = CatalogClassifier([256, 1, 1, 1], 8) #classification (8 class)
if opt.offline_sample:
embedding_model: nn.Module = TripletNet(ResNetEncoder(1)) #for offline assigning
else:
embedding_model: nn.Module = TripletNetBatchMix(ResNetEncoder(1)) #for online mining (half anchor scan + half positive cad)
if opt.representation == "tdf":
trans = data.truncation_normalization_transform
else: # binary_occupancy
trans = data.to_occupancy_grid
# Load checkpoints
resume_run_path = opt.resume.split("/")[0]
checkpoint_name = opt.resume.split("/")[1]
resume_run_path = os.path.join(opt.output_root, resume_run_path)
if not os.path.exists(os.path.join(resume_run_path, f"{checkpoint_name}.pt")):
checkpoint_name = "best"
print("Resume from checkpoint " + checkpoint_name)
loaded_model = torch.load(os.path.join(resume_run_path, f"{checkpoint_name}.pt"))
if opt.separation_model_on:
separation_model.load_state_dict(loaded_model["separation"])
separation_model = separation_model.to(device)
separation_model.eval()
else:
separation_model = None
completion_model.load_state_dict(loaded_model["completion"])
classification_model.load_state_dict(loaded_model["classification"])
embedding_model.load_state_dict(loaded_model["metriclearning"])
completion_model = completion_model.to(device)
classification_model = classification_model.to(device)
embedding_model = embedding_model.to(device)
# Make sure models are in evaluation mode
completion_model.eval()
classification_model.eval()
embedding_model.eval()
print("Begin evaluation")
if opt.scenelist_file is None:
# test_scan_list = ["scene0000_00", "scene0001_00", "scene0002_00", "scene0003_00", "scene0004_00", "scene0005_00",
# "scene0006_00", "scene0007_00", "scene0008_00", "scene0009_00", "scene0010_00", "scene0011_00", "scene0012_00", "scene0013_00", "scene0014_00", "scene0015_00"]
test_scan_list =["scene0030_00", "scene0031_00", "scene0032_00", "scene0033_00"]
else:
with open(opt.scenelist_file) as f:
test_scan_list = json.load(f)
scan_base_path = None
if opt.scan_dataset_name == "scannet":
scan_base_path = opt.scannet_path
elif opt.scan_dataset_name == "2d3ds":
scan_base_path = opt.s2d3ds_path
# Compute similarity metrics
# TODO: Update scan2cad_quat_file for 2d3ds dataset
# retrieval_metrics = evaluate_retrieval_metrics(separation_model, completion_model, classification_model, embedding_model, device,
# opt.similarity_file, scan_base_path, opt.shapenet_voxel_path, opt.scan2cad_quat_file,
# opt.scan_dataset_name, opt.separation_model_on, opt.batch_size, trans,
# opt.rotation_trial_count, opt.filter_val_pool, opt.val_max_sample_count,
# wb_visualize_on = visualize_on, vis_sample_count = opt.val_vis_sample_count)
# Compute cad embeddings
if opt.embed_mode:
embed_cad_pool(embedding_model, device, opt.modelpool_file, opt.shapenet_voxel_path, opt.cad_embedding_path,
opt.batch_size, trans, opt.rotation_trial_count)
# embed_scan_objs(separation_model, completion_model, classification_model, embedding_model, device,
# opt.scan2cad_file, scan_base_path, opt.scan_embedding_path, opt.scan_dataset_name,
# opt.separation_model_on, opt.batch_size, trans)
# accomplish Real2CAD task # TODO: add evaluation based on CD
retrieve_in_scans(separation_model, completion_model, classification_model, embedding_model, device, test_scan_list,
opt.cad_embedding_path, opt.cad_apperance_file, scan_base_path, opt.shapenet_voxel_path, opt.shapenet_pc_path, opt.real2cad_result_path,
opt.scan_dataset_name, opt.separation_model_on, opt.batch_size, trans,
opt.rotation_trial_count, opt.filter_val_pool, opt.in_the_wild_mode, opt.init_scale_method,
opt.icp_reg_mode, opt.icp_dist_thre, opt.icp_with_scale_on, opt.only_rot_z, opt.only_coarse_reg, visualize_on)
# print(retrieval_metrics)
# TSNE plot
# embedding_tsne(opt.cad_embedding_path, opt.scan_embedding_path, opt.tsne_img_path, True, visualize_on)
# Compute domain confusion # TODO update (use together with TSNE)
# train_confusion_results = evaluate_confusion(separation_model, completion_model, embedding_model,
# device, opt.confusion_train_path, opt.scannet_path, opt.shapenet_path,
# opt.confusion_num_neighbors, "train")
# print(train_confusion_results) #confusion_mean, conditional_confusions_mean
#
# val_confusion_results = evaluate_confusion(separation_model, completion_model, embedding_model,
# device, opt.confusion_val_path, opt.scannet_path, opt.shapenet_path,
# opt.confusion_num_neighbors, "validation")
# print(val_confusion_results) #confusion_mean, conditional_confusions_mean
pass
def evaluate_confusion(separation: nn.Module, completion: nn.Module, triplet: nn.Module, device, dataset_path: str,
scannet_path: str, shapenet_path: str, num_neighbors: int, data_split: str, batch_size: int = 1,
trans=data.to_occupancy_grid, verbose: bool = False) -> Tuple[np.array, list]:
# Configure datasets
dataset: Dataset = data.TrainingDataset(dataset_path, scannet_path, shapenet_path, "all", [data_split], scan_rep="sdf",
transformation=trans)
dataloader: DataLoader = DataLoader(dataset, shuffle=False, batch_size=batch_size, num_workers=0)
embeddings: List[torch.Tensor] = [] # contains all embedding vectors
names: List[str] = [] # contains the names of the samples
category: List[int] = [] # contains the category label of the samples
domains: List[int] = [] # contains number labels for domains (scan=0/cad=1)
# Iterate over data
for scan, cad in tqdm(dataloader, total=len(dataloader)):
# Move data to GPU
scan_data = scan["content"].to(device)
cad_data = cad["content"].to(device)
with torch.no_grad():
# Pass scan through networks
scan_foreground, _ = separation(scan_data)
scan_completed, _ = completion(torch.sigmoid(scan_foreground))
scan_latent = triplet.embed(torch.sigmoid(scan_completed)).view(batch_size, -1)
embeddings.append(scan_latent)
names.append(f"/scan/{scan['name']}")
domains.append(0) # scan
# Embed cad
cad_latent = triplet.embed(cad_data).view(batch_size, -1)
embeddings.append(cad_latent)
names.append(f"/cad/{cad['name']}")
domains.append(1) # cad
embedding_space = torch.cat(embeddings, dim=0) # problem
embedding_space = embedding_space.cpu().numpy()
domain_labels: np.array = np.asarray(domains)
cat_labels: np.array = np.asarray(category)
# Compute distances between all samples
distance_matrix = metrics.compute_distance_matrix(embedding_space)
confusion, conditional_confusions = metrics.compute_knn_confusions(distance_matrix, domain_labels, num_neighbors)
confusion_mean = np.average(confusion)
conditional_confusions_mean = [np.average(conf) for conf in conditional_confusions]
return confusion_mean, conditional_confusions_mean
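# --- Editor's sketch, not part of the original Real2CAD source ---
# The domain confusion computed above is, per sample, the fraction of its k nearest
# embedding-space neighbours that come from the other domain (scan vs. cad). A minimal
# numpy version, assuming a precomputed pairwise distance matrix, looks roughly like
# this; the project's metrics module may differ in detail.
def _knn_confusion_sketch(distance_matrix: np.ndarray, domain_labels: np.ndarray, k: int) -> np.ndarray:
    n = distance_matrix.shape[0]
    confusion = np.zeros(n)
    for i in range(n):
        order = np.argsort(distance_matrix[i])
        neighbours = order[order != i][:k]  # drop the sample itself before taking k neighbours
        confusion[i] = np.mean(domain_labels[neighbours] != domain_labels[i])
    return confusion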
def evaluate_retrieval_metrics(separation_model: nn.Module, completion_model: nn.Module, classification_model: nn.Module,
embedding_model: nn.Module, device, similarity_dataset_path: str,
scan_base_path: str, cad_base_path: str, gt_quat_file: str = None,
scan_dataset_name: str = "scannet", separation_model_on: bool = False, batch_size: int = 1,
trans=data.to_occupancy_grid, rotation_count: int = 1, filter_pool: bool = False, test_sample_limit: int = 999999,
wb_visualize_on = True, vis_name: str = "eval/", vis_sample_count: int = 5, verbose: bool = True):
# interested_categories = ["02747177", "02808440", "02818832", "02871439", "02933112", "03001627", "03211117",
# "03337140", "04256520", "04379243"]
unique_scan_objects, unique_cad_objects, sample_idx = get_unique_samples(similarity_dataset_path, rotation_count, test_sample_limit)
if separation_model_on:
scan_input_format = ".sdf"
scan_input_folder_extension = "_object_voxel"
scan_pc_folder_extension = "_object_pc"
input_only_mask = False
else:
scan_input_format = ".mask"
scan_input_folder_extension = "_mask_voxel"
scan_pc_folder_extension = "_mask_pc"
input_only_mask = True
scan_dataset: Dataset = data.InferenceDataset(scan_base_path, unique_scan_objects, scan_input_format, "scan",
transformation=trans, scan_dataset = scan_dataset_name, input_only_mask=input_only_mask)
scan_dataloader = torch.utils.data.DataLoader(dataset=scan_dataset, shuffle=True, batch_size=batch_size)
rotation_ranking_on = False
if rotation_count > 1:
rotation_ranking_on = True
# eval mode
# separation_model.eval()
# completion_model.eval()
# classification_model.eval()
# embedding_model.eval()
record_test_cloud = True
# vis_sep_com_count = vis_sample_count
# load all the scan object and cads that are waiting for testing
# # Evaluate all unique scan segments' embeddings
embeddings: Dict[str, np.array] = {}
mid_pred_cats: Dict[str, str] = {}
for names, elements in tqdm(scan_dataloader, total=len(scan_dataloader)):
# Move data to GPU
elements = elements.to(device)
with torch.no_grad():
if separation_model_on:
scan_foreground, _ = separation_model(elements)
scan_foreground = torch.sigmoid(scan_foreground)
scan_completed, hidden = completion_model(scan_foreground)
else:
scan_completed, hidden = completion_model(elements)
mid_pred_cat = classification_model.predict_name(torch.sigmoid(hidden)) # class str
scan_completed = torch.sigmoid(scan_completed)
# scan_completed = torch.where(scan_completed > 0.5, scan_completed, torch.zeros(scan_completed.shape))
scan_latent = embedding_model.embed(scan_completed)
for idx, name in enumerate(names):
embeddings[name] = scan_latent[idx].cpu().numpy().squeeze()
mid_pred_cats[name] = mid_pred_cat[idx]
#embeddings[names[0]] = scan_latent.cpu().numpy().squeeze() # why [0] ? now works only for batch_size = 1
#mid_pred_cats[name[0]] = mid_pred_cat
if wb_visualize_on: # TODO, may have bug
scan_voxel = elements[0].cpu().detach().numpy().reshape((32, 32, 32))
scan_cloud = data.voxel2point(scan_voxel)
wb_vis_dict = {vis_name + "input scan object": wandb.Object3D(scan_cloud)}
if separation_model_on:
foreground_voxel = scan_foreground[0].cpu().detach().numpy().reshape((32, 32, 32))
foreground_cloud = data.voxel2point(foreground_voxel, color_mode='prob')
wb_vis_dict[vis_name + "point_cloud_foreground"] = wandb.Object3D(foreground_cloud)
completed_voxel = scan_completed[0].cpu().detach().numpy().reshape((32, 32, 32))
completed_cloud = data.voxel2point(completed_voxel, color_mode='prob', visualize_prob_threshold = 0.75)
wb_vis_dict[vis_name + "point_cloud_completed"] = wandb.Object3D(completed_cloud)
wandb.log(wb_vis_dict)
# Evaluate all unique cad embeddings
# update unique_cad_objects
cad_dataset: Dataset = data.InferenceDataset(cad_base_path, unique_cad_objects, ".df", "cad",
transformation=trans)
cad_dataloader = torch.utils.data.DataLoader(dataset=cad_dataset, shuffle=False, batch_size=batch_size)
for names, elements in tqdm(cad_dataloader, total=len(cad_dataloader)):
# Move data to GPU
elements = elements.to(device)
with torch.no_grad():
#cad_latent = embedding_model.embed(elements).view(-1)
cad_latent = embedding_model.embed(elements)
for idx, name in enumerate(names):
embeddings[name] = cad_latent[idx].cpu().numpy().squeeze()
#embeddings[name[0]] = cad_latent.cpu().numpy().squeeze()
# Load GT alignment quat file
with open(gt_quat_file) as qf: # rotation quaternion of the cad models relative to the scan object
quat_content = json.load(qf)
json_quat = quat_content["scan2cad_objects"]
# Evaluate metrics
with open(similarity_dataset_path) as f:
samples = json.load(f).get("samples")
test_sample_limit = min(len(samples), test_sample_limit)
samples = list(samples[i] for i in sample_idx)
retrieved_correct = 0
retrieved_total = 0
retrieved_cat_correct = 0
retrieved_cat_total = 0
ranked_correct = 0
ranked_total = 0
# Top 7 categories and the others
selected_categories = ["03001627", "04379243", "02747177", "02818832", "02871439", "02933112", "04256520", "other"]
category_name_dict = {"03001627": "Chair", "04379243": "Table","02747177": "Trash bin","02818832": "Bed", "02871439":"Bookshelf", "02933112":"Cabinet","04256520":"Sofa","other":"Other"}
category_idx_dict = {"03001627": 0, "04379243": 1, "02747177": 2, "02818832": 3, "02871439": 4, "02933112": 5, "04256520": 6, "other": 7}
per_category_retrieved_correct = {category: 0 for category in selected_categories}
per_category_retrieved_total = {category: 0 for category in selected_categories}
per_category_ranked_correct = {category: 0 for category in selected_categories}
per_category_ranked_total = {category: 0 for category in selected_categories}
idx = 0
visualize = False
vis_sample = []
if vis_sample_count > 0:
visualize = True
vis_sample = random.sample(samples, vis_sample_count)
# Iterate over all annotations
for sample in tqdm(samples, total=len(samples)):
reference_name = sample["reference"]["name"].replace("/scan/", "")
reference_quat = json_quat.get(reference_name, [1.0, 0.0, 0.0, 0.0])
reference_embedding = embeddings[reference_name][np.newaxis, :]
#reference_embedding = embeddings[reference_name]
# only search nearest neighbor in the pool (the "ranked" list should be a subset of the "pool")
pool_names = [p["name"].replace("/cad/", "") for p in sample["pool"]]
# Filter pool with classification result
if filter_pool:
mid_pred_cat = mid_pred_cats[reference_name] # class str
if mid_pred_cat != 'other':
temp_pool_names = list(filter(lambda x: x.split('/')[0] == mid_pred_cat, pool_names))
else: # deal with other categories
temp_pool_names = list(filter(lambda x: x.split('/')[0] not in selected_categories, pool_names))
if len(temp_pool_names) != 0:
pool_names = temp_pool_names
#print("filter pool on")
pool_names = np.asarray(pool_names)
pool_names_all = []
if rotation_ranking_on:
pool_embeddings = []
deg_step = np.around(360.0 / rotation_count)
for p in pool_names:
for i in range(rotation_count):
cur_rot = int(i * deg_step)
cur_rot_p = p + "_" + str(cur_rot)
pool_names_all.append(cur_rot_p)
pool_embeddings.append(embeddings[cur_rot_p])
pool_names_all = np.asarray(pool_names_all)
else:
pool_embeddings = [embeddings[p] for p in pool_names]
pool_names_all = pool_names
pool_embeddings = np.asarray(pool_embeddings)
# Compute distances in embedding space
distances = scipy.spatial.distance.cdist(reference_embedding, pool_embeddings, metric="euclidean")
sorted_indices = np.argsort(distances, axis=1)
sorted_distances = np.take_along_axis(distances, sorted_indices, axis=1) # [1, filtered_pool_size * rotation_trial_count]
sorted_distances = sorted_distances[0] # [filtered_pool_size * rotation_trial_count]
predicted_ranking = np.take(pool_names_all, sorted_indices)[0].tolist()
ground_truth_names = [r["name"].replace("/cad/", "") for r in sample["ranked"]]
ground_truth_cat = ground_truth_names[0].split("/")[0]
# ground_truth_cat = reference_name.split("_")[4] #only works for scannet (scan2cad)
predicted_cat = predicted_ranking[0].split("/")[0]
# retrieval accuracy (top 1 [nearest neighbor] model is in the ranking list [1-3])
sample_retrieved_correct = 1 if metrics.is_correctly_retrieved(predicted_ranking, ground_truth_names) else 0
retrieved_correct += sample_retrieved_correct
retrieved_total += 1
# the top 1's category is correct [specific category str belongs to 'other' would also be compared]
sample_cat_correct = 1 if metrics.is_category_correctly_retrieved(predicted_cat, ground_truth_cat) else 0
#sample_cat_correct = 1 if metrics.is_category_correctly_retrieved(mid_pred_cat, ground_truth_cat) else 0
retrieved_cat_correct += sample_cat_correct
retrieved_cat_total += 1
# per-category retrieval accuracy
reference_category = metrics.get_category_from_list(ground_truth_cat, selected_categories)
per_category_retrieved_correct[reference_category] += sample_retrieved_correct
per_category_retrieved_total[reference_category] += 1
# ranking quality
sample_ranked_correct = metrics.count_correctly_ranked_predictions(predicted_ranking, ground_truth_names)
ranked_correct += sample_ranked_correct
ranked_total += len(ground_truth_names)
per_category_ranked_correct[reference_category] += sample_ranked_correct
per_category_ranked_total[reference_category] += len(ground_truth_names)
if wb_visualize_on and visualize and sample in vis_sample:
idx = idx + 1
# raw scan segment
parts = reference_name.split("_")
if scan_dataset_name == 'scannet':
scan_name = parts[0] + "_" + parts[1]
object_name = scan_name + "__" + parts[3]
scan_cat_name = utils.get_category_name(parts[4])
scan_cat = utils.wandb_color_lut(parts[4])
scan_path = os.path.join(scan_base_path, scan_name, scan_name + scan_input_folder_extension, object_name + scan_input_format)
elif scan_dataset_name == '2d3ds':
area_name = parts[0]+"_"+parts[1]
room_name = parts[2]+"_"+parts[3]
scan_cat_name = parts[4]
scan_cat = utils.wandb_color_lut(utils.get_category_code_from_2d3ds(scan_cat_name))
scan_path = os.path.join(scan_base_path, area_name, room_name, room_name + scan_input_folder_extension, reference_name + scan_input_format)
if separation_model_on:
scan_voxel_raw = data.load_raw_df(scan_path)
scan_voxel = data.to_occupancy_grid(scan_voxel_raw).tdf.reshape((32, 32, 32))
else:
scan_voxel_raw = data.load_mask(scan_path)
scan_voxel = scan_voxel_raw.tdf.reshape((32, 32, 32))
scan_grid2world = scan_voxel_raw.matrix
scan_voxel_res = scan_voxel_raw.size
#print("Scan voxel shape:", scan_voxel.shape)
scan_wb_obj = data.voxel2point(scan_voxel, scan_cat, name=scan_cat_name + ":" + object_name)
wb_vis_retrieval_dict = {vis_name+"input scan object " + str(idx): wandb.Object3D(scan_wb_obj)}
# ground truth cad to scan rotation
Rsc = data.get_rot_cad2scan(reference_quat)
# ground truth cad
if scan_dataset_name == 'scannet':
gt_cad = os.path.join(parts[4], parts[5])
elif scan_dataset_name == '2d3ds':
gt_cad = ground_truth_names[0]
gt_cad_path = os.path.join(cad_base_path, gt_cad+ ".df")
gt_cad_voxel = data.to_occupancy_grid(data.load_raw_df(gt_cad_path)).tdf
gt_cad_voxel_rot = data.rotation_augmentation_interpolation_v3(gt_cad_voxel, "cad", aug_rotation_z = 0, pre_rot_mat = Rsc).reshape((32, 32, 32))
gt_cad_wb_obj = data.voxel2point(gt_cad_voxel_rot, scan_cat, name=scan_cat_name + ":" + gt_cad)
wb_vis_retrieval_dict[vis_name+"ground truth cad " + str(idx)] = wandb.Object3D(gt_cad_wb_obj)
# Top K choices
top_k = 4
for top_i in range(top_k):
predicted_cad = predicted_ranking[top_i]
predicted_cad_path = os.path.join(cad_base_path, predicted_cad.split("_")[0] + ".df")
predicted_cad_voxel = data.to_occupancy_grid(data.load_raw_df(predicted_cad_path)).tdf
predicted_rotation = int(predicted_cad.split("_")[1]) # deg
predicted_cad_voxel_rot = data.rotation_augmentation_interpolation_v3(predicted_cad_voxel, "dummy", aug_rotation_z = predicted_rotation).reshape((32, 32, 32))
predicted_cat = utils.wandb_color_lut(predicted_cad.split("/")[0])
predicted_cat_name = utils.get_category_name(predicted_cad.split("/")[0])
predicted_cad_wb_obj = data.voxel2point(predicted_cad_voxel_rot, predicted_cat, name=predicted_cat_name + ":" + predicted_cad)
wb_title = vis_name + "Top " + str(top_i+1) + " retrieved cad with rotation " + str(idx)
wb_vis_retrieval_dict[wb_title] = wandb.Object3D(predicted_cad_wb_obj)
wandb.log(wb_vis_retrieval_dict)
print("retrieval accuracy")
cat_retrieval_accuracy = retrieved_cat_correct / retrieved_cat_total
print(f"correct: {retrieved_cat_correct}, total: {retrieved_cat_total}, category level (rough) accuracy: {cat_retrieval_accuracy:4.3f}")
cad_retrieval_accuracy = retrieved_correct/retrieved_total
print(f"correct: {retrieved_correct}, total: {retrieved_total}, cad model level (fine-grained) accuracy: {cad_retrieval_accuracy:4.3f}")
if verbose:
for (category, correct), total in zip(per_category_retrieved_correct.items(),
per_category_retrieved_total.values()):
category_name = utils.get_category_name(category)
if total == 0:
print(
f"{category}:[{category_name}] {correct:>5d}/{total:>5d} --> Nan")
else:
print(
f"{category}:[{category_name}] {correct:>5d}/{total:>5d} --> {correct / total:4.3f}")
ranking_accuracy = ranked_correct / ranked_total
# print("ranking quality")
# print(f"correct: {ranked_correct}, total: {ranked_total}, ranking accuracy: {ranking_accuracy:4.3f}")
# if verbose:
# for (category, correct), total in zip(per_category_ranked_correct.items(),
# per_category_ranked_total.values()):
# category_name = utils.get_category_name(category)
# if 0 == total:
# print(
# f"{category}:[{category_name}] {correct:>5d}/{total:>5d} --> Nan")
# else:
# print(
# f"{category}:[{category_name}] {correct:>5d}/{total:>5d} --> {correct / total:4.3f}")
return cat_retrieval_accuracy, cad_retrieval_accuracy, ranking_accuracy
def embed_scan_objs(separation_model: nn.Module, completion_model: nn.Module, classification_model: nn.Module,
embedding_model: nn.Module, device, scan_obj_list_path: str, scan_base_path: str, output_path: str,
scan_dataset_name: str = "scannet", separation_model_on: bool = False,
batch_size: int = 1, trans=data.to_occupancy_grid, output: bool = True):
#load scan list
with open(scan_obj_list_path) as f:
scenes = json.load(f)["scan2cad_objects"]
unique_scan_objects = list(set(scenes.keys()))
scan_seg_count = len(unique_scan_objects)
if separation_model_on:
scan_input_format = ".sdf"
scan_input_folder_extension = "_object_voxel"
scan_pc_folder_extension = "_object_pc"
input_only_mask = False
else:
scan_input_format = ".mask"
scan_input_folder_extension = "_mask_voxel"
scan_pc_folder_extension = "_mask_pc"
input_only_mask = True
scan_dataset: Dataset = data.InferenceDataset(scan_base_path, unique_scan_objects, scan_input_format, "scan",
transformation=trans, scan_dataset = scan_dataset_name, input_only_mask=input_only_mask)
scan_dataloader = torch.utils.data.DataLoader(dataset=scan_dataset, shuffle=False, batch_size=batch_size)
# Evaluate all unique scan segments' embeddings
embeddings_all: Dict[str, Dict] = {}
for names, elements in tqdm(scan_dataloader, total=len(scan_dataloader)):
# Move data to GPU
elements = elements.to(device)
with torch.no_grad():
if separation_model_on:
scan_foreground, _ = separation_model(elements)
scan_foreground = torch.sigmoid(scan_foreground)
scan_completed, _ = completion_model(scan_foreground)
else:
scan_completed, _ = completion_model(elements)
scan_completed = torch.sigmoid(scan_completed)
scan_latent = embedding_model.embed(scan_completed)
for idx, name in enumerate(names):
cur_scan_embedding = scan_latent[idx].cpu().numpy().squeeze()
if scan_dataset_name == "scannet":
cat = name.split("_")[4]
elif scan_dataset_name == "2d3ds":
cat = utils.get_category_code_from_2d3ds(name.split("_")[4])
if cat not in embeddings_all.keys():
embeddings_all[cat] = {name: cur_scan_embedding}
else:
embeddings_all[cat][name] = cur_scan_embedding
print("Embed [", scan_seg_count, "] scan segments")
if output:
torch.save(embeddings_all, output_path)
print("Output scan segement embeddings to [", output_path, "]")
# TODO better to have a different data input for scan2cad_file
def embed_cad_pool(embedding_model: nn.Module, device, modelpool_path: str, shapenet_path: str, output_path: str,
batch_size: int = 1, trans=data.to_occupancy_grid, rotation_count: int = 1, output: bool = True):
#load model pool
with open(modelpool_path) as f:
model_pool = json.load(f)
# delete duplicate elements from each list
for cat, cat_list in model_pool.items():
cat_list_filtered = list(set(cat_list))
model_pool[cat] = cat_list_filtered
rotation_ranking_on = False
if rotation_count > 1:
rotation_ranking_on = True
# get unique cad names (with rotation)
unique_cads = []
categories = list(model_pool.keys())
for category in categories:
cat_pool = model_pool[category]
for cad_element in cat_pool:
cad = cad_element.replace("/cad/", "")
if rotation_ranking_on:
deg_step = np.around(360.0 / rotation_count)
for i in range(rotation_count):
cur_rot = int(i * deg_step)
cur_cad = cad + "_" + str(cur_rot)
unique_cads.append(cur_cad)
else:
unique_cads.append(cad)
# unique_cads = list(unique_cads)
cad_dataset: Dataset = data.InferenceDataset(shapenet_path, unique_cads, ".df", "cad", transformation=trans)
cad_dataloader = torch.utils.data.DataLoader(dataset=cad_dataset, shuffle=False, batch_size=batch_size)
cad_count = len(unique_cads)
embeddings_all: Dict[str, Dict] = {}
for category in categories:
embeddings_all[category] = {}
print("Embed [", cad_count, "] CAD models with [",rotation_count, "] rotations from [", len(categories), "] categories")
for names, elements in tqdm(cad_dataloader, total=len(cad_dataloader)):
# Move data to GPU
elements = elements.to(device)
with torch.no_grad():
cad_latent = embedding_model.embed(elements)
for idx, name in enumerate(names):
cur_cat = name.split("/")[0]
embeddings_all[cur_cat][name] = cad_latent[idx].cpu().numpy().squeeze()
if output:
torch.save(embeddings_all, output_path)
print("Output CAD embeddings to [", output_path, "]")
def embedding_tsne(cad_embeddings_path: str, scan_embeddings_path: str, out_path: str,
joint_embedding: bool = True, visualize_on: bool = True, rot_count: int = 12):
rotation_step = 360 / rot_count
cad_embeddings_dict = torch.load(cad_embeddings_path)
scan_embeddings_dict = torch.load(scan_embeddings_path)
sample_rate_cad = 20
sample_rate_scan = 10
cad_embeddings = []
cad_cat = []
cad_rot = []
cad_flag = []
count = 0
cats = list(cad_embeddings_dict.keys())
for cat, cat_embeddings_dict in cad_embeddings_dict.items():
for cad_id, cad_embedding in cat_embeddings_dict.items():
if np.random.randint(sample_rate_cad)==0: # random selection
rot = int(int(cad_id.split('_')[1])/rotation_step)
#print(rot)
cad_embeddings.append(cad_embedding)
cad_cat.append(cat)
cad_rot.append(rot)
cad_flag.append(0) # is cad
count += 1
cad_embeddings = np.asarray(cad_embeddings)
if joint_embedding:
scan_embeddings = []
scan_cat = []
scan_flag = []
count = 0
cats = list(scan_embeddings_dict.keys())
for cat, cat_embeddings_dict in scan_embeddings_dict.items():
for scan_id, scan_embedding in cat_embeddings_dict.items():
if np.random.randint(sample_rate_scan)==0: # random selection
scan_embeddings.append(scan_embedding)
scan_cat.append(cat)
scan_flag.append(1) # is scan
count += 1
scan_embeddings = np.asarray(scan_embeddings)
joint_embeddings = np.vstack((cad_embeddings, scan_embeddings))
joint_cat = cad_cat + scan_cat
joint_flag = cad_flag + scan_flag
print("Visualize the joint embedding space of scan and CAD")
tsne = TSNE(n_components=2, init='pca', random_state=501)
embedding_tsne = tsne.fit_transform(joint_embeddings)
else:
print("Visualize the embedding space of CAD")
tsne = TSNE(n_components=2, init='pca', random_state=501)
embedding_tsne = tsne.fit_transform(cad_embeddings)
# Visualization (2 dimensional)
x_min, x_max = embedding_tsne.min(0), embedding_tsne.max(0)
embedding_tsne = (embedding_tsne - x_min) / (x_max - x_min) # normalization
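# min-max scaling maps each t-SNE axis to [0, 1] so all points share a common plotting range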
marker_list = ['o', '>', 'x', '.', ',', '+', 'v', '^', '<', 's', 'd', '8']
legends = []
cat_names = []
cat_idxs = []
# deal with 'others' category
for cat in cats:
cat_name = utils.get_category_name(cat)
cat_idx = utils.get_category_idx(cat)+1
if cat_name not in cat_names:
cat_names.append(cat_name)
if cat_idx not in cat_idxs:
cat_idxs.append(cat_idx)
for i in range(len(cat_names)):
legend = mpatches.Patch(color=plt.cm.Set1(cat_idxs[i]), label=cat_names[i])
legends.append(legend)
legends = list(set(legends))
plt.figure(figsize=(10, 10))
for i in range(embedding_tsne.shape[0]):
if joint_embedding:
if joint_flag[i] == 0: # cad
plt.scatter(embedding_tsne[i, 0], embedding_tsne[i, 1], s=40, color=plt.cm.Set1(utils.get_category_idx(joint_cat[i])+1),
marker=marker_list[0], alpha = 0.5)
else: # scan
plt.scatter(embedding_tsne[i, 0], embedding_tsne[i, 1], s=40, color=plt.cm.Set1(utils.get_category_idx(joint_cat[i])+1),
marker=marker_list[1])
else: # only cad embeddings
plt.scatter(embedding_tsne[i, 0], embedding_tsne[i, 1], s=40, color=plt.cm.Set1(utils.get_category_idx(cad_cat[i])+1), marker=marker_list[cad_rot[i]], alpha = 0.8) # cad only
plt.xticks([])
plt.yticks([])
# plt.legend(handles=lengends, loc='upper left', fontsize=12)
plt.legend(handles=legends, loc='best', fontsize=16)
#plt.savefig(os.path.join(out_path, "cad_embedding_tsne.jpg"), dpi=1000)
if joint_embedding:
save_path = out_path+"_joint_embedding_tsne.jpg"
else:
save_path = out_path+"_cad_embedding_tsne.jpg"
plt.savefig(save_path, dpi=1000)
print("TSNE image saved to [", save_path, " ]")
if visualize_on:
wandb.log({"embedding_tsne": plt})
'''
Retrieve cads in a list of scans and then apply CAD to scan alignment
'''
def retrieve_in_scans(separation_model: nn.Module, completion_model: nn.Module, classification_model: nn.Module,
embedding_model: nn.Module, device, test_scan_list: List[str], cad_embeddings_path: str, cad_appearance_file: str,
scan_base_path: str, cad_voxel_base_path: str, cad_pc_base_path: str, result_out_path: str,
scan_dataset_name: str = "scannet", separation_model_on: bool = False,
batch_size: int = 1, trans=data.to_occupancy_grid, rotation_count: int = 1,
filter_pool: bool = True, in_the_wild: bool = True, init_scale_method: str = "naive",
icp_mode: str = "p2p", corr_dist_thre_scale = 4.0, estimate_scale_icp: bool = False,
rot_only_around_z: bool = False, use_coarse_reg_only: bool = False,
visualize: bool = False, wb_vis_name: str = "2d3ds_test/"):
cad_embeddings = torch.load(cad_embeddings_path)
all_categories = list(cad_embeddings.keys())
selected_categories = ["03001627", "04379243", "02747177", "02818832", "02871439", "02933112", "04256520", "other"]
#{"03001627": "Chair", "04379243": "Table", "02747177": "Trash bin", "02818832": "Bed", "02871439": "Bookshelf", "02933112": "Cabinet", "04256520": "Sofa"}
other_categories = list(set(all_categories).difference(set(selected_categories)))
if separation_model_on:
scan_input_format = ".sdf"
scan_input_folder_extension = "_object_voxel"
scan_pc_folder_extension = "_object_pc"
input_only_mask = False
else:
scan_input_format = ".mask"
scan_input_folder_extension = "_mask_voxel"
scan_pc_folder_extension = "_mask_pc"
input_only_mask = True
if not in_the_wild:
with open(cad_appearance_file) as f:
cad_appearance_dict = json.load(f)
unique_scan_objects, scene_scan_objects = get_scan_objects_in_scenes(test_scan_list, scan_base_path,
extension = scan_input_format, folder_extension=scan_input_folder_extension)
scan_dataset: Dataset = data.InferenceDataset(scan_base_path, unique_scan_objects, scan_input_format, "scan",
transformation=trans, scan_dataset = scan_dataset_name, input_only_mask=input_only_mask)
scan_dataloader = torch.utils.data.DataLoader(dataset=scan_dataset, shuffle=False, batch_size=batch_size)
rotation_ranking_on = False
if rotation_count > 1:
rotation_ranking_on = True
deg_step = np.around(360.0 / rotation_count)
# load all the scan objects and CADs that are to be tested
# Evaluate all unique scan segments' embeddings
scan_embeddings: Dict[str, np.array] = {}
completed_voxels: Dict[str, np.array] = {} # saved the completed scan object (tensor) [potential issue: limited memory]
mid_pred_cats: Dict[str, str] = {}
for names, elements in tqdm(scan_dataloader, total=len(scan_dataloader)):
# Move data to GPU
elements = elements.to(device)
with torch.no_grad():
if separation_model_on:
scan_foreground, _ = separation_model(elements)
scan_foreground = torch.sigmoid(scan_foreground)
scan_completed, hidden = completion_model(scan_foreground)
else:
scan_completed, hidden = completion_model(elements)
mid_pred_cat = classification_model.predict_name(torch.sigmoid(hidden)) # class str
scan_completed = torch.sigmoid(scan_completed)
scan_latent = embedding_model.embed(scan_completed)
for idx, name in enumerate(names):
scan_embeddings[name] = scan_latent[idx].cpu().numpy().squeeze()
mid_pred_cats[name] = mid_pred_cat[idx]
if init_scale_method == "bbx":
# record the completed object
completed_voxels[name] = scan_completed[idx].cpu().numpy()
results = []
# TODO: try to make it run in parallel to speed up
for scene_name, scan_objects in scene_scan_objects.items():
scene_results = {"id_scan": scene_name, "aligned_models": []}
print("Process scene [", scene_name, "]")
print("---------------------------------------------")
for scan_object in tqdm(scan_objects, total=len(scan_objects)):
print("Process scan segement [", scan_object, "]")
scan_object_embedding = scan_embeddings[scan_object][np.newaxis, :]
if not in_the_wild:
cad_embeddings_in_scan = {}
cad_list_in_scan = list(cad_appearance_dict[scene_name].keys())
for cad in cad_list_in_scan:
parts = cad.split("_")
cat = parts[0]
cad_id = parts[1]
if cat not in cad_embeddings_in_scan.keys():
cad_embeddings_in_scan[cat] = {}
if rotation_ranking_on:
for i in range(rotation_count):
cur_rot = int(i * deg_step)
cad_str = cat+"/"+cad_id+"_" + str(cur_rot)
cad_embeddings_in_scan[cat][cad_str] = cad_embeddings[cat][cad_str]
else:
cad_str = cat+"/"+cad_id
cad_embeddings_in_scan[cat][cad_str] = cad_embeddings[cat][cad_str]
else:
cad_embeddings_in_scan = cad_embeddings
all_categories = list(cad_embeddings_in_scan.keys())
other_categories = list(set(all_categories).difference(set(selected_categories)))
filtered_cad_embeddings = {}
mid_pred_cat = mid_pred_cats[scan_object] # class str
print("Predicted category:", utils.get_category_name(mid_pred_cat))
if mid_pred_cat != 'other':
if filter_pool and mid_pred_cat in all_categories:
filtered_cad_embeddings = cad_embeddings_in_scan[mid_pred_cat]
else: # search in the whole model pool (when we do not enable pool filtering or when the category prediction is not in the pool's category keys)
for cat in all_categories:
filtered_cad_embeddings = {**filtered_cad_embeddings, **cad_embeddings_in_scan[cat]}
else: # if it is classified as 'other', search in the categories of 'other' (when other categories do exist in the model pool's keys)
if filter_pool and len(other_categories)>0:
for cat in other_categories:
filtered_cad_embeddings = {**filtered_cad_embeddings, **cad_embeddings_in_scan[cat]}
else:
for cat in all_categories:
filtered_cad_embeddings = {**filtered_cad_embeddings, **cad_embeddings_in_scan[cat]}
pool_names = list(filtered_cad_embeddings.keys())
pool_embeddings = [filtered_cad_embeddings[p] for p in pool_names]
pool_embeddings = np.asarray(pool_embeddings)
# Compute distances in embedding space
distances = scipy.spatial.distance.cdist(scan_object_embedding, pool_embeddings, metric="euclidean") # figure out which distance is the better
sorted_indices = np.argsort(distances, axis=1)
sorted_distances = np.take_along_axis(distances, sorted_indices, axis=1) # [1, filtered_pool_size * rotation_trial_count]
sorted_distances = sorted_distances[0] # [filtered_pool_size * rotation_trial_count]
predicted_ranking = np.take(pool_names, sorted_indices)[0].tolist()
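# predicted_ranking now lists the filtered pool entries ("<category>/<cad id>", optionally
# suffixed with "_<rotation deg>") sorted by ascending embedding distance; index 0 is the best match.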
# apply registration
# load scan segment (target cloud)
parts = scan_object.split("_")
if scan_dataset_name == 'scannet':
scan_name = parts[0] + "_" + parts[1]
object_name = scan_name + "__" + parts[3]
scan_path = os.path.join(scan_base_path, scan_name, scan_name + scan_input_folder_extension, scan_object + scan_input_format)
scan_pc_path = os.path.join(scan_base_path, scan_name, scan_name + scan_pc_folder_extension, scan_object + ".pcd")
elif scan_dataset_name == '2d3ds':
area_name = parts[0]+"_"+parts[1]
room_name = parts[2]+"_"+parts[3]
scan_path = os.path.join(scan_base_path, area_name, room_name, room_name + scan_input_folder_extension, scan_object + scan_input_format)
scan_pc_path = os.path.join(scan_base_path, area_name, room_name, room_name + scan_pc_folder_extension, scan_object + ".pcd")
if separation_model_on:
scan_voxel_raw = data.load_raw_df(scan_path)
scan_voxel = data.to_occupancy_grid(scan_voxel_raw).tdf.reshape((32, 32, 32))
else:
scan_voxel_raw = data.load_mask(scan_path)
scan_voxel = scan_voxel_raw.tdf.reshape((32, 32, 32))
scan_grid2world = scan_voxel_raw.matrix
scan_voxel_res = scan_voxel_raw.size
#print("Scan voxel shape:", scan_voxel.shape)
#res = scan_grid2world[0,0]
if init_scale_method == "bbx":
# get the completed scan object (voxel representation)
scan_completed_voxel = completed_voxels[scan_object] # np.array
# rotate to CAD's canonical system
if rotation_ranking_on:
top_1_predicted_rotation = int(predicted_ranking[0].split("_")[1]) # deg
else:
top_1_predicted_rotation = 0
scan_completed_voxel_rot = data.rotation_augmentation_interpolation_v3(scan_completed_voxel, "dummy", aug_rotation_z = -top_1_predicted_rotation).reshape((32, 32, 32))
# calculate bbx length of the rotated completed scan object in voxel space (unit: voxel)
scan_bbx_length = data.cal_voxel_bbx_length(scan_completed_voxel_rot) # output: np.array([bbx_lx, bbx_ly, bbx_lz])
scan_voxel_wb = data.voxel2point(scan_voxel, name=scan_object)
if visualize:
wandb_vis_dict = {wb_vis_name+"input scan object": wandb.Object3D(scan_voxel_wb)}
# load point cloud
scan_seg_pcd = o3d.io.read_point_cloud(scan_pc_path)
scan_seg_pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=scan_voxel_res, max_nn=30))
# initialization
candidate_cads = []
candidate_cad_pcds = []
candidate_rotations = []
candidate_cads_voxel_wb = []
candidate_T_init = []
candidate_T_reg = []
candidate_fitness_reg = []
# Apply registration
corr_dist_thre = corr_dist_thre_scale * scan_voxel_res
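# Illustrative numbers only: if the scan voxel size were 5 cm, the default
# corr_dist_thre_scale = 4.0 would give a 20 cm ICP correspondence cut-off.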
top_k = 4
for top_i in range(top_k):
# load cad (source cloud)
predicted_cad = predicted_ranking[top_i]
title_str = "Top "+ str(top_i+1) + " retrieved model "
print(title_str, predicted_cad)
if rotation_ranking_on:
predicted_rotation = int(predicted_cad.split("_")[1]) # deg
else:
predicted_rotation = 0
predicted_cat = utils.wandb_color_lut(predicted_cad.split("/")[0])
predicted_cat_name = utils.get_category_name(predicted_cad.split("/")[0])
cad_voxel_path = os.path.join(cad_voxel_base_path, predicted_cad.split("_")[0] + ".df")
cad_voxel = data.to_occupancy_grid(data.load_raw_df(cad_voxel_path)).tdf
cad_voxel_rot = data.rotation_augmentation_interpolation_v3(cad_voxel, "dummy", aug_rotation_z = predicted_rotation).reshape((32, 32, 32))
cad_voxel_wb = data.voxel2point(cad_voxel_rot, predicted_cat, name=predicted_cat_name + ":" + predicted_cad)
if visualize:
wandb_vis_dict[wb_vis_name + title_str] = wandb.Object3D(cad_voxel_wb)
cad_pcd_path = os.path.join(cad_pc_base_path, predicted_cad.split("_")[0] + ".pcd")
cad_pcd = o3d.io.read_point_cloud(cad_pcd_path)
if icp_mode == "p2l": # requires normal
cad_pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.05, max_nn=30))
if init_scale_method == "bbx":
# calculate bbx length of the (non-rotated) retrieved cad in voxel space (unit: voxel)
cad_bbx_length = data.cal_voxel_bbx_length(cad_voxel) # output: np.array([bbx_lx, bbx_ly, bbx_lz])
cad_scale_multiplier = scan_bbx_length / cad_bbx_length # potential issue (int/int), result should be float
direct_scale_on = False
elif init_scale_method == "learning":
direct_scale_on = True
cad_scale_multiplier = np.ones(3) # comment later, replace with the predicted value
else: # init_scale_method == "naive":
cad_scale_multiplier = np.ones(3)
direct_scale_on = False
# Transformation initial guess
T_init = data.get_tran_init_guess(scan_grid2world, predicted_rotation, direct_scale = direct_scale_on,
cad_scale_multiplier=cad_scale_multiplier)
# Apply registration
if icp_mode == "p2l": # point-to-plane distance metric
T_reg, eval_reg = data.reg_icp_p2l_o3d(cad_pcd, scan_seg_pcd, corr_dist_thre, T_init, estimate_scale_icp, rot_only_around_z)
else: # point-to-point distance metric
T_reg, eval_reg = data.reg_icp_p2p_o3d(cad_pcd, scan_seg_pcd, corr_dist_thre, T_init, estimate_scale_icp, rot_only_around_z)
fitness_reg = eval_reg.fitness
candidate_cads.append(predicted_cad)
candidate_cad_pcds.append(cad_pcd)
candidate_rotations.append(predicted_rotation)
candidate_cads_voxel_wb.append(cad_voxel_wb)
candidate_T_init.append(T_init)
candidate_T_reg.append(T_reg)
candidate_fitness_reg.append(fitness_reg)
candidate_fitness_reg = np.array(candidate_fitness_reg)
best_idx =
|
np.argsort(candidate_fitness_reg)
|
numpy.argsort
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Enable an agent to follow a hard coded trajectory in the form of
a square with rounded corners using trained straight and circle models.
"""
import argparse
import cProfile
import pstats
import sys
import time
import math
import yaml
import joblib
import matplotlib.pyplot as plt
import numpy as np
from rllab.misc import tensor_utils
from aa_simulation.envs.renderer import _Renderer
def render(renderer, state, action):
"""
Render simulation environment.
"""
renderer.update(state, action)
def modify_state_curve(state, move_param):
"""
Convert state [x, y, yaw, x_dot, y_dot, yaw_dot] to
[dx, theta, ddx, dtheta]
"""
x_0, y_0, r = move_param
x, y, yaw, x_dot, y_dot, yaw_dot = state
x -= x_0
y -= y_0
dx = np.sqrt(np.square(x) + np.square(y)) - r
theta = _normalize_angle(np.arctan2(-x, y) + np.pi - yaw)
ddx = x/(x**2 + y**2)**0.5*x_dot + y/(x**2 + y**2)**0.5*y_dot
dtheta = x/(x**2 + y**2)*x_dot - y/(x**2 + y**2)*y_dot - yaw_dot
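# dx is the radial offset from the circle of radius r centred at (x_0, y_0); ddx follows from the
# chain rule on sqrt(x^2 + y^2); theta and dtheta are the corresponding heading-error terms
# used by the trained circle model.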
return np.array([dx, theta, ddx, dtheta])
def _normalize_angle(angle):
"""
Normalize angle to [-pi, pi).
"""
angle = angle % (2*np.pi)
if (angle >= np.pi):
angle -= 2*np.pi
return angle
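# e.g. _normalize_angle(3*np.pi/2) returns -np.pi/2 and _normalize_angle(np.pi) returns -np.pi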
def _normalize_angle2(angle):
"""
Normalize angle to [0, 2 * pi).
"""
angle = angle % (2*np.pi)
return angle
def modify_state_straight(state, move_param):
"""
Add target direction and target velocity to state, to feed
in the NN.
"""
x_0, y_0, target_dir = move_param
x, y, yaw, x_dot, y_dot, dyaw = state
target_dir = _normalize_angle2(target_dir)
new_x, new_y = _cal_distance(x, y, move_param)
yaw = _normalize_angle2(yaw) - target_dir
yaw = _normalize_angle(yaw)
new_x_dot = x_dot * np.cos(target_dir) + y_dot * np.sin(target_dir)
new_y_dot = y_dot * np.cos(target_dir) - x_dot * np.sin(target_dir)
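# new_x_dot/new_y_dot are the world-frame velocities rotated by -target_dir, i.e. expressed in a
# frame whose x-axis points along the target direction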
return
|
np.array([new_y, yaw, new_x_dot, new_y_dot, dyaw])
|
numpy.array
|
#!/usr/bin/env python
# coding: utf-8
import pdb
import IPython.display as ipd
import soundfile as sf
import IPython
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
import scipy as sp
import scipy.interpolate
import scipy.io.wavfile
import aubio
import librosa
from librosa.util import frame
import os
from utils import Create_Phoneme_Labels, pitch_shift, time_stretch
from audiomentations import Compose, AddGaussianNoise, TimeStretch, PitchShift, Shift, SpecCompose, SpecChannelShuffle, SpecFrequencyMask
# Spectrogram parameters
frame_sizes = [4096]
num_specs = [64]
num_frames = 48
hop_size = 512
delta_bool = False
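# With fs = 44100 Hz these settings give ~93 ms analysis frames (4096/44100) hopped every
# ~11.6 ms (512/44100); each onset segment keeps num_frames = 48 hops, i.e. roughly 0.56 s.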
# Augmentation parameters
augment_waveform = Compose(
[
AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.05, p=0.5),
]
)
augment_spectrogram = SpecCompose(
[
SpecFrequencyMask(p=0.75),
]
)
# Create AVP Test Dataset
print('AVP Test')
path_audio = 'data/external/AVP_Dataset/Personal'
list_wav = []
list_csv = []
for path, subdirs, files in os.walk(path_audio):
for filename in files:
if filename.endswith('.wav'):
list_wav.append(os.path.join(path, filename))
if filename.endswith('.csv'):
list_csv.append(os.path.join(path, filename))
list_wav = sorted(list_wav)
list_csv = sorted(list_csv)
list_wav.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
list_csv.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
list_wav = list_wav[2::5]
list_csv = list_csv[2::5]
for i in range(len(list_wav)):
onsets = np.loadtxt(list_csv[i], delimiter=',', usecols=0)
Classes = np.loadtxt(list_csv[i], delimiter=',', usecols=1, dtype=np.unicode_)
Onset_Phonemes = np.loadtxt(list_csv[i], delimiter=',', usecols=2, dtype=np.unicode_)
Nucleus_Phonemes = np.loadtxt(list_csv[i], delimiter=',', usecols=3, dtype=np.unicode_)
Onset_Phonemes_Labels, Nucleus_Phonemes_Labels, Onset_Phonemes_Reduced_Labels, Nucleus_Phonemes_Reduced_Labels = Create_Phoneme_Labels(Onset_Phonemes, Nucleus_Phonemes)
audio, fs = librosa.load(list_wav[i], sr=44100)
audio = audio/np.max(abs(audio))
onsets_samples = onsets*fs
onsets_samples = onsets_samples.astype(int)
for j in range(len(num_specs)):
for w in range(len(frame_sizes)):
frame_size = frame_sizes[w]
num_spec = num_specs[j]
audio = audio.astype(np.float32)
spec = librosa.feature.melspectrogram(np.concatenate((audio,np.zeros(1024))), sr=44100, n_fft=frame_size, hop_length=hop_size, n_mels=num_spec, power=1.0).T
#spec = np.abs(librosa.stft(audio, n_fft=frame_size, hop_length=hop_size, win_length=None, window='hann', center=True, dtype=None, pad_mode='reflect')[:frame_size//2].T)
if delta_bool:
delta = librosa.feature.delta(spec)
Dataset_Spec = np.concatenate((spec, delta), axis=1)
else:
Dataset_Spec = spec
Onsets = np.zeros(spec.shape[0])
location = np.floor(onsets_samples/hop_size)
if (location.astype(int)[-1]<len(Onsets)):
Onsets[location.astype(int)] = 1
else:
Onsets[location.astype(int)[:-1]] = 1
num_onsets = int(np.sum(Onsets))
Spec_Matrix = np.zeros((num_onsets,num_spec,num_frames))
L = len(Onsets)
count = 0
for n in range(L):
if Onsets[n]==1:
c = 1
while (n+c)<L-1 and Onsets[n+c]==0: # check the bound first to avoid indexing past the end
c += 1
Spec = Dataset_Spec[n:n+c]
if c<num_frames:
Spec = np.concatenate((Spec,np.zeros((num_frames-c,num_spec))))
elif c>=num_frames:
Spec = Spec[:num_frames]
Spec_Matrix[count] = np.flipud(Spec.T)
count += 1
list_num = [Spec_Matrix.shape[0],len(Classes),len(Onset_Phonemes_Labels),len(Nucleus_Phonemes_Labels),len(Onset_Phonemes_Reduced_Labels),len(Nucleus_Phonemes_Reduced_Labels)]
if list_num.count(list_num[0])!=len(list_num):
print(list_num)
print(list_wav[i])
np.save('data/interim/AVP/Dataset_Test_' + str(i).zfill(2), Spec_Matrix)
np.save('data/interim/AVP/Classes_Test_' + str(i).zfill(2), Classes)
np.save('data/interim/AVP/Syll_Onset_Test_' + str(i).zfill(2), Onset_Phonemes_Labels)
np.save('data/interim/AVP/Syll_Nucleus_Test_' + str(i).zfill(2), Nucleus_Phonemes_Labels)
np.save('data/interim/AVP/Syll_Onset_Reduced_Test_' + str(i).zfill(2), Onset_Phonemes_Reduced_Labels)
np.save('data/interim/AVP/Syll_Nucleus_Reduced_Test_' + str(i).zfill(2), Nucleus_Phonemes_Reduced_Labels)
# Create AVP Test Aug Dataset
print('AVP Test Aug')
path_audio = 'data/external/AVP_Dataset/Personal'
list_wav = []
list_csv = []
for path, subdirs, files in os.walk(path_audio):
for filename in files:
if filename.endswith('.wav'):
list_wav.append(os.path.join(path, filename))
if filename.endswith('.csv'):
list_csv.append(os.path.join(path, filename))
list_wav = sorted(list_wav)
list_csv = sorted(list_csv)
list_wav.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
list_csv.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
list_wav = list_wav[2::5]
list_csv = list_csv[2::5]
for i in range(len(list_wav)):
onsets = np.loadtxt(list_csv[i], delimiter=',', usecols=0)
Classes = np.loadtxt(list_csv[i], delimiter=',', usecols=1, dtype=np.unicode_)
audio, fs = librosa.load(list_wav[i], sr=44100)
audio_ref = audio/np.max(abs(audio))
onsets_samples = onsets*fs
onsets_ref = onsets_samples.astype(int)
for j in range(len(num_specs)):
for w in range(len(frame_sizes)):
frame_size = frame_sizes[w]
num_spec = num_specs[j]
Spec_Matrix_All = np.zeros((1,num_spec,num_frames))
Classes_All = np.zeros(1)
Onset_Phonemes_Labels_All = np.zeros(1)
Nucleus_Phonemes_Labels_All = np.zeros(1)
Onset_Phonemes_Reduced_Labels_All = np.zeros(1)
Nucleus_Phonemes_Reduced_Labels_All = np.zeros(1)
for k in range(14):
Classes = np.loadtxt(list_csv[i], delimiter=',', usecols=1, dtype=np.unicode_)
Onset_Phonemes = np.loadtxt(list_csv[i], delimiter=',', usecols=2, dtype=np.unicode_)
Nucleus_Phonemes = np.loadtxt(list_csv[i], delimiter=',', usecols=3, dtype=np.unicode_)
Onset_Phonemes_Labels, Nucleus_Phonemes_Labels, Onset_Phonemes_Reduced_Labels, Nucleus_Phonemes_Reduced_Labels = Create_Phoneme_Labels(Onset_Phonemes, Nucleus_Phonemes)
kn = np.random.randint(0,2)
pt = np.random.uniform(low=-1.5, high=1.5, size=None)
st = np.random.uniform(low=0.8, high=1.2, size=None)
if kn==0:
audio = pitch_shift(audio_ref, fs, pt)
audio = time_stretch(audio, st)
onsets = onsets_ref/st
onsets = onsets.astype(int)
elif kn==1:
audio = time_stretch(audio_ref, st)
audio = pitch_shift(audio, fs, pt)
onsets = onsets_ref/st
onsets = onsets.astype(int)
audio = audio.astype(np.float32)
audio = augment_waveform(samples=audio, sample_rate=44100)
spec = librosa.feature.melspectrogram(np.concatenate((audio,np.zeros(1024))), sr=44100, n_fft=frame_size, hop_length=hop_size, n_mels=num_spec, power=1.0).T
#spec = np.abs(librosa.stft(audio, n_fft=frame_size, hop_length=hop_size, win_length=None, window='hann', center=True, dtype=None, pad_mode='reflect')[:frame_size//2])
if delta_bool:
delta = librosa.feature.delta(spec)
Dataset_Spec = np.concatenate((spec, delta), axis=1)
else:
Dataset_Spec = spec
Onsets = np.zeros(spec.shape[0])
location = np.floor(onsets/hop_size)
if (location.astype(int)[-1]<len(Onsets)):
Onsets[location.astype(int)] = 1
else:
Onsets[location.astype(int)[:-1]] = 1
num_onsets = int(np.sum(Onsets))
if num_onsets!=len(Classes):
raise ValueError('num_onsets must equal len(Classes)')
Spec_Matrix = np.zeros((num_onsets,num_spec,num_frames))
L = len(Onsets)
count = 0
for n in range(L):
if Onsets[n]==1:
c = 1
while (n+c)<L-1 and Onsets[n+c]==0: # check the bound first to avoid indexing past the end
c += 1
Spec = Dataset_Spec[n:n+c]
if c<num_frames:
Spec = np.concatenate((Spec,np.zeros((num_frames-c,num_spec))))
elif c>=num_frames:
Spec = Spec[:num_frames]
Spec_Matrix[count] = np.flipud(Spec.T)
count += 1
for n in range(Spec_Matrix.shape[0]):
spec = Spec_Matrix[n]
spec = np.expand_dims(spec,-1)
spec = augment_spectrogram(spec)
spec = spec.reshape(spec.shape[0],spec.shape[1])
Spec_Matrix[n] = spec
Spec_Matrix_All = np.vstack((Spec_Matrix_All,Spec_Matrix))
Classes_All = np.concatenate((Classes_All,Classes))
Onset_Phonemes_Labels_All = np.concatenate((Onset_Phonemes_Labels_All,Onset_Phonemes_Labels))
Nucleus_Phonemes_Labels_All = np.concatenate((Nucleus_Phonemes_Labels_All,Nucleus_Phonemes_Labels))
Onset_Phonemes_Reduced_Labels_All = np.concatenate((Onset_Phonemes_Reduced_Labels_All,Onset_Phonemes_Reduced_Labels))
Nucleus_Phonemes_Reduced_Labels_All = np.concatenate((Nucleus_Phonemes_Reduced_Labels_All,Nucleus_Phonemes_Reduced_Labels))
list_num = [Spec_Matrix_All.shape[0],len(Classes_All),len(Onset_Phonemes_Labels_All),len(Nucleus_Phonemes_Labels_All),len(Onset_Phonemes_Reduced_Labels_All),len(Nucleus_Phonemes_Reduced_Labels_All)]
if list_num.count(list_num[0])!=len(list_num):
print(list_num)
print(list_wav[i])
Spec_Matrix_All = Spec_Matrix_All[1:]
Classes_All = Classes_All[1:]
Onset_Phonemes_Labels_All = Onset_Phonemes_Labels_All[1:]
Nucleus_Phonemes_Labels_All = Nucleus_Phonemes_Labels_All[1:]
Onset_Phonemes_Reduced_Labels_All = Onset_Phonemes_Reduced_Labels_All[1:]
Nucleus_Phonemes_Reduced_Labels_All = Nucleus_Phonemes_Reduced_Labels_All[1:]
np.save('data/interim/AVP/Dataset_Test_Aug_' + str(i).zfill(2), Spec_Matrix_All)
np.save('data/interim/AVP/Classes_Test_Aug_' + str(i).zfill(2), Classes_All)
np.save('data/interim/AVP/Syll_Onset_Test_Aug_' + str(i).zfill(2), Onset_Phonemes_Labels_All)
np.save('data/interim/AVP/Syll_Nucleus_Test_Aug_' + str(i).zfill(2), Nucleus_Phonemes_Labels_All)
np.save('data/interim/AVP/Syll_Onset_Reduced_Test_Aug_' + str(i).zfill(2), Onset_Phonemes_Reduced_Labels_All)
np.save('data/interim/AVP/Syll_Nucleus_Reduced_Test_Aug_' + str(i).zfill(2), Nucleus_Phonemes_Reduced_Labels_All)
# Create Train Dataset
print('AVP Train')
fs = 44100
path_audio = 'data/external/AVP_Dataset/Personal'
list_wav_all = []
list_csv_all = []
for path, subdirs, files in os.walk(path_audio):
for filename in files:
if filename.endswith('.wav'):
list_wav_all.append(os.path.join(path, filename))
if filename.endswith('.csv'):
list_csv_all.append(os.path.join(path, filename))
list_wav_all = sorted(list_wav_all)
list_csv_all = sorted(list_csv_all)
list_wav_all.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
list_csv_all.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
list_wav = list_wav_all[::5] + list_wav_all[1::5] + list_wav_all[3::5] + list_wav_all[4::5]
list_csv = list_csv_all[::5] + list_csv_all[1::5] + list_csv_all[3::5] + list_csv_all[4::5]
list_wav_all = sorted(list_wav)
list_csv_all = sorted(list_csv)
list_wav_all.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
list_csv_all.sort(key = lambda f:int(''.join(filter(str.isdigit,f))))
for j in range(len(num_specs)):
for k in range(len(frame_sizes)):
frame_size = frame_sizes[k]
num_spec = num_specs[j]
for part in range(28):
Spec_Matrix_All = np.zeros((1,num_spec,num_frames))
Classes_All = np.zeros(1)
Onset_Phonemes_Labels_All = np.zeros(1)
Nucleus_Phonemes_Labels_All = np.zeros(1)
Onset_Phonemes_Reduced_Labels_All = np.zeros(1)
Nucleus_Phonemes_Reduced_Labels_All = np.zeros(1)
for i in range(4):
onsets = np.loadtxt(list_csv_all[4*part+i], delimiter=',', usecols=0)
Classes = np.loadtxt(list_csv_all[4*part+i], delimiter=',', usecols=1, dtype=np.unicode_)
audio, fs = librosa.load(list_wav_all[4*part+i], sr=44100)
audio_ref = audio/np.max(abs(audio))
onsets_samples = onsets*fs
onsets_ref = onsets_samples.astype(int)
for k in range(1):
Classes = np.loadtxt(list_csv_all[4*part+i], delimiter=',', usecols=1, dtype=np.unicode_)
Onset_Phonemes = np.loadtxt(list_csv_all[4*part+i], delimiter=',', usecols=2, dtype=np.unicode_)
Nucleus_Phonemes =
|
np.loadtxt(list_csv_all[4*part+i], delimiter=',', usecols=3, dtype=np.unicode_)
|
numpy.loadtxt
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 14:30:52 2016
@author: gawe
"""
# ========================================================================== #
# ========================================================================== #
# This section is to improve python compataibilty between py27 and py3
from __future__ import absolute_import, with_statement, division, print_function, unicode_literals
__metaclass__ = type
# ========================================================================== #
import numpy as _np
from scipy import linalg as _la
import matplotlib.mlab as _mlab
import matplotlib.pyplot as _plt
from pybaseutils.Struct import Struct
from pybaseutils.utils import detrend_mean, detrend_none, detrend_linear
from pybaseutils import utils as _ut
try:
from FFT.windows import windows
except:
from .windows import windows
# end try
# ========================================================================== #
# ========================================================================== #
def fft_pwelch(tvec, sigx, sigy, tbounds=None, Navr=None, windowoverlap=None,
windowfunction=None, useMLAB=None, plotit=None, verbose=None,
detrend_style=None, onesided=None, **kwargs):
"""
function [freq, Pxy, Pxx, Pyy, Cxy, phi_xy, info] =
fft_pwelch(tvec, sigx, sigy, tbounds, Navr, windowoverlap,
windowfunction, useMLAB, plotit, verbose, plottofig)
This function computes the spectra from an input and reference signal. It
detrends the input, calculates the cross- and auto-power spectra,
normalizes everything, and saves parameters necessary for future work
this function includes a trend removal!
# ------------------------------------------------------------------- #
inputs
tvec - [s], time-series of signal
sigx - [V], signal X, ntx x nsigs (ntx can be shorter than nt)
sigy - [V], signal Y, nt x nsigs
tbounds - [s], time-window for analysis, [tstart, tend]; def: [tvec[0], tvec[-1]]
Navr - [#], number of segments to split the input signals into
windowoverlap - [#], 0 to 1, fraction of segment overlap, def: 0
windowfunction - [string], type of window function, def: 'box'
The hamming, hanning, and kaiser window are also
supported, and feel free to add more
useMLAB - [Boolean], use CPSD and mscohere for FFT analysis, def:homebrew!
plotit - [Boolean], plot the linear amplitude spectra / coherence?
verbose - [Boolean], print to the screen
plottofig - [fig_handle], plot to this figure
outputs
freq - [Hz], frequency vector
Pxy - [V], Linear cross-power spectra of signal X and Y
Pxx - [V], Linear auto-power spectra of signal X
Pyy - [V], Linear auto-power spectra of signal Y
Cxy - [-], Coherence between signal X and signal Y
phi_xy - [rad], cross-phase between signal X and signal Y
info - [structure], contains all the calculations and extras
including - fftinfo.ENBW -> Convert back to power spectral densities by
Pxy = Pxy/ENBW [V^2/Hz]
+ much more
# ------------------------------------------------------------------- #
Written by GMW - Feb 7th, 2013 in Matlab
Revisions:
Feb 10th, 2016 - GMW - Ported to Python for use at W7-X
Mar 8th, 2016 - GMW - Extensive revisions:
- Made inputs work with column vectors for speed
- Added uncertainty analysis of the power spectra so that it
takes into account the complex part of the signal
- Added the mean_angle definition for averaging the phase with
uncertainty propagation
- Added the coh_var definition for propagating uncertainty to
the coherence
- Converted the main module into a class that calls this function
Major revisions compiled July, 29th, 2019:
multi-channel support
different length inputs signals: resampling and tiling windows to match length (nT-crossphase stuff)
upgraded window input selection by automatically selecting the recommended overlap percentage
added option to input minimum resolvable frequency instead of number of windows.
added normalized auto- and cross-correlation calculations (getting this right is a pain)
"""
calcNavr = (Navr is None) # always defined; avoids a NameError later when Navr is supplied
if windowfunction is None:
# windowfunction = 'SFT3F' # very low overlap correlation, wider peak to get lower frequencies
# windowfunction = 'SFT3M' # very low overlap correlation, low sidebands
windowfunction = 'Hanning' # moderate overlap correlation, perfect amplitude flatness at optimum overlap
if windowoverlap is None:
# get recommended overlap by function name
windowoverlap = windows(windowfunction, verbose=False)
if useMLAB is None:
useMLAB=False
if plotit is None:
plotit=True
if verbose is None:
verbose=False
if detrend_style is None:
detrend_style=1
if tbounds is None:
tbounds = [tvec[0], tvec[-1]]
# end if
if onesided is None:
onesided = True
if _np.iscomplexobj(sigx) or _np.iscomplexobj(sigy):
onesided = False
# end if
# end if
# Matlab returns the power spectral density in [V^2/Hz], and doesn't
# normalize its FFT by the number of samples or the power in the windowing
# function. These can be handled by controlling the inputs and normalizing
# the output:
# dt = 0.5*(tvec[2]-tvec[0]) #[s], time-step in time-series
# Fs = 1.0/dt #[Hz], sampling frequency
Fs = (len(tvec)-1)/(tvec[-1]-tvec[0])
# dt = 1.0/Fs
# ==================================================================== #
# ==================================================================== #
# Detrend the two signals to get FFT's
i0 = int( _np.floor( Fs*(tbounds[0]-tvec[0] ) ) )
i1 = int( _np.floor( 1+Fs*(tbounds[1]-tvec[0] ) ) )
# i0 = _np.where(tvec>tbounds[0])[0]
# if len(_np.atleast_1d(i0))==0: i0 = 0 # end if
# i1 = _np.where(tvec>tbounds[1])[0]
# if len(_np.atleast_1d(i0))==0: i0 = 0 # end if
nsig = _np.size( tvec[i0:i1] )
# Must know two of these inputs to determine third
# k = # windows, M = Length of data to be segmented, L = length of segments,
# K = (M-NOVERLAP)/(L-NOVERLAP)
# Navr = (nsig-noverlap)/(nwins-noverlap)
# nwins = (nsig - noverlap)/Navr + noverlap
# noverlap = (nsig - nwins*Navr)/(1-Navr)
# noverlap = windowoverlap*nwins
# nwins = nsig/(Navr-Navr*windowoverlap + windowoverlap)
#
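# Illustrative numbers: nsig = 10000 samples with nwins = 2048 and 50 % overlap
# (noverlap = 1024) gives Navr = (10000-1024)/(2048-1024), i.e. roughly 8 averaged segments.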
# Integral number of periods in data set, need 2 to detect signal
# sigx = _np.atleast_2d(sigx) # multi-channel input only supported for sigy
sigy = _np.atleast_2d(sigy)
if _np.shape(sigy)[1] == len(tvec):
sigy = sigy.T
# end if
nch = _np.size(sigy, axis=1)
# ====================================================================== #
if _np.size(sigx, axis=0) != _np.size(sigy, axis=0):
nTmodel = True
if calcNavr:
nwins = _np.size(sigx, axis=0)
else:
nwins = fftanal._getNwins(nsig, Navr, windowoverlap)
# end if
else:
nTmodel = False
# override Navr if someone requested a period for each window, or a minimum frequency to resolve
if 'minFreq' in kwargs:
kwargs['tper'] = 2.0/kwargs['minFreq']
if 'tper' in kwargs :
nwins = int(Fs*kwargs['tper'])
else:
if Navr is None:
Navr = 8
# end if
calcNavr = False
nwins = fftanal._getNwins(nsig, Navr, windowoverlap)
# end if
# end if
# get the number of points to overlap based on unique data
noverlap = fftanal._getNoverlap(nwins, windowoverlap)
# Reflect the data in the first and last windows at the end-points
reflecting = False
if i0 == 0 and i1 == len(tvec):
reflecting = True
# sigx=_np.r_['0', sigx[nwins-1:0:-1],sigx,sigx[-1:-nwins:-1]]
# sigy=_np.r_['0',sigy[nwins-1:0:-1,:],sigy,sigy[-1:-nwins:-1,:]] # concatenate along axis 0
sigx = _np.concatenate((sigx[nwins-1:0:-1,...],sigx,sigx[-1:-nwins:-1,...]), axis=0)
sigy = _np.concatenate((sigy[nwins-1:0:-1,...],sigy,sigy[-1:-nwins:-1,...]), axis=0)
nsig = sigx.shape[0]
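# (Mirroring the record at both ends lets the first and last samples sit inside full-length
#  windows instead of being truncated when the whole time range is analysed.)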
# end if
# if necessary get the number of averaging windows
if calcNavr: # nTmodel or 'tper' in kwargs:
Navr = fftanal._getNavr(nsig, nwins, noverlap)
# Navr = int( (nsig-noverlap)/(nwins-noverlap) )
# end if
# ====================================================================== #
if nwins>=nsig:
Navr = 1
nwins = nsig
# endif
# nfft = max(2^12,2^nextpow2(nwins))
nfft = nwins
Nnyquist = fftanal._getNnyquist(nfft)
# Remember that since we are not dealing with infinite series, the lowest
# frequency we actually resolve is determined by the period of the window
# fhpf = 1.0/(nwins*dt) # everything below this should be set to zero (when background subtraction is applied)
# ==================================================================== #
# =================================================================== #
# Define windowing function for apodization
win, winparams = windows(windowfunction, nwins=nwins, verbose=verbose, msgout=True)
# Instantiate the information class that will be output
fftinfo = fftinfosc()
fftinfo.win = win
fftinfo.winparams = winparams
fftinfo.windowoverlap = windowoverlap
fftinfo.ibnds = [i0, i1] # time-segment
# Define normalization constants
fftinfo.S1 = fftanal._getS1(win)
fftinfo.S2 = fftanal._getS2(win)
# Normalized equivalent noise bandwidth
fftinfo.NENBW = fftanal._getNENBW(Nnyquist, fftinfo.S1, fftinfo.S2)
fftinfo.ENBW = fftanal._getENBW(Fs, fftinfo.S1, fftinfo.S2) # Effective noise bandwidth
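# (For reference: with S1 = sum(win) and S2 = sum(win**2), the effective noise bandwidth is
#  conventionally ENBW = Fs*S2/S1**2 [Hz]; the fftanal helpers above are assumed to implement
#  these standard definitions.)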
# ================================================================ #
detrend = fftanal._detrend_func(detrend_style=detrend_style)
# ================================================================ #
if useMLAB:
if onesided: # True is boolean 1
sides = 'onesided'
else:
sides = 'twosided'
# end if
# sides = 'twosided'
# Use MLAB for the power spectral density calculations
if verbose:
print('using matlab built-ins for spectra/coherence calculations')
# endif verbose
tx = tvec
if nTmodel:
# # Does not work very well. amplitude is all wrong, and coherence is very low
# sigx = _np.hstack((sigx, _np.zeros((nsig-len(sigx)+1,), dtype=sigx.dtype)))
# sigx = _np.tile(sigx, _np.size(sigy, axis=0)//len(sigx)+1)
# sigx = sigx[:len(tvec)]
while sigx.shape[0]<sigy.shape[0]:
# Wrap the data periodically
# sigx=_np.r_[sigx[nwins-1:0:-1],sigx,sigx[-1:-nwins:-1]]
sigx=_np.r_[sigx, sigx[-1:-nwins:-1]]
# end while
if sigx.shape[0]>sigy.shape[0]:
sigx = sigx[:sigy.shape[0]]
# end if
# end if
x_in = sigx[i0:i1]
y_in = sigy[i0:i1,:]
# Power spectral density (auto-power spectral density), and
# cross-power spectral density of signal 1 and signal 2,
# Pyy = Yfft.*conj(Yfft), and the linear amplitude spectrum, Lxx:
Pxx, freq = _mlab.csd(x_in, x_in, nfft, Fs=Fs, detrend=detrend,
window=win, noverlap=noverlap, sides=sides,
scale_by_freq=True)
Pyy = _np.zeros((nch, len(freq)), dtype=_np.float64)
Pxy = _np.zeros((nch, len(freq)), dtype=_np.complex128)
for ii in range(nch):
# [V^2/Hz], RMS power spectral density calculation
Pyy[ii,:], freq = _mlab.csd(y_in[:,ii], y_in[:,ii], nfft, Fs=Fs, detrend=detrend,
window=win, noverlap=noverlap, sides=sides,
scale_by_freq=True)
Pxy[ii,:], freq = _mlab.csd(x_in, y_in[:,ii], nfft, Fs=Fs, detrend=detrend,
window=win, noverlap=noverlap, sides=sides,
scale_by_freq=True)
# end if
# Get the coherence
# if (Navr==1):
# Cxy2 = _np.ones_like(Pxx)
# else:
# # returns mean squared coherence
# [Cxy2, freq] = _mlab.cohere(y_in, x_in, nfft, Fs, detrend=detrend,
# window=win, noverlap=noverlap, sides=sides,
# scale_by_freq=False)
# #endif
# Cxy = _np.sqrt(_np.abs(Cxy2))
if onesided:
# They also return the nyquist value
# freq = freq[:Nnyquist-1]
# Pxx = Pxx[:Nnyquist-1]
# Pyy = Pyy[:,:Nnyquist-1]
# Pxy = Pxy[:,:Nnyquist-1]
freq = freq[:Nnyquist]
Pxx = Pxx[:Nnyquist]
Pyy = Pyy[:,:Nnyquist]
Pxy = Pxy[:,:Nnyquist]
# end if
Pyy = Pyy.T # nfreq x nch
Pxy = Pxy.T
# ================================================================= #
else:
# Without Matlab: Welch's average periodogram method:
if verbose:
print('using home-brew functions for spectra/coherence calculations')
# endif verbose
# ============ #
# Pre-allocate
Pxx_seg = _np.zeros((Navr, nfft), dtype=_np.complex128)
Pyy_seg = _np.zeros((nch, Navr, nfft), dtype=_np.complex128)
Pxy_seg = _np.zeros((nch, Navr, nfft), dtype=_np.complex128)
Xfft = _np.zeros((Navr, nfft), dtype=_np.complex128)
Yfft = _np.zeros((nch, Navr, nfft), dtype=_np.complex128)
if nTmodel:
tx = tvec[:len(sigx)]
# assume that one of the signals is the length of 1 window
x_in = sigx # reference signal is the model Doppler signal
y_in = sigy[i0:i1,:] # noisy long signal is the model CECE signal
else:
tx = tvec
x_in = sigx[i0:i1]
y_in = sigy[i0:i1,:]
# end if
x_in = detrend(x_in, axis=0)
y_in = detrend(y_in, axis=0)
ist = _np.arange(Navr)*(nwins - noverlap)
ist = ist.astype(int)
# for gg in _np.arange(Navr):
for gg in range(Navr):
istart = ist[gg] # Starting point of this window
iend = istart+nwins # End point of this window
if nTmodel:
xtemp = _np.copy(x_in)
else:
xtemp = x_in[istart:iend]
# end if
ytemp = y_in[istart:iend,:]
# Windowed signal segment
# To get the most accurate spectrum, minimally detrend
xtemp = win*xtemp
ytemp = (_np.atleast_2d(win).T*_np.ones((1,nch), dtype=ytemp.dtype))*ytemp
# xtemp = win*detrend(xtemp, axis=0)
# ytemp = (_np.atleast_2d(win).T*_np.ones((1,nch), dtype=ytemp.dtype))*detrend(ytemp, axis=0)
# The FFT output from matlab isn't normalized:
# y_n = sum[ y_m.*exp( -2*pi*1i*(n/N)*m ) ]
# The inverse is normalized:
# y_m = (1/N)*sum[ y_n.*exp( +2*pi*1i*(n/N)*m ) ]
#
# Python normalizations are optional, pick it to match MATLAB
Xfft[gg, :nfft] = _np.fft.fft(xtemp, n=nfft, axis=0) # defaults to last axis
Yfft[:, gg, :nfft] = _np.fft.fft(ytemp, n=nfft, axis=0).T # nch x Navr x nfft
#endfor loop over fft windows
#Auto- and cross-power spectra
Pxx_seg[:Navr, :nfft] = Xfft*_np.conj(Xfft)
Pyy_seg[:,:Navr, :nfft] = Yfft*_np.conj(Yfft)
Pxy_seg[:,:Navr, :nfft] = Yfft*(_np.ones((nch,1,1), dtype=Xfft.dtype)*_np.conj(Xfft))
# Get the frequency vector
freq = _np.fft.fftfreq(nfft, 1.0/Fs)
# freq = Fs*_np.arange(0.0, 1.0, 1.0/nfft)
# if (nfft%2):
# # freq = Fs*(0:1:1/(nfft+1))
# freq = Fs*_np.arange(0.0,1.0,1.0/(nfft+1))
# # end if nfft is odd
if onesided:
# freq = freq[:Nnyquist-1] # [Hz]
# Pxx_seg = Pxx_seg[:, :Nnyquist-1]
# Pyy_seg = Pyy_seg[:, :, :Nnyquist-1]
# Pxy_seg = Pxy_seg[:, :, :Nnyquist-1]
freq = freq[:Nnyquist] # [Hz]
Pxx_seg = Pxx_seg[:, :Nnyquist]
Pyy_seg = Pyy_seg[:, :, :Nnyquist]
Pxy_seg = Pxy_seg[:, :, :Nnyquist]
# All components but DC split their energy between positive +
# negative frequencies: One sided spectra,
Pxx_seg[:, 1:-1] = 2*Pxx_seg[:, 1:-1] # [V^2/Hz],
Pyy_seg[:, :, 1:-1] = 2*Pyy_seg[:, :, 1:-1] # [V^2/Hz],
Pxy_seg[:, :, 1:-1] = 2*Pxy_seg[:, :, 1:-1] # [V^2/Hz],
if nfft%2: # Odd
Pxx_seg[:, -1] = 2*Pxx_seg[:, -1]
Pyy_seg[:, :, -1] = 2*Pyy_seg[:, :, -1]
Pxy_seg[:, :, -1] = 2*Pxy_seg[:, :, -1]
# endif nfft is odd
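# (For even nfft the last retained bin is the Nyquist component, which, like DC, has no
#  negative-frequency twin and is therefore left un-doubled; only for odd nfft is it doubled.)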
else:
freq = _np.fft.fftshift(freq)
Pxx_seg = _np.fft.fftshift(Pxx_seg, axes=-1)
Pyy_seg = _np.fft.fftshift(Pyy_seg, axes=-1)
Pxy_seg = _np.fft.fftshift(Pxy_seg, axes=-1)
# end if
# Remove gain of the window function to yield the RMS Power spectrum
# in each segment (constant peak amplitude) ... doing this after cutting the number of points in half if one-sided
Pxx_seg = (1.0/(fftinfo.S1**2))*Pxx_seg # [Vrms^2]
Pyy_seg = (1.0/(fftinfo.S1**2))*Pyy_seg # [Vrms^2]
Pxy_seg = (1.0/(fftinfo.S1**2))*Pxy_seg # [Vrms^2]
# Compute the power spectral density from the RMS power spectrum
# (constant noise floor)
Pxx_seg = Pxx_seg/fftinfo.ENBW # [V^2/Hz]
Pyy_seg = Pyy_seg/fftinfo.ENBW # [V^2/Hz]
Pxy_seg = Pxy_seg/fftinfo.ENBW # [V^2/Hz]
# Average the different realizations: This is the output from cpsd.m
# RMS Power spectrum
Pxx = _np.mean(Pxx_seg, axis=0) # [V^2/Hz]
Pyy = _np.mean(Pyy_seg, axis=1).T # nfft x nch
Pxy = _np.mean(Pxy_seg, axis=1).T
# Estimate the variance in the power spectra
# fftinfo.varPxx = _np.var((Pxx_seg[:Navr, :Nnyquist]), axis=0)
# fftinfo.varPyy = _np.var((Pyy_seg[:Navr, :Nnyquist]), axis=0)
# fftinfo.varPxy = _np.var((Pxy_seg[:Navr, :Nnyquist]), axis=0)
# # use the RMS for the standard deviation
# fftinfo.varPxx = _np.mean(Pxx_seg**2.0, axis=0)
# fftinfo.varPyy = _np.mean(Pyy_seg**2.0, axis=0)
# fftinfo.varPxy = _np.mean(Pxy_seg**2.0, axis=0)
# fftinfo.varPxy = _np.var(_np.real(Pxy_seg), axis=0) + 1j*_np.var(_np.imag(Pxy_seg), axis=0)
# Save the cross-phase in each segment as well
phixy_seg = _np.angle(Pxy_seg) # [rad], Cross-phase of each segment
#[ phixy_seg[0:Navr,0:Nnyquist], varphi_seg[0:Navr,0:Nnyquist] ] = \
# varangle(Pxy_seg, fftinfo.varPxy)
# Right way to average cross-phase:
# mean and variance in cross-phase
varphi_seg = _np.zeros_like(phixy_seg)
# [phi_xy, fftinfo.varPhxy] = mean_angle(phixy_seg[0:Navr, :],
# varphi_seg[0:Navr,:], dim=0)
#
# # Now take the power and linear spectra
# Segmented data ... useful for making spectrograms
fftinfo.Pxx_seg = Pxx_seg
fftinfo.Pyy_seg = Pyy_seg
fftinfo.Pxy_seg = Pxy_seg
fftinfo.Xfft_seg = Xfft
fftinfo.Yfft_seg = Yfft
fftinfo.phixy_seg = phixy_seg
fftinfo.varphi_seg = varphi_seg
# ====================== #
# endif
# Calculate the mean-squared and complex coherence
# take the absolute value of Cxy to get the RMS coherence
# take the abs. value of Cxy2 and the sqrt to get the RMS coherence
Cxy, Cxy2 = Cxy_Cxy2(Pxx, Pyy, Pxy) # complex numbers returned
# ========================== #
# Uncertainty and phase part #
# ========================== #
# derived using error propagation from eq 23 for gamma^2 in
# <NAME>, Journal of Sound and Vibration 59(3), 405-421, 1978
fftinfo.varCxy = ((1.0-Cxy*_np.conjugate(Cxy))/_np.sqrt(2*Navr))**2.0
# fftinfo.varCxy = ((1.0-Cxy2)/_np.sqrt(2*Navr))**2.0
fftinfo.varCxy2 = 4.0*Cxy2*fftinfo.varCxy # d/dx x^2 = 2 *x ... var: (2*x)^2 * varx
# Estimate the variance in the power spectra: this requires building
# a distribution by varying the parameters used in the FFT, nwindows,
# nfft, windowfunction, etc. I don't do this right now
fftinfo.varPxx = (Pxx/_np.sqrt(Navr))**2.0
fftinfo.varPyy = (Pyy/_np.sqrt(Navr))**2.0
fftinfo.varPxy = (Pxy/_np.sqrt(Navr))**2.0
# fftinfo.varPxy = Pxx*Pyy*(1.0-Cxy)/Navr # this gives nice results ... similar to above as Cxy is a measure of shared power
# <NAME>, <NAME>, 17 056103, 2010
# Doesn't so far give a convincing answer...
# fftinfo.varPhxy = _np.zeros(Pxy.shape, dtype=_np.float64)
#fftinfo.varPhxy = (_np.sqrt(1-Cxy2)/_np.sqrt(2*Navr*Cxy))**2.0
# fftinfo.varPhxy = (_np.sqrt(1-_np.abs(Cxy*_np.conj(Cxy)))/_np.sqrt(2*Navr*_np.abs(Cxy)))**2.0
# fftinfo.varPhxy = (_np.sqrt(1.0-Cxy2))/_np.sqrt(2*Navr*_np.sqrt(Cxy2))**2.0
fftinfo.varPhxy = (_np.sqrt(1.0-_np.abs(Cxy2)))/_np.sqrt(2*Navr*_np.sqrt(_np.abs(Cxy2)))**2.0
# ========================== #
# Save the cross-phase as well
# phi_xy = _np.angle(Pxy)
phi_xy = _np.arctan2(Pxy.imag, Pxy.real)
# ========================== #
# Linear amplitude spectrum from the power spectral density
# RMS Linear amplitude spectrum (constant amplitude values)
fftinfo.Lxx = _np.sqrt(_np.abs(fftinfo.ENBW*Pxx)) # [V_rms]
fftinfo.Lyy = _np.sqrt(_np.abs(fftinfo.ENBW*Pyy)) # [V_rms]
fftinfo.Lxy = _np.sqrt(_np.abs(fftinfo.ENBW*Pxy)) # [V_rms]
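# (ENBW*P converts the power spectral density back to the RMS power spectrum before the
#  square root is taken, so Lxx/Lyy/Lxy are RMS amplitudes rather than densities.)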
if onesided:
# Rescale RMS values to Amplitude values (assumes a zero-mean sine-wave)
# Just the points that split their energy into negative frequencies
fftinfo.Lxx[1:-1] = _np.sqrt(2)*fftinfo.Lxx[1:-1] # [V],
fftinfo.Lyy[1:-1,:] = _np.sqrt(2)*fftinfo.Lyy[1:-1,:] # [V],
fftinfo.Lxy[1:-1,:] = _np.sqrt(2)*fftinfo.Lxy[1:-1,:] # [V],
if nfft%2: # Odd
fftinfo.Lxx[-1] = _np.sqrt(2)*fftinfo.Lxx[-1]
fftinfo.Lyy[-1,:] = _np.sqrt(2)*fftinfo.Lyy[-1,:]
fftinfo.Lxy[-1,:] = _np.sqrt(2)*fftinfo.Lxy[-1,:]
# endif nfft is odd
# ======================================================================= #
# Cross and auto-correlation from power spectra
fftinfo.Rxx = Pxx.copy()
fftinfo.Rxx[1:-1, ...] *= 0.5
if nfft%2:
fftinfo.Rxx[-1, ...] *= 0.5
fftinfo.Rxx = _np.fft.irfft(fftinfo.Rxx, n=nfft, axis=0)
fftinfo.Ryy = Pyy.copy()
fftinfo.Ryy[1:-1, ...] *= 0.5
if nfft%2:
fftinfo.Ryy[-1, ...] *= 0.5
fftinfo.Ryy = _np.fft.irfft(fftinfo.Ryy, n=nfft, axis=0)
fftinfo.Rxy = Pxy.copy()
fftinfo.Rxy[1:-1, ...] *= 0.5
if nfft%2:
fftinfo.Rxy[-1, ...] *= 0.5
fftinfo.Rxy = _np.fft.irfft(fftinfo.Rxy, n=nfft, axis=0)
fftinfo.iCxy = Cxy.copy()
fftinfo.iCxy = _np.fft.irfft(fftinfo.iCxy, n=nfft, axis=0)
# ======================================================================= #
else:
# ======================================================================= #
# Cross and auto-correlation from power spectra
# fftinfo.Rxy_seg = _np.fft.fftshift(_np.sqrt(nfft)*_np.fft.ifft(
# _np.fft.fftshift(Pxy_seg, axes=-1), n=nfft, axis=-1), axes=-1)
fftinfo.Rxx = _np.fft.ifft(_np.fft.ifftshift(Pxx, axes=0), n=nfft, axis=0)
fftinfo.Ryy = _np.fft.ifft(_np.fft.ifftshift(Pyy, axes=0), n=nfft, axis=0)
fftinfo.Rxy = _np.fft.ifft(_np.fft.ifftshift(Pxy, axes=0), n=nfft, axis=0)
fftinfo.iCxy = _np.fft.ifft(_np.fft.ifftshift(Cxy, axes=0), n=nfft, axis=0)
# fftinfo.iCxy = _np.fft.ifft(_np.fft.ifftshift(_np.sqrt(_np.abs(Cxy2)), axes=0), n=nfft, axis=0).real
# ======================================================================= #
# end if
fftinfo.Rxx *= _np.sqrt(nfft)
fftinfo.Ryy *= _np.sqrt(nfft)
fftinfo.Rxy *= _np.sqrt(nfft)
fftinfo.iCxy *= _np.sqrt(nfft)
# Calculate the normalized auto- and cross-correlations
fftinfo.Ex = fftinfo.Rxx[0, ...].copy() # power in the x-spectrum, int( |u(f)|^2, df)
fftinfo.Ey = fftinfo.Ryy[0, ...].copy() # power in the y-spectrum, int( |v(f)|^2, df)
# fftinfo.Rxx /= fftinfo.Ex
# fftinfo.Ryy /= fftinfo.Ey
fftinfo.corrcoef = fftinfo.Rxy/_np.sqrt(_np.ones((nfft,1), dtype=fftinfo.Rxy.dtype)*(fftinfo.Ex*fftinfo.Ey))
fftinfo.Rxx = _np.fft.fftshift(fftinfo.Rxx, axes=0)
fftinfo.Ryy = _np.fft.fftshift(fftinfo.Ryy, axes=0)
fftinfo.Rxy = _np.fft.fftshift(fftinfo.Rxy, axes=0)
fftinfo.iCxy = _np.fft.fftshift(fftinfo.iCxy, axes=0)
fftinfo.corrcoef = _np.fft.fftshift(fftinfo.corrcoef, axes=0)
fftinfo.lags = (_np.asarray(range(1, nfft+1), dtype=int)-Nnyquist)/Fs
# ======================================================================= #
fftinfo.varLxx = (fftinfo.Lxx**2)*(fftinfo.varPxx/_np.abs(Pxx)**2)
fftinfo.varLyy = (fftinfo.Lyy**2)*(fftinfo.varPyy/_np.abs(Pyy)**2)
fftinfo.varLxy = (fftinfo.Lxy**2)*(fftinfo.varPxy/_np.abs(Pxy)**2)
if nch == 1:
Pyy = Pyy.flatten()
Pxy = Pxy.flatten()
Cxy = Cxy.flatten()
Cxy2 = Cxy2.flatten()
phi_xy = phi_xy.flatten()
fftinfo.lags = fftinfo.lags.flatten()
fftinfo.Rxx = fftinfo.Rxx.flatten()
fftinfo.Ryy = fftinfo.Ryy.flatten()
fftinfo.Rxy = fftinfo.Rxy.flatten()
fftinfo.corrcoef = fftinfo.corrcoef.flatten()
fftinfo.iCxy = fftinfo.iCxy.flatten()
fftinfo.Lxx = fftinfo.Lxx.flatten()
fftinfo.Lyy = fftinfo.Lyy.flatten()
fftinfo.Lxy = fftinfo.Lxy.flatten()
fftinfo.varLxx = fftinfo.varLxx.flatten()
fftinfo.varLyy = fftinfo.varLyy.flatten()
fftinfo.varLxy = fftinfo.varLxy.flatten()
fftinfo.varCxy = fftinfo.varCxy.flatten()
fftinfo.varCxy2 = fftinfo.varCxy2.flatten()
fftinfo.varPxx = fftinfo.varPxx.flatten()
fftinfo.varPyy = fftinfo.varPyy.flatten()
fftinfo.varPxy = fftinfo.varPxy.flatten()
fftinfo.varPhxy = fftinfo.varPhxy.flatten()
# end if
# Store everything
fftinfo.nch = nch
fftinfo.Fs = Fs
fftinfo.Navr = Navr
fftinfo.nwins = nwins
fftinfo.noverlap = noverlap
fftinfo.overlap = windowoverlap
fftinfo.window = windowfunction
fftinfo.minFreq = 2.0*Fs/nwins
fftinfo.freq = freq.copy()
fftinfo.Pxx = Pxx.copy()
fftinfo.Pyy = Pyy.copy()
fftinfo.Pxy = Pxy.copy()
fftinfo.Cxy = Cxy.copy()
fftinfo.Cxy2 = Cxy2.copy()
fftinfo.phi_xy = phi_xy.copy()
# ==================================================================== #
# Plot the comparisons
if plotit:
if reflecting:
# sigx = sigx[(nwins//2-1):-nwins//2]
# sigy = sigy[(nwins//2-1):-nwins//2,:]
sigx = sigx[(nwins-1):-nwins+1]
sigy = sigy[(nwins-1):-nwins+1,:]
# end if
afont = {'fontname':'Arial','fontsize':14}
# plot the signals
if 'hfigSig' in kwargs:
hfig1 = _plt.figure(kwargs['hfigSig'])
else:
hfig1 = _plt.figure()
if 'axSig' in kwargs:
_ax = kwargs['axSig']
else:
_ax = _plt.subplot(1,1,1)
if _np.iscomplexobj(sigx) and _np.iscomplexobj(sigy):
_ax.plot(tx, _np.real(sigx), 'b-')
_ax.plot(tx, _np.imag(sigx), 'b--')
_ax.plot(tvec, _np.real(sigy), 'r-')
_ax.plot(tvec, _np.imag(sigy), 'r--')
elif _np.iscomplexobj(sigx) and not _np.iscomplexobj(sigy):
_ax.plot(tvec, sigy, 'r-')
_ax.plot(tx, _np.real(sigx), 'b-')
_ax.plot(tx, _np.imag(sigx), 'b--')
elif _np.iscomplexobj(sigy) and not _np.iscomplexobj(sigx):
_ax.plot(tx, sigx, 'b-')
_ax.plot(tvec, _np.real(sigy), 'r-')
_ax.plot(tvec, _np.imag(sigy), 'r--')
else:
_ax.plot(tx, sigx, 'b-', tvec, sigy, 'r-')
# end if
_ax.set_title('Input Signals', **afont)
_ax.set_xlabel('t[s]', **afont)
_ax.set_ylabel('sig_x,sig_y[V]', **afont)
if tbounds is not None:
_plt.axvline(x=tbounds[0], color='k')
_plt.axvline(x=tbounds[1], color='k')
# end if
#The correlations and spectra
if 'hfigSpec' in kwargs:
hfig2 = _plt.figure(kwargs['hfigSpec'])
else:
hfig2 = _plt.figure()
if 'axSpec' in kwargs:
_ax1 = kwargs['axSpec'][0]
else:
_ax1 = _plt.subplot(2,2,1)
# _plt.plot(1e3*fftinfo.lags, fftinfo.iCxy, 'r-')
_ax1.plot(1e3*fftinfo.lags, fftinfo.corrcoef, 'b-')
_plt.ylabel(r'$\rho$', **afont)
_plt.xlabel('lags [ms]', **afont)
        _plt.title('Cross-correlation')
if 'axSpec' in kwargs:
_ax2 = kwargs['axSpec'][1]
else:
_ax2 = _plt.subplot(2,2,2)
# frq = 1e-3*freq; xlbl = 'f[KHz]'
frq = freq; xlbl = 'f[Hz]'
if 0:
# _ax2.plot(frq,_np.abs(fftinfo.Lxx), 'b-'); ylbl = r'L$_{ij}$ [I.U.]'
# _ax2.plot(frq,_np.abs(fftinfo.Lyy), 'r-'); tlbl = 'Linear Amplitude Spectra'
# _ax2.plot(frq,_np.abs(fftinfo.Lxy), 'k-');
_ax2.plot(frq,_np.abs(Pxx), 'b-'); ylbl = r'P$_{ij}$ [I.U.$^2$/Hz]'
_ax2.plot(frq,_np.abs(Pyy), 'r-'); tlbl = 'Power Spectra'
_ax2.plot(frq,_np.abs(Pxy), 'k-');
if onesided:
_ax2.set_xlim(0,1.01*frq[-1])
else:
_ax2.set_xlim(-1.01*frq[-1],1.01*frq[-1])
# end if
elif onesided:
_ax2.loglog(frq, _np.abs(Pxx), 'b-');
_ax2.loglog(frq, _np.abs(Pyy), 'r-'); ylbl = r'P$_{ij}$ [dB/Hz]'
_ax2.loglog(frq, _np.abs(Pxy), 'k-'); tlbl = 'Power Spectra'
xlims = _ax2.get_xlim()
_ax2.set_xlim(xlims[0], 1.01*frq[-1])
else:
_ax2.semilogy(frq, _np.abs(Pxx), 'b-'); ylbl = r'P$_{ij}$ [dB/Hz]'
_ax2.semilogy(frq, _np.abs(Pyy), 'r-'); tlbl = 'Power Spectra'
_ax2.semilogy(frq, _np.abs(Pxy), 'k-');
_ax2.set_xlim(-1.01*frq[-1],1.01*frq[-1])
# end if
_ax2.set_title(tlbl, **afont)
        _ax2.set_ylabel(ylbl, **afont)
_ax2.set_xlabel(xlbl, **afont)
if 'axSpec' in kwargs:
_ax3 = kwargs['axSpec'][2]
else:
_ax3 = _plt.subplot(2, 2, 3, sharex=_ax2)
# _ax3.plot(frq, _np.sqrt(_np.abs(Cxy2)), 'k-')
# _ax3.plot(frq, _np.abs(Cxy).real, 'k-')
# _plt.axhline(y=1.0/_np.sqrt(Navr), color='k')
# _ax3.set_title('Coherence', **afont)
# _ax3.set_ylabel(r'C$_{xy}$', **afont)
# _ax3.set_ylabel(r'$|\gamma|$', **afont)
_ax3.plot(frq, _np.abs(Cxy2), 'k-')
_plt.axhline(y=1.0/Navr, color='k')
_ax3.set_title('Mean-Squared Coherence', **afont)
# _ax3.set_ylabel(r'C$_{xy}^2$', **afont)
_ax3.set_ylabel(r'$\gamma^2$', **afont)
_ax3.set_xlabel(xlbl, **afont)
if 'axSpec' in kwargs:
_ax4 = kwargs['axSpec'][3]
else:
_ax4 = _plt.subplot(2, 2, 4, sharex=_ax2)
_ax4.plot(frq, phi_xy, 'k-')
_ax4.set_title('Cross-Phase', **afont)
_ax4.set_ylabel(r'$\phi_{xy}$', **afont)
_ax4.set_xlabel(xlbl, **afont)
_plt.tight_layout()
# _plt.subplots_adjust(top=0.85)
# if windowoverlap>0:
# _plt.suptitle('Analysis using %i overlapping %s windows\n%s'%(Navr,winparams[0],winparams[1]))
# else:
# _plt.suptitle('Analysis using %i non-overlapping %s windows\n%s'%(Navr,winparams[0],winparams[1]))
# # end if
_plt.draw()
# _plt.show()
fftinfo.hfig1 = hfig1
fftinfo.hfig2 = hfig2
fftinfo.axSig = _ax
fftinfo.ax = [__ax for __ax in [_ax1, _ax2, _ax3, _ax4]]
# endif plotit
return freq, Pxy, Pxx, Pyy, Cxy, phi_xy, fftinfo
# end fft_pwelch
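
# Usage sketch for fft_pwelch (illustrative only): the test signal, sampling
# rate, and keyword choices below are assumptions made for this example and
# are not values used elsewhere in this module.
def _example_fft_pwelch():
    """
    Cross-spectral analysis of two noisy 1 kHz test tones with a 0.5 rad
    relative phase, averaged over 8 half-overlapping Hanning windows.
    """
    Fs = 10.0e3                                    # [Hz], sampling frequency
    tt = _np.arange(0.0, 1.0, 1.0/Fs)              # [s], time base
    sigx = _np.sin(2.0*_np.pi*1.0e3*tt) + 0.1*_np.random.standard_normal(len(tt))
    sigy = _np.sin(2.0*_np.pi*1.0e3*tt + 0.5) + 0.1*_np.random.standard_normal(len(tt))
    freq, Pxy, Pxx, Pyy, Cxy, phi_xy, fftinfo = fft_pwelch(
        tt, sigx, sigy, [tt[0], tt[-1]], Navr=8, windowoverlap=0.5,
        windowfunction='Hanning', plotit=False, verbose=False)
    return freq, Pxy, Pxx, Pyy, Cxy, phi_xy, fftinfo
# end def _example_fft_pwelch
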
# ========================================================================== #
class fftinfosc(Struct):
def __init__(self):
self.S1 = _np.array( [], dtype=_np.float64)
self.S2 = _np.array( [], dtype=_np.float64)
self.NENBW = _np.array( [], dtype=_np.float64)
self.ENBW = _np.array( [], dtype=_np.float64)
self.freq = _np.array( [], dtype=_np.float64)
self.Pxx = _np.array( [], dtype = _np.complex128 )
self.Pyy = _np.array( [], dtype = _np.complex128 )
self.Pxy = _np.array( [], dtype = _np.complex128 )
self.Cxy = _np.array( [], dtype=_np.complex128)
self.varcoh = _np.array( [], dtype=_np.complex128)
self.phi_xy = _np.array( [], dtype=_np.float64)
self.varphi = _np.array( [], dtype=_np.float64)
self.Lxx = _np.array( [], dtype = _np.complex128 )
self.Lyy = _np.array( [], dtype = _np.complex128 )
self.Lxy = _np.array( [], dtype = _np.complex128 )
self.varLxx = _np.array( [], dtype = _np.complex128 )
self.varLyy = _np.array( [], dtype = _np.complex128 )
self.varLxy = _np.array( [], dtype = _np.complex128 )
#Segment data
self.Pxx_seg = _np.array( [], dtype = _np.complex128 )
self.Pyy_seg = _np.array( [], dtype = _np.complex128 )
self.Pxy_seg = _np.array( [], dtype = _np.complex128 )
self.Xfft_seg = _np.array( [], dtype = _np.complex128 )
self.Yfft_seg = _np.array( [], dtype = _np.complex128 )
# end class
# =========================================================================== #
# =========================================================================== #
def integratespectra(freq, Pxy, Pxx, Pyy, frange, varPxy=None, varPxx=None, varPyy=None):
"""
function [Pxy_i, Pxx_i, Pyy_i, Cxy_i, ph_i, info] =
integrate_spectrum(freq, Pxy, Pxx, Pyy, frange, varPxy, varPxx, varPyy)
This is a simple function that integrates a power spectrum over a specified
frequency range. It propagates the errors from spectra variances.
Required inputs:
freq - [Hz], frequency vector
Pxy - [Complex], cross-power spectrum between signals 1 and 2
Pxx - [Real (or Complex)], auto-power of signal 1
Pyy - [Real (or Complex)], auto-power of signal 2
frange - [lower frequency, upper frequency] - [Hz], frequency range to
integrate over
Optional inputs:
varPxy - [Complex], variance in cross-power spectrum between signals 1 and 2
varPxx - [Real (or Complex)], variance in auto-power of signal 1
varPyy - [Real (or Complex)], variance in auto-power of signal 2
Outputs:
Pxy_i - integrated cross-power
Pxx_i - integrated auto-power of signal 1
Pyy_i - integrated auto-power of signal 2
Cxy_i - coherence between signal 1 and signal 2 in frequency range
determined by frange
ph_i - cross-phase between signal 1 and signal 2 in frequency range
determined by frange
info - Structure containing propagated variances
requires:
trapz_var - integrates using a trapezoidal rule and propagates uncertainty
varcoh - calculates coherence and propagates uncertainty
varphi - calculates angle and propagates uncertainty
"""
    if varPyy is None: varPyy = _np.zeros_like(Pyy) # end if
    if varPxx is None: varPxx = _np.zeros_like(Pxx) # end if
    if varPxy is None: varPxy = _np.zeros_like(Pxy) # end if
# Integrate over the frequency range specified
# ifl = find( freq>frange(1), 1, 'first')-1
# ifh = find( freq>frange(2), 1, 'first')-1
# Pxy_i = trapz(freq(ifl:ifh), Pxy(ifl:ifh))
# Pxx_i = trapz(freq(ifl:ifh), Pxx(ifl:ifh))
# Pyy_i = trapz(freq(ifl:ifh), Pyy(ifl:ifh))
Pxy = _ut.reshapech(Pxy)
varPxy = _ut.reshapech(varPxy)
Pxx = _ut.reshapech(Pxx)
varPxx = _ut.reshapech(varPxx)
Pyy = _ut.reshapech(Pyy)
varPyy = _ut.reshapech(varPyy)
inds = _np.where( (freq>=frange[0])*(freq<=frange[1]) )[0]
[Pxy_real, varPxy_real, _, _] = \
_ut.trapz_var(freq[inds], _np.real(Pxy[inds,:]), None, _np.real(varPxy[inds,:]), dim=0)
[Pxy_imag, varPxy_imag, _, _] = \
_ut.trapz_var(freq[inds], _np.imag(Pxy[inds,:]), None, _np.imag(varPxy[inds,:]), dim=0)
Pxy_i = Pxy_real + 1j*Pxy_imag
varPxy_i = varPxy_real + 1j*varPxy_imag
[Pxx_i, varPxx_i, _, _] = \
_ut.trapz_var(freq[inds], Pxx[inds,:], None, varPxx[inds,:], dim=0)
[Pyy_i, varPyy_i, _, _] = \
_ut.trapz_var(freq[inds], Pyy[inds,:], None, varPyy[inds,:], dim=0)
# Calculate coherence from integrated peak
# Cxy_i = Pxy_i.*(Pxy_i').'./(Pxx_i.*Pyy_i); %Mean-squared Coherence between the two signals
# Cxy_i = sqrt( abs( Cxy_i ) ); % Coherence
meansquared = 0 #Return the coherence, not the mean-squared coherence
[Cxy_i, varCxy_i] = varcoh(Pxy_i, varPxy_i, Pxx_i, varPxx_i, Pyy_i, varPyy_i, meansquared)
# Calculate cross-phase from integrated peak
# ph_i = atan( Pxy_imag./Pxy_real ) # [rad], Cross-phase
angle_range = _np.pi
[ph_i, varph_i] = varphi(Pxy_real, Pxy_imag, varPxy_real, varPxy_imag, angle_range)
# Store it all for outputting
info = Struct()
info.frange = _np.asarray([frange[0], frange[1]])
info.ifrange = inds
info.Pxy_i = Pxy_i
info.varPxy_i = varPxy_i
info.Pxx_i = Pxx_i
info.varPxx_i = varPxx_i
info.Pyy_i = Pyy_i
info.varPyy_i = varPyy_i
info.angle_range = angle_range
info.ph_i = ph_i
info.varph_i = varph_i
info.meansquared = meansquared
info.Cxy_i = Cxy_i
info.varCxy_i = varCxy_i
# Cross-power weighted average frequency - (center of gravity)
info.fweighted = _np.dot(freq[inds].reshape(len(inds),1), _np.ones((1,_np.size(Pxy,axis=1)), dtype=float))
info.fweighted = _np.trapz( info.fweighted*_np.abs(Pxy[inds,:]))
info.fweighted /= _np.trapz(_np.abs(Pxy[inds,:]))
return Pxy_i, Pxx_i, Pyy_i, Cxy_i, ph_i, info
# end def integratespectra
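
# Usage sketch for integratespectra (illustrative only): the frequency band is
# an assumption chosen to bracket the 1 kHz tone of the fft_pwelch example above.
def _example_integratespectra():
    """
    Integrate the Welch-averaged spectra from _example_fft_pwelch over a band
    around the test tone and return the band-integrated powers, coherence,
    cross-phase, and the propagated uncertainties (in info).
    """
    freq, Pxy, Pxx, Pyy, Cxy, phi_xy, fftinfo = _example_fft_pwelch()
    frange = [0.9e3, 1.1e3]                        # [Hz], band around the test tone
    Pxy_i, Pxx_i, Pyy_i, Cxy_i, ph_i, info = integratespectra(
        freq, Pxy, Pxx, Pyy, frange, fftinfo.varPxy, fftinfo.varPxx, fftinfo.varPyy)
    return Pxy_i, Pxx_i, Pyy_i, Cxy_i, ph_i, info
# end def _example_integratespectra
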
def getNpeaks(Npeaks, tvec, sigx, sigy, **kwargs):
kwargs.setdefault('tbounds',None)
kwargs.setdefault('Navr', None)
kwargs.setdefault('windowoverlap', None)
kwargs.setdefault('windowfunction', None)
kwargs.setdefault('useMLAB', None)
kwargs.setdefault('plotit', None)
kwargs.setdefault('verbose', None)
kwargs.setdefault('detrend_style', None)
kwargs.setdefault('onesided', True)
fmin = kwargs.pop('fmin', None)
fmax = kwargs.pop('fmax', None)
minsep = kwargs.pop('minsep', 6)
freq, Pxy, Pxx, Pyy, Cxy, phi_xy, fftinfo = fft_pwelch(tvec, sigx, sigy, **kwargs)
Lxx = fftinfo.Lxx
Lyy = fftinfo.Lyy
Lxy = fftinfo.Lxy
# fmin = 0.0 if fmin is None else fmin
# fmax = freq[-1] if fmax is None else fmax
# iff = _np.ones((len(freq),), dtype=bool)
# iff[(freq<=fmin)*(freq>=fmax)] = False
# freq = freq[iff]
# Lyy = Lyy[iff]
# phi_xy = phi_xy[iff]
#
# threshold = kwargs.pop('threshold', -1)
## mph = kwargs.pop('mph', None)
# mph = kwargs.pop('mph', 0.5*(_np.nanmax(sigy)-_np.nanmin(sigy))/(20*Npeaks))
# ind = _ut.detect_peaks(Lyy, mpd=int(10*minsep/(freq[10]-freq[0])), mph=mph, threshold=threshold, show=True)
#
# out = []
# for ii in range(Npeaks):
# out.append([ Lyy[ind[ii]], freq[ind[ii]], phi_xy[ind[ii]] ])
# # end for
# build a boolean index array and replace peaks with false (+- an equivalent noise bandwidth)
nfreq = len(freq)
ENBW = fftinfo.ENBW # equiv. noise bandwidth
ENBW = max((ENBW, minsep))
iff = _np.ones((nfreq,), dtype=bool)
irem = int(2*nfreq*ENBW/(freq[-1]-freq[0]))
# Remove frequencies that are outside of the selected range
fmin = 0.0 if fmin is None else fmin
fmax = freq[-1] if fmax is None else fmax
iff[(freq<=fmin)*(freq>=fmax)] = False
freq = freq[iff]
nfreq = len(freq)
Lxx = Lxx[iff]
Lyy = Lyy[iff]
Lxy = Lxy[iff]
phi_xy = phi_xy[iff]
iff = iff[iff]
out = []
for ii in range(Npeaks):
# Find the maximum peak in the cross-power spectrum
imax = _np.argmax(Lxy)
# Use the amplitude from the linear amplitude signal spectrum
Ai = _np.copy(Lyy[imax])
# freqency and phase from the big calculation
fi = _np.copy(freq[imax])
pi = _np.copy(phi_xy[imax])
# Store the amplitude, frequency, and phase for output
out.append([Ai, fi, pi])
# Remove frequencies from the calculation that are around the current
        # peak +- an equivalent noise bandwidth
if (imax-irem//2>=0) and (imax+irem//2<nfreq):
iff[imax-irem//2:imax+irem//2] = False
elif (imax+irem//2<nfreq):
iff[:imax+irem//2] = False
        elif (imax-irem//2>=0):
            # peak is within one noise bandwidth of the upper frequency edge
            iff[imax-irem//2:] = False
# end if
freq = freq[iff]
nfreq = len(freq)
Lxx = Lxx[iff]
Lyy = Lyy[iff]
Lxy = Lxy[iff]
phi_xy = phi_xy[iff]
iff = iff[iff]
# end for
return tuple(out)
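
# Usage sketch for getNpeaks (illustrative only): the tone frequencies,
# amplitudes, and noise level are assumptions made for this example.
def _example_getNpeaks():
    """
    Pick out the two strongest coherent peaks of a two-tone test signal.
    Each returned entry is [amplitude (from Lyy), frequency [Hz], cross-phase [rad]].
    """
    Fs = 10.0e3
    tt = _np.arange(0.0, 1.0, 1.0/Fs)
    sigx = (_np.sin(2.0*_np.pi*1.0e3*tt) + 0.5*_np.sin(2.0*_np.pi*2.5e3*tt)
            + 0.1*_np.random.standard_normal(len(tt)))
    sigy = (_np.sin(2.0*_np.pi*1.0e3*tt + 0.3) + 0.5*_np.sin(2.0*_np.pi*2.5e3*tt + 0.7)
            + 0.1*_np.random.standard_normal(len(tt)))
    return getNpeaks(2, tt, sigx, sigy, tbounds=[tt[0], tt[-1]], Navr=8,
                     windowoverlap=0.5, windowfunction='Hanning', plotit=False)
# end def _example_getNpeaks
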
# =========================================================================== #
# =========================================================================== #
"""
Functions to extend the usage of the matplotlib "mlab" functions
"""
def fft_pmlab(sig1,sig2,dt,plotit=False):
#nfft=2**_mlab.nextpow2(_np.length(sig1))
nfft = _np.size(sig1)
(ps1, ff) = _mlab.psd(sig1, NFFT=nfft, Fs=1./dt, detrend=_mlab.detrend_mean, \
sides='onesided', noverlap=0, scale_by_freq=True )
(ps2, ff) = _mlab.psd(sig2, NFFT=nfft, Fs=1./dt, detrend=_mlab.detrend_mean, \
sides='onesided', noverlap=0, scale_by_freq=True )
#(p12, ff) = mlab.csd(sig1, sig2, NFFT=sig1.len, Fs=1./dt,sides='default', scale_by_freq=False)
(p12, ff) = _mlab.csd(sig1, sig2, NFFT=nfft, Fs=1./dt, detrend=_mlab.detrend_mean, \
sides='onesided', noverlap=0, scale_by_freq=True )
if plotit:
_plt.figure(num='Power Spectral Density')
_plt.plot(ff*1e-9, _np.abs(ps1), 'b-')
_plt.plot(ff*1e-9, _np.abs(ps2), 'g-')
_plt.plot(ff*1e-9, _np.abs(p12), 'r-')
_plt.xlabel('freq [GHz]')
_plt.ylabel('PSD')
_plt.show()
#end plotit
return ff, ps1, ps2, p12
def coh(x,y,fs,nfft=2048,fmin=0.0, fmax=500e3, detrend='mean', ov=0.67):
"""
    Calculate the coherence (square root of the mean-squared coherence) of the
    data and return it between a minimum and maximum frequency
"""
# print('using nfft=%i\n')%(nfft)
# Cxy, F = _mlab.cohere(x,y,NFFT=nfft,Fs=fs,detrend=detrend,pad_to=None,noverlap=int(_np.floor(nfft*ov)),window=_mlab.window_hanning)
# ind=_np.where((_np.abs(F)<=fmax) & (_np.abs(F)>=fmin))
window=_mlab.window_hanning
noverlap=int(ov*nfft)
pad_to=None
sides='default'
scale_by_freq=None
Pxx, F = _mlab.psd(x, nfft, fs, detrend=detrend, window=window, noverlap=noverlap,
pad_to=pad_to, sides=sides, scale_by_freq=scale_by_freq)
Pyy, F = _mlab.psd(y, nfft, fs, detrend=detrend, window=window, noverlap=noverlap,
pad_to=pad_to, sides=sides, scale_by_freq=scale_by_freq)
Pxy, F = _mlab.csd(x, y, nfft, fs, detrend=detrend, window=window, noverlap=noverlap,
pad_to=pad_to, sides=sides, scale_by_freq=scale_by_freq)
Cxy2 = _np.divide(_np.absolute(Pxy)**2, Pxx*Pyy)
Cxy2.shape = (len(F),)
ind=_np.where((F<=fmax) & (F>=fmin))
co=Cxy2[ind]
fo=F[ind]
# return co,fo
return _np.sqrt(co),fo
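
# Usage sketch for coh (illustrative only): two noisy copies of the same tone;
# the sampling rate and nfft below are assumptions made for this example.
def _example_coh():
    """
    Coherence of two noisy realisations of a common 1 kHz tone, returned
    below 2 kHz.
    """
    Fs = 10.0e3
    tt = _np.arange(0.0, 1.0, 1.0/Fs)
    x = _np.sin(2.0*_np.pi*1.0e3*tt) + 0.1*_np.random.standard_normal(len(tt))
    y = _np.sin(2.0*_np.pi*1.0e3*tt) + 0.1*_np.random.standard_normal(len(tt))
    co, fo = coh(x, y, Fs, nfft=1024, fmin=0.0, fmax=2.0e3)
    return co, fo
# end def _example_coh
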
def coh2(x,y,fs,nfft=4096, fmin=0, fmax=500e3, detrend='none', peak_threshold=None):
"""
Calculate mean-squared coherence, cross-phase, and auto-power spectra
(w.r.t x) of data and return it below a maximum frequency
"""
fxx, f = _mlab.csd(x,x,NFFT=nfft,Fs=fs,noverlap=nfft/2,window=_mlab.window_hanning,scale_by_freq=True)
fyy, f = _mlab.csd(y,y,NFFT=nfft,Fs=fs,noverlap=nfft/2,window=_mlab.window_hanning,scale_by_freq=True)
fxy, f = _mlab.csd(x,y,NFFT=nfft,Fs=fs,noverlap=nfft/2,window=_mlab.window_hanning,scale_by_freq=True)
Kxy = _np.real(fxy)
Qxy = _np.imag(fxy)
COH = _np.array([_np.abs(fxy[i]*_np.conj(fxy[i]))/(fxx[i]*fyy[i]) for i in range(len(f))])
PHA = _np.array([_np.arctan2(Qxy[i],Kxy[i]) for i in range(len(f))])
PSD = _np.array(_np.abs(fxx))
ind=_np.where(_np.abs(f)<=fmax)
co=COH[ind]
fo=f[ind]
do=PHA[ind]
po=PSD[ind]
return {'coh': co, 'f': fo, 'PS': po, 'pha':do}
def psd(x, fs, nfft=2048, fmin=None, fmax=None, detrend='none', peak_threshold=None, ov=0.67):
"""
Calculate power spectral density of data and return spectra within frequency range
"""
P,F=_mlab.psd(x,NFFT=nfft,Fs=fs,detrend=detrend,pad_to=None,noverlap=int(_np.floor(ov*nfft)),window=_mlab.window_hanning)
# ind=_np.where((_np.abs(F)<=fmax) & (_np.abs(F)>=fmin))
threshold = _np.ones(P.shape, dtype=bool)
if fmin is not None:
threshold = threshold & (F>=fmin)
if fmax is not None:
threshold = threshold & (F<=fmax)
# threshold = (F<=fmax) & (F>=fmin)
if peak_threshold is not None:
threshold = threshold & (P>peak_threshold)
ind=_np.where(threshold)
pso=P[ind]
fo=F[ind]
return pso,fo
def csd(x,y,fs,nfft=2048,fmin=0,fmax=500e3, detrend='none', peak_threshold=None, ov=0.67):
"""
Calculate cross-power spectral density of data and return spectra within frequency range
x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None
"""
P,F=_mlab.csd(x, y, NFFT=nfft, Fs=fs, detrend=detrend, pad_to=None, noverlap=int(_np.floor(ov*nfft)), window=_mlab.window_hanning)
# ind=_np.where((_np.abs(F)<=fmax) & (_np.abs(F)>=fmin))
threshold = _np.ones(P.shape, dtype=bool)
if fmin is not None:
threshold = threshold & (F>=fmin)
if fmax is not None:
threshold = threshold & (F<=fmax)
# threshold = (F<=fmax) & (F>=fmin)
if peak_threshold is not None:
threshold = threshold & (P>peak_threshold)
ind=_np.where(threshold)
pso=P[ind]
fo=F[ind]
return pso,fo
# =========================================================================== #
# =========================================================================== #
"""
Test functions for propagating estimated uncertainties
"""
def monticoh(Pxy, varPxy, Pxx, varPxx, Pyy, varPyy, nmonti=1000, meansquared=True):
nmonti = int(nmonti)
sh = _np.shape(Pxy)
Pxy = _np.atleast_2d(Pxy)
if _np.size(Pxy, axis=0)==1: Pxy = Pxy.T # endif
Pxx = _np.atleast_2d(Pxx)
if _np.size(Pxx, axis=0)==1: Pxx = Pxx.T # endif
Pyy = _np.atleast_2d(Pyy)
if _np.size(Pyy, axis=0)==1: Pyy = Pyy.T # endif
varPxy = _np.atleast_2d(varPxy)
if _np.size(varPxy, axis=0)==1: varPxy = varPxy.T # endif
varPxx = _np.atleast_2d(varPxx)
if _np.size(varPxx, axis=0)==1: varPxx = varPxx.T # endif
varPyy = _np.atleast_2d(varPyy)
if _np.size(varPyy, axis=0)==1: varPyy = varPyy.T # endif
Pxy_s = Pxy.copy()
varPxy_s = varPxy.copy()
Pxx_s = Pxx.copy()
varPxx_s = varPxx.copy()
Pyy_s = Pyy.copy()
varPyy_s = varPyy.copy()
g2 = _np.zeros( (nmonti, _np.size(Pxy,axis=0), _np.size(Pxy, axis=1)), dtype=float)
for ii in range(nmonti):
        # draw per-element gaussian perturbations of the spectra
        Pxy = Pxy_s + _np.sqrt(varPxy_s)*_np.random.normal(0.0, 1.0, _np.shape(Pxy_s))
        Pxx = Pxx_s + _np.sqrt(varPxx_s)*_np.random.normal(0.0, 1.0, _np.shape(Pxx_s))
        Pyy = Pyy_s + _np.sqrt(varPyy_s)*_np.random.normal(0.0, 1.0, _np.shape(Pyy_s))
g2[ii] = _np.abs( Pxy*_np.conj( Pxy ) )/( _np.abs(Pxx)*_np.abs(Pyy) )
# end for
varg2 = _np.nanvar(g2, axis=0)
g2 = _np.nanmean(g2, axis=0)
if meansquared:
return g2.reshape(sh), varg2.reshape(sh)
else:
return _np.sqrt(g2.reshape(sh)), _np.sqrt(varg2.reshape(sh))
# end if
# end def
def varcoh(Pxy, varPxy, Pxx, varPxx, Pyy, varPyy, meansquared=True):
# function [Coh,varCoh]=varcoh(Pxy,varPxy,Pxx,varPxx,Pyy,varPyy)
# Only works when varPxy was formed by separating real and imaginary
# components. As is done in fft_pwelch.m
ms = _np.imag(Pxy)
mc = _np.real(Pxy)
vs = _np.imag(varPxy)
vc = _np.real(varPxy)
    Coh = _np.zeros( _np.size(Pxy), dtype=_np.complex128)
varCoh = _np.zeros_like( Coh )
if meansquared:
Coh = _np.abs( Pxy*_np.conj( Pxy ) )/( _np.abs(Pxx)*_np.abs(Pyy) )
# C = ( T*T' )/(XY) = (R^2 + I^2)/(XY)
# d()/dR = 2R/(XY)
# d()/dI = 2I/(XY)
# d()/dX = -(R^2+I^2)/(X^2*Y)
# d()/dY = -(R^2+I^2)/(X*Y^2)
# vC = C^2 * ( vR*(2R/(R^2+I^2))^2 + vI*(2I/(R^2+I^2))^2 + vX/X^2+ vY/Y2)
varCoh = Coh**2*( vc*( 2*mc/( mc**2+ms**2) )**2 + \
vs*( 2*ms/( mc**2+ms**2) )**2 + \
varPxx*(1/Pxx)**2 + varPyy*(1/Pyy)**2 )
# if meansquared is False:
# # Return the coherence, not the mean-squared coherence
# varCoh = 0.25*varCoh/Coh # (0.5*(Coh**-0.5))**2.0 * varCoh
# Coh = _np.sqrt(Coh)
# # endif
else: # return the complex coherence
Coh = Pxy / _np.sqrt( _np.abs(Pxx)*_np.abs(Pyy) )
# vardenom = ...
# varCoh = Coh**2.0*( varPxy +
varCoh = Coh**2*( vc*( 2*mc/( mc**2+ms**2) )**2 + \
vs*( 2*ms/( mc**2+ms**2) )**2 + \
varPxx*(1/Pxx)**2 + varPyy*(1/Pyy)**2 )
# Return the coherence, not the mean-squared coherence
varCoh = 0.25*varCoh/Coh # (0.5*(Coh**-0.5))**2.0 * varCoh
Coh = _np.sqrt(Coh)
# end if
return Coh, varCoh
# end function varcoh
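
# Usage sketch for varcoh (illustrative only): the toy spectra and fractional
# uncertainties below are assumptions; monticoh above can be used as a
# Monte-Carlo cross-check of the same error propagation.
def _example_varcoh():
    """
    Propagate toy spectral uncertainties through varcoh to get the
    mean-squared coherence and its variance.
    """
    nf = 32
    Pxx = _np.ones((nf,), dtype=float)
    Pyy = 2.0*_np.ones((nf,), dtype=float)
    Pxy = (0.8 + 0.2j)*_np.ones((nf,), dtype=complex)
    varPxx = (0.05*Pxx)**2.0
    varPyy = (0.05*Pyy)**2.0
    varPxy = (0.05*Pxy.real)**2.0 + 1j*(0.05*Pxy.imag)**2.0   # separate real/imag variances
    Coh, varCoh = varcoh(Pxy, varPxy, Pxx, varPxx, Pyy, varPyy, meansquared=True)
    return Coh, varCoh
# end def _example_varcoh
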
def montiphi(Pxy, varPxy, nmonti=1000, angle_range=_np.pi):
nmonti = int(nmonti)
sh = _np.shape(Pxy)
Pxy = _np.atleast_2d(Pxy)
if _np.size(Pxy, axis=0)==1: Pxy = Pxy.T # endif
varPxy = _np.atleast_2d(varPxy)
if _np.size(varPxy, axis=0)==1: varPxy = varPxy.T # endif
Pxy_s = Pxy.copy()
varPxy_s = varPxy.copy()
ph = _np.zeros( (nmonti, _np.size(Pxy,axis=0), _np.size(Pxy, axis=1)), dtype=float)
for ii in range(nmonti):
        Pxy = Pxy_s + _np.sqrt(varPxy_s)*_np.random.normal(0.0, 1.0, _np.shape(Pxy_s))
# the angle function computes atan2( 1/n sum(sin(phi)),1/n sum(cos(phi)) )
if angle_range>0.5*_np.pi:
ph[ii] = _np.arctan2( _np.imag(Pxy), _np.real(Pxy) )
else:
ph[ii] = _np.arctan( _np.imag(Pxy) / _np.real(Pxy) )
# endif
        # # This function might not work because of wrapping issues?
# ph[ii] = _np.unwrap(ph[ii])
# end for
varph = _np.nanvar(ph, axis=0)
ph = _np.nanmean(ph, axis=0)
return ph.reshape(sh), varph.reshape(sh)
def varphi(Pxy_real, Pxy_imag, varPxy_real, varPxy_imag, angle_range=_np.pi):
#def varphi(Pxy, varPxy, angle_range=_np.pi):
# Pxy_real = _np.real(Pxy)
# Pxy_imag = _np.imag(Pxy)
#
# varPxy_real = _np.real(varPxy)
# varPxy_imag = _np.imag(varPxy)
# the angle function computes atan2( 1/n sum(sin(phi)),1/n sum(cos(phi)) )
if angle_range>0.5*_np.pi:
ph = _np.arctan2( Pxy_imag, Pxy_real )
else:
ph = _np.arctan( Pxy_imag / Pxy_real )
# endif
# substitute variables and propagate errors into the tangent function
_tangent = Pxy_imag/Pxy_real # tangent = sin / cos
# vt = (tt**2)*( vsa/(msa**2) + vca/(mca**2) ) # variance in tangent
# vt = vsa/(mca**2) + vca*msa**2/(mca**4) # variance in tangent
# vt = (tt**2)*( varPxy_imag/(Pxy_imag**2) + varPxy_real/(Pxy_real**2) )
_vartang = (varPxy_imag+varPxy_real*_tangent**2)/(Pxy_real**2)
# the variance of the arctangent function is related to the derivative
# d(arctangent)/dx = 1/(1+x^2) using a = atan( tan(a) )
# varph = vt/(1+tt**2)**2
varph = _vartang/(1+_tangent**2)**2
return ph, varph
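
# Usage sketch for varphi (illustrative only): toy real/imaginary cross-power
# components and variances; montiphi above gives the Monte-Carlo counterpart.
def _example_varphi():
    """
    Propagate uncertainties in the real and imaginary parts of a cross-power
    through varphi to get the cross-phase and its variance.
    """
    Pxy_real = _np.array([1.0, 0.5, -0.3])
    Pxy_imag = _np.array([0.2, 0.5, 0.4])
    varPxy_real = (0.05*_np.abs(Pxy_real))**2.0
    varPxy_imag = (0.05*_np.abs(Pxy_imag))**2.0
    ph, varph = varphi(Pxy_real, Pxy_imag, varPxy_real, varPxy_imag, angle_range=_np.pi)
    return ph, varph
# end def _example_varphi
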
# ========================================================================== #
def mean_angle(phi, vphi=None, dim=0, angle_range=0.5*_np.pi, vsyst=None):
"""
    The proper way to average a phase angle is to convert from polar (complex)
    coordinates to a cartesian representation and average the components.
"""
if vphi is None:
vphi = _np.zeros_like(phi)
# endif
if vsyst is None:
vsyst = _np.zeros_like(phi)
# endif
nphi = _np.size(phi, dim)
complex_phase = _np.exp(1.0j*phi)
complex_var = vphi*(_np.abs(complex_phase))**2
complex_vsy = vsyst*(_np.abs(complex_phase))**2
# Get the real and imaginary parts of the complex phase
ca = _np.real(complex_phase)
sa = _np.imag(complex_phase)
# Take the mean and variance of these components
# mca = _np.mean(ca, dim)
# msa = _np.mean(sa, dim)
#
# vca = _np.var(ca, dim) + _np.sum(complex_var, dim)/(nphi**2)
# vsa = _np.var(sa, dim) + _np.sum(complex_var, dim)/(nphi**2)
mca = _np.nanmean(ca, axis=dim)
msa = _np.nanmean(sa, axis=dim)
# Stat error
vca = _np.nanvar(ca, axis=dim) + _np.nansum(complex_var, axis=dim)/(nphi**2)
vsa = _np.nanvar(sa, axis=dim) + _np.nansum(complex_var, axis=dim)/(nphi**2)
# Add in systematic error
vca += (_np.nansum( _np.sqrt(complex_vsy), axis=dim )/nphi)**2.0
vsa += (_np.nansum( _np.sqrt(complex_vsy), axis=dim )/nphi)**2.0
mean_phi, var_phi = varphi(Pxy_real=mca, Pxy_imag=msa,
varPxy_real=vca, varPxy_imag=vsa, angle_range=angle_range)
return mean_phi, var_phi
# end mean_angle
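
# Usage sketch for mean_angle (illustrative only): sample angles straddling
# the +/- pi branch cut, where a naive arithmetic mean would give ~0 instead
# of ~pi; the values and variances are assumptions made for this example.
def _example_mean_angle():
    """
    Average angles near +/- pi by averaging their cartesian components.
    """
    phi = _np.array([3.10, -3.10, 3.05, -3.05])    # [rad]
    vphi = 0.01*_np.ones_like(phi)
    mean_phi, var_phi = mean_angle(phi, vphi, dim=0, angle_range=_np.pi)
    return mean_phi, var_phi
# end def _example_mean_angle
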
# # the angle function computes atan2( 1/n sum(sin(phi)),1/n sum(cos(phi)) )
# if angle_range > 0.5*_np.pi:
# mean_phi = _np.arctan2(msa, mca)
# else:
# mean_phi = _np.arctan(msa/mca)
# # endif
#
# # substitute variables and propagate errors into the tangent function
# tt = msa/mca # tangent = sin / cos
# # vt = (tt**2)*( vsa/(msa**2) + vca/(mca**2) ) # variance in tangent
# vt = vsa/(mca**2) + vca*msa**2/(mca**4) # variance in tangent
#
# # the variance of the arctangent function is related to the derivative
# # d(arctangent)/dx = 1/(1+x^2) using a = atan( tan(a) )
# var_phi = vt/(1+tt**2)**2
# return mean_phi, var_phi
## end mean_angle
def unwrap_tol(data, scal=_np.pi, atol=None, rtol=None, itol=None):
if atol is None and rtol is None: atol = 0.2 # endif
if atol is None and rtol is not None: atol = rtol*scal # endif
if itol is None: itol = 1 # endif
tt = _np.asarray(range(len(data)))
ti = tt[::itol]
diffdata = _np.diff(data[::itol])/scal
diffdata = _np.sign(diffdata)*_np.floor(_np.abs(diffdata) + atol)
data[1:] = data[1:]-_np.interp(tt[1:], ti[1:], scal*_np.cumsum(diffdata))
return data
#end unwrap_tol
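
# Usage sketch for unwrap_tol (illustrative only): wrap a smooth phase ramp
# onto (-pi, pi] and remove the 2*pi jumps again; the ramp is an assumption
# made for this example.
def _example_unwrap_tol():
    """
    Unwrap a phase signal that has been wrapped onto (-pi, pi].
    """
    tt = _np.linspace(0.0, 1.0, 500)
    phase = 6.0*_np.pi*tt                          # smooth, unwrapped phase
    wrapped = _np.angle(_np.exp(1j*phase))         # wrapped onto (-pi, pi]
    unwrapped = unwrap_tol(wrapped.copy(), scal=_np.pi)
    return phase, wrapped, unwrapped
# end def _example_unwrap_tol
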
# ========================================================================= #
# ========================================================================= #
"""
Functions for taking the derivative of a signal using FFT's (fft_deriv)
"""
def rescale(xx, yy, scaley=True, scalex=True):
slope = 1.0
offset = 0.0
xslope = 1.0
xoffset = 0.0
if scaley:
slope = _np.nanmax(yy)-_np.nanmin(yy) # maximum 1.0
offset = _np.nanmin(yy) # minimum 0.0
if slope == 0: slope = 1.0 # end if
yy = (yy.copy()-offset)/slope
if scalex:
xslope = _np.nanmax(xx)-_np.nanmin(xx) # shrink so maximum is less than 1.0
xoffset = -1e-4 # prevent 0 from being in problem
if xslope == 0: xslope = 1.0 # end if
xx = (xx.copy()-xoffset)/xslope
# end if
return xx, yy, (slope, offset, xslope, xoffset)
def unscale(xx, yy, scl, dydx=None):
slope = scl[0]
offset = scl[1]
xslope = scl[2]
xoffset = scl[3]
xx = xx*xslope+xoffset
yy = slope*yy+offset
if dydx is not None:
dydx = dydx*slope/xslope
return xx, yy, dydx
else:
return xx, yy
def fft_deriv(sig, xx=None, lowpass=True, Fs_new=None, modified=True, detrend=detrend_none, window=None):
"""
inputs:
sig - (nx,) - signal, dependent variable
xx - (nx,) - grid on domain, independent variable (default: 0:nx)
lowpass - [Hz or Bool], filter frequency pre-fft (default: nyquist freq. if True)
modified - [Bool], See documentation below (default:True)
detrend - [function handle], detrending function pre-LPF and pre-fft (default: detrend_none)
window - [function handle], window function for signal (default: None)
outputs:
dsdx - (nd,) - derivative of the signal
xx - (nd,) - independent variable of signal
Note: nd = len(xx), downsampled signal to have twice the LPF frequency
Documentation:
dfdx = ifft( wavenumber*fft(f) )
Ringing artifacts are present in the derivative calculated by FFT's due to
the lack of periodicity and high-frequency content in real signals.
There are several methods to decrease ringing:
1) use a modified wavenumber in the derivative calculation
this decreases ringing everywhere, but ringing is still present at
edges due to lack of periodicity
wavenumber = 1.0j*k if unmodified
wavenumber = 1.0j*sin(k*dx)/dx if modified
2) use a window function
this decreases ringing everywhere, but decreases the accuracy of
the derivative near the edges of the domain
(the signal is multiplied by zero at the end-points)
"""
if xx is None:
N = len(sig)
xx = 1.0*_np.asarray(range(0, N))
# end if
# ======= =#
# Low pass filter the data before calculating the FFT if requested
# if 0:
if lowpass:
dxo = xx[1] - xx[0]
if lowpass is True:
# lowpass = 0.1*1.0/dxo
lowpass = 0.5*1.0/dxo
# end if
# b, a = butter_lowpass(lowpass, fnyq=0.5/dxo, order=2)
# sig = _dsp.filtfilt(b, a, sig)
Fs = 1.0/dxo
if Fs_new is None:
Fs_new = min(5.0*lowpass, Fs)
# end if
if Fs_new<Fs:
sig = downsample_efficient(sig, Fs=Fs, Fs_new=Fs_new, plotit=False, halforder=2, lowpass=lowpass)
xx = xx[0] + _np.arange(0, len(xx)/Fs, 1.0/Fs_new)
Fs = Fs_new
# end if
# end if
# ======= =#
# Scale the data to make it simple
xx, sig, scl = rescale(xx, sig, scaley=True, scalex=True)
# offset = _np.nanmean(sig, axis=0)
# sig -= offset
sig = detrend(sig)
# ======= =#
N = len(xx)
nfft = N
dx = xx[1] - xx[0]
L = N*dx
# Get the wavenumber vector
k = _np.fft.fftfreq(nfft, d=dx/L)
k *= 2.0*_np.pi
if modified:
# Modified wave number
# - Windowed with a sinc function to kill some of the ringing near the center
# - Sunaina et al 2018 Eur. J. Phys. 39 065806
wavenumber = 1.0j*_np.sin(k*dx)/(dx)
else:
# Naive fft derivative (subject to ringing)
wavenumber = 1.0j*k
# end if
wavenumber /= L
# Calculate the derivative using fft
if window is None:
win = _np.ones_like(sig)
else:
win = window(nfft) # periodic hamming window is a good choice for the center of the domain
# end if
sig = win*sig # accurate at center of signal, no ringing, bad outside center
# Calculate the derivative using fft
ds0 = (sig[1]-sig[0])/(xx[1]-xx[0]) # beginning of boundary
ds1 = (sig[-1]-sig[-2])/(xx[-1]-xx[-2]) # end point of boundary
sig = _np.real(_np.fft.ifft(wavenumber*_np.fft.fft(sig, n=nfft), n=nfft))
# Unnormalize the center of the window
sig /= win # accurate at center of signal, no ringing, bad outside center
# discontinuity at end-points when not windowing
sig[0] = ds0
sig[-1] = ds1
# Rescale back to the original data scale
# sig += offset
xx, _, sig = unscale(xx, sig.copy(), scl=scl, dydx=sig)
# ======= =#
# Low pass filter the data after calculating the FFT if requested
if 0:
# if lowpass:
dx = xx[1] - xx[0]
if lowpass is True:
lowpass = 0.5*1.0/dx
# end if
Fs = 1.0/dx
if Fs_new is None:
Fs_new = min(5.0*lowpass, Fs)
# end if
if Fs_new<Fs:
sig = downsample_efficient(sig, Fs=Fs, Fs_new=Fs_new, plotit=False, halforder=2, lowpass=lowpass)
xx = xx[0] + _np.arange(0, len(xx)/Fs, 1.0/Fs_new)
Fs = Fs_new
# end if
# end if
# ======= =#
return sig, xx
# end def
def test_fft_deriv(modified=True):
for jj in range(1):
if jj == 0:
win = 'Unwindowed:'
window = None
elif jj == 1:
win = 'Windowed:'
window = _np.hamming
for ii in range(5):
N = int(2e3)
L = 13.0 #interval of data
dx = L/N
xx = dx*_np.asarray(range(N))
if ii == 0:
# Test with a rectangle function
yy = _ut.rect(2.0*xx/L-0.75)
dy_analytic = _ut.delta(2.0*xx/L-0.75+0.5) - _ut.delta(2.0*xx/L-0.75-0.5)
titl = '%s Box function'%(win,)
elif ii == 1:
# Test with a gaussian function
yy = _np.exp(-0.5*(xx/L)*(xx/L)/(0.25*0.25))
dy_analytic = (-1.0*(xx/L)*(1.0/L)/(0.25*0.25))*yy
titl = '%s Gaussian function'%(win,)
elif ii == 2:
# Test with a line
yy = _np.linspace(-1.2, 11.3, num=len(xx), endpoint=True)
a = (yy[-1]-yy[0])/(xx[-1]-xx[0])
# b = yy[0] - a*xx[0]
dy_analytic = a*_np.ones_like(yy)
titl = '%s Linear function'%(win,)
elif ii == 3:
# Test with a sine
yy = _np.sin(xx)
dy_analytic = _np.cos(xx)
titl = '%s Sine function: aperiodic boundary'%(win,)
elif ii == 4:
# Test with a sine
xx = 6.0*_np.pi*xx/L
yy = _np.sin(xx)
dy_analytic = _np.cos(xx)
xx = xx[:-1]
yy = yy[:-1]
dy_analytic = dy_analytic[:-1]
titl = '%s Sine function: periodic boundary'%(win,)
# end if
# # add some random noise
## yy += 0.05*(_np.nanmax(yy)-_np.nanmin(yy))*_np.random.random(size=xx.shape)
# yy += 0.05*yy*_np.random.random(size=xx.shape)
dydt, xo = fft_deriv(yy, xx, modified=modified, window=window)
_plt.figure('%s wavenumber: Test (%i,%i)'%('Modified' if modified else 'Unmodified',jj,ii+1))
_plt.plot(xx, yy, '-', label='function')
_plt.plot(xx, dy_analytic, '-', label='analytical der')
_plt.plot(xo, dydt, '*', label='fft der')
_plt.title(titl)
_plt.legend(loc='lower left')
# end for
# end for
# _plt.savefig('images/fft-der.png')
_plt.show()
# end def test_fft_deriv()
# ========================================================================= #
# ========================================================================= #
def Cxy_Cxy2(Pxx, Pyy, Pxy, ibg=None): #, thresh=1.e-6):
Pxx = Pxx.copy()
Pyy = Pyy.copy()
Pxy = Pxy.copy()
# Mean-squared coherence
Pxx = _np.atleast_2d(Pxx)
if _np.size(Pxx, axis=1) != _np.size(Pyy, axis=1):
Pxx = Pxx.T*_np.ones( (1, _np.size(Pyy, axis=1)), dtype=Pxx.dtype)
# end if
Cxy2 = Pxy*_np.conj( Pxy )/( _np.abs(Pxx)*_np.abs(Pyy) )
# Cxy2 = _np.abs(Cxy2) # mean-squared coherence
# Cxy = _np.sqrt(Cxy2) # RMS coherence
# Complex coherence
Cxy = Pxy/_np.sqrt( _np.abs(Pxx)*_np.abs(Pyy) )
if ibg is None:
return Cxy, Cxy2
# Imaginary coherence
iCxy = _np.imag(Cxy)/(1.0-_np.real(Cxy))
# Background subtracted coherence
Cprime = _np.real(Cxy-_np.mean(Cxy[:,ibg], axis=-1)) \
/(1.0-_np.real(Cxy-_np.mean(Cxy[:,ibg], axis=-1)))
return iCxy, Cprime
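
# Usage sketch for Cxy_Cxy2 (illustrative only): toy averaged spectra with a
# 2D (segments x frequency) shape, matching the segment spectra built by the
# fftanal class below; the spectral values are assumptions for this example.
def _example_Cxy_Cxy2():
    """
    Complex and mean-squared coherence of toy spectra; by construction
    |Cxy| = 0.7 and Cxy2 = 0.49 at every frequency.
    """
    nfft = 64
    Pxx = _np.ones((1, nfft), dtype=float)
    Pyy = _np.ones((1, nfft), dtype=float)
    Pxy = 0.7*_np.exp(0.3j)*_np.ones((1, nfft), dtype=complex)
    Cxy, Cxy2 = Cxy_Cxy2(Pxx, Pyy, Pxy)
    return Cxy, Cxy2
# end def _example_Cxy_Cxy2
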
# ========================================================================= #
# ========================================================================= #
#class fftmch(fftanal):
class fftanal(Struct):
afont = {'fontname':'Arial','fontsize':14}
def __init__(self, tvec=None, sigx=None, sigy=None, **kwargs):
self.verbose = kwargs.get( 'verbose', True)
if tvec is None or sigx is None:
if self.verbose:
print('Please give at least a time-vector [s]'
+ ' and a signal vector [a.u.]')
# end if
return
else:
self.init(tvec, sigx, sigy, **kwargs)
# self.fftpwelch()
# endif
# end __init__
def init(self, tvec=None, sigx=None, sigy=None, **kwargs):
if sigy is None or sigx is sigy:
self.nosigy = True
else:
self.nosigy = False
#endif
self.tvec = tvec
self.sigx = sigx
self.sigy = sigy
# ======== #
self.tbounds = kwargs.get( 'tbounds', [ tvec.min(), tvec.max() ] )
self.useMLAB = kwargs.get( 'useMLAB', False )
self.plotit = kwargs.get( 'plotit', False)
self.verbose = kwargs.get( 'verbose', True)
self.Navr = kwargs.get( 'Navr', None)
self.window = kwargs.get( 'windowfunction', 'Hanning') #'SFT3F')
if self.window is None: self.window = 'Hanning' # end if
self.overlap = kwargs.get( 'windowoverlap', windows(self.window, verbose=False))
self.tvecy = kwargs.get( 'tvecy', None)
self.onesided = kwargs.get('onesided', None)
self.detrendstyle = kwargs.get('detrend', 1) # >0 mean, 0 None, <0 linear
self.frange = kwargs.get('frange', None)
self.axes = kwargs.get('axes', -1)
if self.onesided is None:
self.onesided = True
if _np.iscomplexobj(sigx) or _np.iscomplexobj(sigy):
self.onesided = False
# end if
# end if
# ======== #
        # put the signals on the same time base for Fourier transformation
if self.tvecy is not None:
self.tvec, self.sigx, self.sigy = self.resample(tvec, sigx, self.tvecy, sigy)
# end if
self.Fs = self.__Fs__(self.tvec)
self.ibounds = self.__ibounds__(self.tvec, self.tbounds)
self.nsig = _np.size( self.__trimsig__(tvec, self.ibounds) )
calcNavr = False
if self.Navr is None:
calcNavr = True
self.Navr = 8
# end if
# if the window time is specified ... overwrite nwins, noverlap and Navr
if 'minFreq' in kwargs: # if the minimum resolvable frequency is specificed
kwargs['tper'] = 2.0/kwargs['minFreq']
# end if
if 'tper' in kwargs:
self.tper = kwargs['tper']
self.nwins = int(self.Fs*self.tper)
else:
calcNavr = False
self.nwins = self.getNwins()
# end if
self.noverlap = self.getNoverlap()
if calcNavr:
self.Navr = self.getNavr()
# end if
self.win, self.winparams = self.makewindowfn(self.window, self.nwins, self.verbose)
self.getNnyquist()
self.getNorms()
# end def init
def update(self, d=None):
if d is not None:
if type(d) != dict: d = d.dict_from_class() # endif
super(fftanal, self).__init__(d)
# endif
# end def update
# =========================================================== #
def fftpwelch(self):
# Call the fft_pwelch function that is defined above
self.freq, self.Pxy, self.Pxx, self.Pyy, \
self.Cxy, self.phi_xy, self.fftinfo = \
fft_pwelch(self.tvec, self.sigx, self.sigy, self.tbounds,
Navr=self.Navr, windowoverlap=self.overlap,
windowfunction=self.window, useMLAB=self.useMLAB,
plotit=self.plotit, verbose=self.verbose,
detrend_style=self.detrendstyle, onesided=self.onesided)
self.update(self.fftinfo)
def stft(self):
if self.useMLAB:
import scipy.signal as _dsp
if not self.onesided or (type(self.onesided)==type('') and self.onesided.find('two')>-1):
onesided = False
elif self.onesided or (type(self.onesided)==type('') and self.onesided.find('one')>-1):
onesided = True
# end if
self.freq, self.tseg, self.Xseg = _dsp.stft(self.sigx, fs=self.Fs,
window=self.win, nperseg=self.nwins, noverlap=self.noverlap,
nfft=self.nwins, detrend=self.detrend, return_onesided=onesided,
boundary='zeros', padded=True, axis=self.axes)
_, _, self.Yseg = _dsp.stft(self.sigy, fs=self.Fs,
window=self.win, nperseg=self.nwins, noverlap=self.noverlap,
nfft=self.nwins, detrend=self.detrend, return_onesided=onesided,
boundary='zeros', padded=True, axis=self.axes)
self.Pstft()
self.averagewins()
else:
self.pwelch()
# end if
def pwelch(self):
self.Xstft()
if not self.nosigy:
self.Ystft()
self.Pstft()
self.averagewins()
# =============== #
def crosscorr(self): #, fbnds=None):
# cross correlation from the FFT
# if we calculated one-sided spectra, then we have to add back
        # in the Nyquist components before inverse transforming.
# ==================== #
nfft = self.nwins
for param in ['Pxx', 'Pyy', 'Pxy']:
if hasattr(self, param):
tmp = getattr(self, param).copy()
if self.onesided:
# remaking the two-sided spectra for the auto-power
tmp[..., 1:-1] *= 0.5
if nfft%2: # odd
                        tmp[..., -1] *= 0.5
# end if
tmp = _np.sqrt(nfft)*_np.fft.irfft(tmp, n=nfft, axis=-1)
else:
tmp = _np.fft.ifftshift(tmp, axes=-1)
tmp = _np.sqrt(nfft)*_np.fft.ifft(tmp, n=nfft, axis=-1)
# end if
# cauchy energy integral == auto-correlation at zero-time lag
if param.find('Pxx')>-1:
setattr(self, 'Ex', tmp[..., 0].copy())
if param.find('Pyy')>-1:
setattr(self, 'Ey', tmp[..., 0].copy())
# end if
# shift the time-series of lagged correlations to be the normal smallest to largest
tmp = _np.fft.fftshift(tmp, axes=-1)
setattr(self, 'R'+param[1:], tmp)
# end if
# end for
if hasattr(self, 'Rxy'):
self.corrcoef = self.Rxy.copy()/(_np.ones((self.nch,1), dtype=self.Ey.dtype)*_np.sqrt(self.Ex*self.Ey))
# end if
self.lags = (_np.asarray(range(1, nfft+1), dtype=int)-self.Nnyquist)/self.Fs
# end def
def crosscorr_stft(self): #, fbnds=None):
# cross correlation from the FFT
# if we calculated one-sided spectra, then we have to add back
        # in the Nyquist components before inverse transforming.
# ==================== #
nfft = self.nwins
for param in ['Pxx_seg', 'Pyy_seg', 'Pxy_seg']:
if hasattr(self, param):
tmp_seg = getattr(self, param).copy()
if self.onesided:
# remaking the two-sided spectra for the auto-power
tmp_seg[...,1:-1] *= 0.5
if nfft%2: # odd
tmp_seg[..., -1] *= 0.5
# end if
tmp_seg = _np.sqrt(nfft)*_np.fft.irfft(tmp_seg, n=nfft, axis=-1)
else:
tmp_seg = _np.fft.ifftshift(tmp_seg, axes=-1)
tmp_seg = _np.sqrt(nfft)*_np.fft.ifft(tmp_seg, n=nfft, axis=-1)
# end if
# cauchy energy integral == auto-correlation at zero-time lag
if param.find('Pxx')>-1:
setattr(self, 'Ex_seg', tmp_seg[..., 0].copy())
if param.find('Pyy')>-1:
setattr(self, 'Ey_seg', tmp_seg[..., 0].copy())
# end if
# shift the time-series of lagged correlations to be the normal smallest to largest
tmp_seg = _np.fft.fftshift(tmp_seg, axes=-1)
setattr(self, 'R'+param[1:], tmp_seg)
# end if
# end for
if hasattr(self, 'Rxy_seg'):
self.corrcoef_seg = self.Rxy_seg.copy()/(_np.ones((self.nch,1), dtype=self.Ey_seg.dtype)*_np.sqrt(self.Ex_seg*self.Ey_seg))
# end if
self.lags = (_np.asarray(range(1, nfft+1), dtype=int)-self.Nnyquist)/self.Fs
# end def
# =============== #
def Xstft(self):
        # Perform the loop over averaging windows to generate the short-time Fourier transform
# Note that the zero-frequency component is in the middle of the array (2-sided transform)
sig = self.__trimsig__(self.sigx, self.ibounds)
tvec = self.__trimsig__(self.tvec, self.ibounds)
self.tseg, self.freq, self.Xseg, self.Xpow = self.fft_win(sig, tvec) # frequency [cycles/s], STFT [Navr, nfft]
self.Xfft = _np.mean(self.Xseg, axis=0)
return self.freq, self.Xseg
def Ystft(self):
        # Perform the loop over averaging windows to generate the short-time Fourier transform
# Note that the zero-frequency component is in the middle of the array (2-sided transform)
sig = self.__trimsig__(self.sigy, self.ibounds)
tvec = self.__trimsig__(self.tvec, self.ibounds)
self.tseg, self.freq, self.Yseg, self.Ypow = self.fft_win(sig, tvec) # frequency [cycles/s], STFT [Navr, nfft]
self.Yfft = _np.mean(self.Yseg, axis=0)
#self.tseg = self.tbounds[0]+(self.arange(self.Navr)+0.5)*self.tper
return self.freq, self.Yseg
def Pstft(self):
if hasattr(self,'Xseg'):
self.Pxx_seg = self.Xseg*_np.conj(self.Xseg)
self.Lxx_seg = _np.sqrt(_np.abs(self.ENBW*self.Pxx_seg)) # [V_rms]
if self.onesided:
self.Lxx_seg = _np.sqrt(2)*self.Lxx_seg # V_amp
if hasattr(self,'Yseg'):
self.Pyy_seg = self.Yseg*_np.conj(self.Yseg)
self.Lyy_seg = _np.sqrt(_np.abs(self.ENBW*self.Pyy_seg)) # [V_rms]
if self.onesided:
self.Lyy_seg = _np.sqrt(2)*self.Lyy_seg # V_amp
if hasattr(self, 'Xseg') and hasattr(self,'Yseg'):
self.Pxy_seg = self.Xseg*_np.conj(self.Yseg)
self.Lxy_seg = _np.sqrt(_np.abs(self.ENBW*self.Pxy_seg)) # [V_rms]
if self.onesided:
self.Lxy_seg = _np.sqrt(2)*self.Lxy_seg # V_amp
# Save the cross-phase as well
self.phixy_seg = _np.angle(self.Pxy_seg) # [rad], Cross-phase of each segment
# Mean-squared Coherence
self.Cxy_seg, self.Cxy2_seg = Cxy_Cxy2(self.Pxx_seg, self.Pyy_seg, self.Pxy_seg)
# end def
# =============== #
def averagewins(self):
for param in ['Pxx', 'Pyy', 'Pxy']:
if hasattr(self, param+'_seg'):
# Use the mean of the windows
# self.Pxx = _np.mean(self.Pxx_seg, axis=0)
setattr(self, param, _np.mean(getattr(self, param+'_seg'), axis=0))
# use the RMS for the standard deviation
# self.varPxx = _np.mean(self.Pxx_seg**2.0, axis=0)
# setattr(self, 'var'+param, _np.mean(getattr(self, param+'_seg')**2.0, axis=0) )
# Else use the normal statistical estimate:
# self.varPxx = (self.Pxx/_np.sqrt(self.Navr))**2.0
setattr(self, 'var'+param, (getattr(self, param)/_np.sqrt(self.Navr))**2.0)
# end if
# end for
if hasattr(self, 'Pxy'):
# Cross-phase as well
self.phi_xy = _np.angle(self.Pxy)
# Complex coherence and Mean-squared coherence
self.Cxy, self.Cxy2 = Cxy_Cxy2(self.Pxx, self.Pyy, self.Pxy)
# ========================== #
# Uncertainty and phase part
# Estimate the variance in the power spectra: this requires building
# a distribution by varying the parameters used in the FFT, nwindows,
# nfft, windowfunction, etc. I don't do this right now
# self.varPxy = self.Pxx*self.Pyy*(1.0-self.Cxy)/self.Navr
# <NAME>, Phys. Plasmas, 17 056103, 2010
# Doesn't so far give a convincing answer...
# fftinfo.varPhxy = _np.zeros(Pxy.shape, dtype=_np.float64)
self.varPhxy = (_np.sqrt(1.0-self.Cxy2)/_np.sqrt(2.0*self.Navr*self.Cxy))**2.0
# derived using error propagation from eq 23 for gamma^2 in
            # <NAME>, Journal of Sound and Vibration 59(3), 405-421, 1978
# fftinfo.varCxy = _np.zeros_like(Cxy)
self.varCxy = ((1-self.Cxy2)/_np.sqrt(2*self.Navr))**2.0
self.varCxy2 = 4.0*self.Cxy2*self.varCxy # d/dx x^2 = 2 *x ... var: (2*x)^2 * varx
# end if
# end def averagewins
# =============== #
def convert2amplitudes(self):
# Linear amplitude spectrum from the power spectral density
# RMS Linear amplitude spectrum (constant amplitude values)
#
for param in ['Pxx', 'Pyy', 'Pxy']:
if hasattr(self, param):
# self.Lxx = _np.sqrt(_np.abs(self.ENBW*self.Pxx)) # [V_rms]
tmp = _np.sqrt(_np.abs(self.ENBW*getattr(self, param))) # [V_rms])
if self.onesided:
# Rescale RMS values to Amplitude values (assumes a zero-mean sine-wave)
# Just the points that split their energy into negative frequencies
# self.Lxx[1:-1] = _np.sqrt(2)*self.Lxx[1:-1] # [V],
tmp[1:-1] = _np.sqrt(2)*tmp[1:-1] # [V],
if self.nfft%2: # Odd
# self.Lxx[-1] = _np.sqrt(2)*self.Lxx[-1]
tmp[-1] = _np.sqrt(2)*tmp[-1] # [V], TODO:! test!
# endif nfft/2 is odd
# end if onesided
setattr(self, 'L'+param[1:], tmp) # [V])
# self.varLxx = (self.Lxx**2)*(self.varPxx/_np.abs(self.Pxx)**2)
setattr(self, 'varL'+param[1:], (tmp**2)*(getattr(self, 'var'+param)/_np.abs(getattr(self, param))**2) )
# end if
# end for
# end def
# ====================================================================== #
# ====================================================================== #
def getNavr(self):
self.Navr = fftanal._getNavr(self.nsig, self.nwins, self.noverlap)
return self.Navr
# end def getNavr
def getNwins(self):
self.nwins = fftanal._getNwins(self.nsig, self.Navr, self.overlap)
return self.nwins
# end def getNwins
def getNoverlap(self):
# Number of points to overlap
self.noverlap = fftanal._getNoverlap(self.nwins, self.overlap)
return self.noverlap
def getNnyquist(self):
self.Nnyquist = self._getNnyquist(self.nwins)
return self.Nnyquist
def getNorms(self):
# Define normalization constants
self.S1, self.S2, self.NENBW, self.ENBW = fftanal._getNorms(self.win, self.Nnyquist, self.Fs)
# end def
# ===================================================================== #
def integrate_spectra(self): # TODO: CHECK ACCURACY OF THIS!
self.integrated = Struct()
[ self.integrated.Pxy, self.integrated.Pxx, self.integrated.Pyy,
self.integrated.Cxy, self.integrated.ph, self.integrated.info ] = \
integratespectra(self.freq, self.Pxy, self.Pxx, self.Pyy, self.frange,
self.varPxy, self.varPxx, self.varPyy)
# end def
# ===================================================================== #
def detrend(self, sig):
detrender = fftanal._detrend_func(detrend_style=self.detrendstyle)
return detrender(sig)
def fft(self, sig, nfft=None, axes=None):
#The FFT output from matlab isn't normalized:
# y_n = sum[ y_m.*exp( 2_np.pi*1i*(n/N)*m ) ]
# The inverse is normalized::
# y_m = (1/N)*sum[ y_n.*exp( -2_np.pi*1i*(n/N)*m ) ]
#
# Python normalizations are optional, pick it to match MATLAB
if axes is None: axes = self.axes # end if
if nfft is None: nfft = self.nfft # end if
return _np.fft.fft(sig, n=nfft, axis=axes)
def ifft(self, sig, nfft=None, axes=None):
#The FFT output from matlab isn't normalized:
# y_n = sum[ y_m.*exp( 2_np.pi*1i*(n/N)*m ) ]
# The inverse is normalized::
# y_m = (1/N)*sum[ y_n.*exp( -2_np.pi*1i*(n/N)*m ) ]
#
# Python normalizations are optional, pick it to match MATLAB
if axes is None: axes = self.axes # end if
if nfft is None: nfft = self.nfft # end if
return _np.fft.ifft(sig, n=nfft, axis=axes)
def fftshift(self, sig, axes=None):
if axes is None: axes = self.axes # end if
return _np.fft.fftshift(sig, axes=axes)
def ifftshift(self, sig, axes=None):
if axes is None: axes = self.axes # end if
return _np.fft.ifftshift(sig, axes=axes)
def fft_win(self, sig, tvec=None, detrendwin=False):
x_in = sig.copy()
if tvec is None:
tvec = _np.linspace(0.0, 1.0, len(x_in))
# endif
win = self.win
nwins = self.nwins
Navr = self.Navr
noverlap = self.noverlap
# Fs = self.Fs
Fs = self.__Fs__(tvec)
Nnyquist = self.Nnyquist
nfft = nwins
# Define normalization constants for the window
S1 = self.S1
S2 = self.S2
ENBW = self.ENBW # Equivalent noise bandwidth
# ===== #
# detrend the whole background, like most programs do it
if not detrendwin:
x_in = self.detrend(x_in)
# end if
# ===== #
ist = _np.arange(Navr)*(nwins-noverlap)
Xfft = _np.zeros((Navr, nfft), dtype=_np.complex128)
tt = _np.zeros( (Navr,), dtype=float)
pseg = _np.zeros( (Navr,), dtype=float)
for gg in _np.arange(Navr):
istart = ist[gg] #Starting point of this window
iend = istart+nwins #End point of this window
if gg == 0:
self.tper = tvec[iend]-tvec[istart]
# endif
tt[gg] = _np.mean(tvec[istart:iend])
xtemp = x_in[istart:iend]
# Windowed signal segment:
# - To get the most accurate spectrum, background subtract
# xtemp = win*_dsp.detrend(xtemp, type='constant')
if detrendwin:
# this is only good when the background is not evolving!
xtemp = self.detrend(xtemp)
# end if
xtemp = win*xtemp
pseg[gg] = _np.trapz(xtemp*_np.conj(xtemp), x=tvec[istart:iend]).real
Xfft[gg,:] = self.fft(xtemp, nfft)
#endfor loop over fft windows
freq = _np.fft.fftfreq(nfft, 1.0/Fs)
if self.onesided:
freq = freq[:Nnyquist] # [Hz]
Xfft = Xfft[:,:Nnyquist]
# freq = freq[:Nnyquist-1] # [Hz]
# Xfft = Xfft[:,:Nnyquist-1]
# Real signals equally split their energy between positive and negative frequencies
Xfft[:, 1:-1] = _np.sqrt(2)*Xfft[:, 1:-1]
if nfft%2: # odd
Xfft[:,-1] = _np.sqrt(2)*Xfft[:,-1]
# endif
else:
freq = self.fftshift(freq, axes=0)
Xfft = self.fftshift(Xfft, axes=-1)
# end if
# Remove gain of the window function to yield the RMS Power spectrum
# in each segment (constant peak amplitude)
Xfft /= S1 # Vrms
pseg /= S2
# Compute the spectral density from the RMS spectrum
# (constant noise floor)
Xfft /= _np.sqrt(ENBW) # [V/Hz^0.5]
return tt, freq, Xfft, pseg
# ===================================================================== #
# ===================================================================== #
def plotall(self):
# The input signals versus time
self.fig = _plt.figure(figsize=(15,15))
self.ax1 = _plt.subplot(2,3,1) # time-series
self.ax2 = _plt.subplot(2,3,2) # correlation coeff
self.ax3 = _plt.subplot(2,3,3) # power spectra
self.ax4 = _plt.subplot(2,3,4, sharex=self.ax2) # spectrogram
self.ax5 = _plt.subplot(2,3,5, sharex=self.ax3) # phase coherence
self.ax6 = _plt.subplot(2,3,6, sharex=self.ax3) # cross-phase
_ax1 = self.plottime(_ax=self.ax1)
_ax2 = self.plotCorr(_ax=self.ax2)
_ax3 = self.plotPxy(_ax=self.ax3)
_ax4 = self.plotspec(param='Pxy', logscale=True, _ax=self.ax4)
# _ax4, _ax2 = self.plotCorrelations(_ax=[self.ax4, self.ax2])
_ax5 = self.plotCxy(_ax=self.ax5)
_ax6 = self.plotphxy(_ax=self.ax6)
_plt.tight_layout()
_plt.draw()
return _ax1, _ax2, _ax3, _ax4, _ax5, _ax6
def plotspec(self, param='Pxy', logscale=False, _ax=None, vbnds=None, cmap=None): # spectrogram
# Minimum resolvable frequency, Maximum resolvable frequency (or bounded if the freq vector has been trimmed)
fbounds = [1e-3*max((2.0*self.Fs/self.nwins, self.freq.min())), 1e-3*min((self.Fs/2.0, self.freq.max()))]
_ax = fftanal._plotspec(self.tseg, self.freq, getattr(self, param+'_seg').copy(),
logscale=logscale, _ax=_ax, vbnds=vbnds, cmap=cmap,
titl=param, ylbl='freq [KHz]', xlbl='time [s]',
tbounds=self.tbounds, fbounds=fbounds) # spectrogram
return _ax
# end def
def plotCorrelations(self, axs=None):
plotCorr = fftanal._plotCorr
if axs is None:
_plt.figure()
_ax1 = _plt.subplot(4,1,1)
_ax2 = _plt.subplot(4,1,2, sharex=_ax1, sharey=_ax1)
_ax3 = _plt.subplot(4,1,3, sharex=_ax1, sharey=_ax1)
_ax4 = _plt.subplot(4,1,4, sharex=_ax1)
axs = [_ax1, _ax2, _ax3, _ax4]
# end if
axs = _np.atleast_1d(axs)
if len(axs) == 1:
_ax = plotCorr(self.lags, self.corrcoef, _ax=axs[0], scl=1e6, afont=self.afont, titl=None, ylbl=r'$\rho_{xy}$', fmt='k-')
return _ax
elif len(axs) == 2:
_ax1 = plotCorr(self.lags, self.Rxx, _ax=axs[0], scl=1e6, afont=self.afont, titl='Correlations', xlbl=None, ylbl=r'$R_{xx}$', fmt='b-')
plotCorr(self.lags, self.Ryy, _ax=axs[0], scl=1e6, afont=self.afont, titl=None, xlbl=None, ylbl=r'$R_{yy}$', fmt='r-')
plotCorr(self.lags, self.Rxy, _ax=axs[0], scl=1e6, afont=self.afont, titl=None, xlbl=None, ylbl=r'$R_{xy}$', fmt='k-')
_ax2 = plotCorr(self.lags, self.corrcoef, _ax=axs[1], scl=1e6, afont=self.afont, titl='Cross-Correlation', ylbl=r'$\rho_{xy}$', fmt='k-')
return _ax1, _ax2
elif len(axs) == 3:
_ax1 = plotCorr(self.lags, self.Rxx, _ax=axs[0], scl=1e6, afont=self.afont, titl='Auto-Correlation', xlbl=None, ylbl=r'$R_{xx}$', fmt='b-')
_ax2 = plotCorr(self.lags, self.Ryy, _ax=axs[1], scl=1e6, afont=self.afont, titl='Auto-Correlation', xlbl=None, ylbl=r'$R_{yy}$', fmt='r-')
_ax3 = plotCorr(self.lags, self.Rxy, _ax=axs[2], scl=1e6, afont=self.afont, titl='Cross-Correlation', xlbl=None, ylbl=r'$R_{xy}$', fmt='k-')
return _ax1, _ax2, _ax3
else:
_ax1 = plotCorr(self.lags, self.Rxx, _ax=axs[0], scl=1e6, afont=self.afont, titl='Cross-Correlation', xlbl='', ylbl=r'$R_{xx}$', fmt='b-')
_ax2 = plotCorr(self.lags, self.Ryy, _ax=axs[1], scl=1e6, afont=self.afont, titl=None, xlbl='', ylbl=r'$R_{yy}$', fmt='r-')
_ax3 = plotCorr(self.lags, self.Rxy, _ax=axs[2], scl=1e6, afont=self.afont, titl=None, xlbl='', ylbl=r'$R_{xy}$', fmt='k-')
_ax4 = plotCorr(self.lags, self.corrcoef, _ax=axs[3], scl=1e6, afont=self.afont, titl=None, ylbl=r'$\rho_{xy}$', fmt='k-')
return _ax1, _ax2, _ax3, _ax4
# end if
# end def
# ===================================================================== #
def plottime(self, _ax=None):
_ax = fftanal._plotSignal([self.tvec, self.tvec], [self.sigx, self.sigy],
_ax=_ax, scl=1.0, afont=self.afont, titl='Input Signals',
ylbl='Sigx, Sigy', fmt='k-', tbounds=self.tbounds)
return _ax
def plotCorr(self, _ax=None):
_ax = fftanal._plotCorr(self.lags, self.corrcoef, _ax=_ax, scl=1e6, afont=self.afont, titl=None, ylbl=r'$\rho_{xy}$', fmt='k-')
return _ax
def plotPxy(self, _ax=None):
_ax = fftanal._plotlogAmp(self.freq, self.Pxx, self.Pyy, self.Pxy, afont=self.afont, _ax=_ax, scl=1e-3)
return _ax
def plotCxy(self, _ax=None):
_ax = fftanal._plotMeanSquaredCoherence(self.freq, self.Cxy2, afont=self.afont, _ax=_ax, scl=1e-3, Navr=self.Navr)
return _ax
def plotphxy(self, _ax=None):
_ax = fftanal._plotPhase(self.freq, self.phi_xy, afont=self.afont, _ax=_ax, scl=1e-3)
return _ax
# ===================================================================== #
# ===================================================================== #
def __calcAmp__(self, tvec, sigx, sigy, tbounds, nn=8, ol=0.5, ww='hanning'):
# The amplitude is most accurately calculated by using several windows
self.frqA, self.Axy, self.Axx, self.Ayy, self.aCxy, _, _ = \
fft_pwelch(tvec, sigx, sigy, tbounds, Navr=nn, windowoverlap=ol,
windowfunction=ww, useMLAB=self.useMLAB, plotit=0,
verbose=self.verbose, detrend_style=self.detrendstyle,
onesided=self.onesided)
self.__plotAmp__()
# end def
def __calcPh1__(self, tvec, sigx, sigy, tbounds, nn=1, ol=0.0, ww='box'):
        # The cross-phase is most accurately calculated by using a single, non-overlapping window
self.frqP, _, _, _, _, self.ph, _ = \
fft_pwelch(tvec, sigx, sigy, tbounds, Navr=nn, windowoverlap=ol,
windowfunction=ww, useMLAB=self.useMLAB, plotit=0,
verbose=self.verbose, detrend_style=self.detrendstyle,
onesided=self.onesided)
self.__plotPh1__()
# end def
def __plotAmp__(self, _ax=None):
fftanal._plotlogAmp(self.frqA, self.Axx, self.Ayy, self.Axy, afont=self.afont, _ax=_ax, scl=1e-3)
def __plotPh1__(self, _ax=None):
fftanal._plotPhase(self.frqP, self.ph, afont=self.afont, _ax=_ax, scl=1e-3)
# ===================================================================== #
def __preallocateFFT__(self):
#Inputs
self.tvec = _np.array([], dtype=_np.float64)
#Outputs
self.freq = _np.array([], dtype=_np.float64)
self.Pxy = _np.array([], dtype = _np.complex128)
self.Pxx = _np.array([], dtype = _np.complex128)
self.Pyy = _np.array([], dtype = _np.complex128)
self.varPxy = _np.array([], dtype = _np.complex128)
self.varPxx = _np.array([], dtype = _np.complex128)
self.varPyy = _np.array([], dtype = _np.complex128)
self.Coh = _np.array([], dtype=_np.float64)
self.varCoh = _np.array([], dtype=_np.float64)
self.phi = _np.array([], dtype=_np.float64)
self.varphi = _np.array([], dtype=_np.float64)
#end __preallocateFFT__
# ===================================================================== #
# ===================================================================== #
@staticmethod
def resample(tvx, sigx, tvy, sigy):
Fsx = fftanal.__Fs__(tvx)
Fsy = fftanal.__Fs__(tvy)
if len(sigx) > len(sigy):
sigy = upsample(sigy, Fsy, Fsx)
tvec = tvx
elif len(sigy) > len(sigx):
sigx = upsample(sigx, Fsx, Fsy)
tvec = tvy
return tvec, sigx, sigy
@staticmethod
def __Fs__(tvec):
return (len(tvec)-1)/(tvec[-1]-tvec[0])
@staticmethod
def __ibounds__(tvec, tbounds):
ib1 = int(_np.floor((tbounds[0]-tvec[0])*fftanal.__Fs__(tvec)))
ib2 = int(_np.floor(1 + (tbounds[1]-tvec[0])*fftanal.__Fs__(tvec)))
return [ib1, ib2]
@staticmethod
def __trimsig__(sigt, ibounds):
return sigt[ibounds[0]:ibounds[1]]
# ===================================================================== #
@staticmethod
def makewindowfn(windowfunction, nwins, verbose=True):
#Define windowing function for apodization
win, winparams = windows(windowfunction, nwins=nwins, verbose=verbose, msgout=True)
return win, winparams
# ===================================================================== #
"""
Must know two of these inputs to determine third
k = # windows, M = Length of data to be segmented, L = length of segments,
K = (M-NOVERLAP)/(L-NOVERLAP)
Navr = (nsig-noverlap)/(nwins-noverlap)
nwins = (nsig - noverlap)/Navr + noverlap
noverlap = (nsig - nwins*Navr)/(1-Navr)
noverlap = windowoverlap*nwins
nwins = nsig/(Navr-Navr*windowoverlap + windowoverlap)
"""
@staticmethod
def _getNwins(nsig, Navr, windowoverlap):
# Heliotron-J
nwins = int(_np.floor(nsig*1.0/(Navr-Navr*windowoverlap + windowoverlap)))
if nwins>=nsig:
nwins = nsig
# end if
return nwins
@staticmethod
def _getNoverlap(nwins, windowoverlap):
return int( _np.ceil( windowoverlap*nwins ) )
@staticmethod
def _getNavr(nsig, nwins, noverlap):
if nwins>= nsig:
return int(1)
else:
return (nsig-noverlap)//(nwins-noverlap)
# end def getNavr
@staticmethod
def __getNwins(nsig, Navr, noverlap):
nwins = (nsig-noverlap)//Navr+noverlap
if nwins>= nsig:
return nsig
else:
return nwins
# end def getNwins
@staticmethod
def __getNoverlap(nsig, nwins, Navr):
if nwins>= nsig:
return 0
else:
return (nsig-nwins*Navr)//(1-Navr)
# end if
# end def getNoverlap
@staticmethod
def _getMINoverlap(nsig, nwins, Navr):
noverlap = 1
while fftanal._checkCOLA(nsig, nwins, noverlap) == False and noverlap<1e4:
noverlap += 1
# end while
return noverlap
@staticmethod
def _getMAXoverlap(nsig, nwins, Navr):
noverlap = _np.copy(nwins)-1
while fftanal._checkCOLA(nsig, nwins, noverlap) == False and noverlap>0:
noverlap -= 1
# end while
return noverlap
@staticmethod
def _checkCOLA(nsig, nwins, noverlap):
return (nsig - nwins) % (nwins-noverlap) == 0
@staticmethod
def _getNnyquist(nfft):
Nnyquist = nfft//2 # Even
if nfft % 2: # Odd
# nyq = nfft//2 +1
Nnyquist = (nfft+1)//2
# end if
# Nnyquist = nfft//2 + 1
# if (nfft%2): # odd
# Nnyquist = (nfft+1)//2
# # end if the remainder of nfft / 2 is odd
## Nnyquist = nfft//2
return Nnyquist
@staticmethod
def _getS1(win):
return _np.sum(win)
@staticmethod
def _getS2(win):
return _np.sum(win**2.0)
@staticmethod
def _getNENBW(Nnyquist, S1, S2):
return Nnyquist*1.0*S2/(S1**2) # Normalized equivalent noise bandwidth
@staticmethod
def _getENBW(Fs, S1, S2):
return Fs*S2/(S1**2) # Effective noise bandwidth
@staticmethod
def _getNorms(win, Nnyquist, Fs):
S1 = fftanal._getS1(win)
S2 = fftanal._getS2(win)
# Normalized equivalent noise bandwidth
NENBW = fftanal._getNENBW(Nnyquist, S1, S2)
ENBW = fftanal._getENBW(Fs, S1, S2) # Effective noise bandwidth
return S1, S2, NENBW, ENBW
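    # Example evaluation for a boxcar (rectangular) window of length N, an
    # illustrative special case: win = ones(N), so S1 = N and S2 = N, giving
    #   ENBW  = Fs*S2/S1**2       = Fs/N          (one frequency-bin width)
    #   NENBW = Nnyquist*S2/S1**2 = Nnyquist/N    (per the definition used above)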
# ===================================================================== #
@staticmethod
def intspectra(freq, sigft, ifreq=None, ispan=None, ENBW=None):
"""
This function integrates the spectra over a specified range.
"""
if ifreq is None:
ifreq = _np.argmax(_np.abs(sigft), axis=0)
if ENBW is not None:
ispan = 2*_np.where(freq>=ENBW)[0][0]
elif ispan is None:
ispan = 6
# end if
ilow = ifreq-ispan//2
ihigh = ifreq+ispan//2
elif ispan is None:
ilow = 0
ihigh = len(sigft)
else:
ilow = ifreq-ispan//2
ihigh = ifreq+ispan//2
# end
Isig = _np.trapz(sigft[ilow:ihigh], freq[ilow:ihigh], axis=0)
Ivar = _np.zeros_like(Isig) # TODO!: decide if you want to use trapz_var or not
return Isig, Ivar
@staticmethod
def _detrend_func(detrend_style=None):
if detrend_style is None: detrend_style = 0 # end if
if detrend_style > 0:
detrend = detrend_mean # ------- Mean detrending ========= #
elif detrend_style < 0:
detrend = detrend_linear # ------ Linear detrending ======== #
else: # detrend_style == 0:
detrend = detrend_none # -------- No detrending ========== #
# end if
return detrend
# ===================================================================== #
@staticmethod
def _fft_win(sig, **kwargs):
x_in = sig.copy()
tvec = kwargs.get('tvec', None)
detrendwin = kwargs.get('detrendwin', False)
onesided = kwargs.get('onesided', False)
win = kwargs['win']
nwins = kwargs['nwins']
Navr = kwargs['Navr']
noverlap = kwargs['noverlap']
Nnyquist = kwargs['Nnyquist']
detrender = fftanal._detrend_func(detrend_style=kwargs['detrend_style'])
# Define normalization constants for the window
S1 = kwargs['S1']
S2 = kwargs['S2']
ENBW = kwargs['ENBW'] # Equivalent noise bandwidth
if tvec is None:
tvec = _np.linspace(0.0, 1.0, len(x_in))
# endif
Fs = kwargs.get('Fs', fftanal.__Fs__(tvec))
nfft = nwins
nch = 1
if len(x_in.shape)>1:
_, nch = x_in.shape
# end if
# ===== #
# detrend the whole background, like most programs do it
if not detrendwin:
x_in = detrender(x_in)
# end if
# ===== #
ist = _np.arange(Navr)*(nwins-noverlap)
Xfft = _np.zeros((nch, Navr, nfft), dtype=_np.complex128)
tt = _np.zeros( (Navr,), dtype=float)
pseg = _np.zeros( (nch, Navr,), dtype=float)
for gg in _np.arange(Navr):
istart = ist[gg] #Starting point of this window
iend = istart+nwins #End point of this window
tt[gg] = _np.mean(tvec[istart:iend])
xtemp = x_in[istart:iend, ...]
# Windowed signal segment:
# - To get the most accurate spectrum, background subtract
# xtemp = win*_dsp.detrend(xtemp, type='constant')
if detrendwin:
# this is only good when the background is not evolving!
xtemp = detrender(xtemp, axes=0)
# end if
xtemp = (_np.atleast_2d(win).T*_np.ones((1,nch), dtype=xtemp.dtype))*xtemp
pseg[..., gg] = _np.trapz(xtemp**2.0, x=tvec[istart:iend], axis=0)
Xfft[..., gg,:nfft] = _np.fft.fft(xtemp, n=nfft, axis=0).T # nch, Navr, nfft
#endfor loop over fft windows
freq = _np.fft.fftfreq(nfft, 1.0/Fs)
if onesided:
freq = freq[:Nnyquist] # [Hz]
Xfft = Xfft[...,:Nnyquist]
# Real signals equally split their energy between positive and negative frequencies
Xfft[..., 1:-1] = _np.sqrt(2)*Xfft[..., 1:-1]
if nfft%2: # odd
Xfft[...,-1] = _np.sqrt(2)*Xfft[...,-1]
# endif
else:
freq = _np.fft.fftshift(freq, axes=0)
Xfft = _np.fft.fftshift(Xfft, axes=-1)
# end if
# in the case of one-channel input, don't over expand stuff
pseg = pseg.squeeze()
Xfft = Xfft.squeeze()
# Remove gain of the window function to yield the RMS Power spectrum
# in each segment (constant peak amplitude)
Xfft /= S1 # Vrms
pseg /= S2
# Compute the spectral density from the RMS spectrum
# (constant noise floor)
Xfft /= _np.sqrt(ENBW) # [V/Hz^0.5]
return tt, freq, Xfft, pseg
@staticmethod
def _plotspec(tseg, freq, Pxy_seg, logscale=False, _ax=None, vbnds=None, cmap=None, tbounds=None,
titl=r'P$_{xy}$', ylbl='freq [kHz]', xlbl='time [s]', fbounds=None): # spectrogram
spec =
|
_np.abs(Pxy_seg)
|
numpy.abs
|
#!/usr/bin/env python
import sys
import os.path
from os.path import join as PJ
import re
import json
import math
import numpy as np
from tqdm import tqdm
import igraph as ig
import jgf
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numbers
def isFloat(value):
if(value is None):
return False
try:
numericValue = float(value)
return np.isfinite(numericValue)
except ValueError:
return False
def isNumberObject(value):
return isinstance(value, numbers.Number)
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
ret = int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
ret = float(obj)
elif isinstance(obj, (np.ndarray,)):
ret = obj.tolist()
else:
ret = json.JSONEncoder.default(self, obj)
if isinstance(ret, (float)):
if math.isnan(ret):
ret = None
if isinstance(ret, (bytes, bytearray)):
ret = ret.decode("utf-8")
return ret
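# Illustrative usage (not part of the original pipeline):
# json.dumps({"values": np.arange(3)}, cls=NumpyEncoder) -> '{"values": [0, 1, 2]}'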
results = {"errors": [], "warnings": [], "brainlife": [], "datatype_tags": [], "tags": []}
def warning(msg):
global results
results['warnings'].append(msg)
#results['brainlife'].append({"type": "warning", "msg": msg})
print(msg)
def error(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
def exitApp():
global results
with open("product.json", "w") as fp:
json.dump(results, fp, cls=NumpyEncoder)
if len(results["errors"]) > 0:
sys.exit(1)
else:
sys.exit()
def exitAppWithError(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
exitApp()
configFilename = "config.json"
argCount = len(sys.argv)
if(argCount > 1):
configFilename = sys.argv[1]
outputDirectory = "output"
figuresOutputDirectory = PJ(outputDirectory,"figures")
with open("template/index.html", "r") as fd:
htmlTemplate = fd.read();
with open("template/styles.css", "r") as fd:
stylesTemplate = fd.read();
if(not os.path.exists(outputDirectory)):
os.makedirs(outputDirectory)
if(not os.path.exists(figuresOutputDirectory)):
os.makedirs(figuresOutputDirectory)
with open(configFilename, "r") as fd:
config = json.load(fd)
# "transform":"absolute", //"absolute" or "signed"
# "retain-weights":false,
# "threshold": "none"
networks = jgf.igraph.load(config["network"], compressed=True)
nullNetworks = None
if("nullmodels" in config and config["nullmodels"]):
nullNetworks = jgf.igraph.load(config["nullmodels"], compressed=True)
binsCount = 25
if(len(networks)>1):
warning("Input files have more than one network. Only the first entry was used to compose the report.")
if(len(networks)==0):
exitAppWithError("The network file should contain at least one network.")
else:
network = networks[0];
networkAttributesKeys = network.attributes()
networkAttributes = [[] for _ in networkAttributesKeys];
distributionPlots = [];
for keyIndex,key in enumerate(networkAttributesKeys):
value = network[key]
if(isFloat(value)):
networkAttributes[keyIndex].append("%.3g"%value)
else:
networkAttributes[keyIndex].append(value)
if(nullNetworks):
nullValues = []
if(isFloat(value)):
for nullNetwork in nullNetworks:
if(key in nullNetwork.attributes()):
if(isFloat(nullNetwork[key])):
nullValues.append(nullNetwork[key]);
if(nullValues):
nullAverage =
|
np.average(nullValues)
|
numpy.average
|
import h5py
import numpy as np
from numpy.fft import fft2, ifft2, fftshift, ifftshift
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
def spectral_derivative(array, wvn):
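# Fourier differentiation theorem: d/dx f <-> (i*k_x)*F[f], so a spatial derivative is a
# multiplication by (1j*wvn) in k-space followed by an inverse transform.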
return ifft2((1j * wvn * (fft2(array))))
# Load the data:
filename = 'scalar_fixed_noRand'
data_path = '../../data/{}.hdf5'.format(filename)
data_file = h5py.File(data_path, 'r')
# Loading grid array data:
x, y = data_file['grid/x'], data_file['grid/y']
X, Y = np.meshgrid(x[:], y[:])
Nx, Ny = x[:].size, y[:].size
dx, dy = x[1] - x[0], y[1] - y[0]
# k-space arrays and meshgrid:
dkx = 2 * np.pi / (Nx * dx)
dky = 2 * np.pi / (Ny * dy) # K-space spacing
kx = np.fft.fftshift(np.arange(-Nx // 2, Nx // 2) * dkx)
ky = np.fft.fftshift(np.arange(-Ny // 2, Ny // 2) * dky)
Kx, Ky = np.meshgrid(kx, ky) # K-space meshgrid
psi = data_file['initial_state/psi'][:, :]
# Calculate the mass current:
dpsi_x = spectral_derivative(psi, Kx)
dpsi_y = spectral_derivative(psi, Ky)
nv_x = (np.conj(psi) * dpsi_x - np.conj(dpsi_x) * psi) / (2 * 1j)
nv_y = (np.conj(psi) * dpsi_y - np.conj(dpsi_y) * psi) / (2 * 1j)
dnv_x_y = spectral_derivative(nv_x, Ky)
dnv_y_x = spectral_derivative(nv_y, Kx)
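# Pseudo-vorticity: z-component of the curl of the mass current, (curl(n v))_z = d(nv_y)/dx - d(nv_x)/dy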
pseudo_vort = -dnv_x_y + dnv_y_x
fig, ax = plt.subplots(1, 2, figsize=(16, 8))
for axis in ax:
axis.set_aspect('equal')
axis.set_xlabel(r'$x/\xi$')
ax[0].set_ylabel(r'$y/\xi$')
ax[0].set_title(r'$|\psi|^2$')
ax[1].set_title(r'$\nabla \times n\vec{v}$')
vort_cvals =
|
np.linspace(-500, 500, 100)
|
numpy.linspace
|
import numpy as np
# Load parameters
params = np.loadtxt("input/parameters.txt", dtype=str, delimiter=':')
# Random numbers seed
seed = int(params[params[:, 0] == "seed", 1])
rng = np.random.default_rng(seed)
# nu values
max_nu = float(params[params[:, 0] == "max nu", 1])
# Load training set
training_set = np.loadtxt("input/training_set.csv", dtype=float, delimiter=',')
# Number of samples
n_samples = training_set.shape[0]
# Number of features
n_features = training_set.shape[1]
# Number of presenters sets
n_presenters_sets = int(params[params[:, 0] == "presenters sets", 1])
# Number of detectors
n_detectors = n_features * n_presenters_sets
# Load training set cluster labels
labels = np.loadtxt("input/labels.csv", dtype=int, delimiter=',')
# Unique clusters and number of samples in each one
clusters, clusters_n_samples = np.unique(labels, return_counts=True)
# Number of clusters
n_clusters = len(clusters)
# Empty clusters
clusters_samples = []
# Assign samples to clusters
for cluster, cluster_index in zip(clusters, range(n_clusters)):
clusters_samples.append(np.empty((clusters_n_samples[cluster_index]), dtype=int))
sample_count = 0
for label, sample_index in zip(labels, range(n_samples)):
if (label == cluster):
clusters_samples[cluster_index][sample_count] = sample_index
sample_count += 1
# Build samples queue
samples_queue =
|
np.ravel(clusters_samples)
|
numpy.ravel
|
import argparse
import numpy as np
import os
import imageio
import tensorflow as tf
import matplotlib
matplotlib.use('TkAgg') # Need Tk for interactive plots.
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from scipy.special import softmax
from dirl_core import triplet_loss_distribution
from dirl_core import dirl_utils
from dirl_core import plot_utils
from sklearn.neighbors import KDTree
from collections import Counter
from scipy.special import softmax
# cpu_cores = [12,11,10,9,8,7] # Cores (numbered 0-11)
# os.system("taskset -pc {} {}".format(",".join(str(i) for i in cpu_cores), os.getpid()))
from tensorflow.python.client import device_lib
tf.ConfigProto().gpu_options.allow_growth = False
print([x.name for x in device_lib.list_local_devices() if x.device_type == 'GPU'])
def sample_data(mean, cov0=None, num_dim=2, num_classes=2, num_instances=100, class_ratio=0.5, noise_sf=0.15):
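# Samples num_instances points of dimension num_dim from num_classes Gaussians centred at
# mean[class_id]; the two class sizes follow class_ratio, and each covariance is either taken
# from cov0 or built as 0.1*I plus a random positive semi-definite perturbation scaled by noise_sf.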
std = 0.1
N_class = []
cov_mat = []
X_source = np.empty((0, num_dim), float)
Y_source = np.asarray([], dtype=int)
N_class.append(int(num_instances * class_ratio))
N_class.append(int(num_instances * (1.0 - class_ratio)))
for class_id in range(num_classes):
if cov0 is None:
cov = np.eye(num_dim) * std
cov_noise = np.random.randn(num_dim, num_dim) * noise_sf
cov_noise = np.dot(cov_noise, cov_noise.transpose())
cov += cov_noise
cov_mat.append(cov)
else:
cov_mat.append(cov0[class_id])
x, y = np.random.multivariate_normal(mean[class_id], cov_mat[class_id], N_class[class_id]).T
X = np.concatenate([x.reshape(len(x), 1), y.reshape(len(y), 1)], axis=1)
X_source =
|
np.append(X_source, X, axis=0)
|
numpy.append
|
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: <NAME>
# Created: 8th October 2015
# Last update: 24th July 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/ # this will probably change
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# Reads in monthly mean anomaly regional average time series for q, T and RH from HadISDH
# Can plot monthly or annual data
# Can plot one region or all four
# For a one region plot it can be annual, monthly or seasonal (DJF, MAM, JJA, SON)
# Plots a T q scatter with each year as the point (or MONYY for monthly)
# Colours the points by simultaneous RH value
# Plots RH colour bar to the right
# Adds Tq and TRH correlation to plot
# Adds Tq and TRH slope to plot
#
# NO MISSING DATA IN TIME SERIES!!!!
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt:
# import matplotlib.pyplot as plt
# import numpy as np
# import numpy.ma as ma
# import sys, os
# import scipy.stats as ss # for pearsonr
# import struct
# import datetime as dt
# from matplotlib.dates import date2num,num2date
# from scipy.io import netcdf
# import matplotlib.colors as mc
# import matplotlib.cm as mpl_cm
# import pdb
#
# Other:
# ReadNetCDFTS - infile function to read in netCDF timeseries, written by <NAME>
# PlotScatter - infile function to plot, written by <NAME>
#
# -----------------------
# DATA
# -----------------------
# directory for regional timeseries:
# /data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/
# files currently worked on:
# Specific humidity:
# HadISDH.landq.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
# Relative humidity:
# HadISDH.landRH.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
# Temperature:
# HadISDH.landT.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Select 'TimeRes' to be 'M' or 'Y' for month or year
# Ensure correct file paths and files
# Ensure start year (styr) and end year (edyr) are correct
# Select 'Region' to be 'A', 'G','N','T' or 'S' for All, Globe, NHemi, Tropics, SHemi
#
# run:
# python2.7 PlotTqRhScatter_OCT2015.py
# python3
# > module load scitools/default-current
# > python PlotTqRHScatter_PCT2015.py
#
# -----------------------
# OUTPUT
# -----------------------
# directory for output images:
# /data/local/hadkw/HADCRUH2/UPDATE2014/IMAGES/ANALYSIS/
# Output image file: (nowmon+nowyear= e.g., OCT2015):
# ScatterTqRH_HadISDH.landq.2.0.1.2014p_'+nowmon+nowyear+
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 3 16 April 2018
# ---------
#
# Enhancements
# python 3
# netCDF4
# masked arrays to deal with missing data
# Can now do seasonal for individual regions
#
# Changes
#
# Bug fixes
#
# Version 3 16 April 2018
# ---------
#
# Enhancements
# Updated editable info so fewer edits are required to run for the most recent version/year
#
# Changes
#
# Bug fixes
#
# Version 2 9 August 2016
# ---------
#
# Enhancements
# Can also plot T vs RH coloured by q anomaly
#
# Changes
#
# Bug fixes
#
# Version 1 8 October 2015
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# Set up python imports
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import sys, os
import scipy.stats as ss # for pearsonr
import struct
import datetime as dt
from matplotlib.dates import date2num,num2date
#from scipy.io import netcdf
import netCDF4 as nc4
import matplotlib.colors as mc
import matplotlib.cm as mpl_cm
import pdb #stop: pdb.set_trace(), start: c
import numpy.ma as ma
# Set up initial run choices
TimeRes='Y' # M=month, Y=year
Region='S' # A=All, G=Globe, N=NHemi, T=Tropics, S=SHemi
Seasons=True # If Region is G, N, T or S and Seasons is True then plot seasonally (DJF, MAM, JJA, SON); M and Y still work
homogtype='IDPHA' # 'IDPHA','PHA','PHADPD'
thenmon='JAN'
thenyear='2020'
version='4.2.0.2019f'
styr=1973
edyr=2019
nyrs=(edyr-styr)+1
nmons=(nyrs)*12
if (TimeRes == 'Y'):
ntims=nyrs
else:
ntims=nmons
YrStr=np.array(range(styr,edyr+1),dtype=str)
YrStr=np.array(([i[2:5] for i in YrStr])) # now a string array of the last two digits
# Set up directories and files
INDIR='/data/users/hadkw/WORKING_HADISDH/UPDATE'+str(edyr)+'/STATISTICS/TIMESERIES/'
OUTDIR='/data/users/hadkw/WORKING_HADISDH/UPDATE'+str(edyr)+'/IMAGES/ANALYSIS/'
In_q='HadISDH.landq.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
In_RH='HadISDH.landRH.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
In_T='HadISDH.landT.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
OutPlotTq='ScatterTqbyRH_HadISDH.'+version+'_'+TimeRes+'_'+Region
OutPlotTRH='ScatterTRHbyq_HadISDH.'+version+'_'+TimeRes+'_'+Region
if (Seasons):
OutPlotTq = 'ScatterTqbyRH_HadISDH.'+version+'_'+TimeRes+'_'+Region+'_SEASONS'
OutPlotTRH = 'ScatterTRHbyq_HadISDH.'+version+'_'+TimeRes+'_'+Region+'_SEASONS'
# Set up variables
q_arr=0 #set once file read in
T_arr=0 #set once file read in
RH_arr=0 #set once file read in
#************************************************************************
# Subroutines
#************************************************************************
# READNETCDFTS
def ReadNetCDFTS(FileName,ReadInfo,TheData):
''' Open the NetCDF File
Get the data
FileName: string containing filepath/name
TheData: an empty 2D array big enough for 1 or 4 regions worth of data
ReadInfo: list of 1 or 4 strings of variable name/s for the globe, N Hemi, Tropics and S.Hemi '''
ncf=nc4.Dataset(FileName,'r')
# ncf.variables this lists the variable names
for loo in range(len(ReadInfo)):
print(loo)
var=ncf.variables[ReadInfo[loo]]
#pdb.set_trace()
TheData[loo,:]=np.copy(var[:])
# # Maybe I've done something wrong but its reading it transposed
# TheData=np.transpose(TheData)
ncf.close()
return TheData # ReadNetCDFTS
#************************************************************************
# MakeUpSteps
def MakeUpSteps(TheArray,stepsies=9):
''' Given a max and min, make up NICE step sizes for a 9 element colourbar '''
''' Currently works with a minimum range of 0.2 and a maximum of 3.0 '''
''' Can only deal with symmetric ranges '''
''' READS: TheArray - an array of data '''
''' stepsies (OPTIONAL) - number of colours in colourbar - default 9 is NICE '''
''' RETURNS: vmin - minimum threshold of range '''
''' vmax - maximum threshold of range '''
''' bounds - stepsies linear increments through the range from vmin to vmax '''
''' strcounds - strings of the bounds for labelling the colourbar '''
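# Example: an input array whose largest |value| is 0.75 gives vmax=0.8, vmin=-0.8 and
# bounds = np.linspace(-0.8, 0.8, 9), i.e. nine levels from -0.8 to 0.8 in steps of 0.2.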
vmax=np.int(np.ceil(np.max(abs(TheArray))*10))/10.
vmin=-vmax
nsteps = stepsies
if (vmax <= 0.2):
vmax = 0.2
vmin = -0.2
elif (vmax <= 0.3):
vmax = 0.32
vmin = -0.32
elif (vmax <= 0.4):
vmax = 0.4
vmin = -0.4
elif (vmax <= 0.6):
vmax = 0.6
vmin = -0.6
elif (vmax <= 0.8):
vmax = 0.8
vmin = -0.8
elif (vmax <= 1.0):
vmax = 1.0
vmin = -1.0
elif (vmax <= 1.2):
vmax = 1.2
vmin = -1.2
elif (vmax <= 1.6):
vmax = 1.6
vmin = -1.6
elif (vmax <= 2.0):
vmax = 2.0
vmin = -2.0
elif (vmax <= 3.0):
vmax = 3.0
vmin = -3.0
# pdb.set_trace() # stop here and play
bounds=np.linspace(vmin,vmax,nsteps)
strbounds=["%4.1f" % i for i in bounds]
return vmax,vmin,strbounds,bounds
#************************************************************************
# PlotScatter
def PlotScatter(TheFileTq,TheFileTRH,TheYrStr,Thentims,Theq_arr,TheRH_arr,TheT_arr,TheReg,TheSeasons,ThePointees):
''' Plot Tq scatter with colours related to RH'''
''' Plot TRH scatter with colours related to q'''
''' Points are either the last two years YY or MONYY '''
''' Save as png and eps '''
''' TheFile - the filepath and filename for the image '''
''' TheYrStr - a string array of the last two digits for years NYrs long '''
''' Thentims - an integer for the number of points to be plotted '''
''' Theq_arr - the specific humidity data (can be monthly or yearly) '''
''' TheRH_arr - the relative humidity data (can be monthly or yearly) '''
''' TheT_arr - the temperature data (can be monthly or yearly) '''
# Load colours and set up bounds
cmap=plt.get_cmap('BrBG') # BrownBlueGreen
cmaplist=[cmap(i) for i in range(cmap.N)]
for loo in range(np.int(cmap.N/2)-30,np.int(cmap.N/2)+30):
cmaplist.remove(cmaplist[np.int(cmap.N/2)-30]) # remove the very pale colours in the middle
# #cmaplist.remove(cmaplist[(cmap.N/2)-10:(cmap.N/2)+10]) # remove the very pale colours in the middle
#
## remove the darkest and lightest (white and black) - and reverse
# for loo in range(40):
# cmaplist.remove(cmaplist[0])
## cmaplist.reverse()
## for loo in range(10):
## cmaplist.remove(cmaplist[0])
## cmaplist.reverse()
cmap=cmap.from_list('this_cmap',cmaplist,cmap.N)
# FIRST MAKE UP THE TqbyRH plot
# Call MakeUpSteps routine to get a NICE set of colourbar indices
vmax,vmin,strbounds,bounds=MakeUpSteps(TheRH_arr)
norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
ytitlee='Specific Humidity Anomalies (g kg$^{-1}$)'
xtitlee='Temperature Anomalies ($^{o}$C)'
titleesR=['Globe 70$^{o}$S to 70$^{o}$N','N. Hemisphere 20$^{o}$N to 70$^{o}$N','Tropics 20$^{o}$S to 20$^{o}$N','S. Hemisphere 70$^{o}$S to 20$^{o}$S']
titleesS=['December-February','March-May','June-August','September-November']
# set up max and min of q and T for axes - keep same for all regions
qmax=np.ceil(np.max(abs(Theq_arr))/0.1)*0.1
qmin=-qmax
tmax=np.ceil(np.max(abs(TheT_arr))/0.1)*0.1
tmin=-tmax
# set up plot - are we working with one region or four?
if (TheReg != 'A'):
# Is it to be a seasonal (four plot) scenario?
if (TheSeasons):
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
#pdb.set_trace()
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],Theq_arr[pp,vv],c=TheRH_arr[pp,vv],marker=r"$ {} $".format(ThePointees[pp,vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesS[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],Theq_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],Theq_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
#pdb.set_trace()
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=12,ha='center',rotation='vertical',va='center')
else:
# Single plot scenario
fig = plt.figure(1,figsize=(8,8))
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
ax1=plt.axes([0.1,0.1,0.75,0.8]) # left, bottom, width, height
ax1.set_xlim([tmin,tmax])
ax1.set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax1.plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax1.plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax1.plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
#print(vv,TheT_arr[0,vv],Theq_arr[0,vv],TheRH_arr[0,vv],r"$ {} $".format(Pointees[vv]))
scats=ax1.scatter(TheT_arr[0,vv],Theq_arr[0,vv],c=TheRH_arr[0,vv],marker=r"$ {} $".format(ThePointees[vv]),s=250,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
ax1.set_xlabel(xtitlee,size=14)
ax1.set_ylabel(ytitlee,size=14)
ax1.tick_params(axis='both', which='major', labelsize=14)
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=14)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=14,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
# plt.figtext(0.02,0.96,TheLetter,size=18)
if (TheReg == 'G'):
PointTitle=0
if (TheReg == 'N'):
PointTitle=1
if (TheReg == 'T'):
PointTitle=2
if (TheReg == 'S'):
PointTitle=3
ax1.set_title(titleesR[PointTitle],size=18)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[0,:],Theq_arr[0,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[0,:],Theq_arr[0,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext(0.05,0.96,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext(0.05,0.9,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext(0.05,0.84,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax1.plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
else:
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],Theq_arr[pp,vv],c=TheRH_arr[pp,vv],marker=r"$ {} $".format(ThePointees[vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesR[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],Theq_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],Theq_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
#pdb.set_trace()
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFileTq+".eps")
plt.savefig(TheFileTq+".png")
# raw_input("stop") # REALLY USEFUL TO INTERACT WITHIN SUBROUTINE ctrl C
# plt.ion()
# plt.show() can then zoom and save
#***********************************
# SECOND MAKE UP THE TRHbyq plot
# Call MakeUpSteps routine to get a NICE set of colourbar indices
vmax,vmin,strbounds,bounds=MakeUpSteps(Theq_arr)
norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
ytitlee='Relative Humidity Anomalies (%rh)'
xtitlee='Temperature Anomalies ($^{o}$C)'
titlees=['Globe 70$^{o}$S to 70$^{o}$N','N. Hemisphere 20$^{o}$N to 70$^{o}$N','Tropics 20$^{o}$S to 20$^{o}$N','S. Hemisphere 70$^{o}$S to 20$^{o}$S']
# set up max and min of RH and T for axes - keep same for all regions
rhmax=np.ceil(np.max(abs(TheRH_arr))/0.1)*0.1
rhmin=-rhmax
tmax=np.ceil(np.max(abs(TheT_arr))/0.1)*0.1
tmin=-tmax
# set up plot - are we working with one region or four?
if (TheReg != 'A'):
if (TheSeasons):
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],TheRH_arr[pp,vv],c=Theq_arr[pp,vv],marker=r"$ {} $".format(ThePointees[pp,vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesS[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],TheRH_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],TheRH_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
else:
# Single plot scenario
fig = plt.figure(1,figsize=(8,8))
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
ax1=plt.axes([0.1,0.1,0.75,0.8]) # left, bottom, width, height
ax1.set_xlim([tmin,tmax])
ax1.set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax1.plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax1.plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax1.plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot YEAR LABELS for the goods
for vv in range(Thentims):
#print(vv,TheT_arr[0,vv],Theq_arr[0,vv],TheRH_arr[0,vv],r"$ {} $".format(Pointees[vv]))
scats=ax1.scatter(TheT_arr[0,vv],TheRH_arr[0,vv],c=Theq_arr[0,vv],marker=r"$ {} $".format(ThePointees[vv]),s=250,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
ax1.set_xlabel(xtitlee,size=14)
ax1.set_ylabel(ytitlee,size=14)
ax1.tick_params(axis='both', which='major', labelsize=14)
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=14)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=14,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
# plt.figtext(0.02,0.96,TheLetter,size=18)
if (TheReg == 'G'):
PointTitle=0
if (TheReg == 'N'):
PointTitle=1
if (TheReg == 'T'):
PointTitle=2
if (TheReg == 'S'):
PointTitle=3
ax1.set_title(titlees[PointTitle],size=18)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[0,:],TheRH_arr[0,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[0,:],TheRH_arr[0,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext(0.05,0.96,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext(0.05,0.9,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext(0.05,0.84,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax1.plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
else:
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],TheRH_arr[pp,vv],c=Theq_arr[pp,vv],marker=r"$ {} $".format(ThePointees[vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titlees[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],TheRH_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],TheRH_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFileTRH+".eps")
plt.savefig(TheFileTRH+".png")
# raw_input("stop") # REALLY USEFUL TO INTERACT WITHIN SUBROUTINE ctrl C
# plt.ion()
# plt.show() can then zoom and save
return # PlotScatter
#************************************************************************
# MAIN PROGRAM
#************************************************************************
# Read in region data for each variable
if (Region == 'A'):
nReg=4
else:
nReg=1
tmpq_arr=np.empty((nReg,nmons))
tmpRH_arr=np.empty((nReg,nmons))
tmpT_arr=np.empty((nReg,nmons))
q_arr=np.empty((nReg,ntims))
RH_arr=np.empty((nReg,ntims))
T_arr=np.empty((nReg,ntims))
MyFile=INDIR+In_q
if (Region == 'A'):
ReadInfo=['glob_q_anoms','nhem_q_anoms','trop_q_anoms','shem_q_anoms']
elif (Region == 'G'):
ReadInfo=['glob_q_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_q_anoms']
elif (Region == 'T'):
ReadInfo=['trop_q_anoms']
elif (Region == 'S'):
ReadInfo=['shem_q_anoms']
tmpq_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpq_arr)
MyFile=INDIR+In_RH
if (Region == 'A'):
ReadInfo=['glob_RH_anoms','nhem_RH_anoms','trop_RH_anoms','shem_RH_anoms']
elif (Region == 'G'):
ReadInfo=['glob_RH_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_RH_anoms']
elif (Region == 'T'):
ReadInfo=['trop_RH_anoms']
elif (Region == 'S'):
ReadInfo=['shem_RH_anoms']
tmpRH_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpRH_arr)
MyFile=INDIR+In_T
if (Region == 'A'):
ReadInfo=['glob_T_anoms','nhem_T_anoms','trop_T_anoms','shem_T_anoms']
elif (Region == 'G'):
ReadInfo=['glob_T_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_T_anoms']
elif (Region == 'T'):
ReadInfo=['trop_T_anoms']
elif (Region == 'S'):
ReadInfo=['shem_T_anoms']
tmpT_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpT_arr)
#pdb.set_trace()
# If annual - convert monthly mean anomalies to annual mean anomalies
# THERE SHOULD BE NO MISSING DATA IN THESE!!!!
# However, there are because of April 2015 so we need to set up as masked array.
tmpq_arr = ma.masked_where(tmpq_arr < -1000,tmpq_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
tmpT_arr = ma.masked_where(tmpT_arr < -1000,tmpT_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
tmpRH_arr = ma.masked_where(tmpRH_arr < -1000,tmpRH_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
if (Seasons):
SeasonPointer = np.reshape(np.arange(nmons),(nyrs,12))
DJF = np.reshape(SeasonPointer[:,(0,1,11,)],nyrs*3)
MAM = np.reshape(SeasonPointer[:,(2,3,4,)],nyrs*3)
JJA =
|
np.reshape(SeasonPointer[:,(5,6,7,)],nyrs*3)
|
numpy.reshape
|
import copy
import logging
import operator
import random
import numpy as np
import torch
import wandb
from fedml_api.utils.client import Client
from fedml_api.utils.testInfo import TestInfo
class FedTiAPI(object):
def __init__(self, dataset, device, args, model_trainer):
self.device = device
self.args = args
[train_data_num, test_data_num, train_data_global, test_data_global,
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num] = dataset
logging.info("inside of fedti init, client num:" + str(self.args.client_num_in_total))
self.train_global = train_data_global
self.test_global = test_data_global
self.val_global = None
self.train_data_num_in_total = train_data_num
self.test_data_num_in_total = test_data_num
self.client_list = []
self.train_data_local_num_dict = train_data_local_num_dict
self.train_data_local_dict = train_data_local_dict
self.test_data_local_dict = test_data_local_dict
self.model_trainer = model_trainer
self._setup_clients(train_data_local_num_dict, train_data_local_dict, test_data_local_dict, model_trainer)
def _setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict, model_trainer):
logging.info("############setup_clients (START)#############")
logging.info("client_num_in_total:" + str(self.args.client_num_in_total))
for client_idx in range(self.args.client_num_in_total):
c = Client(client_idx, train_data_local_dict[client_idx], test_data_local_dict[client_idx],
train_data_local_num_dict[client_idx], self.args, self.device, model_trainer, 0, 0, 0, 0)
self.client_list.append(c)
logging.info("number of clients in client_list:" + str(len(self.client_list)))
logging.info("############setup_clients (END)#############")
def train(self, is_show_info):
return self.train_for_truthfulness(truth_ratio=1, is_show_info=is_show_info, is_test_truthfulness=False)
# used to test truthfulness
def train_for_truthfulness(self, truth_ratio, is_show_info, is_test_truthfulness):
w_global = self.model_trainer.get_model_params()
np.random.seed(self.args.comm_round)
payment_list = []
bidding_price_list = []
running_time_list = []
client_utility_list = []
social_cost_list = []
server_cost_list = []
truth_index = 0
for round_idx in range(self.args.comm_round):
logging.info("################Communication round : {}".format(round_idx))
w_locals = []
"""
for scalability: following the original FedAvg algorithm, we uniformly sample a fraction of clients in each round.
Instead of changing the 'Client' instances, our implementation keeps the 'Client' instances and then updates their local dataset
"""
# bids init
for client in self.client_list:
client.update_bid(training_intensity=np.random.randint(50, 100), cost=
|
np.random.random()
|
numpy.random.random
|