# ===== repo: adamrvfisher/TechnicalAnalysisLibrary | file: PriceRelativeRemoteSignalATROptimizerTwoAsset.py =====
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
Empty = []
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Input
Ticker1 = 'UVXY'
Ticker2 = '^VIX'
#Remote Signal
Ticker3 = '^VIX'
#Here we go
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
#Remote Signal
Asset3 = YahooGrabber(Ticker3)
#Match lengths
#Trimmer
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
Asset3 = Asset3[-len(Asset2):]
#Asset2 = Asset2[-600:]
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Prepare the remote controller
Asset3['LogRet'] = np.log(Asset3['Adj Close']/Asset3['Adj Close'].shift(1))
Asset3['LogRet'] = Asset3['LogRet'].fillna(0)
#window = 7
##Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
#Asset3['Method1'] = Asset3['High'] - Asset3['Low']
#Asset3['Method2'] = abs((Asset3['High'] - Asset3['Adj Close'].shift(1)))
#Asset3['Method3'] = abs((Asset3['Low'] - Asset3['Adj Close'].shift(1)))
#Asset3['Method1'] = Asset3['Method1'].fillna(0)
#Asset3['Method2'] = Asset3['Method2'].fillna(0)
#Asset3['Method3'] = Asset3['Method3'].fillna(0)
#Asset3['TrueRange'] = Asset3[['Method1','Method2','Method3']].max(axis = 1)
#Asset3['AverageTrueRange'] = (Asset3['TrueRange'].rolling(window = window,
# center=False).sum())/window
#
##Retrim Assets
#Asset1 = Asset1[window:]
#Asset2 = Asset2[window:]
#Asset3 = Asset3[window:]
#Brute Force Optimization
iterations = range(0, 3000)
for i in iterations:
Counter = Counter + 1
a = rand.random()
b = 1 - a
c = rand.random()
d = 1 - c
e = rand.randint(3,20)
window = int(e)
#Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['Method1'] = Asset3['High'] - Asset3['Low']
Asset3['Method2'] = abs((Asset3['High'] - Asset3['Adj Close'].shift(1)))
Asset3['Method3'] = abs((Asset3['Low'] - Asset3['Adj Close'].shift(1)))
Asset3['Method1'] = Asset3['Method1'].fillna(0)
Asset3['Method2'] = Asset3['Method2'].fillna(0)
Asset3['Method3'] = Asset3['Method3'].fillna(0)
Asset3['TrueRange'] = Asset3[['Method1','Method2','Method3']].max(axis = 1)
Asset3['AverageTrueRange'] = (Asset3['TrueRange'].rolling(window = window,
center=False).sum())/window
Asset1['Position'] = a
Asset1['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
c,a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = b
Asset2['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
d,b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
# Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = (Portfolio['Asset1Pass']) + (Portfolio['Asset2Pass'])
# Portfolio['LongShort'][-180:].cumsum().apply(np.exp).plot(grid=True,
# figsize=(8,5))
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.25):
continue
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .002:
continue
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[0] = Emptyseries.values
Dataset[i] = Emptyseries.values
Empty[:] = []
z1 = Dataset.iloc[6]
w1 = np.percentile(z1, 80)
v1 = [] #stores the sharpe/MaxDD scores above the 80th percentile cutoff (top performers)
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[6]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[6]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
print(Dataset[k])
window = int((Dataset[kfloat][4]))
#Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['Method1'] = Asset3['High'] - Asset3['Low']
Asset3['Method2'] = abs((Asset3['High'] - Asset3['Adj Close'].shift(1)))
Asset3['Method3'] = abs((Asset3['Low'] - Asset3['Adj Close'].shift(1)))
Asset3['Method1'] = Asset3['Method1'].fillna(0)
Asset3['Method2'] = Asset3['Method2'].fillna(0)
Asset3['Method3'] = Asset3['Method3'].fillna(0)
Asset3['TrueRange'] = Asset3[['Method1','Method2','Method3']].max(axis = 1)
Asset3['AverageTrueRange'] = (Asset3['TrueRange'].rolling(window = window,
center=False).sum())/window
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset1['Position'] = (Dataset[kfloat][0])
Asset1['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
Dataset[kfloat][2],Dataset[kfloat][0])
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Asset2['Position'] = (Dataset[kfloat][1])
Asset2['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
Dataset[kfloat][3],Dataset[kfloat][1])
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)
Portfolio['Asset2Pass'] = Asset2['Pass'] #* (-1)
#Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
#conversionfactor = Portfolio['PriceRelative'][-1]
print(max(drawdown2))
#pd.to_pickle(Portfolio, 'VXX:UVXY')
# license: apache-2.0
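# --- Illustrative sketch (not part of the original script) ---
# The loop above rebuilds the True Range / Average True Range columns on every
# iteration. A minimal, self-contained version of that calculation is sketched
# below; the function name and the assumption of a DataFrame carrying
# 'High', 'Low' and 'Adj Close' columns are ours, not the author's.
import pandas as pd

def average_true_range(ohlc, window=7):
    # True range: the largest of (high - low), |high - prev close|, |low - prev close|
    method1 = ohlc['High'] - ohlc['Low']
    method2 = (ohlc['High'] - ohlc['Adj Close'].shift(1)).abs()
    method3 = (ohlc['Low'] - ohlc['Adj Close'].shift(1)).abs()
    true_range = pd.concat([method1, method2, method3], axis=1).max(axis=1)
    # The script divides a rolling sum by the window, i.e. a simple moving average
    return true_range.rolling(window=window, center=False).sum() / window

# Usage in the spirit of the optimizer above:
# Asset3['AverageTrueRange'] = average_true_range(Asset3, window=7)
# a position switches from weight a to weight c when yesterday's TrueRange
# exceeds yesterday's AverageTrueRange.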

# ===== repo: neutrons/FastGR | file: addie/processing/idl/table_handler.py =====
from __future__ import (absolute_import, division, print_function)
#import re
import glob
import os
import numpy as np
from qtpy.QtCore import (Qt)
from qtpy.QtGui import (QCursor)
from qtpy.QtWidgets import (QFileDialog, QMenu, QMessageBox, QTableWidgetSelectionRange)
import addie.processing.idl.populate_master_table
from addie.processing.idl.export_table import ExportTable
from addie.processing.idl.import_table import ImportTable
from addie.utilities.file_handler import FileHandler
from addie.processing.idl.populate_background_widgets import PopulateBackgroundWidgets
from addie.processing.idl.sample_environment_handler import SampleEnvironmentHandler
import addie.processing.idl.step2_gui_handler
from addie.widgets.filedialog import get_save_file
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
class TableHandler(object):
list_selected_row = None
def __init__(self, parent=None):
self.parent = parent
def retrieve_list_of_selected_rows(self):
self.list_selected_row = []
for _row_index in range(self.parent.postprocessing_ui.table.rowCount()):
_widgets = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()[1]
if _selected_widget.checkState() == Qt.Checked:
_entry = self._collect_metadata(row_index=_row_index)
self.list_selected_row.append(_entry)
def _collect_metadata(self, row_index=-1):
if row_index == -1:
return []
_name = self.retrieve_item_text(row_index, 1)
_runs = self.retrieve_item_text(row_index, 2)
_sample_formula = self.retrieve_item_text(row_index, 3)
_mass_density = self.retrieve_item_text(row_index, 4)
_radius = self.retrieve_item_text(row_index, 5)
_packing_fraction = self.retrieve_item_text(row_index, 6)
_sample_shape = self._retrieve_sample_shape(row_index)
_do_abs_correction = self._retrieve_do_abs_correction(row_index)
_metadata = {'name': _name,
'runs': _runs,
'sample_formula': _sample_formula,
'mass_density': _mass_density,
'radius': _radius,
'packing_fraction': _packing_fraction,
'sample_shape': _sample_shape,
'do_abs_correction': _do_abs_correction}
return _metadata
def _retrieve_sample_shape(self, row_index):
_widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
_selected_index = _widget.currentIndex()
_sample_shape = _widget.itemText(_selected_index)
return _sample_shape
def _retrieve_do_abs_correction(self, row_index):
_widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 8).children()[1]
if (_widget.checkState() == Qt.Checked):
return 'go'
else:
return 'nogo'
def current_row(self):
_row = self.parent.postprocessing_ui.table.currentRow()
return _row
def right_click(self, position=None):
_duplicate_row = -1
_plot_sofq = -1
_remove_row = -1
_new_row = -1
_copy = -1
_paste = -1
_cut = -1
_refresh_table = -1
_clear_table = -1
# _import = -1
# _export = -1
_check_all = -1
_uncheck_all = -1
_undo = -1
_redo = -1
_plot_sofq_diff_first_run_row = -1
_plot_sofq_diff_average_row = -1
_plot_cryostat = -1
_plot_furnace = -1
_invert_selection = -1
menu = QMenu(self.parent)
if self.parent.table_selection_buffer == {}:
paste_status = False
else:
paste_status = True
if (self.parent.postprocessing_ui.table.rowCount() > 0):
_undo = menu.addAction("Undo")
_undo.setEnabled(self.parent.undo_button_enabled)
_redo = menu.addAction("Redo")
_redo.setEnabled(self.parent.redo_button_enabled)
menu.addSeparator()
_copy = menu.addAction("Copy")
_paste = menu.addAction("Paste")
self._paste_menu = _paste
_paste.setEnabled(paste_status)
_cut = menu.addAction("Clear")
menu.addSeparator()
_check_all = menu.addAction("Check All")
_uncheck_all = menu.addAction("Uncheck All")
menu.addSeparator()
_invert_selection = menu.addAction("Inverse Selection")
menu.addSeparator()
_new_row = menu.addAction("Insert Blank Row")
if (self.parent.postprocessing_ui.table.rowCount() > 0):
_duplicate_row = menu.addAction("Duplicate Row")
_remove_row = menu.addAction("Remove Row(s)")
menu.addSeparator()
_plot_menu = menu.addMenu('Plot')
_plot_sofq = _plot_menu.addAction("S(Q) ...")
_plot_sofq_diff_first_run_row = _plot_menu.addAction("S(Q) Diff (1st run)...")
_plot_sofq_diff_average_row = _plot_menu.addAction("S(Q) Diff (Avg.)...")
_temp_menu = _plot_menu.addMenu("Temperature")
_plot_cryostat = _temp_menu.addAction("Cryostat...")
_plot_furnace = _temp_menu.addAction("Furnace...")
menu.addSeparator()
_refresh_table = menu.addAction("Refresh/Reset Table")
_clear_table = menu.addAction("Clear Table")
action = menu.exec_(QCursor.pos())
self.current_row = self.current_row()
if action == _undo:
self.parent.action_undo_clicked()
elif action == _redo:
self.parent.action_redo_clicked()
elif action == _copy:
self._copy()
elif action == _paste:
self._paste()
elif action == _cut:
self._cut()
elif action == _duplicate_row:
self._duplicate_row()
elif action == _plot_sofq:
self._plot_sofq()
elif action == _plot_sofq_diff_first_run_row:
self._plot_sofq_diff_first_run_row()
elif action == _plot_sofq_diff_average_row:
self._plot_sofq_diff_average_row()
elif action == _plot_cryostat:
self._plot_temperature(samp_env_choice='cryostat')
elif action == _plot_furnace:
self._plot_temperature(samp_env_choice='furnace')
elif action == _invert_selection:
self._inverse_selection()
elif action == _new_row:
self._new_row()
elif action == _remove_row:
self._remove_selected_rows()
elif action == _refresh_table:
self._refresh_table()
elif action == _clear_table:
self._clear_table()
elif action == _check_all:
self.check_all()
elif action == _uncheck_all:
self.uncheck_all()
def _import(self):
_current_folder = self.parent.current_folder
[_table_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
caption="Select File",
directory=_current_folder,
filter=("text (*.txt);; All Files (*.*)"))
if not _table_file:
return
if isinstance(_table_file, tuple):
_table_file = _table_file[0]
new_path = os.path.dirname(_table_file)
self.parent.current_folder = new_path
self._clear_table()
_import_handler = ImportTable(filename=_table_file, parent=self.parent)
_import_handler.run()
_pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
_pop_back_wdg.run()
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _export(self):
_current_folder = self.parent.current_folder
_table_file, _ = get_save_file(parent=self.parent,
caption="Select File",
directory=_current_folder,
filter={'text (*.txt)':'txt', 'All Files (*.*)':''})
if not _table_file:
return
if isinstance(_table_file, tuple):
_table_file = _table_file[0]
_file_handler = FileHandler(filename=_table_file)
_file_handler.check_file_extension(ext_requested='txt')
_table_file = _file_handler.filename
_export_handler = ExportTable(parent=self.parent,
filename=_table_file)
_export_handler.run()
def _copy(self):
_selection = self.parent.postprocessing_ui.table.selectedRanges()
_selection = _selection[0]
left_column = _selection.leftColumn()
right_column = _selection.rightColumn()
top_row = _selection.topRow()
bottom_row = _selection.bottomRow()
self.parent.table_selection_buffer = {'left_column': left_column,
'right_column': right_column,
'top_row': top_row,
'bottom_row': bottom_row}
self._paste_menu.setEnabled(True)
def _paste(self, _cut=False):
_copy_selection = self.parent.table_selection_buffer
_copy_left_column = _copy_selection['left_column']
# make sure selection start at the same column
_paste_selection = self.parent.postprocessing_ui.table.selectedRanges()
_paste_left_column = _paste_selection[0].leftColumn()
if not (_copy_left_column == _paste_left_column):
QMessageBox.warning(self.parent,
"Check copy/paste selection!",
"Check your selection! ")
return
_copy_right_column = _copy_selection["right_column"]
_copy_top_row = _copy_selection["top_row"]
_copy_bottom_row = _copy_selection["bottom_row"]
_paste_top_row = _paste_selection[0].topRow()
index = 0
for _row in range(_copy_top_row, _copy_bottom_row+1):
_paste_row = _paste_top_row + index
for _column in range(_copy_left_column, _copy_right_column + 1):
if _column in np.arange(1, 7):
if _cut:
_item_text = ''
else:
_item_text = self.retrieve_item_text(_row, _column)
self.paste_item_text(_paste_row, _column, _item_text)
if _column == 7:
if _cut:
_widget_index = 0
else:
_widget_index = self.retrieve_sample_shape_index(_row)
self.set_widget_index(_widget_index, _paste_row)
if _column == 8:
if _cut:
_widget_state = Qt.Unchecked
else:
_widget_state = self.retrieve_do_abs_correction_state(_row)
self.set_widget_state(_widget_state, _paste_row)
index += 1
def _inverse_selection(self):
selected_range = self.parent.postprocessing_ui.table.selectedRanges()
nbr_column = self.parent.postprocessing_ui.table.columnCount()
self.select_all(status=True)
# inverse selected rows
for _range in selected_range:
_range.leftColumn = 0
_range.rightColumn = nbr_column-1
self.parent.postprocessing_ui.table.setRangeSelected(_range, False)
def select_all(self, status=True):
nbr_row = self.parent.postprocessing_ui.table.rowCount()
nbr_column = self.parent.postprocessing_ui.table.columnCount()
_full_range = QTableWidgetSelectionRange(0, 0, nbr_row-1, nbr_column-1)
self.parent.postprocessing_ui.table.setRangeSelected(_full_range, status)
def check_all(self):
self.select_first_column(status=True)
def uncheck_all(self):
self.select_first_column(status=False)
def select_row(self, row=-1, status=True):
nbr_column = self.parent.postprocessing_ui.table.columnCount()
_range = QTableWidgetSelectionRange(row, 0, row, nbr_column-1)
self.parent.postprocessing_ui.table.setRangeSelected(_range, status)
def check_row(self, row=-1, status=True):
_widgets = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()[1]
_selected_widget.setChecked(status)
def select_first_column(self, status=True):
for _row in range(self.parent.postprocessing_ui.table.rowCount()):
_widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
_selected_widget.setChecked(status)
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def check_selection_status(self, state, row):
list_ranges = self.parent.postprocessing_ui.table.selectedRanges()
for _range in list_ranges:
bottom_row = _range.bottomRow()
top_row = _range.topRow()
range_row = list(range(top_row, bottom_row + 1))
for _row in range_row:
_widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
_selected_widget.setChecked(state)
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _cut(self):
self._copy()
self._paste(_cut=True)
def _duplicate_row(self):
_row = self.current_row
metadata_to_copy = self._collect_metadata(row_index=_row)
o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
o_populate.add_new_row(metadata_to_copy, row=_row)
def _plot_fetch_files(self, file_type='SofQ'):
if file_type == 'SofQ':
search_dir = './SofQ'
prefix = 'NOM_'
suffix = 'SQ.dat'
elif file_type == 'nexus':
cwd = os.getcwd()
search_dir = cwd[:cwd.find('shared')]+'/nexus'
prefix = 'NOM_'
suffix = '.nxs.h5'
#ipts = int(re.search(r"IPTS-(\d*)\/", os.getcwd()).group(1))
_row = self.current_row
_row_runs = self._collect_metadata(row_index=_row)['runs'].split(',')
output_list = list()
file_list = [a_file for a_file in glob.glob(search_dir+'/'+prefix+'*')]
for run in _row_runs:
the_file = search_dir+'/'+prefix+str(run)+suffix
if the_file in file_list:
output_list.append({'file': the_file, 'run': run})
return output_list
def _plot_fetch_data(self):
file_list = self._plot_fetch_files(file_type='SofQ')
for data in file_list:
with open(data['file'], 'r') as handle:
x, y, e = np.loadtxt(handle, unpack=True)
data['x'] = x
data['y'] = y
return file_list
def _plot_datasets(self, datasets, shift_value=1.0, cmap_choice='inferno', title=None):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# configure plot
cmap = plt.get_cmap(cmap_choice)
cNorm = colors.Normalize(vmin=0, vmax=len(datasets))
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)
mrks = [0, -1]
# plot data
shifter = 0.0
for idx, data in enumerate(datasets):
data['y'] += shifter
colorVal = scalarMap.to_rgba(idx)
if 'linestyle' in data:
ax.plot(data['x'], data['y'], data['linestyle']+'o', label=data['run'], color=colorVal, markevery=mrks,)
else:
ax.plot(data['x'], data['y'], label=data['run'], color=colorVal, markevery=mrks)
shifter += shift_value
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], title='Runs', loc='center left', bbox_to_anchor=(1, 0.5))
if title:
fig.suptitle(title)
plt.show()
def _plot_sofq(self):
sofq_datasets = self._plot_fetch_data()
self._plot_datasets(sorted(sofq_datasets, key=lambda k: int(k['run'])), title='S(Q)')
def _plot_sofq_diff_first_run_row(self):
sofq_datasets = self._plot_fetch_data()
sofq_base = dict(sofq_datasets[0])
for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
sofq['y'] = sofq['y'] - sofq_base['y']
self._plot_datasets(sofq_datasets, shift_value=0.2, title='S(Q) - S(Q) for run '+sofq_base['run'])
def _plot_sofq_diff_average_row(self):
sofq_datasets = self._plot_fetch_data()
sofq_data = [sofq['y'] for sofq in sofq_datasets]
sofq_avg = np.average(sofq_data, axis=0)
for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
sofq['y'] = sofq['y'] - sofq_avg
self._plot_datasets(sofq_datasets, shift_value=0.2, title='S(Q) - <S(Q)>')
def _plot_temperature(self, samp_env_choice=None):
file_list = self._plot_fetch_files(file_type='nexus')
samp_env = SampleEnvironmentHandler(samp_env_choice)
datasets = list()
for data in file_list:
samp_x, samp_y = samp_env.getDataFromFile(data['file'], 'samp')
envi_x, envi_y = samp_env.getDataFromFile(data['file'], 'envi')
print(data['file'])
datasets.append({'run': data['run'] + '_samp', 'x': samp_x, 'y': samp_y, 'linestyle': '-'})
datasets.append({'run': None, 'x': envi_x, 'y': envi_y, 'linestyle': '--'})
self._plot_datasets(sorted(datasets, key=lambda k: k['run']),
shift_value=0.0, title='Temperature: '+samp_env_choice)
def _new_row(self):
_row = self.current_row
if _row == -1:
_row = 0
o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
_metadata = o_populate.empty_metadata()
o_populate.add_new_row(_metadata, row=_row)
def _remove_selected_rows(self):
selected_range = self.parent.postprocessing_ui.table.selectedRanges()
_nbr_row_removed = 0
_local_nbr_row_removed = 0
for _range in selected_range:
_top_row = _range.topRow()
_bottom_row = _range.bottomRow()
nbr_row = _bottom_row - _top_row + 1
for i in np.arange(nbr_row):
self._remove_row(row=_top_row - _nbr_row_removed)
_local_nbr_row_removed += 1
_nbr_row_removed = _local_nbr_row_removed
_pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
_pop_back_wdg.run()
def _remove_row(self, row=-1):
if row == -1:
row = self.current_row
self.parent.postprocessing_ui.table.removeRow(row)
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _refresh_table(self):
self.parent.populate_table_clicked()
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _clear_table(self):
_number_of_row = self.parent.postprocessing_ui.table.rowCount()
self.parent.postprocessing_ui.table.setSortingEnabled(False)
for _row in np.arange(_number_of_row):
self.parent.postprocessing_ui.table.removeRow(0)
self.parent.postprocessing_ui.background_line_edit.setText("")
self.parent.postprocessing_ui.background_comboBox.clear()
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def set_widget_state(self, _widget_state, _row):
_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
_widget.setCheckState(_widget_state)
def retrieve_do_abs_correction_state(self, _row):
_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
return _widget.checkState()
def set_widget_index(self, _widget_index, _row):
_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 7)
_widget.setCurrentIndex(_widget_index)
def paste_item_text(self, _row, _column, _item_text):
_item = self.parent.postprocessing_ui.table.item(_row, _column)
_item.setText(_item_text)
def retrieve_sample_shape_index(self, row_index):
_widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
_selected_index = _widget.currentIndex()
return _selected_index
def retrieve_item_text(self, row, column):
_item = self.parent.postprocessing_ui.table.item(row, column)
if _item is None:
return ''
else:
return str(_item.text())
def name_search(self):
nbr_row = self.parent.postprocessing_ui.table.rowCount()
if nbr_row == 0:
return
_string = str(self.parent.postprocessing_ui.name_search.text()).lower()
if _string == '':
self.select_all(status=False)
else:
for _row in range(nbr_row):
_text_row = str(self.parent.postprocessing_ui.table.item(_row, 1).text()).lower()
if _string in _text_row:
self.select_row(row=_row, status=True)
# license: mit
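# --- Illustrative sketch (not part of the original module) ---
# _plot_datasets above colours one curve per run by sampling a matplotlib
# colormap through a Normalize/ScalarMappable pair. A stripped-down,
# self-contained version of that idea (synthetic data, names are ours):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm

def plot_with_colormap(curves, cmap_choice='inferno'):
    fig, ax = plt.subplots()
    cnorm = colors.Normalize(vmin=0, vmax=len(curves))
    scalar_map = cm.ScalarMappable(norm=cnorm, cmap=plt.get_cmap(cmap_choice))
    for idx, (x, y, label) in enumerate(curves):
        ax.plot(x, y, label=label, color=scalar_map.to_rgba(idx))
    ax.legend(title='Runs', loc='center left', bbox_to_anchor=(1, 0.5))
    return fig

# x = np.linspace(0, 10, 200)
# plot_with_colormap([(x, np.sin(x) + 0.5 * i, 'run %d' % i) for i in range(5)])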

# ===== repo: ryanjmccall/nupic.research | file: union_pooling/union_pooling/experiments/union_sdr_overlap/plot_experiment.py =====
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import csv
import os
import sys
import matplotlib.pyplot as plt
import numpy
from experiments.capacity import data_utils
_OVERLAPS_FILE_NAME = "/overlaps.csv"
def main(inputPath, csvOutputPath, imgOutputPath):
# remove existing /overlaps.csv if present
if os.path.exists(csvOutputPath + _OVERLAPS_FILE_NAME):
os.remove(csvOutputPath + _OVERLAPS_FILE_NAME)
if not os.path.exists(csvOutputPath):
os.makedirs(csvOutputPath)
if not os.path.exists(imgOutputPath):
os.makedirs(imgOutputPath)
print "Computing Union SDR overlap between SDR traces in following dir:"
print inputPath + "\n"
files = os.listdir(inputPath)
if len(files) != 2:
print "Found {0} files at input path {1} - Requires exactly 2.".format(
len(files), inputPath)
sys.exit(1)
pathNoLearn = inputPath + "/" + files[0]
pathLearn = inputPath + "/" + files[1]
print "Comparing files..."
print pathLearn
print pathNoLearn + "\n"
# Load source A
with open(pathLearn, "rU") as fileA:
csvReader = csv.reader(fileA)
dataA = [line for line in csvReader]
unionSizeA = [len(datum) for datum in dataA]
# Load source B
with open(pathNoLearn, "rU") as fileB:
csvReader = csv.reader(fileB)
dataB = [line for line in csvReader]
unionSizeB = [len(datum) for datum in dataB]
assert len(dataA) == len(dataB)
# To display all plots on the same y scale
yRangeMax = 1.05 * max(max(unionSizeA), max(unionSizeB))
# Plot union size for data A
x = [i for i in xrange(len(dataA))]
stdDevs = None
title = "Union Size with Learning vs. Time"
data_utils.getErrorbarFigure(title, x, unionSizeA, stdDevs, "Time",
"Union Size", yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
# Plot union size for data B and save image
title = "Union Size without Learning vs. Time"
data_utils.getErrorbarFigure(title, x, unionSizeB, stdDevs, "Time",
"Union Size", yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
with open(csvOutputPath + _OVERLAPS_FILE_NAME, "wb") as outputFile:
csvWriter = csv.writer(outputFile)
overlaps = [getOverlap(dataA[i], dataB[i]) for i in xrange(len(dataA))]
csvWriter.writerow(overlaps)
outputFile.flush()
# Plot overlap and save image
title = "Learn-NoLearn Union SDR Overlap vs. Time"
data_utils.getErrorbarFigure(title, x, overlaps, stdDevs, "Time","Overlap",
yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
raw_input("Press any key to exit...")
def getOverlap(listA, listB):
arrayA = numpy.array(listA)
arrayB = numpy.array(listB)
intersection = numpy.intersect1d(arrayA, arrayB)
return len(intersection)
def _getArgs():
"""
Parses and returns command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="Path to unionSdrTrace .csv files")
parser.add_argument("--csvOutput", help="Path for csv output.")
parser.add_argument("--imgOutput", help="Path for image output.")
return parser.parse_args()
if __name__ == "__main__":
args = _getArgs()
main(args.input, args.csvOutput, args.imgOutput)
# license: gpl-3.0
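# --- Illustrative sketch (not part of the original script) ---
# getOverlap above counts the active-cell indices shared by two union SDRs via
# numpy.intersect1d. A normalized variant is sometimes more convenient when the
# two SDRs have different sizes; the helper below is ours, not part of the script.
import numpy

def overlap_fraction(list_a, list_b):
    if not list_a or not list_b:
        return 0.0
    shared = len(numpy.intersect1d(numpy.array(list_a), numpy.array(list_b)))
    return shared / float(min(len(list_a), len(list_b)))

# e.g. overlap_fraction([2, 5, 9, 14, 21], [2, 9, 21, 33]) -> 0.75 (3 shared of 4)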

# ===== repo: AhmedHani/Kaggle-Machine-Learning-Competitions | file: Medium/Toxic Comment Classification Challenge/train_ffnn.py =====
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Embedding, Input
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint
max_features = 20000
maxlen = 100
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
train = train.sample(frac=1)
list_sentences_train = train["comment_text"].fillna("CVxTz").values
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
y = train[list_classes].values
list_sentences_test = test["comment_text"].fillna("CVxTz").values
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
list_tokenized_test = tokenizer.texts_to_sequences(list_sentences_test)
X_t = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)
X_te = sequence.pad_sequences(list_tokenized_test, maxlen=maxlen)
# license: mit
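# --- Illustrative sketch (not part of the original script) ---
# The script above stops after tokenizing and padding the comments. Below is a
# minimal model that consumes X_t and the six-label target y, built from the
# layers already imported; the hyperparameters (embedding size, LSTM units,
# dropout rates) are illustrative guesses, not values from the original repository.
def get_model():
    inp = Input(shape=(maxlen,))
    x = Embedding(max_features, 128)(inp)
    x = Bidirectional(LSTM(50, return_sequences=True))(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(0.1)(x)
    x = Dense(50, activation="relu")(x)
    x = Dropout(0.1)(x)
    out = Dense(6, activation="sigmoid")(x)  # one sigmoid per toxicity label
    model = Model(inputs=inp, outputs=out)
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model

# model = get_model()
# model.fit(X_t, y, batch_size=32, epochs=2, validation_split=0.1,
#           callbacks=[EarlyStopping(monitor="val_loss", patience=1)])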

# ===== repo: ecervera/mindstorms-nb | file: nxt/functions.py =====
import json
import shutil
from IPython.core.display import display, HTML
def configure(n):
config = {
'version' : 'nxt',
'number' : n
}
with open('../task/robot_config.json', 'w') as f:
json.dump(config, f)
shutil.copyfile('./functions.py', '../task/functions.py')
print("\x1b[32mConfiguració completa, podeu continuar.\x1b[0m")
display(HTML('<p>Ara ja podeu continuar, començant la primera tasca de programació: provareu el robot a vore si respon i es mou correctament.</p><h2><a href="../task/index.ipynb" target="_blank">>>> Prova de connexió</a></h2>'))
def next_notebook(nb):
if nb=='moviments':
display(HTML('<p>Ja podeu passar a la pàgina següent, on aprendreu a controlar els moviments del robot:</p><h2><a href="motors.ipynb" target="_blank">>>> Moviments del robot</a></h2>'))
elif nb=='quadrat':
display(HTML('<p>Ara ja podeu continuar, bona sort!</p><h2><a href="quadrat.ipynb" target="_blank">>>> Exercici de moviment</a></h2>'))
elif nb=='sensors':
display(HTML('<p>Fins ara heu aprés a controlar el moviment del robot, i també a programar bucles, no està gens malament!</p><p>Per a continuar, anem a vore els altres components del robot, els sensors, que ens permetran fer programes encara més sofisticats.</p><h2><a href="sensors.ipynb" target="_blank">>>> Sensors</a></h2>'))
elif nb=='touch':
display(HTML('<p>Ara ja podeu passar al primer exercici amb sensors:</p><h2><a href="touch.ipynb" target="_blank">>>> Tacte</a></h2>'))
elif nb=='navigation':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="navigation.ipynb" target="_blank">>>> Exercici de navegació</a></h2>'))
elif nb=='sound':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="sound.ipynb" target="_blank">>>> Sensor de so</a></h2>'))
elif nb=='light':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="light.ipynb" target="_blank">>>> Sensor de llum</a></h2>'))
elif nb=='ultrasonic':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="ultrasonic.ipynb" target="_blank">>>> Sensor ultrasònic</a></h2>'))
elif nb=='sumo':
display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="sumo.ipynb" target="_blank">>>> El Gran Repte</a></h2>'))
else:
pass
import nxt.bluesock
import nxt.motor
import math
import time
from bluetooth.btcommon import BluetoothError
def connect():
global brick
global mB; global mC
global s1; global s2; global s3; global s4
global tempo
global connected_robot
with open('robot_config.json', 'r') as f:
config = json.load(f)
n = config['number']
try:
address = {2: '00:16:53:0A:9B:72', \
3: '00:16:53:0A:9D:F2', \
4: '00:16:53:0A:5C:72',
5: '00:16:53:08:D5:59', \
6: '00:16:53:08:DE:51', \
7: '00:16:53:0A:5A:B4', \
8: '00:16:53:0A:9B:27', \
9: '00:16:53:0A:9E:2C', \
10: '00:16:53:17:92:8A', \
11: '00:16:53:17:94:E0', \
12: '00:16:53:1A:C6:BD'}
brick = nxt.bluesock.BlueSock(address[n]).connect()
mB = nxt.motor.Motor(brick, nxt.motor.PORT_B)
mC = nxt.motor.Motor(brick, nxt.motor.PORT_C)
s1 = nxt.sensor.Touch(brick, nxt.sensor.PORT_1)
s2 = nxt.sensor.Sound(brick, nxt.sensor.PORT_2)
s2.set_input_mode(0x08,0x80) # dB adjusted, percentage
s3 = nxt.sensor.Light(brick, nxt.sensor.PORT_3)
s3.set_illuminated(True)
s3.set_input_mode(0x05,0x80) # Light active, percentage
s4 = nxt.sensor.Ultrasonic(brick, nxt.sensor.PORT_4)
tempo = 0.5
connected_robot = n
print("\x1b[32mRobot %d connectat.\x1b[0m" % n)
except BluetoothError as e:
errno, errmsg = eval(e.args[0])
if errno==16:
print("\x1b[31mNo es pot connectar, hi ha un altre programa ocupant la connexió.\x1b[0m")
elif errno==13:
print("\x1b[31mNo es pot connectar, el dispositiu no està emparellat.\x1b[0m")
elif errno == 112:
print("\x1b[31mNo es troba el brick, assegurat que estiga encés.\x1b[0m")
else:
print("Error %d: %s" % (errno, errmsg))
except KeyError:
print("\x1b[31mNúmero de robot incorrecte.\x1b[0m")
def disconnect():
try:
brick.sock.close()
print("\x1b[32mRobot %d desconnectat.\x1b[0m" % connected_robot)
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def stop():
try:
mB.brake()
mC.brake()
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def forward(speed=100,speed_B=100,speed_C=100):
move(speed_B=min(abs(speed),abs(speed_B)),speed_C=min(abs(speed),abs(speed_C)))
def backward(speed=100,speed_B=100,speed_C=100):
move(speed_B=-min(abs(speed),abs(speed_B)),speed_C=-min(abs(speed),abs(speed_C)))
def left(speed=100):
move(speed_B=0,speed_C=abs(speed))
def left_sharp(speed=100):
move(speed_B=-abs(speed),speed_C=abs(speed))
def right(speed=100):
move(speed_B=abs(speed),speed_C=0)
def right_sharp(speed=100):
move(speed_B=abs(speed),speed_C=-abs(speed))
def move(speed_B=0,speed_C=0):
max_speed = 100
speed_B = int(speed_B)
speed_C = int(speed_C)
if speed_B > 100:
speed_B = 100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_B < -100:
speed_B = -100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_C > 100:
speed_C = 100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_C < -100:
speed_C = -100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
try:
mB.run(-int(speed_B*max_speed/100))
mC.run(int(speed_C*max_speed/100))
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def touch():
return s1.is_pressed()
def sound():
return s2.get_loudness()
def light():
return s3.get_lightness()
from nxt.telegram import InvalidOpcodeError, InvalidReplyError
def ultrasonic():
global s4
try:
return s4.get_distance()
except (InvalidOpcodeError, InvalidReplyError):
disconnect()
print("\x1b[33mError de connexió, reintentant...\x1b[0m")
time.sleep(1)
connect(connected_robot)
return s4.get_distance()
def play_sound(s):
brick.play_sound_file(False, bytes((s+'.rso').encode('ascii')))
def say(s):
play_sound(s)
def play_tone(f,t):
try:
brick.play_tone_and_wait(f, int(t*1000*tempo))
time.sleep(0.01)
except:
pass
from IPython.display import clear_output
def read_and_print(sensor):
try:
while True:
clear_output(wait=True)
print(sensor())
except KeyboardInterrupt:
pass
def test_sensors():
try:
while True:
clear_output(wait=True)
print(" Touch: %d\n Light: %d\n Sound: %d\nUltrasonic: %d" % (touch(),light(),sound(), ultrasonic()))
except KeyboardInterrupt:
pass
import matplotlib.pyplot as plt
def plot(l):
plt.plot(l)
# license: mit
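# --- Illustrative sketch (not part of the original module) ---
# A short usage example of the motion and sensor helpers defined above, assuming
# a robot configured through robot_config.json is switched on and paired. The
# speeds, delays and the 20 cm threshold are illustrative; distance readings are
# assumed to be in centimetres.
def demo_run():
    connect()
    try:
        forward(speed=50)
        time.sleep(2)
        if ultrasonic() < 20:   # obstacle ahead: beep and back off
            play_tone(440, 0.5)
            backward(speed=50)
            time.sleep(1)
    finally:
        stop()
        disconnect()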

# ===== repo: leojohnthomas/ahkab | file: ekv.py =====
# -*- coding: iso-8859-1 -*-
# ekv.py
# Partial implementation of the EKV 3.0 MOS transistor model
# Copyright 2010 Giuseppe Venturini
#
# The EKV model was developed by Matthias Bucher, Christophe Lallement,
# Christian Enz, Fabien Théodoloz, François Krummenacher at the Electronics
# Laboratories, Swiss Federal Institute of Technology (EPFL),
# Lausanne, Switzerland.
# This implementation is based upon:
# 1. Matthias Bucher, Christian Enz, François Krummenacher, Jean-M. Sallese,
# Christophe Lallement and Alain-S. Porret,
# The EKV 3.0 Compact MOS Transistor Model: Accounting for Deep-Submicron
# Aspects, <http://www.nsti.org/publications/MSM/2002/pdf/346.pdf>
# 2. EKV 2.6 Technical report, <http://legwww.epfl.ch/ekv/pdf/ekv_v262.pdf>.
#
# This file is part of the ahkab simulator.
#
# Ahkab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Ahkab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License v2
# along with ahkab. If not, see <http://www.gnu.org/licenses/>.
"""
The EKV model was developed by Matthias Bucher, Christophe Lallement,
Christian Enz, Fabien Théodoloz, François Krummenacher at the Electronics
Laboratories, Swiss Federal Institute of Technology (EPFL),
Lausanne, Switzerland. The Technical Report upon which this implementation
is based is available here:
<http://legwww.epfl.ch/ekv/pdf/ekv_v262.pdf>.
This module defines two classes:
ekv_device
ekv_mos_model
Features:
- EKV model implementation, computation of charges, potentials,
reverse and forward currents, slope factor and normalization factors,
- Calculation of trans-conductances based on the charge approach.
- N/P MOS symmetry
- Rudimentary temperature effects.
The Missing Features:
- Channel length modulation
- Reverse Short Channel Effect (RSCE)
- Complex mobility degradation is missing
- Transcapacitances
- Quasistatic implementation
"""
import constants, options, utilities, printing
import math
# DEFAULT VALUES FOR 500n CH LENGTH
COX_DEFAULT = .7e-3
VTO_DEFAULT = .5
GAMMA_DEFAULT = 1
PHI_DEFAULT = .7
KP_DEFAULT = 50e-6
UCRIT_DEFAULT = 2e6
LAMBDA_DEFAULT = .5
XJ_DEFAULT = .1e-6
TCV_DEFAULT = 1e-3
BEX_DEFAULT = -1.5
ISMALL_GUESS_MIN = 1e-10
class ekv_device:
INIT_IFRN_GUESS = 1
def __init__(self, nd, ng, ns, nb, W, L, model, M=1, N=1):
""" EKV device
Parameters:
nd: drain node
ng: gate node
ns: source node
nb: bulk node
L: channel length [m]
W: channel width [m]
M: multiplier (n. of shunt devices)
N: series mult. (n. of series devices)
model: pass an instance of ekv_mos_model
Selected methods:
- get_output_ports() -> (nd, ns)
- get_drive_ports() -> (nd, nb), (ng, nb), (ns, nb)
"""
self.ng = ng
self.nb = nb
self.n1 = nd
self.n2 = ns
self.ports = ((self.n1, self.nb), (self.ng, self.nb), (self.n2, self.nb))
class dev_class: pass # empty class to hold device parameters
self.device = dev_class()
self.device.L = float(L) #channel length -
self.device.W = float(W) #channel width -
self.device.M = int(M) #parallel multiple device number
self.device.N = int(N) #series multiple device number
self.ekv_model = model
self.opdict = {}
self.opdict.update({'state':(float('nan'), float('nan'), float('nan'))})
self.opdict.update({'ifn':self.INIT_IFRN_GUESS})
self.opdict.update({'irn':self.INIT_IFRN_GUESS})
self.opdict.update({'ip_abs_err':self.ekv_model.get_ip_abs_err(self.device)})
self.letter_id = 'M'
self.is_nonlinear = True
self.is_symbolic = True
self.dc_guess = [self.ekv_model.VTO*(0.1)*self.ekv_model.NPMOS, self.ekv_model.VTO*(1.1)*self.ekv_model.NPMOS, 0]
devcheck, reason = self.ekv_model._device_check(self.device)
if not devcheck:
raise Exception, reason + " out of boundaries."
def get_drive_ports(self, op):
"""Returns a tuple of tuples of ports nodes, as:
(port0, port1, port2...)
Where each port is in the form:
port0 = (nplus, nminus)
"""
return self.ports #d,g,s
def get_output_ports(self):
return ((self.n1, self.n2),)
def __str__(self):
mos_type = self._get_mos_type()
rep = " " + self.ekv_model.name + " w="+ str(self.device.W) + " l=" + \
str(self.device.L) + " M="+ str(self.device.M) + " N=" + \
str(self.device.N)
return rep
def _get_mos_type(self):
"""Returns N or P (capitalized)
"""
mtype = 'N' if self.ekv_model.NPMOS == 1 else 'P'
return mtype
def i(self, op_index, ports_v, time=0):
"""Returns the current flowing in the element with the voltages
applied as specified in the ports_v vector.
ports_v: [voltage_across_port0, voltage_across_port1, ...]
time: the simulation time at which the evaluation is performed.
It has no effect here. Set it to None during DC analysis.
"""
ret, j1, j2 = self.ekv_model.get_ids(self.device, ports_v, \
self.opdict)
return ret
def update_status_dictionary(self, ports_v):
if self.opdict is None:
self.opdict = {}
if not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gmd')) or \
not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gmg')) or \
not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gms')) or \
not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('Ids')):
self.opdict['state'] == ports_v[0]
self.opdict['gmd'] = self.g(0, ports_v[0], 0)
self.opdict['gmg'] = self.g(0, ports_v[0], 1)
self.opdict['gms'] = self.g(0, ports_v[0], 2)
self.opdict['Ids'] = self.i(0, ports_v[0])
gmd = self.opdict['gmd']
gmg = self.opdict['gmg']
gms = self.opdict['gms']
ids = self.opdict['Ids']
if ids == 0:
TEF = float('nan')
else:
TEF = abs(gms*constants.Vth()/ids)
self.opdict['TEF'] = TEF
def print_op_info(self, ports_v):
arr = self.get_op_info(ports_v)
print arr,
def get_op_info(self, ports_v):
"""Operating point info, for design/verification. """
mos_type = self._get_mos_type()
self.update_status_dictionary(ports_v)
sat_status = "SATURATION" if self.opdict['SAT'] else "LINEAR"
if self.opdict["WMSI"] == 0:
wmsi_status = "WEAK INVERSION"
if self.opdict["WMSI"] == 1:
wmsi_status = "MODERATE INVERSION"
if self.opdict["WMSI"] == 2:
wmsi_status = "STRONG INVERSION"
arr = [["M"+self.descr, mos_type.upper()+" ch",wmsi_status, "", "", sat_status, "", "", "", "", "",""],]
arr.append(["beta", "[A/V^2]:", self.opdict['beta'], "Weff", "[m]:", str(self.opdict['Weff'])+" ("+str(self.device.W)+")", "Leff", "[m]:", str(self.opdict['Leff'])+ " ("+str(self.device.L)+")", "M/N:", "", str(self.device.M)+"/"+str(self.device.N)])
arr.append(["Vdb", "[V]:", float(ports_v[0][0]), "Vgb", "[V]:", float(ports_v[0][1]), "Vsb", "[V]:", float(ports_v[0][2]), "Vp", "[V]:", self.opdict['Vp'],])
arr.append([ "VTH", "[V]:", self.opdict['VTH'], "VOD", "[V]:", self.opdict['VOD'], "nq: ", "",self.opdict['nq'], "VA", "[V]:", str(self.opdict['Ids']/self.opdict['gmd'])])
arr.append(["Ids", "[A]:", self.opdict['Ids'], "nv: ", "",self.opdict['nv'], "Ispec", "[A]:", self.opdict["Ispec"], "TEF:", "", str(self.opdict['TEF']),])
arr.append(["gmg", "[S]:", self.opdict['gmg'], "gms", "[S]:", self.opdict['gms'], "rob", "[Ohm]:", 1/self.opdict['gmd'], "", "", ""])
arr.append(["if:", "", self.opdict['ifn'],"ir:", "", self.opdict['irn'], "Qf", "[C/m^2]:", self.opdict["qf"], "Qr", "[C/m^2]:", self.opdict["qr"],])
#arr.append([ "", "", "", "", "", ""])
return printing.table_setup(arr)
def g(self, op_index, ports_v, port_index, time=0):
"""Returns the differential (trans)conductance rs the port specified by port_index
when the element has the voltages specified in ports_v across its ports,
at (simulation) time.
ports_v: a list in the form: [voltage_across_port0, voltage_across_port1, ...]
port_index: an integer, 0 <= port_index < len(self.get_ports())
time: the simulation time at which the evaluation is performed. Set it to
None during DC analysis.
"""
assert op_index == 0
assert port_index < 3
if port_index == 0:
g = self.ekv_model.get_gmd(self.device, ports_v, self.opdict)
elif port_index == 1:
g = self.ekv_model.get_gmg(self.device, ports_v, self.opdict)
if port_index == 2:
g = self.ekv_model.get_gms(self.device, ports_v, self.opdict)
if op_index == 0 and g == 0:
if port_index == 2:
sign = -1
else:
sign = +1
g = sign*options.gmin*2
#print type(g), g
if op_index == 0 and port_index == 0:
self.opdict.update({'gmd':g})
elif op_index == 0 and port_index == 1:
self.opdict.update({'gmg':g})
elif op_index == 0 and port_index == 2:
self.opdict.update({'gms':g})
return g
def get_value_function(self, identifier):
def get_value(self):
return self.opdict[identifier]
return get_value
class scaling_holder: pass # will hold the scaling factors
class ekv_mos_model:
def __init__(self, name=None, TYPE='n', TNOM=None, COX=None, \
GAMMA=None, NSUB=None, PHI=None, VTO=None, KP=None, \
XJ=None, LAMBDA=None, \
TOX=None, VFB=None, U0=None, TCV=None, BEX=None):
self.scaling = scaling_holder()
self.name = "model_ekv0" if name is None else name
Vth = constants.Vth()
self.TNOM = float(TNOM) if TNOM is not None else constants.Tref
#print "TYPE IS:" + TYPE
self.NPMOS = 1 if TYPE == 'n' else -1
# optional parameters (no defaults)
self.TOX = float(TOX) if TOX is not None else None
self.NSUB = float(NSUB) if NSUB is not None else None
self.VFB = self.NPMOS*float(VFB) if VFB is not None else None
self.U0 = float(U0) if U0 is not None else None
# crucial parameters
if COX is not None:
self.COX = float(COX)
elif TOX is not None:
self.COX = constants.si.eox/TOX
else:
self.COX = COX_DEFAULT
if GAMMA is not None:
self.GAMMA = float(GAMMA)
elif NSUB is not None:
self.GAMMA = math.sqrt(2*constants.e*constants.si.esi*NSUB*10**6/self.COX)
else:
self.GAMMA = GAMMA_DEFAULT
if PHI is not None:
self.PHI = float(PHI)
elif NSUB is not None:
self.PHI = 2*constants.Vth(self.TNOM)*math.log(NSUB*10**6/constants.si.ni(self.TNOM))
else:
self.PHI = PHI_DEFAULT
if VTO is not None:
self.VTO = self.NPMOS*float(VTO)
if self.VTO < 0:
print "(W): model %s has internal negative VTO (%f V)." % (self.name, self.VTO)
elif VFB is not None:
self.VTO = VFB + PHI + GAMMA*PHI #inv here??
else:
self.VTO = self.NPMOS*VTO_DEFAULT
if KP is not None:
self.KP = float(KP)
elif U0 is not None:
self.KP = (U0*10**-4)*self.COX
else:
self.KP = KP_DEFAULT
self.LAMBDA = LAMBDA if LAMBDA is not None else LAMBDA_DEFAULT
self.XJ = XJ if XJ is not None else XJ_DEFAULT
self.UCRIT = UCRIT_DEFAULT
# Intrinsic model temperature parameters
self.TCV = self.NPMOS*float(TCV) if TCV is not None else self.NPMOS*TCV_DEFAULT
self.BEX = float(BEX) if BEX is not None else BEX_DEFAULT
self.set_device_temperature(constants.T)
#Setup switches
self.SATLIM = math.exp(4)
self.WMSI_factor = 10
self.NR_damp_factor = options.nl_voltages_lock_factor
sc, sc_reason = self._self_check()
if not sc:
raise Exception, sc_reason + " out of range"
def set_device_temperature(self, T):
"""Change the temperature of the device. VTO, KP and PHI get updated.
"""
self.TEMP = T
self.VTO = self.VTO - self.TCV*(T-self.TNOM)
self.KP = self.KP*(T/self.TNOM)**self.BEX
self.PHI = self.PHI * T/self.TNOM + 3*constants.Vth(self.TNOM)*math.log(T/self.TNOM) \
- constants.si.Eg(self.TNOM)*T/self.TNOM + constants.si.Eg(T)
def get_device_temperature(self):
"""Returns the temperature of the device - in K.
"""
return self.TEMP
def print_model(self):
"""All the internal parameters of the model get printed out,
for visual inspection. Notice some can be set to None
(ie not available) if they were not provided in the netlist
or some not provided are calculated from the others.
"""
arr = []
TYPE = 'N' if self.NPMOS == 1 else "P"
arr.append([self.name, "", "", TYPE+" MOS", "EKV MODEL", "", "", "", "", "", "", ""])
arr.append(["KP", "[A/V^2]", self.KP, "VTO", "[V]:", self.VTO, "TOX", "[m]", self.TOX, "COX", "[F/m^2]:", self.COX])
arr.append(["PHI", "[V]:", self.PHI, "GAMMA", "sqrt(V)", self.GAMMA, "NSUB", "[cm^-3]", self.NSUB, "VFB", "[V]:", self.VFB])
arr.append(["U0", "[cm^2/(V*s)]:", self.U0, "TCV", "[V/K]", self.TCV, "BEX", "", self.BEX, "", "", ""])
arr.append(["INTERNAL", "", "", "SAT LIMIT", "", self.SATLIM, "W/M/S INV FACTOR", "", self.WMSI_factor, "", "", ""])
printing.table_print(arr)
def get_voltages(self, vd, vg, vs):
"""Performs the VD <-> VS swap if needed.
Returns:
(VD, VG, VS) after the swap
CS, an integer which equals to:
+1 if no swap was necessary,
-1 if VD and VS have been swapped.
"""
# vd / vs swap
vd = vd*self.NPMOS
vg = vg*self.NPMOS
vs = vs*self.NPMOS
if vs > vd:
vd_new = vs
vs_new = vd
cs = -1
else:
vd_new = vd
vs_new = vs
cs = +1
return ((float(vd_new), float(vg), float(vs_new)), cs)
def get_ip_abs_err(self, device):
"""Absolute error to be enforced in the calculation of the normalized currents.
"""
return options.iea / (2*constants.Vth(self.TEMP)**2*self.KP*device.M*device.W/device.L)
def setup_scaling(self, nq, device):
"""Calculates and stores in self.scaling the following factors:
Ut, the thermal voltage,
Is, the specific current,
Gs, the specific transconductance,
Qs, the specific charge.
"""
self.scaling.Ut = constants.Vth()
self.scaling.Is = 2 * nq * self.scaling.Ut**2 * self.KP * device.W/device.L
self.scaling.Gs = 2 * nq * self.scaling.Ut * self.KP * device.W/device.L
self.scaling.Qs = 2 * nq * self.scaling.Ut * self.COX
return
def get_vp_nv_nq(self, VG):
"""Calculates and returns:
VP, the pinch-off voltage,
nv, the slope factor,
nq, the charge linearization factor.
"""
VGeff = VG - self.VTO + self.PHI + self.GAMMA*math.sqrt(self.PHI)
if VGeff > 0 and VG - self.VTO + (math.sqrt(self.PHI)+self.GAMMA/2)**2 > 0:
VP = VG - self.VTO - self.GAMMA*(math.sqrt(VG -self.VTO +(math.sqrt(self.PHI)+self.GAMMA/2)**2) -(math.sqrt(self.PHI)+self.GAMMA/2))
if math.isnan(VP): VP = 0 # the argument of sqrt ^^ went negative
else:
VP = -self.PHI
#print "VG", VG, "VGeff", VGeff, "VP", VP, self.GAMMA, self.PHI, math.sqrt(VG -self.VTO +(math.sqrt(self.PHI)+self.GAMMA/2)**2), VG -self.VTO +(math.sqrt(self.PHI)+self.GAMMA/2)**2
nq = 1 + .5 * self.GAMMA / math.sqrt(self.PHI + .5*VP)
nv = 1 + .5 * self.GAMMA / math.sqrt(self.PHI + VP + 1e-12)
return VP, nv, nq
def get_ids(self, device, (vd, vg, vs), opdict=None, debug=False):
"""Returns:
IDS, the drain-to-source current (de-normalized),
qs, the (scaled) charge at the source,
qr, the (scaled) charge at the drain.
"""
if debug: print "=== Current for vd:", vd, "vg:", vg, "vs:", vs
ip_abs_err = self.get_ip_abs_err(device) if opdict['ip_abs_err'] is None else opdict['ip_abs_err']
(VD, VG, VS), CS_FACTOR = self.get_voltages(vd, vg, vs)
#Weff, Leff = self.get_eff_wl(device.W, device.L)
VP, nv, nq = self.get_vp_nv_nq(VG)
self.setup_scaling(nq, device)
vp = VP/self.scaling.Ut
vs = VS/self.scaling.Ut
vd = VD/self.scaling.Ut
if debug: print "Scaled voltages: vd:", vd, "vp:", vp, "vs:", vs
v_ifn = vp - vs
ifn = self.get_ismall(v_ifn, opdict['ip_abs_err'], max(opdict['ifn'], ISMALL_GUESS_MIN), debug=debug)
if False:
Leff = device.L
v_irn = vp - vd
else:
Leff, v_irn = self.get_leq_virp(device, (vd, vg, vs), VP, device.L, ifn)
irn = self.get_ismall(v_irn, opdict['ip_abs_err'], max(opdict['irn'], ISMALL_GUESS_MIN), debug=debug)
if debug:
print "vd:", vd, "vg:",VG/self.scaling.Ut, "vs:", vs, "vds:", vd-vs
print "v_ifn:", v_ifn, "v_irn:",v_irn
print "ifn:", ifn, "irn:",irn
print "ip_abs_err:", ip_abs_err
print "Vth:", self.scaling.Ut
print "nv", nv, "Is", self.scaling.Is
print "Weff:", device.W, "Leff:", Leff
print "NPMOS:", self.NPMOS, "CS_FACTOR", CS_FACTOR
qf = self.ismall2qsmall(ifn)
qr = self.ismall2qsmall(irn)
Ids = CS_FACTOR*self.NPMOS * device.L/Leff * device.M * self.scaling.Is * (ifn - irn)
vd_real = vd if CS_FACTOR == 1 else vs
vs_real = vs if CS_FACTOR == 1 else vd
opdict.update({'state':(vd_real*self.NPMOS, vg*self.NPMOS, vs_real*self.NPMOS)})
opdict.update({'Ids':Ids, "Weff":device.W, "Leff":Leff, 'Vp':VP})
opdict.update({'ifn':ifn, "irn":irn, "nv":nv, "nq":nq, 'beta':.5*self.KP*device.W/Leff, 'Ispec':self.scaling.Is})
opdict.update({'VTH':self.VTO, "VOD":self.NPMOS*nv*(VP-VS), 'SAT':ifn>irn*self.SATLIM})
opdict.update({'qf':qf*self.scaling.Qs, 'qr':qr*self.scaling.Qs})
if max(ifn, irn) > self.WMSI_factor:
WMSI = 2
elif max(ifn, irn) < 1/self.WMSI_factor:
WMSI = 0
else:
WMSI = 1
opdict.update({'WMSI':WMSI})
if debug: print "current:", Ids
return Ids, qf, qr
def get_leq_virp(self, device, (vd, vg, vs), Vp, Leff, ifn):
#if ifn > 0 and Vp - constants.Vth()*vd > 0:
assert vd >= vs
Vc = self.UCRIT * device.N * Leff
Vdss = Vc * (math.sqrt(.25 + constants.Vth()/Vc*math.sqrt(ifn)) - .5) # eq. 46
# Drain-to-source saturation voltage for reverse normalized current, eq. 47
Vdssp = Vc * (math.sqrt(.25 +constants.Vth()/Vc *(math.sqrt(ifn) - .75*math.log(ifn))) - .5) + \
constants.Vth()*(math.log(.5 * Vc/constants.Vth()) - .6)
# channel length modulation
vser_1 = math.sqrt(ifn) - Vdss/constants.Vth()
#if vser_1 < 0:
# vser_1 = 0
Vds = (vd - vs)*.5*constants.Vth()
delta_v = 4*constants.Vth()*math.sqrt(self.LAMBDA*vser_1 + 1.0/64) # eq. 48
Vip = math.sqrt(Vdss**2 + delta_v**2) - math.sqrt((Vds - Vdss)**2 + delta_v**2) #eq 50
Lc = math.sqrt(constants.si.esi*self.XJ/self.COX) #eq. 51
delta_l = self.LAMBDA * Lc * math.log(1 + (Vds - Vip)/(Lc*self.UCRIT)) #eq. 52
# Equivalent channel length including channel-length modulation and velocity saturation
Lp = device.N*Leff - delta_l + (Vds + Vip)/self.UCRIT #eq. 53
Lmin = device.N*Leff/10.0 #eq. 54
Leq = .5*(Lp + math.sqrt(Lp**2 + Lmin**2)) #eq. 55
assert not math.isnan(Vdssp)
assert not math.isnan(delta_v)
v_irp = (Vp - Vds - vs*constants.Vth() - math.sqrt(Vdssp**2 + delta_v**2) + math.sqrt((Vds-Vdssp)**2+delta_v**2))/constants.Vth()
#else:
# v_irp = Vp/constants.Vth() - vd
# Leq = Leff
return Leq, v_irp
def get_gms(self, device, (vd, vg, vs), opdict=None, debug=False):
"""Returns the source-bulk transconductance or d(IDS)/d(VS-VB)."""
(j1, j2, j3), CS_FACTOR = self.get_voltages(vd, vg, vs)
Ids, qf, qr = self.get_ids(device, (vd, vg, vs), opdict, debug)
if CS_FACTOR == +1:
gms = -1.0*self.scaling.Gs*qf
elif CS_FACTOR == -1:
gms = -self.scaling.Gs*qr
return gms
def get_gmd(self, device, (vd, vg, vs), opdict=None, debug=False):
"""Returns the drain-bulk transconductance or d(IDS)/d(VD-VB)."""
(j1, j2, j3), CS_FACTOR = self.get_voltages(vd, vg, vs)
Ids, qf, qr = self.get_ids(device, (vd, vg, vs), opdict, debug)
if CS_FACTOR == +1:
gmd = self.scaling.Gs*qr
elif CS_FACTOR == -1:
gmd = self.scaling.Gs*qf
return gmd
def get_gmg(self, device, (vd, vg, vs), opdict=None, debug=False):
"""Returns the gate-bulk transconductance or d(IDS)/d(VG-VB)."""
VP, nv, nq = self.get_vp_nv_nq(float(vg))
Ids, qf, qr = self.get_ids(device, (vd, vg, vs), opdict, debug)
(j1, j2, j3), CS_FACTOR = self.get_voltages(vd, vg, vs)
gmg = CS_FACTOR*self.scaling.Gs*(qf-qr)/nv
return gmg
def get_ismall(self, vsmall, ip_abs_err, iguess=None, debug=False):
"""Solves the problem: given v, find i such that:
v = ln(q) + 2q
q = sqrt(.25 + i) - .5
A damped Newton algorithm is used inside.
"""
# starting guess for Newton's Method.
if iguess is None:
iguess = 1
# sanity checks
if math.isnan(vsmall):
raise Exception, \
"Attempted to calculate a current corresponding to a NaN voltage."
if not ip_abs_err > 0:
raise Exception, \
"The normalized current absolute error has been set to a negative value."
#if vsmall < 0:
# return 0.0
check = False
ismall = iguess
if debug: iter_c = 0
while True:
if debug: iter_c = iter_c + 1
vsmall_iter, numeric_problem_v = self.get_vsmall(ismall)
dvdi, numeric_problem_i = self.get_dvsmall_dismall(ismall)
deltai = (vsmall - vsmall_iter)/dvdi
numeric_problem = numeric_problem_i or numeric_problem_v
if debug:
print "Numeric problem:", numeric_problem
print "ABS: deltai < ip_abs_err", deltai, "<", ip_abs_err, ":", abs(deltai) < ip_abs_err
print "REL: deltai < ismall*options.ier", deltai, "<", ismall*options.ier, abs(deltai) < ismall*options.ier
print deltai, ismall
# absolute and relative value convergence checks.
if ((abs(deltai) < ip_abs_err or numeric_problem) and abs(deltai) < ismall*options.ier) or \
(abs(deltai) < ip_abs_err*1e-6 or numeric_problem):
# To make the algorithm more robust,
# the convergence check has to be passed twice in a row
# to reach convergence.
if not check:
check = True
else:
break
else:
check = False
# convergence was not reached, update ismall
if math.isnan(ismall):
print "Ismall is NaN!!"
exit()
if ismall == 0:
# this is a sign we went below the machine resolution
# it makes no sense to iterate there as quantization errors
# prevent reaching a meaningful result.
break
else:
# Damped Newton with domain restriction: ismall >= 0.
ratio = deltai/ismall
if ratio > self.NR_damp_factor:
# Do not allow a change in ismall bigger than self.NR_damp_factor
# in a single iteration
ismall = self.NR_damp_factor*ismall
elif ratio <= -1:
# this would give a negative ismall
ismall = 0.1*ismall
else:
ismall = ismall + deltai
if debug:
print str(iter_c) + " iterations."
return ismall
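# A quick hand-worked illustration of the fixed point solved above (numbers
# are only a sanity check, independent of any device): for i = 1,
# q = sqrt(0.25 + 1) - 0.5 ~ 0.6180 and v = ln(0.6180) + 2*0.6180 ~ 0.7549,
# so get_ismall(0.7549, ip_abs_err=1e-12) should converge back to i ~ 1.0.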
def get_vsmall(self, ismall, verbose=3):
"""Returns v according to the equations:
q = sqrt(.25 + i) - .5
v = ln(q) + 2q
"""
if abs(ismall) < utilities.EPS:
ismall = utilities.EPS # otherwise we get log(0)
if verbose == 6:
print "EKV: Machine precision limited the resolution on i. (i<EPS)"
numeric_problem = True
else:
numeric_problem = False
vsmall = math.log(math.sqrt(.25 + ismall) - 0.5) + 2*math.sqrt(.25 + ismall) - 1.0
return vsmall, numeric_problem
def get_dvsmall_dismall(self, ismall, verbose=3):
"""The Newton algorithm in get_ismall(...) requires the evaluation of the
first derivative of the fixed point function:
dv/di = 1.0/(sqrt(.25+i)-.5) * .5/sqrt(.25 + i) + 1/sqrt(.25 + i)
This is provided by this module.
"""
if abs(ismall) < utilities.EPS:
ismall = utilities.EPS
numeric_problem = True
if verbose == 6:
print "EKV: Machine precision limited the resolution on dv/di in the NR iteration. (i<EPS)"
else:
numeric_problem = False
dvdi = 1.0/(math.sqrt(.25+ismall)-.5) * .5/math.sqrt(.25 + ismall) + 1.0/math.sqrt(.25 + ismall)
return dvdi, numeric_problem
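# The expression above is the chain rule applied to v(i) = ln(q) + 2*q with
# q(i) = sqrt(0.25 + i) - 0.5 and dq/di = 0.5/sqrt(0.25 + i):
# dv/di = (1/q + 2)*dq/di
# = 1/(sqrt(0.25 + i) - 0.5) * 0.5/sqrt(0.25 + i) + 1/sqrt(0.25 + i)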
def ismall2qsmall(self, ismall, verbose=0):
""" i(f,r) -> q(f,r)
Convert a source/drain scaled current to the corresponding normalized charge."""
if verbose == 6: #ismall is lower than EPS, errors here are usually not important
print "EKV: Machine precision limited the resolution on q(s,d). (i<EPS)"
qsmall = math.sqrt(.25 + ismall) - .5
return qsmall
def qsmall2ismall(self, qsmall):
""" q(f,r) -> i(f,r)
Convert a source/drain scaled charge to the corresponding normalized current."""
ismall = qsmall**2 + qsmall
return ismall
def _self_check(self):
"""Performs sanity check on the model parameters."""
ret = True, ""
if self.NSUB is not None and self.NSUB < 0:
ret = (False, "NSUB "+str(self.NSUB))
elif self.U0 is not None and not self.U0 > 0:
ret = (False, "UO "+str(self.U0))
elif not self.GAMMA > 0:
ret = (False, "GAMMA "+str(self.GAMMA))
elif not self.PHI > 0.1:
ret = (False, "PHI "+str(self.PHI))
return ret
def _device_check(self, adev):
"""Performs sanity check on the device parameters."""
if not adev.L > 0:
ret = (False, "L")
elif not adev.W > 0:
ret = (False, "W")
elif not adev.N > 0:
ret = (False, "N")
elif not adev.M > 0:
ret = (False, "M")
else:
ret = (True, "")
return ret
if __name__ == '__main__':
# Tests
import matplotlib.pyplot as plt
ekv_m = ekv_mos_model(TYPE='n', KP=50e-6, VTO=.4)
ma = ekv_device(1, 2, 3, 4, W=10e-6,L=1e-6, model=ekv_m)
ma.descr = "1"
# OP test
vd = 0
vg = 1
vs = 0
ma.print_op_info(((vd, vg, vs),))
ekv_m.print_model()
# gmUt/Ids test
import mosq
msq = mosq.mosq(1, 2, 3, kp=50e-6, w=10e-6, l=1e-6, vt=.4, lambd=0, mos_type='n')
data0 = []
data1 = []
data2 = []
data3 = []
vd = 2.5
if True:
vs = 1
for Vhel in range(1,2500):
print ".",
vg = Vhel/1000.0
ma.update_status_dictionary(((vd, vg, 0),))
data0.append(ma.opdict['Ids'])
#print "Current for vd", vd, "vg", vg, "vs", vs
data1.append(ma.opdict['TEF'])
isq = msq.i((vd, vg, vs),)
gmsq = msq.g((vd, vg, vs),0)
if isq > 0:
data2.append(gmsq/isq*constants.Vth())
else:
data2.append(float('nan'))
data3.append(isq)
plt.semilogx(data0, data1, data3, data2)
plt.title('Transconductance efficiency factor')
plt.legend(['(GM*UT)/ID'])
plt.show()
| gpl-2.0 | 2,957,746,477,690,178,000 | 33.296937 | 251 | 0.636914 | false |
ceroytres/RBM | binary_RBM.py | 1 | 4503 | from __future__ import print_function
import numpy as np
from numba import jit
class binary_RBM(object):
def __init__(self,n_visible=None,n_hidden=256,batchSize=256,lr=0.1,alpha=0,
mu=.95,epochs=1,k=10):
self.n_hidden=n_hidden
self.n_visible=n_visible
self.batchSize=batchSize
self.k=k
self.alpha=alpha
self.W=np.random.rand(n_visible,n_hidden)
self.W*=8*np.sqrt(6./(n_hidden + n_visible))
self.W-=4*np.sqrt(6./(n_hidden + n_visible))
self.hbias=np.zeros(n_hidden)
self.vbias=np.zeros(n_visible)
self.epochs=epochs
self.lr=lr
self.mu=mu
@jit
def fit(self,x):
v_W=np.zeros(self.W.shape)
v_h=np.zeros(self.hbias.shape)
v_v=np.zeros(self.vbias.shape)
cost=self.get_pseudo_likelihood(x)
print("Epoch %d Pseudo-likelihood cost:%f" % (0,cost))
for t in range(0,self.epochs):
N=x.shape[0]
batches,num_batches=self._batchLists(N)
num_batches=int(num_batches)
self.mu=(1-(3.0/(5.0+t)))
for i in range(0,num_batches):
idx=batches[i]
data=np.squeeze(x[idx,:])
B=data.shape[0]
p_h=self._sigmoid(np.dot(data,self.W)+self.hbias)
if t==0 and i==0:
h=p_h>np.random.rand(p_h.shape[0],p_h.shape[1])
for k in range(0,self.k):
p_v=self._sigmoid(np.dot(h,self.W.T)+self.vbias)
v=p_v>np.random.rand(p_v.shape[0],p_v.shape[1])
q_h=self._sigmoid(np.dot(v,self.W)+self.hbias)
h=q_h>np.random.rand(q_h.shape[0],q_h.shape[1])
g_W=np.dot(data.T,p_h)-np.dot(v.T,q_h)
g_W/=B
g_v=data.mean(axis=0)-v.mean(axis=0)
g_h=p_h.mean(axis=0)-q_h.mean(axis=0)
v_W=self.mu*v_W*(t/(t+1.0))+self.lr*(g_W-self.alpha*self.W)
v_h=self.mu*v_h*(t/(t+1.0))+self.lr*g_h
v_v=self.mu*v_v*(t/(t+1.0))+self.lr*g_v
self.W+=v_W
self.hbias+=v_h
self.vbias+=v_v
self.lr/=np.sqrt(t+2)
cost=self.get_pseudo_likelihood(x)
print("Epoch %d Pseudo-likelihood cost:%f" % (t+1,cost))
return None
def _batchLists(self,N):
num_batches=np.ceil(N/self.batchSize)
batch_idx=np.tile(np.arange(0,num_batches)\
,self.batchSize)
batch_idx=batch_idx[0:N]
np.random.shuffle(batch_idx)
batch_list=[]
for i in range(0,int(num_batches)):
idx=np.argwhere(batch_idx==i)
batch_list.append(idx)
return batch_list,num_batches
@jit
def _sigmoid(self,z):
return 1/(1+np.exp(-z))
@jit
def get_pseudo_likelihood(self,x):
v=x.copy()
idx = (np.arange(v.shape[0]),
np.random.randint(0, v.shape[1], v.shape[0]))
v[idx]=1-v[idx]
N=self.vbias.shape[0]
PL=N*np.log(self._sigmoid(self.free_energy(v)-self.free_energy(x)))
return PL.mean()
@jit
def free_energy(self,x):
F=-np.dot(x,self.vbias)-np.sum(np.logaddexp(0,np.dot(x,self.W)+self.hbias),axis=1)
return F
@jit
def gibbs_sample(self,iters):
v=np.random.rand(self.n_visible)
for i in range(0,iters):
p_h=self._sigmoid(np.dot(v,self.W)+self.hbias)
h=p_h>np.random.rand(p_h.shape[0])
p_v=self._sigmoid(np.dot(h,self.W.T)+self.vbias)
v=p_v>np.random.rand(p_v.shape[0])
return v,p_v
if __name__=="__main__":
import matplotlib.pyplot as plt
x=np.load('trainIm.pkl')/255.0
x=x.reshape((784,60000)).T
rbm=binary_RBM(n_visible=784,n_hidden=50,alpha=1e-6,lr=.1,batchSize=20,epochs=10,mu=1)
rbm.fit(x)
v,p_v=rbm.gibbs_sample(100000)
plt.figure()
plt.imshow(p_v.reshape((28,28)),cmap='gray')
plt.show()
W=rbm.W
plt.figure()
for i in xrange(25):
plt.subplot(5,5,i+1)
plt.imshow(W[:,i].reshape((28,28)),cmap='gray')
| mit | -6,674,141,656,296,916,000 | 28.431373 | 90 | 0.487897 | false |
bloyl/mne-python | mne/channels/tests/test_layout.py | 4 | 14417 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import copy
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
find_layout, HEAD_SIZE_DEFAULT)
from mne.channels.layout import (_box_size, _find_topomap_coords,
generate_2d_layout)
from mne import pick_types, pick_info
from mne.io import read_raw_kit, _empty_info, read_info
from mne.io.constants import FIFF
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')
lout_path = op.join(io_dir, 'tests', 'data')
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')
def _get_test_info():
"""Make test info."""
test_info = _empty_info(1000)
loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
dtype=np.float32)
test_info['chs'] = [
{'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,
'unit': -1, 'unit_mul': 0},
{'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,
'unit': -1, 'unit_mul': 0},
{'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,
'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,
'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]
test_info._update_redundant()
test_info._check_consistency()
return test_info
def test_io_layout_lout(tmpdir):
"""Test IO with .lout files."""
tempdir = str(tmpdir)
layout = read_layout('Vectorview-all', scale=False)
layout.save(op.join(tempdir, 'foobar.lout'))
layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
print(layout) # test repr
def test_io_layout_lay(tmpdir):
"""Test IO with .lay files."""
tempdir = str(tmpdir)
layout = read_layout('CTF151', scale=False)
layout.save(op.join(tempdir, 'foobar.lay'))
layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
def test_find_topomap_coords():
"""Test mapping of coordinates in 3D space to 2D."""
info = read_info(fif_fname)
picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
# Remove extra digitization point, so EEG digitization points match up
# with the EEG channels
del info['dig'][85]
# Use channel locations
kwargs = dict(ignore_overlap=False, to_sphere=True,
sphere=HEAD_SIZE_DEFAULT)
l0 = _find_topomap_coords(info, picks, **kwargs)
# Remove electrode position information, use digitization points from now
# on.
for ch in info['chs']:
ch['loc'].fill(np.nan)
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1, l0, atol=1e-3)
for z_pt in ((HEAD_SIZE_DEFAULT, 0., 0.),
(0., HEAD_SIZE_DEFAULT, 0.)):
info['dig'][-1]['r'] = z_pt
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1[-1], z_pt[:2], err_msg='Z=0 point moved', atol=1e-6)
# Test plotting mag topomap without channel locations: it should fail
mag_picks = pick_types(info, meg='mag')
with pytest.raises(ValueError, match='Cannot determine location'):
_find_topomap_coords(info, mag_picks, **kwargs)
# Test function with too many EEG digitization points: it should fail
info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Test function with too little EEG digitization points: it should fail
info['dig'] = info['dig'][:-2]
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Electrode positions must be unique
info['dig'].append(info['dig'][-1])
with pytest.raises(ValueError, match='overlapping positions'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without EEG digitization points: it should fail
info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
with pytest.raises(RuntimeError, match='Did not find any digitization'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without any digitization points, it should fail
info['dig'] = None
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
info['dig'] = []
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
def test_make_eeg_layout(tmpdir):
"""Test creation of EEG layout."""
tempdir = str(tmpdir)
tmp_name = 'foo'
lout_name = 'test_raw'
lout_orig = read_layout(kind=lout_name, path=lout_path)
info = read_info(fif_fname)
info['bads'].append(info['ch_names'][360])
layout = make_eeg_layout(info, exclude=[])
assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
if ch.startswith('EE')]))
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
assert_array_equal(lout_new.kind, tmp_name)
assert_allclose(layout.pos, lout_new.pos, atol=0.1)
assert_array_equal(lout_orig.names, lout_new.names)
# Test input validation
pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, radius=0.6)
pytest.raises(ValueError, make_eeg_layout, info, width=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, width=1.1)
pytest.raises(ValueError, make_eeg_layout, info, height=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, height=1.1)
def test_make_grid_layout(tmpdir):
"""Test creation of grid layout."""
tempdir = str(tmpdir)
tmp_name = 'bar'
lout_name = 'test_ica'
lout_orig = read_layout(kind=lout_name, path=lout_path)
layout = make_grid_layout(_get_test_info())
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir)
assert_array_equal(lout_new.kind, tmp_name)
assert_array_equal(lout_orig.pos, lout_new.pos)
assert_array_equal(lout_orig.names, lout_new.names)
# Test creating grid layout with specified number of columns
layout = make_grid_layout(_get_test_info(), n_col=2)
# Vertical positions should be equal
assert layout.pos[0, 1] == layout.pos[1, 1]
# Horizontal positions should be unequal
assert layout.pos[0, 0] != layout.pos[1, 0]
# Box sizes should be equal
assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
def test_find_layout():
"""Test finding layout."""
pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')
sample_info = read_info(fif_fname)
grads = pick_types(sample_info, meg='grad')
sample_info2 = pick_info(sample_info, grads)
mags = pick_types(sample_info, meg='mag')
sample_info3 = pick_info(sample_info, mags)
# mock new convention
sample_info4 = copy.deepcopy(sample_info)
for ii, name in enumerate(sample_info4['ch_names']):
new = name.replace(' ', '')
sample_info4['chs'][ii]['ch_name'] = new
eegs = pick_types(sample_info, meg=False, eeg=True)
sample_info5 = pick_info(sample_info, eegs)
lout = find_layout(sample_info, ch_type=None)
assert lout.kind == 'Vectorview-all'
assert all(' ' in k for k in lout.names)
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
# test new vector-view
lout = find_layout(sample_info4, ch_type=None)
assert_equal(lout.kind, 'Vectorview-all')
assert all(' ' not in k for k in lout.names)
lout = find_layout(sample_info, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2)
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3)
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5)
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
# no common layout, 'meg' option not supported
lout = find_layout(read_info(fname_ctf_raw))
assert_equal(lout.kind, 'CTF-275')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
lout = find_layout(read_info(fname_bti_raw))
assert_equal(lout.kind, 'magnesWH3600')
raw_kit = read_raw_kit(fname_kit_157)
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_kit.info['bads'] = ['MEG 013', 'MEG 014', 'MEG 015', 'MEG 016']
raw_kit.info._check_consistency()
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
# fallback for missing IDs
for val in (35, 52, 54, 1001):
raw_kit.info['kit_system_id'] = val
lout = find_layout(raw_kit.info)
assert lout.kind == 'custom'
raw_umd = read_raw_kit(fname_kit_umd)
lout = find_layout(raw_umd.info)
assert_equal(lout.kind, 'KIT-UMD-3')
# Test plotting
lout.plot()
lout.plot(picks=np.arange(10))
plt.close('all')
def test_box_size():
"""Test calculation of box sizes."""
# No points. Box size should be 1,1.
assert_allclose(_box_size([]), (1.0, 1.0))
# Create one point. Box size should be 1,1.
point = [(0, 0)]
assert_allclose(_box_size(point), (1.0, 1.0))
# Create two points. Box size should be 0.5,1.
points = [(0.25, 0.5), (0.75, 0.5)]
assert_allclose(_box_size(points), (0.5, 1.0))
# Create three points. Box size should be (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points), (0.5, 0.5))
# Create a grid of points. Box size should be (0.1, 0.1).
x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
x, y = x.ravel(), y.ravel()
assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
# Create a random set of points. This should never break the function.
rng = np.random.RandomState(42)
points = rng.rand(100, 2)
width, height = _box_size(points)
assert width is not None
assert height is not None
# Test specifying an existing width.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
# Test specifying an existing width that has influence on the calculated
# height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
# Test specifying an existing height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
# Test specifying an existing height that has influence on the calculated
# width.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
# Test specifying both width and height. The function should simply return
# these.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
# Test specifying a width that will cause unfixable horizontal overlap and
# essentially breaks the function (height will be 0).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=1), (1, 0))
# Test adding some padding.
# Create three points. Box size should be a little less than (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
def test_generate_2d_layout():
"""Test creation of a layout from 2d points."""
snobg = 10
sbg = 15
side = range(snobg)
bg_image = np.random.RandomState(42).randn(sbg, sbg)
w, h = [.2, .5]
# Generate fake data
xy = np.array([(i, j) for i in side for j in side])
lt = generate_2d_layout(xy, w=w, h=h)
# Correct points ordering / minmaxing
comp_1, comp_2 = [(5, 0), (7, 0)]
assert lt.pos[:, :2].max() == 1
assert lt.pos[:, :2].min() == 0
with np.errstate(invalid='ignore'): # divide by zero
assert_allclose(xy[comp_2] / float(xy[comp_1]),
lt.pos[comp_2] / float(lt.pos[comp_1]))
assert_allclose(lt.pos[0, [2, 3]], [w, h])
# Correct number elements
assert lt.pos.shape[1] == 4
assert len(lt.box) == 4
# Make sure background image normalizing is correct
lt_bg = generate_2d_layout(xy, bg_image=bg_image)
assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
| bsd-3-clause | -6,684,274,647,835,185,000 | 38.283379 | 79 | 0.623569 | false |
mailund/pairwise-IM | IMSystem.py | 1 | 3547 | from numpy import matrix
from scipy.linalg import expm
## Constants used as indices in rate and transition matrices
LINEAGES_IN_SEP_POPS = 0
LINEAGES_IN_POP_1 = 1
LINEAGES_IN_POP_2 = 2
COALESCED = 3
NOT_COALESCED = [0,1,2]
def make_rate_matrix(c1, c2, m12, m21):
'''Create a rate matrix based on coalescence rates c1 and c2 and
migration rates m12 and m21.'''
Q = matrix(
[
# State 1: lineages in different populations
[-(m12+m21), m21, m12, 0],
# State 2: both lineages in population 1
[2*m12, -(2*m12+c1), 0, c1],
# State 3: both lineages in population 2
[2*m21, 0, -(2*m21+c2), c2],
# State 4: coalesced (catches both populations; absorbing)
[0, 0, 0, 0]
])
return Q
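# Example: with c1 = 1, c2 = 2 and no migration (m12 = m21 = 0) the matrix
# reduces to
# Q = [[ 0, 0, 0, 0],
# [ 0, -1, 0, 1],
# [ 0, 0, -2, 2],
# [ 0, 0, 0, 0]]
# so lineages sitting in separate populations never coalesce, while pairs in
# population 1 (resp. 2) coalesce at rate 1 (resp. 2); expm(Q*t) then gives
# the transition probabilities over an interval of length t.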
class IMSystem(object):
'''Wrapping a two-population isolation-with-migration system.'''
def __init__(self, ts, c1s, c2s, m12s, m21s):
'''Build the system based on end-points of time intervals, ts,
coalescence rates c1s and c2s and migration rates m12s and m21s.
'''
self.ts = ts
self.c1s = c1s
self.c2s = c2s
self.m12s = m12s
self.m21s = m21s
self.no_intervals = len(ts)
assert len(self.c1s) == self.no_intervals
assert len(self.c2s) == self.no_intervals
assert len(self.m12s) == self.no_intervals
assert len(self.m21s) == self.no_intervals
self.Qs = [make_rate_matrix(self.c1s[i],self.c2s[i],self.m12s[i],self.m21s[i])
for i in xrange(self.no_intervals)]
self.Ps = [None] * self.no_intervals
self.Ps[0] = matrix(expm(self.Qs[0] * self.ts[0]))
for i in xrange(1,self.no_intervals):
self.Ps[i] = self.Ps[i-1] * matrix(expm(self.Qs[i] * (self.ts[i]-self.ts[i-1])))
def coalescence_distribution(self):
'''Returns the (discretized) coalescence distribution over the time
intervals. The interval from the last entry of ts to infinity implicitly
carries the remaining probability mass, so each distribution sums to 1.'''
pdm_20 = [0] * (self.no_intervals + 1)
pdm_11 = [0] * (self.no_intervals + 1)
pdm_02 = [0] * (self.no_intervals + 1)
pdm_20[0] = self.Ps[0][LINEAGES_IN_POP_1,COALESCED]
pdm_11[0] = self.Ps[0][LINEAGES_IN_SEP_POPS,COALESCED]
pdm_02[0] = self.Ps[0][LINEAGES_IN_POP_2,COALESCED]
for i in xrange(1,self.no_intervals):
P1 = self.Ps[i-1]
P2 = self.Ps[i]
pdm_20[i] = P2[LINEAGES_IN_POP_1,COALESCED] - P1[LINEAGES_IN_POP_1,COALESCED]
pdm_11[i] = P2[LINEAGES_IN_SEP_POPS,COALESCED] - P1[LINEAGES_IN_SEP_POPS,COALESCED]
pdm_02[i] = P2[LINEAGES_IN_POP_2,COALESCED] - P1[LINEAGES_IN_POP_2,COALESCED]
pdm_20[-1] = 1 - sum(pdm_20)
pdm_11[-1] = 1 - sum(pdm_11)
pdm_02[-1] = 1 - sum(pdm_02)
return (pdm_20,pdm_11,pdm_02)
if __name__ == '__main__':
from scipy import linspace
ts = linspace(0.1,4)
c1s = [1] * len(ts)
c2s = [2] * len(ts)
m12s = [0.0] * len(ts)
m21s = [0.0] * len(ts)
im = IMSystem(ts, c1s, c2s, m12s, m21s)
pdm_20,pdm_11,pdm_02 = im.coalescence_distribution()
from matplotlib import pyplot
pyplot.plot(im.ts,pdm_20[0:-1])
pyplot.plot(im.ts,pdm_11[0:-1])
pyplot.plot(im.ts,pdm_02[0:-1])
pyplot.axis([0, max(ts), 0, max([max(pdm_20),max(pdm_11),max(pdm_02)])])
pyplot.show()
| gpl-3.0 | 5,477,560,428,867,526,000 | 36.336842 | 95 | 0.572597 | false |
ky822/Data_Bootcamp | Code/Python/WB_wdi_all.py | 2 | 2294 | """
Messing around with World Bank data. We start by reading in the whole WDI
from the online csv. Since the online file is part of a zipped collection,
this turned into an exploration of how to handle zip files -- see Section 1.
Section 2 (coming) does slicing and plotting.
Prepared for the NYU Course "Data Bootcamp."
More at https://github.com/DaveBackus/Data_Bootcamp
References
* http://datacatalog.worldbank.org/
* http://stackoverflow.com/questions/19602931/basic-http-file-downloading-and-saving-to-disk-in-python
* https://docs.python.org/3.4/library/urllib.html
Written by Dave Backus @ NYU, September 2014
Created with Python 3.4
"""
import pandas as pd
import urllib.request
import zipfile
import os
"""
1. Read data from component of online zip file
"""
# locations of file input and output
url = 'http://databank.worldbank.org/data/download/WDI_csv.zip'
file = os.path.basename(url) # cool tool via SBH
# the idea is to dump data in a different directory, kill with data = ''
data = '' # '../Data/'
#%%
# copy file from url to hard drive (big file, takes a minute or two)
urllib.request.urlretrieve(url, data+file)
#%%
# zipfile contains several files, we want WDI_Data.csv
print(['Is zipfile?', zipfile.is_zipfile(file)])
# key step, give us a file object to work with
zf = zipfile.ZipFile(data+file, 'r')
print('List of zipfile contents (two versions)')
[print(file) for file in zf.namelist()]
zf.printdir()
#%%
# copy data file to hard drive's working directory, then read it
csv = zf.extract('WDI_Data.csv')
df1 = pd.read_csv('WDI_Data.csv')
print(df1.columns)
#%%
# alternative: open and read
csv = zf.open('WDI_Data.csv')
df2 = pd.read_csv(csv)
print(df2.columns)
#%%
# same thing in one line
df3 = pd.read_csv(zf.open('WDI_Data.csv'))
print(df3.columns)
# zf.close() #??
# do we want to close zf? do we care?
# seems to be essential with writes, not so much with reads
# if so, can either close or use the "with" construction Sarah used.
# basic open etc:
# https://docs.python.org/3.4/tutorial/inputoutput.html#reading-and-writing-files
# on with (go to bottom): http://effbot.org/zone/python-with-statement.htm
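# for reference, the "with" version of the read above would look roughly like
# this (df4 is just an illustrative name; the zipfile is closed automatically
# when the block ends):
# with zipfile.ZipFile(data+file, 'r') as zf:
#     df4 = pd.read_csv(zf.open('WDI_Data.csv'))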
#%%
# could we further consolidate zip read and extract? seems not.
#zf = zipfile.ZipFile(url, 'r')
| mit | 3,442,475,768,866,982,000 | 30.424658 | 102 | 0.709677 | false |
AtsushiSakai/jsk_visualization_packages | jsk_rqt_plugins/src/jsk_rqt_plugins/hist.py | 1 | 7882 | #!/usr/bin/env python
from rqt_gui_py.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, QTimer, qWarning, Slot
from python_qt_binding.QtGui import QAction, QIcon, QMenu, QWidget
from python_qt_binding.QtGui import QWidget, QVBoxLayout, QSizePolicy, QColor
from rqt_py_common.topic_completer import TopicCompleter
from matplotlib.colors import colorConverter
from rqt_py_common.topic_helpers import is_slot_numeric
from rqt_plot.rosplot import ROSData as _ROSData
from rqt_plot.rosplot import RosPlotException
from matplotlib.collections import (PolyCollection,
PathCollection, LineCollection)
import matplotlib
import matplotlib.patches as mpatches
import rospkg
import rospy
from cStringIO import StringIO
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from jsk_recognition_msgs.msg import HistogramWithRange, HistogramWithRangeBin
import os, sys
import argparse
try:
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except ImportError:
# work around bug in dateutil
import sys
import thread
sys.modules['_thread'] = thread
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
class ROSData(_ROSData):
def _get_data(self, msg):
val = msg
try:
if not self.field_evals:
return val
for f in self.field_evals:
val = f(val)
return val
except IndexError:
self.error = RosPlotException("[%s] index error for: %s" % (self.name, str(val).replace('\n', ', ')))
except TypeError:
self.error = RosPlotException("[%s] value was not numeric: %s" % (self.name, val))
class HistogramPlot(Plugin):
def __init__(self, context):
super(HistogramPlot, self).__init__(context)
self.setObjectName('HistogramPlot')
self._args = self._parse_args(context.argv())
self._widget = HistogramPlotWidget(self._args.topics)
context.add_widget(self._widget)
def _parse_args(self, argv):
parser = argparse.ArgumentParser(prog='rqt_histogram_plot', add_help=False)
HistogramPlot.add_arguments(parser)
args = parser.parse_args(argv)
return args
@staticmethod
def add_arguments(parser):
group = parser.add_argument_group('Options for rqt_histogram plugin')
group.add_argument('topics', nargs='?', default=[], help='Topics to plot')
class HistogramPlotWidget(QWidget):
_redraw_interval = 40
def __init__(self, topics):
super(HistogramPlotWidget, self).__init__()
self.setObjectName('HistogramPlotWidget')
rp = rospkg.RosPack()
ui_file = os.path.join(rp.get_path('jsk_rqt_plugins'),
'resource', 'plot_histogram.ui')
loadUi(ui_file, self)
self.cv_bridge = CvBridge()
self.subscribe_topic_button.setIcon(QIcon.fromTheme('add'))
self.pause_button.setIcon(QIcon.fromTheme('media-playback-pause'))
self.clear_button.setIcon(QIcon.fromTheme('edit-clear'))
self.data_plot = MatHistogramPlot(self)
self.data_plot_layout.addWidget(self.data_plot)
self._topic_completer = TopicCompleter(self.topic_edit)
self._topic_completer.update_topics()
self.topic_edit.setCompleter(self._topic_completer)
self.data_plot.dropEvent = self.dropEvent
self.data_plot.dragEnterEvent = self.dragEnterEvent
self._start_time = rospy.get_time()
self._rosdata = None
if len(topics) != 0:
self.subscribe_topic(topics)
self._update_plot_timer = QTimer(self)
self._update_plot_timer.timeout.connect(self.update_plot)
self._update_plot_timer.start(self._redraw_interval)
@Slot('QDropEvent*')
def dropEvent(self, event):
if event.mimeData().hasText():
topic_name = str(event.mimeData().text())
else:
droped_item = event.source().selectedItems()[0]
topic_name = str(droped_item.data(0, Qt.UserRole))
self.subscribe_topic(topic_name)
@Slot()
def on_topic_edit_returnPressed(self):
if self.subscribe_topic_button.isEnabled():
self.subscribe_topic(str(self.topic_edit.text()))
@Slot()
def on_subscribe_topic_button_clicked(self):
self.subscribe_topic(str(self.topic_edit.text()))
def subscribe_topic(self, topic_name):
self.topic_with_field_name = topic_name
self.pub_image = rospy.Publisher(topic_name + "/histogram_image", Image)
if not self._rosdata:
self._rosdata = ROSData(topic_name, self._start_time)
else:
if self._rosdata.name != topic_name:
self._rosdata.close()
self.data_plot.clear()
self._rosdata = ROSData(topic_name, self._start_time)
else:
rospy.logwarn("%s is already subscribed", topic_name)
def enable_timer(self, enabled=True):
if enabled:
self._update_plot_timer.start(self._redraw_interval)
else:
self._update_plot_timer.stop()
@Slot()
def on_clear_button_clicked(self):
self.data_plot.clear()
@Slot(bool)
def on_pause_button_clicked(self, checked):
self.enable_timer(not checked)
def update_plot(self):
if not self._rosdata:
return
data_x, data_y = self._rosdata.next()
if len(data_y) == 0:
return
axes = self.data_plot._canvas.axes
axes.cla()
if self._rosdata.sub.data_class is HistogramWithRange:
xs = [y.count for y in data_y[-1].bins]
pos = [y.min_value for y in data_y[-1].bins]
widths = [y.max_value - y.min_value for y in data_y[-1].bins]
axes.set_xlim(xmin=pos[0], xmax=pos[-1] + widths[-1])
else:
xs = data_y[-1]
pos = np.arange(len(xs))
widths = [1] * len(xs)
axes.set_xlim(xmin=0, xmax=len(xs))
#axes.xticks(range(5))
for p, x, w in zip(pos, xs, widths):
axes.bar(p, x, color='r', align='center', width=w)
axes.legend([self.topic_with_field_name], prop={'size': '8'})
self.data_plot._canvas.draw()
buffer = StringIO()
self.data_plot._canvas.figure.savefig(buffer, format="png")
buffer.seek(0)
img_array = np.asarray(bytearray(buffer.read()), dtype=np.uint8)
img = cv2.imdecode(img_array, cv2.CV_LOAD_IMAGE_COLOR)
self.pub_image.publish(self.cv_bridge.cv2_to_imgmsg(img, "bgr8"))
class MatHistogramPlot(QWidget):
class Canvas(FigureCanvas):
def __init__(self, parent=None):
super(MatHistogramPlot.Canvas, self).__init__(Figure())
self.axes = self.figure.add_subplot(111)
self.figure.tight_layout()
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.updateGeometry()
def resizeEvent(self, event):
super(MatHistogramPlot.Canvas, self).resizeEvent(event)
self.figure.tight_layout()
def __init__(self, parent=None):
super(MatHistogramPlot, self).__init__(parent)
self._canvas = MatHistogramPlot.Canvas()
self._toolbar = NavigationToolbar(self._canvas, self._canvas)
vbox = QVBoxLayout()
vbox.addWidget(self._toolbar)
vbox.addWidget(self._canvas)
self.setLayout(vbox)
def redraw(self):
pass
def clear(self):
self._canvas.axes.cla()
self._canvas.draw()
| mit | 4,489,442,462,833,596,400 | 39.214286 | 113 | 0.632581 | false |
guziy/basemap | examples/save_background.py | 2 | 1364 | from __future__ import (absolute_import, division, print_function)
import matplotlib, sys
matplotlib.use('Agg')
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# this example shows how to save a map background and
# reuse it in another figure.
# make sure we have all the same properties on all figs
figprops = dict(figsize=(8,6), dpi=100, facecolor='white')
# generate the first figure.
fig1 = plt.figure(1,**figprops)
ax1 = fig1.add_subplot(111)
# create basemap instance, plot coastlines.
map = Basemap(projection='moll',lon_0=0)
map.drawcoastlines()
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
fig1.canvas.draw()
background = fig1.canvas.copy_from_bbox(fig1.bbox)
# save figure 1.
fig1.savefig('figure1.png', dpi=100)
# generate the second figure, re-using the background
# from figure 1.
fig2 = plt.figure(2,frameon=False,**figprops)
# make sure frame is off, or everything in existing background
# will be obliterated.
ax2 = fig2.add_subplot(111,frameon=False)
# restore previous background.
fig2.canvas.restore_region(background)
# draw parallels and meridians on existing background.
map.drawparallels(range(-90,90,30))
map.drawmeridians(range(-180,180,60))
# save figure 2.
fig2.savefig('figure2.png', dpi=100)
sys.stdout.write('images saved in figure1.png and figure2.png\n')
| gpl-2.0 | 4,759,339,645,438,496,000 | 32.268293 | 66 | 0.76173 | false |
vatsan/pandas_via_psql | setup.py | 2 | 4301 | from setuptools import setup, find_packages
from distutils.util import convert_path
import os,sys
from fnmatch import fnmatchcase
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info','plots')
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, '__init__.py'))
and not prefix):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
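# For illustration only (these file names are made up), the call in setup()
# below might return something shaped like
# {'ppsqlviz': ['templates/report.html', 'sampledata/example.csv']}
# i.e. the {package: [relative file paths]} mapping distutils expects for
# ``package_data``.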
setup(
name='ppsqlviz',
version='1.0.1',
author='Srivatsan Ramanujam',
author_email='[email protected]',
url='http://vatsan.github.io/pandas_via_psql/',
packages=find_packages(),
package_data=find_package_data(only_in_packages=False,show_ignored=True),
include_package_data=True,
license='LICENSE.txt',
description='A command line visualization utility for SQL using Pandas library in Python.',
long_description=open('README.md').read(),
install_requires=[
"pandas >= 0.13.0"
],
)
| bsd-2-clause | -4,267,674,333,726,588,400 | 40.355769 | 95 | 0.549872 | false |
stephane-caron/pymanoid | examples/contact_stability/zmp_support_area.py | 3 | 5631 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Stephane Caron <[email protected]>
#
# This file is part of pymanoid <https://github.com/stephane-caron/pymanoid>.
#
# pymanoid is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
"""
This example computes the multi-contact ZMP support area for a given robot
stance (contacts and CoM position). See [Caron16] for details.
"""
import IPython
from numpy import zeros
import pymanoid
from pymanoid import Stance
from pymanoid.gui import PointMassWrenchDrawer
from pymanoid.gui import draw_polygon
from pymanoid.misc import matplotlib_to_rgb, norm
com_height = 0.9 # [m]
z_polygon = 2.
class SupportAreaDrawer(pymanoid.Process):
"""
Draw the pendular ZMP area of a contact set.
Parameters
----------
stance : Stance
Contacts and COM position of the robot.
height : scalar, optional
Height of the ZMP support area in the world frame.
color : tuple or string, optional
Area color.
"""
def __init__(self, stance, height=0., color=None):
self.stance = stance # before calling parent constructor
if color is None:
color = (0., 0.5, 0., 0.5)
if type(color) is str:
color = matplotlib_to_rgb(color) + [0.5]
super(SupportAreaDrawer, self).__init__()
self.color = color
self.contact_poses = {}
self.handle = None
self.height = height
self.last_com = stance.com.p
self.stance = stance
#
self.update_contact_poses()
self.update_polygon()
def clear(self):
self.handle = None
def update_contact_poses(self):
for contact in self.stance.contacts:
self.contact_poses[contact.name] = contact.pose
def update_height(self, height):
self.height = height
self.update_polygon()
def update_polygon(self):
self.handle = None
try:
vertices = self.stance.compute_zmp_support_area(self.height)
self.handle = draw_polygon(
[(x[0], x[1], self.height) for x in vertices],
normal=[0, 0, 1], color=(0.0, 0.0, 0.5, 0.5))
except Exception as e:
print("SupportAreaDrawer: {}".format(e))
def on_tick(self, sim):
if self.handle is None:
self.update_polygon()
for contact in self.stance.contacts:
if norm(contact.pose - self.contact_poses[contact.name]) > 1e-10:
self.update_contact_poses()
self.update_polygon()
break
if norm(self.stance.com.p - self.last_com) > 1e-10:
self.update_contact_poses()
self.update_polygon()
self.last_com = self.stance.com.p
class StaticWrenchDrawer(PointMassWrenchDrawer):
"""
Draw contact wrenches applied to a robot in static-equilibrium.
Parameters
----------
stance : Stance
Contacts and COM position of the robot.
"""
def __init__(self, stance):
super(StaticWrenchDrawer, self).__init__(stance.com, stance)
stance.com.set_accel(zeros((3,)))
self.stance = stance
def find_supporting_wrenches(self, sim):
return self.stance.find_static_supporting_wrenches()
class COMSync(pymanoid.Process):
def __init__(self, stance, com_above):
super(COMSync, self).__init__()
self.com_above = com_above
self.stance = stance
def on_tick(self, sim):
self.stance.com.set_x(self.com_above.x)
self.stance.com.set_y(self.com_above.y)
if __name__ == "__main__":
sim = pymanoid.Simulation(dt=0.03)
robot = pymanoid.robots.JVRC1('JVRC-1.dae', download_if_needed=True)
sim.set_viewer()
sim.viewer.SetCamera([
[0.60587192, -0.36596244, 0.70639274, -2.4904027],
[-0.79126787, -0.36933163, 0.48732874, -1.6965636],
[0.08254916, -0.85420468, -0.51334199, 2.79584694],
[0., 0., 0., 1.]])
robot.set_transparency(0.25)
com_above = pymanoid.Cube(0.02, [0.05, 0.04, z_polygon], color='b')
stance = Stance.from_json('stances/double.json')
stance.bind(robot)
robot.ik.solve()
com_sync = COMSync(stance, com_above)
support_area_drawer = SupportAreaDrawer(stance, z_polygon)
wrench_drawer = StaticWrenchDrawer(stance)
sim.schedule(robot.ik)
sim.schedule_extra(com_sync)
sim.schedule_extra(support_area_drawer)
sim.schedule_extra(wrench_drawer)
sim.start()
print("""
ZMP support area
================
Ready to go! The GUI displays the ZMP pendular support area in blue. You can
move the blue box (in the plane above the robot) around to make the robot move
its center of mass. Contact wrenches are displayed at each contact (green dot
is COP location, arrow is resultant force). When the COM exists the
static-equilibrium polygon, you should see the background turn red as no
feasible contact wrenches can be found.
Enjoy :)
""")
if IPython.get_ipython() is None:
IPython.embed()
| gpl-3.0 | -3,208,270,963,893,853,000 | 30.110497 | 79 | 0.640206 | false |
rema-git/lichtmalen | image_to_tpm2.py | 1 | 3589 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 27 00:33:17 2014
@author: Reinhardt A.W. Maier <[email protected]>
"""
import os
import argparse
import binascii
import numpy as np
import Image as pil
#import textwrap
#import matplotlib.pyplot as plt
def tpm2(image, lastFrameBlack=False):
"""
generate TPM2 file format:
* image as numpy array with dim(height, width, color)
* returns tpm2 as string
"""
dim = tuple((np.shape(image)[0], np.shape(image)[1]))
frameheader = 'C9DA{:04X}'.format(dim[1]*3)
output = []
for frame in range(dim[0]): # loop over lines = height
output += frameheader
for led in range(dim[1]): # loop over columns = width
output += '{:02X}{:02X}{:02X}'.format(*image[frame][led])
output += '36' # end-of-frame
if lastFrameBlack:
output += frameheader + '0'*6*dim[1] + '36' # black frame
print 'Added black frame to EOF'
return ''.join(output)
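# Framing example, assuming a 3-pixel-wide image row: the payload is 3*3 = 9
# bytes, so the emitted line is 'C9DA0009' + three 6-hex-digit pixel values
# (GRB order in this script, see rgb2grb below) + the end-of-frame byte '36'.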
def imageFilter(image):
"""
example filter function
"""
filteredImage = image.rotate(-90)
return filteredImage
def imageFit2LEDs(image, n_LEDs=121):
"""
resize image to number of LEDs
"""
scale = n_LEDs / float(image.size[0])
hsize = int((float(image.size[1]) * float(scale)))
image = image.resize((n_LEDs, hsize))
return image
def rgb2grb(image):
"""
swap color order of numpy array: RGB -> GRB
"""
R, G, B = image.T
return np.array([G, R, B]).T
def main(imageFilename, tpm2Filename, *opts):
"""
open image, apply filter function and save as TPM2 binary file
"""
# open image file
try:
image = pil.open(imageFilename)
print 'Image read from', imageFilename
except:
print 'ERROR: cannot read input image file!'
# filter image
if image.mode != 'RGB':
print 'Convert image to RGB'
image = image.convert('RGB')
image = imageFilter(image)
image = imageFit2LEDs(image)
# convert to numpy array with dim(height, width, color)
image = np.array(image)
# swap colors
image = rgb2grb(image)
# display image
#plt.imshow(image, interpolation='none')
#plt.show()
# convert image to tpm2
tpm2string = tpm2(image, *opts)
print 'Image successfully converted'
# show result to screen
#print textwrap.fill('\n' + tpm2string + '\n')
# write result to file
with open(tpm2Filename, 'wb') as binFile:
tpm2binary = binascii.a2b_hex(tpm2string)
binFile.write(tpm2binary)
print 'TPM2 file written to', tpm2Filename
if __name__ == "__main__":
# if this module is being run directly use command line arguments
parser = argparse.ArgumentParser(description='convert an image file to tpm2 format')
parser.add_argument('--noloop',
action='store_true', dest='lastFrameBlack',
help='add a black frame to stop with')
parser.add_argument('infile',
type=argparse.FileType('r'),
help="image file to be converted. Supported are all common image formats, e.g. .jpg, .png, .gif, .bmp")
parser.add_argument('outfile',
type=argparse.FileType('w'), default=None, nargs='?',
help="tpm2 file to be created (default: infile.tp2)")
args = parser.parse_args()
# set output filename, if not given use input filename with extension .tp2
if args.outfile == None:
outfile = os.path.splitext(args.infile.name)[0] + '.tp2'
else:
outfile = args.outfile.name
main(args.infile.name, outfile, args.lastFrameBlack)
| gpl-3.0 | -8,740,749,091,339,044,000 | 28.178862 | 111 | 0.629145 | false |
activitynet/ActivityNet | Evaluation/get_ava_active_speaker_performance.py | 1 | 8421 | r"""Compute active speaker detection performance for the AVA dataset.
Please send any questions about this code to the Google Group ava-dataset-users:
https://groups.google.com/forum/#!forum/ava-dataset-users
Example usage:
python -O get_ava_active_speaker_performance.py \
-g testdata/eval.csv \
-p testdata/predictions.csv \
-v
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import time
import numpy as np
import pandas as pd
def compute_average_precision(precision, recall):
"""Compute Average Precision according to the definition in VOCdevkit.
Precision is modified to ensure that it does not decrease as recall
decrease.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
average_precision: The area under the precision-recall curve. NaN if
precision and recall are None.
"""
if precision is None:
if recall is not None:
raise ValueError("If precision is None, recall must also be None")
return np.NAN
if not isinstance(precision, np.ndarray) or not isinstance(
recall, np.ndarray):
raise ValueError("precision and recall must be numpy array")
if precision.dtype != np.float or recall.dtype != np.float:
raise ValueError("input must be float numpy array.")
if len(precision) != len(recall):
raise ValueError("precision and recall must be of the same size.")
if not precision.size:
return 0.0
if np.amin(precision) < 0 or np.amax(precision) > 1:
raise ValueError("Precision must be in the range of [0, 1].")
if np.amin(recall) < 0 or np.amax(recall) > 1:
raise ValueError("recall must be in the range of [0, 1].")
if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
raise ValueError("recall must be a non-decreasing array")
recall = np.concatenate([[0], recall, [1]])
precision = np.concatenate([[0], precision, [0]])
# Smooth precision to be monotonically decreasing.
for i in range(len(precision) - 2, -1, -1):
precision[i] = np.maximum(precision[i], precision[i + 1])
indices = np.where(recall[1:] != recall[:-1])[0] + 1
average_precision = np.sum(
(recall[indices] - recall[indices - 1]) * precision[indices])
return average_precision
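# Worked example: precision=[1.0, 0.5], recall=[0.5, 1.0] are padded to
# precision=[0, 1, 0.5, 0], recall=[0, 0.5, 1, 1]; the monotonic smoothing
# gives the envelope [1, 1, 0.5, 0], and the summed area is
# 0.5*1.0 + 0.5*0.5 = 0.75.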
def load_csv(filename, column_names):
"""Loads CSV from the filename using given column names.
Adds uid column.
Args:
filename: Path to the CSV file to load.
column_names: A list of column names for the data.
Returns:
df: A Pandas DataFrame containing the data.
"""
# Here and elsewhere, df indicates a DataFrame variable.
df = pd.read_csv(filename, header=None, names=column_names)
# Creates a unique id from frame timestamp and entity id.
df["uid"] = (df["frame_timestamp"].map(str) + ":" + df["entity_id"])
return df
def eq(a, b, tolerance=1e-09):
"""Returns true if values are approximately equal."""
return abs(a - b) <= tolerance
def merge_groundtruth_and_predictions(df_groundtruth, df_predictions):
"""Merges groundtruth and prediction DataFrames.
The returned DataFrame is merged on uid field and sorted in descending order
by score field. Bounding boxes are checked to make sure they match between
groundtruth and predictions.
Args:
df_groundtruth: A DataFrame with groundtruth data.
df_predictions: A DataFrame with predictions data.
Returns:
df_merged: A merged DataFrame, with rows matched on uid column.
"""
if df_groundtruth["uid"].count() != df_predictions["uid"].count():
raise ValueError(
"Groundtruth and predictions CSV must have the same number of "
"unique rows.")
if df_predictions["label"].unique() != ["SPEAKING_AUDIBLE"]:
raise ValueError(
"Predictions CSV must contain only SPEAKING_AUDIBLE label.")
if df_predictions["score"].count() < df_predictions["uid"].count():
raise ValueError("Predictions CSV must contain score value for every row.")
# Merges groundtruth and predictions on uid, validates that uid is unique
# in both frames, and sorts the resulting frame by the predictions score.
df_merged = df_groundtruth.merge(
df_predictions,
on="uid",
suffixes=("_groundtruth", "_prediction"),
validate="1:1").sort_values(
by=["score"], ascending=False).reset_index()
# Validates that bounding boxes in ground truth and predictions match for the
# same uids.
df_merged["bounding_box_correct"] = np.where(
eq(df_merged["entity_box_x1_groundtruth"],
df_merged["entity_box_x1_prediction"])
& eq(df_merged["entity_box_x2_groundtruth"],
df_merged["entity_box_x2_prediction"])
& eq(df_merged["entity_box_y1_groundtruth"],
df_merged["entity_box_y1_prediction"])
& eq(df_merged["entity_box_y2_groundtruth"],
df_merged["entity_box_y2_prediction"]), True, False)
if (~df_merged["bounding_box_correct"]).sum() > 0:
raise ValueError(
"Mismatch between groundtruth and predictions bounding boxes found at "
+ str(list(df_merged[~df_merged["bounding_box_correct"]]["uid"])))
return df_merged
def get_all_positives(df_merged):
"""Counts all positive examples in the groundtruth dataset."""
return df_merged[df_merged["label_groundtruth"] ==
"SPEAKING_AUDIBLE"]["uid"].count()
def calculate_precision_recall(df_merged):
"""Calculates precision and recall arrays going through df_merged row-wise."""
all_positives = get_all_positives(df_merged)
# Populates each row with 1 if this row is a true positive
# (at its score level).
df_merged["is_tp"] = np.where(
(df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE") &
(df_merged["label_prediction"] == "SPEAKING_AUDIBLE"), 1, 0)
# Counts true positives up to and including that row.
df_merged["tp"] = df_merged["is_tp"].cumsum()
# Calculates precision for every row counting true positives up to
# and including that row over the index (1-based) of that row.
df_merged["precision"] = df_merged["tp"] / (df_merged.index + 1)
# Calculates recall for every row counting true positives up to
# and including that row over all positives in the groundtruth dataset.
df_merged["recall"] = df_merged["tp"] / all_positives
logging.info(
"\n%s\n",
df_merged.head(10)[[
"uid", "score", "label_groundtruth", "is_tp", "tp", "precision",
"recall"
]])
return np.array(df_merged["precision"]), np.array(df_merged["recall"])
def run_evaluation(groundtruth, predictions):
"""Runs AVA Active Speaker evaluation, printing average precision result."""
df_groundtruth = load_csv(
groundtruth,
column_names=[
"video_id", "frame_timestamp", "entity_box_x1", "entity_box_y1",
"entity_box_x2", "entity_box_y2", "label", "entity_id"
])
df_predictions = load_csv(
predictions,
column_names=[
"video_id", "frame_timestamp", "entity_box_x1", "entity_box_y1",
"entity_box_x2", "entity_box_y2", "label", "entity_id", "score"
])
df_merged = merge_groundtruth_and_predictions(df_groundtruth, df_predictions)
precision, recall = calculate_precision_recall(df_merged)
print("average precision: ", compute_average_precision(precision, recall))
def parse_arguments():
"""Parses command-line flags.
Returns:
args: a namespace containing two file objects, args.groundtruth and
args.predictions, plus the boolean args.verbose flag.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-g",
"--groundtruth",
help="CSV file containing ground truth.",
type=argparse.FileType("r"),
required=True)
parser.add_argument(
"-p",
"--predictions",
help="CSV file containing active speaker predictions.",
type=argparse.FileType("r"),
required=True)
parser.add_argument(
"-v", "--verbose", help="Increase output verbosity.", action="store_true")
return parser.parse_args()
def main():
start = time.time()
args = parse_arguments()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
del args.verbose
run_evaluation(**vars(args))
logging.info("Computed in %s seconds", time.time() - start)
if __name__ == "__main__":
main()
| mit | -3,192,975,491,083,719,000 | 33.093117 | 80 | 0.676879 | false |
CCS-Lab/hBayesDM | Python/hbayesdm/models/_pst_Q.py | 1 | 10143 | from typing import Sequence, Union, Any
from collections import OrderedDict
from numpy import Inf, exp
import pandas as pd
from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import pst_preprocess_func
__all__ = ['pst_Q']
class PstQ(TaskModel):
def __init__(self, **kwargs):
super().__init__(
task_name='pst',
model_name='Q',
model_type='',
data_columns=(
'subjID',
'type',
'choice',
'reward',
),
parameters=OrderedDict([
('alpha', (0, 0.5, 1)),
('beta', (0, 1, 10)),
]),
regressors=OrderedDict([
]),
postpreds=['y_pred'],
parameters_desc=OrderedDict([
('alpha', 'learning rate'),
('beta', 'inverse temperature'),
]),
additional_args_desc=OrderedDict([
]),
**kwargs,
)
_preprocess_func = pst_preprocess_func
def pst_Q(
data: Union[pd.DataFrame, str, None] = None,
niter: int = 4000,
nwarmup: int = 1000,
nchain: int = 4,
ncore: int = 1,
nthin: int = 1,
inits: Union[str, Sequence[float]] = 'vb',
ind_pars: str = 'mean',
model_regressor: bool = False,
vb: bool = False,
inc_postpred: bool = False,
adapt_delta: float = 0.95,
stepsize: float = 1,
max_treedepth: int = 10,
**additional_args: Any) -> TaskModel:
"""Probabilistic Selection Task - Q Learning Model
Hierarchical Bayesian Modeling of the Probabilistic Selection Task
using Q Learning Model [Frank2007]_ with the following parameters:
"alpha" (learning rate), "beta" (inverse temperature).
.. [Frank2007] Frank, M. J., Moustafa, A. A., Haughey, H. M., Curran, T., & Hutchison, K. E. (2007). Genetic triple dissociation reveals multiple roles for dopamine in reinforcement learning. Proceedings of the National Academy of Sciences, 104(41), 16311-16316.
.. codeauthor:: David Munoz Tord <[email protected]>
User data should contain the behavioral data-set of all subjects of interest for
the current analysis. When loading from a file, the datafile should be a
**tab-delimited** text file, whose rows represent trial-by-trial observations
and columns represent variables.
For the Probabilistic Selection Task, there should be 4 columns of data
with the labels "subjID", "type", "choice", "reward". It is not necessary for the columns to be
in this particular order; however, it is necessary that they be labeled
correctly and contain the information below:
- "subjID": A unique identifier for each subject in the data-set.
- "type": Two-digit number indicating which pair of stimuli were presented for that trial, e.g. 12, 34, or 56. The digit on the left (tens-digit) indicates the presented stimulus for option1, while the digit on the right (ones-digit) indicates that for option2. Code for each stimulus type (1~6) is defined as for 80\% (type 1), 20\% (type 2), 70\% (type 3), 30\% (type 4), 60\% (type 5), 40\% (type 6). The modeling will still work even if different probabilities are used for the stimuli; however, the total number of stimuli should be less than or equal to 6.
- "choice": Whether the subject chose the left option (option1) out of the given two options (i.e. if option1 was chosen, 1; if option2 was chosen, 0).
- "reward": Amount of reward earned as a result of the trial.
.. note::
User data may contain other columns of data (e.g. ``ReactionTime``,
``trial_number``, etc.), but only the data within the column names listed
above will be used during the modeling. As long as the necessary columns
mentioned above are present and labeled correctly, there is no need to
remove other miscellaneous data columns.
.. note::
``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that
give the user more control over Stan's MCMC sampler. It is recommended that
only advanced users change the default values, as alterations can profoundly
change the sampler's behavior. See [Hoffman2014]_ for more information on the
sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm
Parameters' of the `Stan User's Guide and Reference Manual`__.
.. [Hoffman2014]
Hoffman, M. D., & Gelman, A. (2014).
The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo.
Journal of Machine Learning Research, 15(1), 1593-1623.
__ https://mc-stan.org/users/documentation/
Parameters
----------
data
Data to be modeled. It should be given as a Pandas DataFrame object,
a filepath for a data file, or ``"example"`` for example data.
Data columns should be labeled as: "subjID", "type", "choice", "reward".
niter
Number of iterations, including warm-up. Defaults to 4000.
nwarmup
Number of iterations used for warm-up only. Defaults to 1000.
``nwarmup`` is a numerical value that specifies how many MCMC samples
should not be stored upon the beginning of each chain. For those
familiar with Bayesian methods, this is equivalent to burn-in samples.
Due to the nature of the MCMC algorithm, initial values (i.e., where the
sampling chains begin) can have a heavy influence on the generated
posterior distributions. The ``nwarmup`` argument can be set to a
higher number in order to curb the effects that initial values have on
the resulting posteriors.
nchain
Number of Markov chains to run. Defaults to 4.
``nchain`` is a numerical value that specifies how many chains (i.e.,
independent sampling sequences) should be used to draw samples from
the posterior distribution. Since the posteriors are generated from a
sampling process, it is good practice to run multiple chains to ensure
that a reasonably representative posterior is attained. When the
sampling is complete, it is possible to check the multiple chains for
convergence by running the following line of code:
.. code:: python
output.plot(type='trace')
ncore
Number of CPUs to be used for running. Defaults to 1.
nthin
Every ``nthin``-th sample will be used to generate the posterior
distribution. Defaults to 1. A higher number can be used when
auto-correlation within the MCMC sampling is high.
``nthin`` is a numerical value that specifies the "skipping" behavior
of the MCMC sampler. That is, only every ``nthin``-th sample is used to
generate posterior distributions. By default, ``nthin`` is equal to 1,
meaning that every sample is used to generate the posterior.
inits
String or list specifying how the initial values should be generated.
Options are ``'fixed'`` or ``'random'``, or your own initial values.
ind_pars
String specifying how to summarize the individual parameters.
Current options are: ``'mean'``, ``'median'``, or ``'mode'``.
model_regressor
Whether to export model-based regressors. Currently not available for this model.
vb
Whether to use variational inference to approximately draw from a
posterior distribution. Defaults to ``False``.
inc_postpred
Include trial-level posterior predictive simulations in
model output (may greatly increase file size). Defaults to ``False``.
adapt_delta
Floating point value representing the target acceptance probability of a new
sample in the MCMC chain. Must be between 0 and 1. See note below.
stepsize
        Floating point value specifying the size of each leapfrog step that the MCMC sampler
can take on each new iteration. See note below.
max_treedepth
Integer value specifying how many leapfrog steps the MCMC sampler can take
on each new iteration. See note below.
**additional_args
Not used for this model.
Returns
-------
model_data
An ``hbayesdm.TaskModel`` instance with the following components:
- ``model``: String value that is the name of the model ('pst_Q').
- ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values
(as specified by ``ind_pars``) for each subject.
- ``par_vals``: OrderedDict holding the posterior samples over different parameters.
- ``fit``: A PyStan StanFit object that contains the fitted Stan model.
- ``raw_data``: Pandas DataFrame containing the raw data used to fit the model,
as specified by the user.
Examples
--------
.. code:: python
from hbayesdm import rhat, print_fit
from hbayesdm.models import pst_Q
# Run the model and store results in "output"
output = pst_Q(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4)
# Visually check convergence of the sampling chains (should look like "hairy caterpillars")
output.plot(type='trace')
# Plot posterior distributions of the hyper-parameters (distributions should be unimodal)
output.plot()
# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output, less=1.1)
# Show the LOOIC and WAIC model fit estimates
print_fit(output)
"""
return PstQ(
data=data,
niter=niter,
nwarmup=nwarmup,
nchain=nchain,
ncore=ncore,
nthin=nthin,
inits=inits,
ind_pars=ind_pars,
model_regressor=model_regressor,
vb=vb,
inc_postpred=inc_postpred,
adapt_delta=adapt_delta,
stepsize=stepsize,
max_treedepth=max_treedepth,
**additional_args)
| gpl-3.0 | -2,116,428,267,287,271,000 | 42.161702 | 566 | 0.643104 | false |
linebp/pandas | pandas/tests/dtypes/test_inference.py | 1 | 35947 | # -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import tslib, lib
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical)
from pandas.compat import u, PY2, PY3, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
_ensure_int32,
_ensure_categorical)
from pandas.core.dtypes.missing import isnull
from pandas.util import testing as tm
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_list_like():
passes = ([], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert inference.is_list_like(p)
for f in fails:
assert not inference.is_list_like(f)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
def test_is_dict_like():
passes = [{}, {'A': 1}, Series([1])]
fails = ['1', 1, [1, 2], (1, 2), range(2), Index([1])]
for p in passes:
assert inference.is_dict_like(p)
for f in fails:
assert not inference.is_dict_like(f)
def test_is_file_like():
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
if PY3:
from unittest import mock
assert not is_file(mock.Mock())
def test_is_named_tuple():
passes = (collections.namedtuple('Test', list('abc'))(1, 2, 3), )
fails = ((1, 2, 3), 'a', Series({'pi': 3.14}))
for p in passes:
assert inference.is_named_tuple(p)
for f in fails:
assert not inference.is_named_tuple(f)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, collections.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert inference.is_re(p)
for f in fails:
assert not inference.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'), u(r'\u2233\s*'),
re.compile(r''))
fails = 1, [], object()
for p in passes:
assert inference.is_re_compilable(p)
for f in fails:
assert not inference.is_re_compilable(f)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
def test_isinf_scalar(self):
# GH 11352
assert lib.isposinf_scalar(float('inf'))
assert lib.isposinf_scalar(np.inf)
assert not lib.isposinf_scalar(-np.inf)
assert not lib.isposinf_scalar(1)
assert not lib.isposinf_scalar('a')
assert lib.isneginf_scalar(float('-inf'))
assert lib.isneginf_scalar(-np.inf)
assert not lib.isneginf_scalar(np.inf)
assert not lib.isneginf_scalar(1)
assert not lib.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assert_raises_regex(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
def test_convert_numeric_uint64_nan(self):
msg = 'uint64 array detected'
cases = [(np.array([2**63, np.nan], dtype=object), set()),
(np.array([str(2**63), np.nan], dtype=object), set()),
(np.array([np.nan, 2**63], dtype=object), set()),
(np.array([np.nan, str(2**63)], dtype=object), set()),
(np.array([2**63, 2**63 + 1], dtype=object), set([2**63])),
(np.array([str(2**63), str(2**63 + 1)],
dtype=object), set([2**63]))]
for coerce in (True, False):
for arr, na_values in cases:
if coerce:
with tm.assert_raises_regex(ValueError, msg):
lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
else:
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(
arr, na_values), arr)
def test_convert_numeric_int64_uint64(self):
msg = 'uint64 and negative values detected'
cases = [np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)]
for coerce in (True, False):
for case in cases:
if coerce:
with tm.assert_raises_regex(ValueError, msg):
lib.maybe_convert_numeric(case, set(),
coerce_numeric=coerce)
else:
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(
case, set()), case)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_array(arr)
assert lib.is_timedelta64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_array(arr)
assert lib.is_timedelta64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class Testisscalar(object):
def test_isscalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(u('efoobar'))
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_isscalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_isscalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_(u('foobar')))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
def test_isscalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix('1; 2'))
def test_isscalar_pandas_scalars(self):
assert is_scalar(Timestamp('2014-01-01'))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period('2014-01-01'))
def test_lisscalar_pandas_containers(self):
assert not is_scalar(Series())
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
with catch_warnings(record=True):
assert not is_scalar(Panel())
assert not is_scalar(Panel([[[1]]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
def test_datetimeindex_from_empty_datetime64_array():
for unit in ['ms', 'us', 'ns']:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert (len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A': np.asarray(
lrange(10), dtype='float64'),
'B': Timestamp('20010101')
}))
df.iloc[3:6, :] = np.nan
result = df.loc[4, 'B'].value
assert (result == tslib.iNaT)
s = df['B'].copy()
s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)
assert (isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert (s[8].value == np.datetime64('NaT').astype(np.int64))
def test_is_scipy_sparse(spmatrix): # noqa: F811
tm._skip_if_no_scipy()
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = _ensure_int32(values)
assert (result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = _ensure_int32(values)
assert (result.dtype == np.int32)
def test_ensure_categorical():
values = np.arange(10, dtype=np.int32)
result = _ensure_categorical(values)
assert (result.dtype == 'category')
values = Categorical(values)
result = _ensure_categorical(values)
tm.assert_categorical_equal(result, values)
| bsd-3-clause | 3,442,768,526,486,067,000 | 33.300573 | 79 | 0.567919 | false |
Ernestyj/PyStudy | finance/DaysTest/MICAnalysis.py | 1 | 4363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from minepy import MINE
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", context="talk")
from sklearn import preprocessing
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
pd.set_option('precision', 7)
pd.options.display.float_format = '{:,.3f}'.format
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
'''
Read OHLCV data for a single stock over the specified years.
Inputs: baseDir and stockCode are strings; startYear and yearNum are integers.
Output: DataFrame
'''
def readWSDFile(baseDir, stockCode, startYear, yearNum=1):
    # Parse the date column
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep='\t', usecols=[0,2,3,4,5,6,7,9,10,12,15], header=None,
skiprows=1, names=['Date','Open','High','Low','Close','Volume','Amount',
'Chg','Chg Pct','Avg','Turn'],
parse_dates=True, date_parser=dateparse)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
usecols = [0, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 36, 37]
# usecols = [0, 6, 16, 17, 24, 31]
usecols = [0, 2,11,24,26,29,30]
# usecols = [0, 5,7,11,19,24,26,28]
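# NOTE: only the last uncommented usecols assignment above takes effect, and it is
# currently unused anyway because the usecols argument is commented out in the
# read_csv call inside readWSDIndexFile below.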
def readWSDIndexFile(baseDir, stockCode, startYear, yearNum=1):
    # Parse the date column
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+'I'+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep=',', parse_dates=True, date_parser=dateparse
# , usecols=usecols
)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
baseDir = '/Users/eugene/Downloads/data/'
stockCodes = ['000300.SH', '000016.SH', '000905.SH']
i = 0
startYear = 2014
number = 2
df = readWSDFile(baseDir, stockCodes[i], startYear, number)
R = df['Close'].pct_change()
R[0] = R[1]
upOrDowns = []
for v in R.values:
if v>0: upOrDowns.append(1)
else: upOrDowns.append(-1)
# print upOrDowns
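# upOrDowns encodes the sign of each daily return (+1 for an up day, -1 otherwise);
# it is computed here but not used further in this script.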
print 'Day count:', len(df)
# print df.head(5)
# df['R'] = R
dfi = readWSDIndexFile(baseDir, stockCodes[i], startYear, number)
dfi['R'] = R
print np.shape(df), np.shape(dfi)
allDF = pd.concat([df, dfi], axis=1)
scaler = preprocessing.MinMaxScaler()
X_Standard = scaler.fit_transform(df)
X_Standard_T = np.transpose(X_Standard)
Xi_Standard = scaler.fit_transform(dfi)
Xi_Standard_T = np.transpose(Xi_Standard)
X_ALL_Standard = scaler.fit_transform(allDF)
X_ALL_Standard_T = np.transpose(X_ALL_Standard)
print np.shape(X_ALL_Standard_T)
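# MINE from minepy estimates the Maximal Information Coefficient (MIC), a measure of
# (possibly nonlinear) association between two variables; alpha=0.6 and c=15 appear to
# be the library's default settings.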
mine = MINE(alpha=0.6, c=15, est="mic_approx")
mics = []
# mine.compute_score(df['Close'].values, df['R'].values); print mine.mic()
# # for i in range(0,10):
# # mine.compute_score(X_Standard_T[i], X_Standard_T[10])
# # mics.append(mine.mic())
# # print i, mine.mic()
# for i in [7,9]:
# mine.compute_score(X_Standard_T[i], X_Standard_T[10])
# mics.append(mine.mic())
# print i, mine.mic()
# # for i in range(0,38):
# # mine.compute_score(Xi_Standard_T[i], Xi_Standard_T[38])
# # mics.append(mine.mic())
# # print i, mine.mic()
# for i in range(0,7):
# mine.compute_score(Xi_Standard_T[i], Xi_Standard_T[7])
# mics.append(mine.mic())
# print i, mine.mic()
#
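# Score every feature column of the combined price/index matrix against the last
# column (the daily return R appended above) and collect the MIC values.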
for i in range(48):
mine.compute_score(X_ALL_Standard_T[i], X_ALL_Standard_T[48])
mics.append(mine.mic())
names = []
for c in allDF.columns.values: names.append(c)
map = {}
for i in range(48):
map[names[i]] = mics[i]
import operator
sorted_tuple = sorted(map.items(), key=operator.itemgetter(1))
vs = []
ks = []
for k,v in sorted_tuple:
ks.append(k); vs.append(v)
ks = ks[::-1]
vs = vs[::-1]
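# plotMICHist (defined below) draws the features as a bar chart sorted by descending MIC;
# it is not called in this script, so invoke plotMICHist() manually to see the figure.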
def plotMICHist():
f, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ks, vs, palette="BuGn_d", ax=ax)
ax.set_ylabel("MIC")
plt.xticks(rotation=90)
f.subplots_adjust(bottom=0.2)
plt.show() | apache-2.0 | -3,788,239,183,814,603,000 | 29.06993 | 104 | 0.604327 | false |