seq_id (str, 4–11 chars) | text (str, 113–2.92M chars) | repo_name (str, 4–125 chars, nullable) | sub_path (str, 3–214 chars) | file_name (str, 3–160 chars) | file_ext (str, 18 classes) | file_size_in_byte (int64, 113–2.92M) | program_lang (str, 1 class) | lang (str, 93 classes) | doc_type (str, 1 class) | stars (int64, 0–179k, nullable) | dataset (str, 3 classes) | pt (str, 78 classes)
---|---|---|---|---|---|---|---|---|---|---|---|---
34058594289
|
import traceback
import logging
import logging.config
import sys
from django.conf import settings
class SysLogger(object):
"""
system logger
"""
INFO_LOGGER = logging.getLogger(settings.PROJECT_INFO_LOG)
ERROR_LOGGER = logging.getLogger(settings.PROJECT_ERROR_LOG)
EXCEPTION_LOGGER = logging.getLogger(settings.PROJECT_EXCEPTION_LOG)
@classmethod
def debug(cls, msg):
"""
logging debug message
:param msg:
:return:
"""
extra = {
"realLocation": repr(traceback.format_stack(limit=2)[0])
}
# repr() renders the object in a readable form
cls.INFO_LOGGER.debug(msg, extra=extra)
@classmethod
def info(cls, msg):
"""
logging info message,
:param msg:
:return:
"""
cls.INFO_LOGGER.info(msg)
@classmethod
def warn(cls, msg):
"""
logging warn message
:param msg:
:return:
"""
extra = {
"realLocation": repr(traceback.format_stack(limit=2)[0]),
}
cls.INFO_LOGGER.warn(msg, extra=extra)
@classmethod
def error(cls, msg):
"""
logging error message
:param msg:
:return:
"""
extra = {
"realLocation": repr(traceback.format_stack(limit=2)[0])
}
cls.INFO_LOGGER.error(msg, extra=extra)
@classmethod
def exception(cls, exp, request=None):
"""
logging exception message
:param exp:
:param request:
:return:
"""
extra = {
"realLocation": repr(traceback.format_stack(limit=2)[0]),
"request": request
}
# format_stack is a shorthand for format_list(extract_stack(f, limit))
cls.INFO_LOGGER.error(exp, extra=extra)
if sys.version_info >= (2, 7, 7):
cls.EXCEPTION_LOGGER.exception(exp, extra=extra)
else:
cls.EXCEPTION_LOGGER.exception(exp)
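# A minimal usage sketch (hedged: assumes Django settings define the
# PROJECT_INFO_LOG, PROJECT_ERROR_LOG and PROJECT_EXCEPTION_LOG logger names
# and that logging.config has been initialised for them):
#
#   SysLogger.info("service started")
#   try:
#       1 / 0
#   except ZeroDivisionError as exc:
#       SysLogger.exception(exc)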
| cnbillow/startupadmin-django2-python3-react | startup/libs/djangos/logger/syslogger.py | syslogger.py | py | 2,060 | python | en | code | 0 | github-code | 50 |
26283351078
|
import os
import openai
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
class OpenAIFeatures():
def __init__(self):
self.model = "gpt-3.5-turbo-1106"
def test_seed(self, seed: int = None):
# Below context taken from https://www.investopedia.com/terms/b/blockchain.asp
context = "A blockchain is a distributed database or ledger shared among a computer network's nodes. They are best " \
"known for their crucial role in cryptocurrency systems for maintaining a secure and decentralized record " \
"of transactions, but they are not limited to cryptocurrency uses. Blockchains can be used to make data in " \
"any industry immutable—the term used to describe the inability to be altered. "
system_message = "You are a helpful chat assistant. You answer questions based on the provided context."
user_request_template = "Please answer the question based on the provided context only. If the answer is not " \
"in the context, please politely say that you do not know the answer. " \
"context: " \
"{context} " \
"question: " \
"{question} "
question = "How can we use blockchain? Please provide a summarized answer."
user_request = user_request_template.format(context=context,question=question)
try:
messages = [
{"role": "system", "content": system_message},
{"role": "user", "content": user_request},
]
if seed is None:
response = openai.ChatCompletion.create(
model=self.model,
messages=messages,
max_tokens=200,
temperature=0.7,
api_key=OPENAI_API_KEY
)
else:
response = openai.ChatCompletion.create(
model=self.model,
messages=messages,
max_tokens=200,
seed=seed,
temperature=0.7,
api_key=OPENAI_API_KEY)
response_content = response["choices"][0]["message"]["content"]
system_fingerprint = response["system_fingerprint"]
prompt_tokens = response["usage"]["prompt_tokens"]
completion_tokens = (
response["usage"]["total_tokens"] - response["usage"]["prompt_tokens"]
)
return response_content, system_fingerprint, prompt_tokens, completion_tokens
except Exception as e:
print(f"An error occurred: {e}")
# return a 4-tuple so callers that unpack the result do not crash
return None, None, None, None
def test_vision_model(self):
pass
if __name__ == "__main__":
open_ai_features = OpenAIFeatures()
for i in range(5):
response_content, system_fingerprint, prompt_tokens, completion_tokens = open_ai_features.test_seed(None)
print("Response :")
print(response_content)
print("system_fingerprint :")
print(system_fingerprint)
print("prompt tokens :")
print(prompt_tokens)
print("completion tokens :")
print(completion_tokens)
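# A small reproducibility sketch (hedged: uses the same pre-1.0
# openai.ChatCompletion API as above; the seed value 42 is illustrative):
#
#   a = open_ai_features.test_seed(seed=42)
#   b = open_ai_features.test_seed(seed=42)
#   # With the same seed and an unchanged system_fingerprint, the response
#   # contents a[0] and b[0] should usually match.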
| kpister/prompt-linter | data/scraping/repos/rajib76~langchain_examples/examples~how_to_use_seed_in_openai.py | examples~how_to_use_seed_in_openai.py | py | 3,297 | python | en | code | 0 | github-code | 50 |
69969448477
|
# Task 1 (task 5)
tempe_fahrenheit = input("Skriv inn en temperatur i fahrenheit: ") # Store the input in a variable
is_number = False
while not is_number: # Keep looping until the input parses as a number.
try: # As long as converting the input raises an error, the program asks again.
float(tempe_fahrenheit) # Try to convert the input.
is_number = True
except ValueError: # If the conversion above fails, run the code below.
is_number = False
print("Ikke en gyldig tallverdi!") # Print an error ("Not a valid number!")
tempe_fahrenheit = input("Skriv inn en temperatur i fahrenheit: ")
# Task 2
print(tempe_fahrenheit)
# Task 3
tempe_celsius = (float(tempe_fahrenheit) - 32) * 5 / 9 # Convert Fahrenheit to Celsius
# Task 4
print(format(tempe_celsius, ".2f")) # Limit the output to 2 decimal places
# Task 5
# See tasks 1, 2, 3, 4
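# Sanity check of the formula above: 212 °F -> (212 - 32) * 5 / 9 = 100.00 °C,
# and 32 °F -> 0.00 °C.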
| DanielDuy/in1000_host_2022_innleveringer | in1000_oblig2/fahrenheit_celsius.py | fahrenheit_celsius.py | py | 965 | python | no | code | 0 | github-code | 50 |
42093212638
|
import gtk
import cairo
import gobject
class HIGToolItem(gtk.HBox):
"""
A HBox that emulate the behaviour of ToolItem
"""
def __init__(self, lbl, image_widg=None, markup=True):
"""
Initialize an instance of HIGToolItem
@param lbl the text for label
@param image_widg the widget to use as image
@param markup if you want to use markup in the label
"""
gtk.HBox.__init__(self)
self.set_spacing(6)
self._markup = markup
self._label = gtk.Label(lbl)
self._label.set_use_markup(markup)
self._image = None
if isinstance(image_widg, gtk.Widget):
self.pack_start(image_widg, False, False, 0)
self._image = image_widg
self.vbox = gtk.VBox()
self.vbox.pack_start(self._label)#, False, False, 0)
self.pack_start(self.vbox)#, False, False, 0)
self.set_border_width(6)
self.set_size_request(-1, 36)
self.show_all()
def get_label(self):
"@return the label widget"
return self._label
def set_label(self, txt):
"Set the label text (with the markup flag set in __init__)"
self._label.set_text(txt)
self._label.set_use_markup(self._markup)
def get_image(self):
"@return the image widget"
return self._image
label = property(get_label, set_label)
image = property(get_image)
class StatusToolItem(HIGToolItem):
"""
StatusToolItem is a special ToolItem that
provides a label and a progressbar
"""
def __init__(self):
"""
Create an instance of StatusToolItem
"""
HIGToolItem.__init__(self, "", gtk.Image())
self.label.set_alignment(0, 0.5)
self.vbox.set_border_width(1)
self.progress = gtk.ProgressBar()
self.progress.set_size_request(-1, 12)
self.vbox.pack_start(self.progress, False, False, 0)
self.connect('realize', self.__on_realize)
def __on_realize(self, widget):
self.progress.hide()
class HIGToolBar(gtk.EventBox):
"""
HIGToolBar is a widget that provides a toolbar
like the firefox one
"""
__gtype_name__ = "HIGToolBar"
__gsignals__ = {
# Emitted when the user click on the toolitems
'changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, \
(gobject.TYPE_INT,))
}
def __init__(self):
"""
Create an instance of HIGToolBar
"""
gtk.EventBox.__init__(self)
self.vbox = gtk.VBox()
self.add(self.vbox)
self.items_hbox = gtk.HBox(False, 4)
self.hover = None
self.active = None
self.show_status = False
self.status_item = StatusToolItem()
# Pack both the item row and the status row; only one is shown at a time
self.vbox.pack_start(self.items_hbox, False, False, 0)
self.vbox.pack_start(self.status_item, False, False, 0)
self.items = []
def draw_round_rect(self, cr, x, y, w, h, radius_x=5, radius_y=5):
"Simple func to write a round rect with a cairo context"
ARC_TO_BEZIER = 0.55228475
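# 0.55228475 ~= 4 * (sqrt(2) - 1) / 3, the standard control-point offset for
# approximating a quarter circle with a cubic Bezier curve.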
if radius_x > w - radius_x:
radius_x = w / 2
if radius_y > h - radius_y:
radius_y = h / 2
c1 = ARC_TO_BEZIER * radius_x
c2 = ARC_TO_BEZIER * radius_y
cr.new_path()
cr.move_to(x + radius_x, y)
cr.rel_line_to(w - 2 * radius_x, 0.0)
cr.rel_curve_to(c1, 0.0, radius_x, c2, radius_x, radius_y)
cr.rel_line_to(0, h - 2 * radius_y)
cr.rel_curve_to(0.0, c2, c1 - radius_x, radius_y, -radius_x, radius_y)
cr.rel_line_to(-w + 2 * radius_x, 0)
cr.rel_curve_to(-c1, 0, -radius_x, -c2, -radius_x, -radius_y)
cr.rel_line_to(0, -h + 2 * radius_y)
cr.rel_curve_to(0.0, -c2, radius_x - c1, -radius_y, radius_x, -radius_y)
cr.close_path()
def set_source_color(self, cr, color):
"""
Set the source pattern from a gtk.gdk.Color
@param cr the cairo context
@param color a gtk.gdk.Color
"""
cr.set_source_rgb(
*self.to_cairo_color(color)
)
def to_cairo_color(self, color):
"""
Transform a cairo color to r/g/b value
@param the gtk.gdk.Color to convert
@return a tuple of (r, g, b) value for color
"""
t = (
float(float(color.red >> 8) / 255.0),
float(float(color.green >> 8) / 255.0),
float(float(color.blue >> 8) / 255.0)
)
return t
def do_realize(self):
gtk.EventBox.do_realize(self)
self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.LEAVE_NOTIFY_MASK | gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
#self.style.attach(self.window)
self.do_size_allocate(self.allocation)
self.status_item.hide()
def do_size_allocate(self, alloc):
gtk.EventBox.do_size_allocate(self, alloc)
# We have to force to 0 0 to have the
# correct gradient behaviour :)
alloc = self.allocation
alloc.x, alloc.y = 0, 0
# Color
self.normal_bg_begin = self.to_cairo_color(
self.style.light[gtk.STATE_NORMAL]
)
self.normal_bg_end = self.to_cairo_color(
self.style.dark[gtk.STATE_NORMAL]
)
self.prelight_bg_begin = self.to_cairo_color(
self.style.bg[gtk.STATE_PRELIGHT]
)
self.prelight_bg_end = self.to_cairo_color(
self.style.light[gtk.STATE_PRELIGHT]
)
self.selected_bg_begin = self.to_cairo_color(
self.style.light[gtk.STATE_SELECTED]
)
self.selected_bg_end = self.to_cairo_color(
self.style.mid[gtk.STATE_SELECTED]
)
# Gradient stuff
self.normal_bg = cairo.LinearGradient(
alloc.x, alloc.y, alloc.x, alloc.y + alloc.height
)
self.normal_bg.add_color_stop_rgb(0.3, *self.normal_bg_begin)
self.normal_bg.add_color_stop_rgb(0.9, *self.normal_bg_end)
self.prelight_bg = cairo.LinearGradient(
alloc.x, alloc.y, alloc.x, alloc.y + alloc.height
)
self.prelight_bg.add_color_stop_rgb(0.3, *self.prelight_bg_begin)
self.prelight_bg.add_color_stop_rgb(0.9, *self.prelight_bg_end)
self.selected_bg = cairo.LinearGradient(
alloc.x, alloc.y, alloc.x, alloc.y + alloc.height
)
self.selected_bg.add_color_stop_rgb(0.3, *self.selected_bg_begin)
self.selected_bg.add_color_stop_rgb(0.9, *self.selected_bg_end)
self.queue_draw()
def do_expose_event(self, evt):
cr = self.window.cairo_create()
alloc = self.allocation
alloc.x, alloc.y = 0, 0
#alloc.width -= 3
alloc.height -= 3
self.draw_round_rect(cr,
alloc.x,
alloc.y,
alloc.width,
alloc.height,
9, 9
)
# Fill the rect
cr.set_source(self.normal_bg)
cr.fill_preserve()#
cr.set_source_rgb( \
*self.to_cairo_color(self.style.dark[gtk.STATE_ACTIVE]) \
)
cr.set_line_width(2)
cr.stroke()#
alloc.x += 1
alloc.y += 1
alloc.width -= 2
alloc.height -= 2
self.draw_round_rect(cr, alloc.x, alloc.y, alloc.width, alloc.height)#
cr.set_source_rgb(\
*self.to_cairo_color(self.style.light[gtk.STATE_NORMAL]) \
)
cr.set_line_width(0.5)
cr.stroke()#
if not self.show_status:
# hover -> active
self.draw_item(cr, True)
self.draw_item(cr, False)
self.propagate_expose(self.vbox, evt)
return False
def draw_item(self, cr, hover=False):
"Used to draw active/hovered items"
item = self.hover
if not hover:
item = self.active
if not item:
return
alloc = item.get_allocation()
alloc.x += 1
alloc.y += 1
alloc.width -= 2
alloc.height -= 4
# Draw the borders
self.draw_round_rect(cr,
alloc.x,
alloc.y,
alloc.width,
alloc.height
)
self.set_source_color(cr, self.style.dark[gtk.STATE_NORMAL])
cr.set_line_width(2)
cr.stroke()
alloc.x += 1
alloc.y += 1
alloc.width -= 2
alloc.height -= 2
if hover:
cr.set_source(self.prelight_bg)
else:
cr.set_source(self.selected_bg)
self.draw_round_rect(cr,
alloc.x,
alloc.y,
alloc.width,
alloc.height
)
cr.fill_preserve()
cr.stroke()
def get_item_under_cursor(self, evt):
"""
Get the item under cursor
@param evt a gtk.gdk.Event
@return an item if found, or None
"""
lst = []
self.items_hbox.foreach(lambda w, x: x.append(w), lst)
for item in lst:
f = item.flags()
# If the widget is not realized or not visible
if not f & gtk.REALIZED or not f & gtk.VISIBLE:
continue
alloc = item.get_allocation()
if evt.x >= alloc.x and evt.x <= alloc.x + alloc.width and \
evt.y >= alloc.y and evt.y <= alloc.y + alloc.height:
return item
return None
def do_motion_notify_event(self, evt):
if self.show_status:
return
self.hover = self.get_item_under_cursor(evt)
self.queue_draw()
def do_leave_notify_event(self, evt):
if self.show_status:
return
self.hover = None
self.queue_draw()
def do_button_release_event(self, evt):
if self.show_status:
return
item = self.get_item_under_cursor(evt)
if item:
self.active = self.get_item_under_cursor(evt)
self.hover = None
self.queue_draw()
if self.active:
self.emit('changed', self.items.index(self.active))
def set_active(self, idx):
"Set the active item from index"
if self.show_status:
return
try:
self.active = self.items[idx]
self.emit('changed', idx)
except IndexError:
pass
def get_active(self):
"Get the index of active item"
if self.show_status:
return -1
if self.active:
return self.items.index(self.active)
def append(self, item):
"Append a HIGToolItem to the toolbar"
assert isinstance(item, HIGToolItem), "must be HIGToolItem"
self.items_hbox.pack_start(item, False, False, 0)
self.items.append(item)
def show_message(self, msg, stock=None, file=None):
"""
Show a message using the StatusToolItem
You could use stock OR file for image, not both.
(file has precedence if both are specified)
@param msg the message to show (could use markup)
@param stock the stock for the image
@param file the file for the image
"""
self.status_item.label = msg
self.status_item.image.show()
if file:
self.status_item.image.set_from_file(file)
elif stock:
self.status_item.image.set_from_stock(stock, \
gtk.ICON_SIZE_LARGE_TOOLBAR)
else:
self.status_item.image.hide()
self.show_status = True
self.items_hbox.hide()
self.status_item.show()
def set_status_progress(self, fract=0, show=True):
"""
Set the progressbar fraction for StatusToolItem
@param fract the fraction to set
@param show if the progressbar should be showed or not
"""
self.status_item.progress.set_fraction(fract)
if show:
self.status_item.progress.show()
else:
self.status_item.progress.hide()
def get_status_progress(self):
"Get the fraction of StatusToolItem's progressbar"
return self.status_item.progress.get_fraction()
def unset_status(self):
"Hide the StatusToolItem and restore the initial state"
self.show_status = False
self.status_item.label = ""
self.status_item.image.set_from_stock(
gtk.STOCK_MISSING_IMAGE,
gtk.ICON_SIZE_DIALOG
)
self.status_item.image.hide()
self.status_item.hide()
self.items_hbox.show()
if __name__ == "__main__":
w = gtk.Window()
t = HIGToolBar()
t.append(HIGToolItem("test"))
t.append(HIGToolItem("test"))
t.append(HIGToolItem("test"))
t.append(HIGToolItem("test"))
t.append(HIGToolItem("test"))
box = gtk.VBox()
box.pack_start(t, False, False, 0)
w.add(box)
w.show_all()
gtk.main()
| umitproject/packet-manipulator | umit/pm/higwidgets/higtoolbars.py | higtoolbars.py | py | 13,081 | python | en | code | 16 | github-code | 50 |
16709923415
|
import numpy as np
import cv2
# load the image in grayscale (flag 0)
img = cv2.imread('apple1.jpg', 0)
# show image
cv2.namedWindow('image_window', cv2.WINDOW_NORMAL)
cv2.imshow('image_window', img)
k = cv2.waitKey(0)
if k == ord('s'):  # press 's' to save a grayscale copy
cv2.imwrite('applegrey.png', img)
cv2.destroyAllWindows()
| himanshushukla254/OPENCV_Python_Codes | key_involve.py | key_involve.py | py | 264 | python | en | code | 0 | github-code | 50 |
10079458197
|
import tkinter
import pandas as pd
from tkinter import ttk
from tkcalendar import DateEntry
import time
from tkinter import messagebox
import os.path as path
class InputStudentWindow(tkinter.Frame):
def __init__(self, master=None, path="data/estudiantes.json"):
super().__init__(master)
self.pack()
self.master.title("Entrada de alumnos")
self.master.resizable(False, False)
self.default_path = path
# self.master.protocol("WM_DELETE_WINDOW", self.faa)
self.lb_name = tkinter.Label(self.master, text="Nombre:")
self.lb_name.pack()
self.in_name = tkinter.Entry(self.master, width=26)
self.in_name.pack()
self.lb_age = tkinter.Label(self.master, text="Edad:")
self.lb_age.pack()
self.in_age = tkinter.Entry(self.master, width=26)
self.in_age.pack()
self.lb_school = tkinter.Label(self.master, text="Estudios:")
self.lb_school.pack()
self.cb_school = ttk.Combobox(self.master, state="readonly")
self.cb_school["values"] = ["Primaria", "Secundaria", "Preparatoria", "Licenciatura", "Posgrado"]
self.cb_school.current(0)
self.cb_school.pack()
self.lb_date = tkinter.Label(self.master, text="Fecha:")
self.lb_date.pack()
self.cal = DateEntry(self.master, width=12, background='darkblue', foreground='white', borderwidth=2)
self.cal.pack()
self.lb_time = tkinter.Label(self.master, text="Hora:")
self.lb_time.pack()
self.in_time = tkinter.Entry(self.master, width=26)
self.hour = time.strftime("%H:%M:%S")
self.in_time.insert(0, self.hour)
self.in_time.pack()
self.bt_save = tkinter.Button(self.master, text="Guardar", command=self.save_student)
self.bt_save.pack(pady=10)
def save_student(self):
"""
Validate that the entered information is coherent and
save the new student to the most recently opened document.
"""
if self.in_name.get() is None or self.in_name.get() == "" or self.in_name.get().isdigit():
messagebox.showerror("Error", "Por favor ingrese el nombre del estudiante. Verifique el formato.")
elif self.in_age.get() is None or self.in_age.get() == "" or not self.in_age.get().isdigit():
messagebox.showerror("Error", "Por favor ingrese la edad del estudiante. Verifique el formato.")
elif self.cb_school.get() is None or self.cb_school.get() == "" or self.cb_school.get().isdigit():
messagebox.showerror("Error", "Por favor ingrese la escolaridad del estudiante. Verifique la selección.")
elif self.cal.get() is None or self.cal.get() == "":
messagebox.showerror("Error", "Por favor ingrese la fecha de captura.")
elif self.in_time.get() is None or self.in_time.get() == "":
messagebox.showerror("Error", "Por favor ingrese la hora de captura.")
else:
output = pd.DataFrame({"Nombre": [self.in_name.get()],
"Edad": [self.in_age.get()],
"Escolaridad": [self.cb_school.get()],
"Fecha": [self.cal.get()],
"Hora": [self.in_time.get()]})
if not (path.exists(self.default_path)):
output.to_json(self.default_path)
else:
input_json = pd.read_json(self.default_path)
output = pd.concat([input_json, output], axis=0)
output.reset_index(drop=True, inplace=True)
output.to_json(self.default_path)
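# A minimal launch sketch (hedged: assumes tkcalendar is installed and the
# default JSON path is writable):
#
#   root = tkinter.Tk()
#   app = InputStudentWindow(master=root)
#   root.mainloop()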
| ichcanziho/Interfaz_grafica_tablas_python | core/classes/input_student.py | input_student.py | py | 3,666 | python | en | code | 0 | github-code | 50 |
41454300062
|
import os
import abbrs
def current_path():
p = os.path.realpath(__file__)
p = os.path.split(p)[0]
p = os.path.split(p)[-1]
return p
PACKAGE_NAME = current_path()
RC_FILENAME = f'{PACKAGE_NAME}.json'
def make_dat(ls):
def is_mp4(x):
s = x.split('.')
return len(s) >= 2 and (s[-1] == 'mp4' or s[-2] == 'mp4')
def check_collision(d):
for i, v in enumerate(d):
for j in range(i + 1, len(d)):
if d[i][0] == d[j][0]:
return j
dat = [ [ abbrs.cool_hash(i), i ] for i in filter(is_mp4, ls) ]
while True:
i = check_collision(dat)
if i:
print(f'Repairing collision: {dat[i][0]} -> {dat[i][1]}')
dat[i][0] = abbrs.cool_hash(dat[i][0])
else:
break
return dat
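# Illustrative shape of `dat` (hedged: assumes abbrs.cool_hash returns a short
# string token; the values below are made up):
#
#   [['a3f9c2', 'clip one.mp4'],
#    ['7b01de', 'clip two.mp4.part']]
#
# secure() renames each file to its hash and saves this table as JSON;
# restore() renames each hash back to its original filename.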
def restore():
dat = abbrs.load_json(RC_FILENAME)
for hash, filename in dat:
os.rename(hash, filename)
abbrs.suspend_file(RC_FILENAME)
def secure():
dat = make_dat(os.listdir())
if len(dat) == 0:
print('MP4 files not found.')
return
abbrs.json_dump(RC_FILENAME, dat)
print(abbrs.table([ 'Hash', 'Filename' ], dat))
for hash, filename in dat:
os.rename(filename, hash)
def main():
if os.path.exists(RC_FILENAME):
restore()
else:
secure()
if __name__ == '__main__':
main()
| frankschweitzer/TV_Ad_Scraper | myenv/lib/python3.11/site-packages/filenames_secure/__init__.py | __init__.py | py | 1,256 | python | en | code | 0 | github-code | 50 |
10366567275
|
import os
import cgsensor
import mh_z19
import requests
from dotenv import load_dotenv
def get_sensor_info():
result = {}
bme280 = cgsensor.BME280(i2c_addr=0x76)
bme280.forced()
result["temp"] = bme280.temperature
result["humid"] = bme280.humidity
result["pressure"] = bme280.pressure
tsl2572 = cgsensor.TSL2572()
tsl2572.single_auto_measure()
result["brightness"] = tsl2572.illuminance
result["co2"] = mh_z19.read()["co2"]
return result
def main():
load_dotenv()
post_url = os.getenv("POST_URL")
response = requests.post(post_url, json=get_sensor_info())
print(response.status_code)
if __name__ == "__main__":
main()
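# Example .env consumed by load_dotenv() above (the URL is illustrative):
#
#   POST_URL=https://example.com/api/sensors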
| mjun0812/raspi | cron.py | cron.py | py | 691 | python | en | code | 0 | github-code | 50 |
15326574774
|
# Vigenere Cipher Frequency Hacker
import itertools
import os
import re
import sys
import use_original.CheckEnglishUseinEight as CE
import use_original.FrequencyFinderUseinSeventeen as FF
import use_original.VigenereCipherUseinSixteen as VC
import use_original.ProcessingBar as PB
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
SILENT_MODE = True
NUM_MOST_FRE_LETTERS = 3
MAX_KEY_LENGTH = 16
CLEAR_FORMAT = re.compile('[^A-Z]')
# Main Process
def Processing():
# file or message?
print("> input (M)essage or (F)ile ?")
back = input("<<< ")
mode = 0
if back.lower().startswith('m'):
mode = 1
print("> input message: ")
message = input("<<< ")
elif back.lower().startswith('f'):
mode = 2
print("> input your input File Name:")
inputFileName = input("<<< ")
if not os.path.exists(inputFileName):
print("> [ERROR] There is no \"{}\" File. ".format(inputFileName))
sys.exit()
f = open(inputFileName)
message = f.read()
f.close()
else :
print("[ERROR] Don\'t know what you input...")
sys.exit()
# main work
translate = hackVigenerebyFrequency(message)
# result output
if translate == None:
print("\n> Hacking Failed!")
else :
print("\n> Hacking Answer:")
if mode == 2:
FileName = inputFileName[:len(inputFileName) - 4]
outputFileName = FileName + "_trans.txt"
print(">\n{}...".format(translate[:300]))
print("> Save as {}.".format(outputFileName))
f = open(outputFileName, 'w')
f.write(translate)
f.close()
else:
print("> {}".format(translate))
# Hack Vigenere by Frequency
def hackVigenerebyFrequency(message):
print("> Hacking...")
allPossibleKeyLengths = kasiskiExam(message)
keyLengthStr = ''
for keyLength in allPossibleKeyLengths:
keyLengthStr = keyLengthStr + "%s " % (keyLength)
print("\n> Kasiski examination result: the most likely key lengths are {}.".format(keyLengthStr))
print("\n> Step 2/2: Attempt Hack With Key Length")
for keyLength in allPossibleKeyLengths:
# if not SILENT_MODE:
print("\n> Attempting hack with key length {}; {} possible keys".format(keyLength, NUM_MOST_FRE_LETTERS ** keyLength))
translate = attempHackWithKeyLength(message, keyLength)
if translate != None:
break
return translate
# Step 1: Kasiski examination: get the possible key lengths
def kasiskiExam(message):
print("> Step 1/2: Kasiski Examination")
repeatSS = findRepeatSequencesSpacings(message)
seqFactors = {}
print("\n> Get all the factors of the number...")
count, limited = 0, len(repeatSS)
for seq in repeatSS:
count = count + 1
PB.symbolBar(count, limited)
seqFactors[seq] = []
for spacing in repeatSS[seq]:
seqFactors[seq].extend(getAllFactors(spacing))
# extend, not append: with a = ['a','b','c'] and b = [1,2,3],
# a.extend(b) gives ['a','b','c',1,2,3]; a.append(b) gives ['a','b','c',[1,2,3]]
factorByCount = getMostCommonFacters(seqFactors)
# [eg g.txt] factorByCount = [(3, 556), (2, 541), (6, 529), (4, 331), (12, 325), (8, 171), (9, 156), (16, 105), (5, 98), (11, 86), (10, 84), (15, 84), (7, 83), (14, 68), (13, 52)]
allPossibleKeyLengths = []
for pairs in factorByCount:
allPossibleKeyLengths.append(pairs[0])
# [eg g.txt] allPossibleKeyLengths = [3, 2, 6, 4, 12, 8, 9, 16, 5, 11, 10, 15, 7, 14, 13]
return allPossibleKeyLengths
# Find Repeat Sequences Spacings, Use in "kasiskiExam"
def findRepeatSequencesSpacings(message):
# [eg. g.txt]
message = CLEAR_FORMAT.sub("", message.upper())
print("> Find Repeat Sequences Spacings...")
seqSpacings = {}
count, limited = 0, (len(message) - 3) + (len(message) - 4) + (len(message) - 5)
for seqLen in range(3, 6):
for seqStart in range(len(message) - seqLen):
count = count + 1
PB.symbolBar(count, limited)
seq = message[seqStart:seqStart + seqLen]
for i in range(seqStart + seqLen, len(message) - seqLen):
if message[i:i + seqLen] == seq:
if seq not in seqSpacings:
seqSpacings[seq] = []
seqSpacings[seq].append(i - seqStart)
return seqSpacings
# Get all the factors of the number, Use in "kasiskiExam"
def getAllFactors(num):
if num < 2:
return []
factors = []
for i in range(2, MAX_KEY_LENGTH + 1):
if num % i == 0:
factors.append(i)
factors.append(int(num / i))
if 1 in factors:
factors.remove(1)
return list(set(factors))
# Use set: Remove duplicate values
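# Worked example: getAllFactors(12) with MAX_KEY_LENGTH = 16 collects
# 2, 6, 3, 4, 4, 3, 6, 2, 12, 1, drops the 1, and returns [2, 3, 4, 6, 12]
# (de-duplicated; list order is unspecified).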
# Get the most common factors, used in "kasiskiExam"
def getMostCommonFacters(seqFactors):
print("\n> Get the most common factors...")
factorCounts = {}
count, limited = 0, len(seqFactors)
for seq in seqFactors:
count = count + 1
PB.symbolBar(count, limited)
factorList = seqFactors[seq]
for factor in factorList:
if factor not in factorCounts:
factorCounts[factor] = 0
factorCounts[factor] += 1
factorsByCount = []
for factor in factorCounts:
if factor <= MAX_KEY_LENGTH:
factorsByCount.append((factor, factorCounts[factor]))
factorsByCount.sort(key = getIndex, reverse = True)
return factorsByCount
# Key function for sorting by count, used in "getMostCommonFacters" and "attempHackWithKeyLength"
def getIndex(x):
return x[1]
# Step 2: Attempt hack with key length
def attempHackWithKeyLength(message, keyLength):
Message = message.upper()
allFreScores = []
for nth in range(1,keyLength + 1):
nthLetters = getNthSubkeyLetters(nth, keyLength, Message)
freScores = []
for possibleKey in LETTERS:
translate = VC.decryptMessage(possibleKey, nthLetters)
matchPair = (possibleKey, FF.englishFrequencyMatch(translate))
# Frequency analysis: the subkey whose decryption best matches standard English letter frequencies scores highest.
freScores.append(matchPair)
freScores.sort(key = getIndex, reverse = True)
allFreScores.append(freScores[:NUM_MOST_FRE_LETTERS])
# [eg. g.txt] allFreScores = [[('A', 9), ('E', 5), ('O', 4)], [('S', 10), ('D', 4), ('G', 4)], [('I', 11), ('V', 4), ('X', 4)], [('M', 10), ('Z', 5), ('Q', 4)], [('O', 11), ('B', 4), ('Z', 4)], [('V', 10), ('I', 5), ('K', 5)]]
for i in range(len(allFreScores)):
print("> Possible key for letter: {} -- ".format(i+1), end = '')
for freScores in allFreScores[i]:
print("{} ".format(freScores[0]), end = '')
print()
'''
# [eg. g.txt] output> > Possible key for letter: 1 -- A E O
> Possible key for letter: 2 -- S D G
> Possible key for letter: 3 -- I V X
> Possible key for letter: 4 -- M Z Q
> Possible key for letter: 5 -- O B Z
> Possible key for letter: 6 -- V I K
'''
count, limited = 0, NUM_MOST_FRE_LETTERS ** keyLength
for index in itertools.product(range(NUM_MOST_FRE_LETTERS), repeat = keyLength):
count = count + 1
PB.symbolBar(count,limited)
possibleKey = ''
for i in range(keyLength):
possibleKey = possibleKey + allFreScores[i][index[i]][0]
# The Cartesian product enumerates every combination of candidate subkeys.
# Cartesian product --> itertools.product
if not SILENT_MODE:
print("> Attemping with key : {}".format(possibleKey))
translate = VC.decryptMessage(possibleKey, message)
if CE.isEnglish(translate)[0]:
origCase = []
for i in range(len(message)):
if message[i].isupper():
origCase.append(translate[i].upper())
else:
origCase.append(translate[i].lower())
translate = "".join(origCase)
print("\n> Possible answer:")
print("> Key: {}\n> Translate: ".format(possibleKey))
print(translate[:300])
print("> Enter \'D\' to done, or continue hacking...")
back = input("<<< ")
if back.strip().upper().startswith('D'):
return translate
return None
# Get n-th subkey letters, Use in "attempHackWithKeyLength"
def getNthSubkeyLetters(nth, keyLength, Message):
Message = CLEAR_FORMAT.sub("", Message)
i = nth - 1
letters = []
while i < len(Message):
letters.append(Message[i])
i = i + keyLength
return ''.join(letters)
if __name__ == "__main__":
Processing()
| KampfWut/CodeGeneratedDuringLearning | Python Encryption/17_VigenereCipherFrequencyHacker.py | 17_VigenereCipherFrequencyHacker.py | py | 9,047 | python | en | code | 0 | github-code | 50 |
8715663169
|
import dash
from dash import html
import dash_bootstrap_components as dbc
# Create Dash App
app = dash.Dash(__name__, use_pages=True, external_stylesheets=[dbc.themes.CYBORG],
meta_tags=[{'name': 'viewport',
'content': 'width=device-width, initial-scale=1.0'}])
#-------------------App Layout-------------------
app.layout = html.Div(
[
# Navbar
dbc.NavbarSimple(
dbc.DropdownMenu(
[
dbc.DropdownMenuItem(page["name"], href=page["path"])
for page in dash.page_registry.values()
if page["module"] != "pages.not_found_404"
],
nav=True,
label="More Pages",
className='me-5'
),
id='nav-bar',
brand="Fintix",
brand_href="#",
color="dark",
dark=True,
brand_style= {'fontSize':30},
fluid=True,
className='ms-2 me-2 mb-3'
),
# content of each page
dash.page_container
]
)
app.title = 'Fintix'
if __name__ == '__main__':
app.run_server(debug=False)
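# A minimal page module sketch for the registry above (hedged: the file name,
# path and content are illustrative; requires dash >= 2.5 with pages support),
# e.g. pages/home.py:
#
#   import dash
#   from dash import html
#
#   dash.register_page(__name__, path='/', name='Home')
#
#   layout = html.Div('Home page')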
| marcynn/Fintix | app.py | app.py | py | 1,265 | python | en | code | 1 | github-code | 50 |
14600597575
|
from facenet_pytorch import MTCNN, InceptionResnetV1, fixed_image_standardization, training
import torch
from torch.utils.data import DataLoader
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torchvision import datasets, transforms
import os
from pysistant import helpers
from PIL import Image
from accelerate import Accelerator
import numpy as np
from sklearn.model_selection import train_test_split
# data_dir = '../data/test_images'
# data_dir = '/mnt/vmk/datasets/faces/casia_webface'
train_data_dir = '/mnt/vmk/datasets/faces/vgg_face_2/data/train_cropped'
test_data_dir = '/mnt/vmk/datasets/faces/vgg_face_2/data/test_cropped'
saved_model_dir = '/mnt/vmk/projects/ilyushin/ai-s/facenet_pytorch/results_distr_acc/models'
saved_checkpoints_dir = '/mnt/vmk/projects/ilyushin/ai-s/facenet_pytorch/results_distr_acc/checkpoints'
helpers.create_dir(saved_model_dir)
helpers.create_dir(saved_checkpoints_dir)
batch_size = 700
image_size = (256, 256)
epochs = 70
num_workers = 0 if os.name == 'nt' else 8
accelerator = Accelerator()
class VGGDataset(torch.utils.data.Dataset):
def __init__(self, file_names, transform):
super(VGGDataset, self).__init__()
self.file_names = file_names
self.transform = transform
classes, class_to_idx = self.find_classes()
self.classes = classes
self.class_to_idx = class_to_idx
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
path_to_img = self.file_names[idx]
# print('path_to_img - ', path_to_img)
raw_image = Image.open(path_to_img)
x = self.transform(raw_image.convert('RGB'))
y = path_to_img.split('/')[-2]
return x, self.class_to_idx[y]
def find_classes(self):
"""Find the class folders in a dataset structured as follows::
directory/
├── class_x
│ ├── xxx.ext
│ ├── xxy.ext
│ └── ...
│ └── xxz.ext
└── class_y
├── 123.ext
├── nsdf3.ext
└── ...
└── asd932_.ext
This method can be overridden to only consider
a subset of classes, or to adapt to a different dataset directory structure.
Raises:
FileNotFoundError: If the file list contains no class folders.
Returns:
(Tuple[List[str], Dict[str, int]]): Sorted list of all classes and dictionary mapping each class to an index.
"""
classes = sorted(set(file_path.split('/')[-2] for file_path in self.file_names))  # sort after de-duplicating so indices are deterministic
if not classes:
raise FileNotFoundError(f"Couldn't find any class in the list of path.")
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
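# Illustrative mapping (paths are made up): files such as
# '/data/train_cropped/n000002/0001.jpg' and '/data/train_cropped/n000009/0003.jpg'
# yield classes ['n000002', 'n000009'] and
# class_to_idx {'n000002': 0, 'n000009': 1} (indices follow the sorted order).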
def get_datasets():
train_source_files = [item for item in helpers.find_files(train_data_dir, pattern=['.jpg'])]
train_x, train_y = [], []
for file_path in train_source_files:
train_x.append(file_path)
train_y.append(file_path.split('/')[-2])
x_train = np.array(train_x)
train_y = np.array(train_y)
x_train, x_val, y_train, y_val = train_test_split(x_train, train_y, test_size=0.2, stratify=train_y)
trans = transforms.Compose([
# np.float32,
# transforms.Resize(image_size),
transforms.ToTensor(),
fixed_image_standardization
])
train_dataset = VGGDataset(x_train, transform=trans)
val_dataset = VGGDataset(x_val, transform=trans)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers
)
return train_dataset, train_loader, val_loader
# def accuracy(logits, y):
# _, preds = torch.max(logits, 1)
# return (preds == y).float().mean()
train_dataset, train_loader, val_loader = get_datasets()
resnet = InceptionResnetV1(
classify=True,
num_classes=len(train_dataset.class_to_idx)
)
optimizer = optim.Adam(resnet.parameters(), lr=0.001)
scheduler = MultiStepLR(optimizer, [5, 10])
loss_fn = torch.nn.CrossEntropyLoss()
resnet, optimizer, train_loader, val_loader = accelerator.prepare(resnet, optimizer, train_loader, val_loader)
steps = len(train_dataset) // (batch_size*8)
for epoch in range(epochs):
accelerator.print('\nEpoch {}/{}'.format(epoch + 1, epochs))
accelerator.print('-' * 10)
accuracy = 0
num_elems = 0
resnet.train()
for step, (x, y) in enumerate(train_loader):
# x = x.to(accelerator.device)
# y = y.to(accelerator.device)
y_pred = resnet(x)
loss_batch = loss_fn(y_pred, y)
accelerator.backward(loss_batch)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
# loss_batch = loss_batch.detach().cpu()
#
# predictions = y_pred.argmax(dim=-1)
# accurate_preds = accelerator.gather(predictions) == accelerator.gather(y)
# num_elems += accurate_preds.shape[0]
# accuracy += accurate_preds.long().sum()
# if step % 100 == 0:
# accelerator.print(
# f"Train epoch {epoch}/{epochs}, step {step}/{steps}: loss {loss_batch.item():.4f}, accuracy {100 * accuracy.item() / num_elems:.2f}"
# )
resnet.eval()
loss = 0
accuracy = 0
num_elems = 0
for step, (x, y) in enumerate(val_loader):
with torch.no_grad():
y_pred = resnet(x)
predictions = y_pred.argmax(dim=-1)
accuracy_preds = accelerator.gather(predictions) == accelerator.gather(y)
num_elems += accuracy_preds.shape[0]
accuracy += accuracy_preds.long().sum()
eval_metric = accuracy.item() / num_elems
# eval_loss = loss.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print('-' * 10)
# accelerator.print(f"Eval epoch {epoch} from {epochs}: loss {eval_loss:.4f}, accuracy {100 * eval_metric:.2f}")
accelerator.print(f"Eval epoch {epoch+1} from {epochs}: accuracy {100 * eval_metric:.2f}")
accelerator.save({
'epoch': epoch,
'model_state_dict': resnet.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
}, os.path.join(saved_checkpoints_dir, f'epoch_{epoch}.tar'))
# Save
accelerator.save(resnet, os.path.join(saved_model_dir, 'resnet.pt'))
| Ilyushin/facenet-pytorch | train_model_distributed_accelerator.py | train_model_distributed_accelerator.py | py | 6,750 | python | en | code | null | github-code | 50 |
21874214998
|
"""
Definition of Interval.
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
"""
import heapq
class Solution:
"""
@param intervals: an array of meeting time intervals
@return: the minimum number of conference rooms required
"""
def minMeetingRooms(self, intervals):
# Write your code here
intervals = sorted(intervals, key=lambda x: x.start)
heap = []
for i in range(len(intervals)):
if heap and intervals[i].start >= heap[0]:  # a meeting starting exactly when another ends can reuse that room
heapq.heappop(heap)
heapq.heappush(heap, intervals[i].end)
return len(heap)
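# Worked example: intervals (0,30), (5,10), (15,20), sorted by start.
#   push 30                 -> heap [30]      (room 1)
#   5 < 30: push 10         -> heap [10, 30]  (room 2)
#   15 >= 10: pop, push 20  -> heap [20, 30]  (room 1 reused)
# len(heap) == 2 rooms.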
| sunjianbo945/leetcode | src/amazon/919. Meeting Rooms II.py | 919. Meeting Rooms II.py | py | 674 | python | en | code | 0 | github-code | 50 |
12721264122
|
try:
from hdb_ha_dr.client import HADRBase
except ImportError as e:
print("Module HADRBase not found - running outside of SAP HANA? - {0}".format(e))
import os
"""
To use this HA/DR hook, please:
1) create the directory /hana/shared/myHooks, which should be owned by <sid>adm
2) copy this file to /hana/shared/myHooks
3) add the following lines to your global.ini:
[ha_dr_provider_myfirsthook]
provider = myFirstHook
path = /hana/shared/myHooks
execution_order = 5
4) create the directory /srhook, writable by <sid>adm
please see documentation on HANA hooks here:
https://help.sap.com/docs/SAP_HANA_PLATFORM/6b94445c94ae495c83a19646e7c3fd56/1367c8fdefaa4808a7485b09815ae0f3.html?version=2.0.01
"""
fhSRHookVersion = "0.162.0"
try:
class myFirstHook(HADRBase):
def __init__(self, *args, **kwargs):
# delegate construction to base class
super(myFirstHook, self).__init__(*args, **kwargs)
self.tracer.info("myFirstHook init()")
def about(self):
return {"provider_company": "SAMPLE",
"provider_name": "myFirstHook", # class name
"provider_description": "Execute after takeover",
"provider_version": "1.0"}
def postTakeover(self, rc, **kwargs):
"""
Hook description:
* time of call: as soon as all services with a volume return from their
assign-call (open SQL port)
* caller: the master host
* landscape: called only once on the master
* behavior upon failure: error trace is written
@param rc: the return code of the actual takeover process; 0=success,
1=waiting for forced takeover, 2=failure
@type rc: int
@param **kwargs: place holder for later usage (new parameters) to
keep the interface stable
@type **kwargs: dict
@return: information about success
@rtype: int
***this is strictly a sample, and you will need to implement your own ***
***logic here***
"""
with open('/srhook/readme.txt', 'a') as f:
f.write("HANA failed over to secondary node")
f.write('\n')
return 0
except NameError as e:
print("Could not find base class ({0})".format(e))
| rsponholtz/NotificationHook | myFirstHook.py | myFirstHook.py | py | 2,533 | python | en | code | 0 | github-code | 50 |
70942433115
|
"""
Module for instrument class and subclasses.
"""
import glob
import os
import astropy.io.fits as pyfits
import numpy as np
from scipy.ndimage.filters import median_filter
from . import utils as u
class Instrument:
"""
Instantiates an object that implements instrument-specific reduction techniques.
"""
name = None
npix = (
10 # Size of rot search; needs to be bigger if initial shifts are off.
)
def __init__(self, take_skies=False):
self.take_skies = take_skies
def bad_pix(self, image):
"""Read in bad pixel file, cut down to size, and replace NaN
pixels with median of surrounding pixels.
Inputs:
:image: (2D numpy array) image to be filtered for bad pixels.
Outputs:
:image: (2D numpy array) image, now filtered for bad pixels.
Same dimensions as input image.
"""
c_im = image.copy()
for i in range(3):
filtered = median_filter(c_im, size=10)
nan_indices = np.where(np.isnan(c_im))
c_im[nan_indices] = filtered[nan_indices]
return c_im
class ShARCS(Instrument):
"""
For use on the ShARCS camera on the Shane 3m telescope
at Lick observatory.
"""
name = "ShARCS"
center = (750, 1100) # row, col
npix = 1000 #Was 600. Using 1000x1000 prevents vertical and horizontal boundaries in final image.
plate_scale = 0.033 # arcsec/pixel
replace_filters = {
"BrG-2.16": ["Ks", "K"],
"K+CH4-2.4": ["Ks", "K"],
"J+CH4-1.2": "J",
"H2-2.2122": ["Ks", "K"],
}
filter_logtohead = {
"Ks": "Ks",
"BrG": "BrG-2.16",
"J+CH4-1.2": "J",
"K": "Ks",
}
filter_headtolog = {"Ks": "K", "BrG-2.16": "BrG", "J": "J+CH4-1.2"}
file_prefix = "s"
plim_inds = (250, 350)
off = (-250, -250)
def adjust_array(self, array, nims):
return np.array(
[
u.image_subsection(array[dd, :, :], self.npix, self.center)
for dd in range(nims)
]
)
def adjust_im(self, image):
return np.fliplr(image)
def head(self, file):
"""
Given a FITS file, returns its head.
Inputs:
:file: (str) path to file.
"""
return u.header_subsection(file, self.npix, self.center)
def filt(self, nims, head, filter_name):
"""
Given the header of a FITS file, returns its filter.
Inputs:
:nims: (int) number of images.
:head: (astropy.io.fits header object) head
of object of interest.
:filter_name: (str) name of filter to use
in the event that the filter is
unknown in the header.
Outputs:
:filt: (str) name of filter used to observe
object of interest.
"""
if head["FILT1NAM"] == "Unknown":
filt = filter_name
else:
filt = head["FILT1NAM"]
if head["FILT2NAM"] != "Open": #append Ch4-1.2 as needed
filt = filt + '+'+head["FILT2NAM"]
return filt
def itime(self, head):
"""
Given a FITS header, returns the true integration time
for a file.
Inputs:
:head: (astropy.io.fits header object) head
of object of interest.
Outputs:
:itime_val: (float) integration time for object of
interest.
"""
itime_val = head["ITIME0"] * 1e-6
return itime_val
def bad_pix(self, image):
"""Read in bad pixel file, cut down to size, replace bad
pixels with median of surrounding pixels.
Inputs:
:image: (2D numpy array) image to be filtered for bad pixels.
Outputs:
:image: (2D numpy array) image, now filtered for bad pixels.
Same dimensions as input image.
"""
script_dir = os.path.dirname(
__file__
) # <-- absolute dir the script is in
rel_path = "badpix.fits"
bpfile_name = os.path.join(script_dir, rel_path)
bpfile = pyfits.getdata(bpfile_name, 0)
bpfile = u.image_subsection(bpfile, self.npix, self.center)
bad = np.where(bpfile == 1) # locations of bad pixels
filtered = median_filter(image, size=7)
image[bad] = filtered[
bad
] # replace bad pixels with median of surrounding pixels
return image
def adjust_thisimage(self, thisimage, rawfile):
thisimage = u.image_subsection(thisimage, self.npix, self.center)
head = u.header_subsection(rawfile, self.npix, self.center)
return thisimage, head
def read_data(self, night, rawfilename, newfilename):
raise NotImplementedError(
"Data should not be read through"
"this method for ShARCS. Instead,"
"please run the driver function of "
"your choice on folders containing "
"your raw data."
)
class PHARO(Instrument):
"""
For use on the PHARO instrument at Palomar.
"""
name = "PHARO"
center = np.nan
npix = np.nan # Shouldn't matter
plate_scale = 0.025
filter_logtohead = {
"Ks": "K_short",
"BrG": "Br-gamma",
"BrG+H2": "Br-gamma",
"J": "J",
}
filter_headtolog = {"K-short": "Ks", "Br-gamma": "BrG", "J": "J"}
replace_filters = {
"BrG-2.16": ["Ks", "K"],
"K+CH4-2.4": ["Ks", "K"],
"J+CH4-1.2": "J",
"H2-2.2122": ["Ks", "K"],
}
file_prefix = "sph"
plim = (462, 562)
off = (-462, -462)
def adjust_im(self, image):
return image
def adjust_array(self, array, nims):
return array.astype(float)
def filt(self, nims, head, filter_name):
"""
Given the header of a FITS file, returns its filter.
Inputs:
:nims: (int) number of images.
:head: (astropy.io.fits header object) head
of object of interest.
:filter_name: (str) name of filter to use
in the event that the filter is
unknown in the header.
Outputs:
:filt: (str) name of filter used to observe
object of interest.
"""
filt = head["FILTER"]
return filt
def head(self, file):
"""
Returns the head of a FITS file.
Inputs:
:file: (str) path to FITS file of interest.
"""
return pyfits.getheader(file)
def itime(self, head):
"""
Given a FITS header, returns the true integration time
for a file.
Inputs:
:head: (astropy.io.fits header object) head
of object of interest.
Outputs:
:itime_val: (float) integration time for object of
interest.
"""
itime_val = head["T_INT"] / 1000.0
return itime_val
def adjust_thisimage(self, thisimage):
thisimage = thisimage.astype(float)
return thisimage
def read_data(self, raw_dir, new_dir):
"""
Reads data.
Inputs:
:rawdir: (string) absolute path to directory containing raw data.
File path should end with '/'.
:newdir: (string) absolute path to directory that will contain
4-quadrant data. File path should end with '/'.
Outputs:
None
"""
def read_pharo(raw_file_name, new_file_name):
"""
Read in the 4-quadrant data and flatten it.
Inputs:
:raw_file_name: (string) name of raw file.
:new_file_name: (string) name of flattened 4-quadrant data.
"""
im_cube = pyfits.getdata(raw_file_name)
header = pyfits.getheader(raw_file_name)
newfile = np.zeros((1024, 1024))
newfile[0:512, 512:1024] = im_cube[0, :, :] # Lower right
newfile[0:512, 0:512] = im_cube[1, :, :] # Lower left
newfile[512:1024, 0:512] = im_cube[2, :, :] # Upper left
newfile[512:1024, 512:1024] = im_cube[3, :, :] # Upper right
hdu = pyfits.PrimaryHDU(newfile, header=header)
hdu.writeto(new_file_name, overwrite=True, output_verify="ignore")
return newfile
def convert_night():
"""
Convert the cubes to flat files for a whole night.
"""
flist = glob.glob(raw_dir + "*.fits")
if not os.path.isdir(new_dir):
os.mkdir(new_dir)
for fpath in flist:
fname = fpath.split("/")[-1]
newfname = new_dir + "s" + fname
read_pharo(fpath, newfname)
convert_night()
| arjunsavel/SImMER | src/simmer/insts.py | insts.py | py | 9,017 | python | en | code | 7 | github-code | 50 |
22923702477
|
# !/usr/bin/python3
# coding:utf-8
# author:panli
import unittest
import HTMLTestRunner
import time
import os
def allTest():
suite=unittest.TestLoader().discover(
start_dir=os.path.dirname(__file__),
pattern='test_*.py',
top_level_dir=None)
return suite
def getNowTime():
return time.strftime('%Y-%m-%d %H_%M_%S', time.localtime(time.time()))
def run():
fp = os.path.join(os.path.dirname(__file__), 'report', getNowTime()+'testReport.html')
HTMLTestRunner.HTMLTestRunner(
stream=open(fp, 'wb'),
title='Automated Test Report',
description='Automated test report details').run(allTest())
if __name__ == '__main__':
run()
| 17621606077pl/Test_Api | script/Day11/allTestRun.py | allTestRun.py | py | 665 | python | en | code | 0 | github-code | 50 |
38616349980
|
# A system administrator realized he had not made an archive of the
# users' files in a long time. However, the disk that will hold the
# archive may be smaller than the combined size of the files.
# The size of each user's files is known.
# Write a program that, given the per-user file sizes and the free
# space on the archive disk, determines the maximum number of users
# whose data can fit in the archive.
li = list(map(int, input().split()))
n = li[1]
c = []
while n > 0:
c.append(int(input()))
n -= 1
c.sort()
kol = 0
total = 0
for el in c:
total += el
kol += 1
if total > li[0]:
kol -= 1
break
print(kol)
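# Worked example: first line "100 3", then sizes 40, 60, 30.
# Sorted sizes are [30, 40, 60]; running totals 30 (1 user), 70 (2 users),
# then 130 > 100, so the program prints 2.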
| AnnaSmelova/Python_programming_basics_course | week6/05_archive.py | 05_archive.py | py | 1,062 | python | ru | code | 1 | github-code | 50 |
41700012726
|
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal)
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from skutil.decomposition import *
from skutil.testing import assert_fails
from skutil.utils import load_iris_df
from skutil.decomposition.decompose import _BaseSelectiveDecomposer
# Define data for testing
iris = load_iris()
X = load_iris_df(False)
def test_selective_pca():
original = X
cols = [original.columns[0]] # Only perform on first...
compare_cols = np.array(original[['sepal width (cm)', 'petal length (cm)',
'petal width (cm)']].as_matrix()) # should be the same as the trans cols
transformer = SelectivePCA(cols=cols, n_components=0.85).fit(original)
transformed = transformer.transform(original)
untouched_cols = np.array(transformed[['sepal width (cm)', 'petal length (cm)', 'petal width (cm)']].as_matrix())
assert_array_almost_equal(compare_cols, untouched_cols)
assert 'PC1' in transformed.columns
assert transformed.shape[1] == 4
assert isinstance(transformer.get_decomposition(), PCA)
assert SelectivePCA().get_decomposition() is None
# test the selective mixin
assert isinstance(transformer.cols, list)
# what if we want to weight it?
pca_df = SelectivePCA(weight=True, n_components=0.99, as_df=False).fit_transform(original)
pca_arr = SelectivePCA(weight=True, n_components=0.99, as_df=False).fit_transform(iris.data)
assert_array_equal(pca_df, pca_arr)
# hack to assert they are not equal if weighted
pca_arr = SelectivePCA(weight=False, n_components=0.99, as_df=False).fit_transform(iris.data)
assert_fails(assert_array_equal, AssertionError, pca_df, pca_arr)
def test_selective_tsvd():
original = X
cols = [original.columns[0], original.columns[1]] # Only perform on first two columns...
compare_cols = np.array(
original[['petal length (cm)', 'petal width (cm)']].as_matrix()) # should be the same as the trans cols
transformer = SelectiveTruncatedSVD(cols=cols, n_components=1).fit(original)
transformed = transformer.transform(original)
untouched_cols = np.array(transformed[['petal length (cm)', 'petal width (cm)']].as_matrix())
assert_array_almost_equal(compare_cols, untouched_cols)
assert 'Concept1' in transformed.columns
assert transformed.shape[1] == 3
assert isinstance(transformer.get_decomposition(), TruncatedSVD)
assert SelectiveTruncatedSVD().get_decomposition() is None # default None
# test the selective mixin
assert isinstance(transformer.cols, list)
def test_not_implemented_failure():
# define anon decomposer
class AnonDecomposer(_BaseSelectiveDecomposer):
def __init__(self, cols=None, n_components=None, as_df=True):
super(AnonDecomposer, self).__init__(cols, n_components, as_df)
def get_decomposition(self):
return super(AnonDecomposer, self).get_decomposition()
assert_fails(AnonDecomposer().get_decomposition, NotImplementedError)
| tgsmith61591/skutil | skutil/decomposition/tests/test_decompose.py | test_decompose.py | py | 3,119 | python | en | code | 29 | github-code | 50 |
6513147544
|
import subprocess
import os
folder_path = '/media/usb'
des_path = '/home/sanjana/Braillie_Project/input_files'
def copy_files(source_path, destination_path):
#command to copy files
COPY_COMMAND = ['cp',source_path,destination_path]
#execute the command
try:
subprocess.check_call(COPY_COMMAND)
except subprocess.CalledProcessError as e:
print("Error:",e)
def check_folder():
if(os.path.exists('/home/sanjana/Braillie_Project/input_files')):
pass
else:
os.mkdir('/home/sanjana/Braillie_Project/input_files')
print('Folder created')
#command to find the files with .txt extension
FILES_COMMAND = ['find',folder_path,'-type','f','-name','*.txt']
#list to store the files found
output = subprocess.run(FILES_COMMAND, capture_output=True , text=True)
#storing the file paths in a available
file_paths = output.stdout.splitlines()
if(len(file_paths)==0):
print("No text files found")
else:
#print the file paths
for path in file_paths:
file = path.split('/') # split the path so only the trailing components are printed
print("Press 1 to copy the folder")
print(file[3:])
var = input()
if(var == '1'):
check_folder()
copy_files(path,des_path)
else:
print("No file to copy")
| VKSANJANA/Braillie_Project | copy.py | copy.py | py | 1,488 | python | en | code | 0 | github-code | 50 |
4031923941
|
def clear(a):
for i in range(len(a)):
a[i] = a[i].strip()
return a
def toNumber(a):
for i in range(len(a)):
a[i] = eval(a[i])
return a
def tinhdiem_trungbinh(score,rate):
s=0
for i in range(len(score)):
s += score[i]* rate[i]
return round(s,2)
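# Worked example: scores [8, 9, 7, 10] with weights [0.05, 0.1, 0.15, 0.7]
# give 0.4 + 0.9 + 1.05 + 7.0 = 9.35.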
def xeploaihocsinh(x, y, z):
# return the label in y for the first threshold in x that exceeds z
for i in range(len(x)):
if z < x[i]:
return y[i]
class BANGDIEM:
def __init__(self, duongdan_input, duongdan_output):
self.duongdan_input = duongdan_input
self.duongdan_output = duongdan_output
def load_dulieu(self):
self.f = open(self.duongdan_input)
self.f.close()
def tinhdiem_trungbinh(self): # load the data and compute the weighted averages
tn = [0.05, 0.1, 0.15, 0.7] # weights for 4-score subjects (sum to 1.0)
xh = [0.05, 0.1, 0.1, 0.15, 0.6] # weights for 5-score subjects (sum to 1.0)
dic = {}
self.f = open(self.duongdan_input)
header = self.f.readline()
header = clear(header.split(';'))
for line in self.f:
a = clear( line.split(';'))
dic[a[0]] = {}
for i in range(1, len(a)):
diem = toNumber(a[i].split(','))
if len(diem)== 4:
tb = tinhdiem_trungbinh(diem,tn)
else:
tb = tinhdiem_trungbinh(diem,xh)
dic[a[0]][header[i]] = tb
#print(dic)
self.f.close()
return dic
def print_dtb_dictionary(self):
print(self.tinhdiem_trungbinh())
def luudiem_trungbinh(self):
self.f = open(self.duongdan_input)
def clear(a):
for i in range(len(a)):
a[i] = a[i].strip()
return a
header = self.f.readline()
header = clear(header.split(';'))
g = open(self.duongdan_output,'w')
g.write(';\t'.join(header)+'\n')
for k in self.tinhdiem_trungbinh():
a = [k] + list((self.tinhdiem_trungbinh())[k].values())
g.write(';\t'.join([str(v) for v in a]) + '\n')
g.close()
class DANHGIA(BANGDIEM):
def __init__(self, duongdan_input, duongdan_output,duongdan_danhgia):
super().__init__(duongdan_input, duongdan_output)
self.duongdan_danhgia = duongdan_danhgia
def xeploai_hocsinh(self):
dic_classification = {}
for k in self.tinhdiem_trungbinh():
a = [k] + list((self.tinhdiem_trungbinh())[k].values())
dic_classification[a[0]] = {}
dtb_chuan = ((a[1] +a[5] + a[6])*2 + (a[2]+a[3]+a[4]+a[7]+a[8])*1)/11
dtb_chuan = round(dtb_chuan,2)
list_xl =[]
x= [6,6.5,8,9,10]
y = ['Tb','Tb Kha','Kha','Gioi','Xs']
list_xl = xeploaihocsinh(x,y,dtb_chuan)
dic_classification[a[0]] = list_xl
print(dic_classification)
return dic_classification
def xeploai_thidaihoc_hocsinh(self):
dic_rank = {}
for k in self.tinhdiem_trungbinh():
a = [k] + list((self.tinhdiem_trungbinh())[k].values())
dic_rank[a[0]] = {}
diem_kA = a[1] + a[2] + a[3]
diem_kA1 = a[1] + a[2] + a[6]
diem_kB = a[1] + a[3] + a[4]
list_ktn =[diem_kA, diem_kA1,diem_kB]
diem_kC = a[5] + a[7] + a[8]
diem_kD = a[1] + a[5] + 2*a[6]
list_rank = []
tn= [12,18,24,40]
c= [12,15,21,40]
d= [20,24,32,40]
loai = [4,3,2,1]
for i in list_ktn:
list_ktn = xeploaihocsinh(tn,loai,i)
list_rank.append(list_ktn)
listkC =xeploaihocsinh(c,loai,diem_kC)
list_rank.append(listkC)
listkD =xeploaihocsinh(d,loai,diem_kD)
list_rank.append(listkD)
dic_rank[a[0]] = list_rank
return (dic_rank)
def print_screen_xeploaidh(self):
print('XEP LOAI THI DAI HOC: ',self.xeploai_thidaihoc_hocsinh())
def luu_danhgia_hs(self):
h = open(self.duongdan_danhgia,'w')
h.write('“Ma HS”, “xeploai_TB chuan”, “xeploai_A”, “xeploai_A1”, “xeploai_B ”, “xeploai_C”, "xeploai_D” \n')
for k in self.xeploai_thidaihoc_hocsinh():
a = (self.xeploai_thidaihoc_hocsinh())[k]
h.write(k + "; " + self.xeploai_hocsinh()[k] + ' ; '+ '; '.join([str(v) for v in a]) + '\n')
h.close()
class TUNHIEN(DANHGIA):
def __init__(self, duongdan_input, duongdan_output,duongdan_danhgia):
super().__init__(duongdan_input, duongdan_output,duongdan_danhgia)
def danhgia_khoitunhien(self):
danhgia_khoitunhien=self.xeploai_thidaihoc_hocsinh()
dic_tunhien = {}
for k in self.xeploai_thidaihoc_hocsinh():
value =self.xeploai_thidaihoc_hocsinh()[k]
value = value[0:3]
dic_tunhien[k] = value
print('Xep loai khoi TUNHIEN: ',dic_tunhien)
return dic_tunhien
class XAHOI(DANHGIA):
def __init__(self, duongdan_input, duongdan_output,duongdan_danhgia):
super().__init__(duongdan_input, duongdan_output,duongdan_danhgia)
def danhgia_khoixahoi(self):
danhgia_khoixahoi=self.xeploai_thidaihoc_hocsinh()
dic_xahoi = {}
for k in self.xeploai_thidaihoc_hocsinh():
value =self.xeploai_thidaihoc_hocsinh()[k]
value = value[3]
dic_xahoi[k] = value
print('Xep loai khoi XAHOI: ',dic_xahoi)
return dic_xahoi
##
class COBAN(DANHGIA):
def __init__(self, duongdan_input, duongdan_output,duongdan_danhgia):
super().__init__(duongdan_input, duongdan_output,duongdan_danhgia)
def danhgia_khoicoban(self):
danhgia_khoicoban=self.xeploai_thidaihoc_hocsinh()
dic_coban = {}
for k in self.xeploai_thidaihoc_hocsinh():
value =self.xeploai_thidaihoc_hocsinh()[k]
value = value[4]
dic_coban[k] = value
print('Xep loai khoi COBAN: ',dic_coban)
return dic_coban
if __name__ == '__main__':
bangdiem = BANGDIEM("diemchitiet_OOP.txt", "diem_trungbinh_OOP.txt")
bangdiem.tinhdiem_trungbinh()
bangdiem.print_dtb_dictionary()
danhgia = DANHGIA("diemchitiet_OOP.txt", "diem_trungbinh_OOP.txt", "danhgiahocsinh_OOP.txt")
danhgia.xeploai_hocsinh()
danhgia.xeploai_thidaihoc_hocsinh()
#danhgia.luu_danhgia_hs()
danhgia.print_screen_xeploaidh()
TUNHIEN = TUNHIEN("diemchitiet_OOP.txt", "diem_trungbinh_OOP.txt", "danhgiahocsinh_OOP.txt")
TUNHIEN.danhgia_khoitunhien()
XAHOI = XAHOI("diemchitiet_OOP.txt", "diem_trungbinh_OOP.txt", "danhgiahocsinh_OOP.txt")
XAHOI.danhgia_khoixahoi()
COBAN = COBAN("diemchitiet_OOP.txt", "diem_trungbinh_OOP.txt", "danhgiahocsinh_OOP.txt")
COBAN.danhgia_khoicoban()
| TrinhNKL/Learning_python_for_data-science | OOP Ass/tinhtoan_diemtongket_OO_rev.py | tinhtoan_diemtongket_OO_rev.py | py | 7,590 | python | en | code | 0 | github-code | 50 |
38302835935
|
from flask import Blueprint, render_template
import yfinance as yf
from flask_login import login_required, current_user
from db_manager import db_manager
hist = Blueprint('hist',__name__)
def fetch_stock_history(symbol):
    ticker = yf.Ticker(symbol)
    hist = ticker.history(period="1mo")    # last month of daily bars
    return hist
def get_closing_prices(symbol):
hist = fetch_stock_history(symbol)
hist = hist.to_dict()
closing_price = list(hist['Close'].values())
closing_price = [round(x, 2) for x in closing_price]
closing_price = closing_price[::-1]
return closing_price
def get_open_prices(symbol):
hist = fetch_stock_history(symbol)
hist = hist.to_dict()
open_price = list(hist['Open'].values())
open_price = [round(x, 2) for x in open_price]
open_price = open_price[::-1]
return open_price
def get_high_prices(symbol):
hist = fetch_stock_history(symbol)
hist = hist.to_dict()
high_price = list(hist['High'].values())
high_price = [round(x, 2) for x in high_price]
high_price = high_price[::-1]
return high_price
def get_date(symbol):
    hist = fetch_stock_history(symbol)
    hist = list(hist.index.values)
    hist = [str(x)[5:10] for x in hist]    # keep the MM-DD part of each timestamp
    hist = hist[::-1]    # newest first, matching the reversed price and volume lists
    return hist
def get_low_prices(symbol):
hist = fetch_stock_history(symbol)
hist = hist.to_dict()
low_price = list(hist['Low'].values())
low_price = [round(x, 2) for x in low_price]
low_price = low_price[::-1]
return low_price
def get_volume(symbol):
hist = fetch_stock_history(symbol)
hist = hist.to_dict()
volume = list(hist['Volume'].values())
volume = [round(x, 2) for x in volume]
volume = volume[::-1]
return volume
def to_database(symbol):
cur = db_manager.get_cursor()
open_prices = get_open_prices(symbol)
close_prices = get_closing_prices(symbol)
high_prices = get_high_prices(symbol)
low_prices = get_low_prices(symbol)
volumes = get_volume(symbol)
dates = get_date(symbol)
name = yf.Ticker(symbol).info['longName']
# Assuming the lists are all the same length and in the correct order by date
for i in range(len(open_prices)):
cur.execute("INSERT INTO stock_history (stock_id, open_price, close_price, high_price, low_price, volume) SELECT id, %s, %s, %s, %s, %s FROM stocks1 WHERE symbol = %s", (open_prices[i], close_prices[i], high_prices[i], low_prices[i], volumes[i], symbol))
history = {
'symbol': symbol,
'date': dates,
'opens': open_prices,
'closes': close_prices,
'highs': high_prices,
'lows': low_prices,
'vols': volumes,
"name": name
}
db_manager.commit()
return (history)
@hist.route("/history/<symbol>", methods = ['GET', 'POST'])
@login_required
def render_stock_history(symbol):
stock_history = to_database(symbol)
print("W")
return render_template("history.html", user = current_user, stock_history = stock_history)
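# Usage sketch: a minimal way this blueprint could be mounted on a Flask app,
# assuming the project's db_manager and a configured flask_login LoginManager
# are available; treat this as a sketch rather than a complete server.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(hist)
    # GET /history/<symbol> then fetches a month of OHLCV data, stores it, and renders it
    app.run(debug=True)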
|
jkw944/DIS_Project
|
MyWebApp/stock_hist.py
|
stock_hist.py
|
py
| 3,078 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70505940315
|
"""Module for Member_Payments model"""
from sqlalchemy.sql.functions import now
from models.Member_Payments import Member_Payments
from exceptions.Bad_Request import Bad_Request
from sqlalchemy.exc import IntegrityError
from models.Group_Payments import Group_Payments, db
from dataclasses import dataclass
from typing import Any
@dataclass
class Member_Payment:
    """Class for logic abstraction from views"""
    member_id: int
    group_payment_id: int
    amount: float
    paid_on: str
    paid: bool
    created_on: str
    group_payment: Any    # the related Group_Payments row
def __init__(self, payment: Member_Payments):
self.member_id = payment.member_id
self.group_payment_id = payment.group_payment_id
self.amount = float(payment.amount)
self.paid_on = payment.paid_on
self.paid = payment.paid
self.created_on = payment.created_on
self.key = (self.member_id, self.group_payment_id)
self.group_payment = payment.group_members_payments
def __repr__(self) -> str:
return f"<Member_Payment member_id={self.member_id} group_payment_id={self.group_payment_id} amount={self.amount} paid_on={self.paid_on} paid={self.paid} created_on={self.created_on}>"
@classmethod
def get_by_id(cls, member_id: str, group_payment_id: str):
"""Return a user using composite string: (member_id, group_payment_id)"""
payment: Member_Payments = Member_Payments.query.get((member_id, group_payment_id))
return cls(payment)
def edit(self, amount) -> None:
"""Edit payment using id"""
member_payment: Member_Payments = Member_Payments.query.get(self.key)
member_payment.amount = self.amount = amount or member_payment.amount
try:
db.session.commit()
except IntegrityError as error:
db.session.rollback()
[message] = error.orig.args
raise Bad_Request(message, "Database error", pgcode=error.orig.pgcode) from error
def delete(self) -> None:
"""Delete payment using id"""
Member_Payments.query.filter_by(
member_id=self.member_id,
group_payment_id = self.group_payment_id
).delete()
try:
db.session.commit()
except IntegrityError as error:
db.session.rollback()
[message] = error.orig.args
raise Bad_Request(message, "Database error", pgcode=error.orig.pgcode) from error
def pay(self) -> None:
"""Set member_payment to paid"""
member_payment: Member_Payments = Member_Payments.query.get(self.key)
member_payment.paid = self.paid = True
member_payment.paid_on = self.paid_on = now()
try:
db.session.commit()
except IntegrityError as error:
db.session.rollback()
[message] = error.orig.args
raise Bad_Request(message, "Database error", pgcode=error.orig.pgcode) from error
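# Usage sketch: a minimal flow, assuming an active application context with the
# database initialized; the ids below are hypothetical.
if __name__ == "__main__":
    payment = Member_Payment.get_by_id("1", "1")    # composite-key lookup
    payment.pay()                                   # marks the payment paid and stamps paid_on
    print(payment)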
|
Juli03b/groupypay
|
db_helpers/Member_Payment.py
|
Member_Payment.py
|
py
| 3,001 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73810922075
|
import logging
import struct
import numpy as np
import serial
import time
from serial import SerialException
class ArduinoSerial:
"""
Represents an Arduino or ESP32 Serial device
"""
__address = None
__baud_rate = 57600 #19200 # 38400
__byte_size = serial.EIGHTBITS
__timeout = 0.01
__parity = serial.PARITY_NONE
__stopbits = serial.STOPBITS_ONE
__xonxoff = 1
__delay = 0.01
__serial: serial.Serial = None
_log: logging.Logger = None
_previous_val: int = 0
def __init__(self, address: str):
self.__address = address
self.connect()
self._log = logging.getLogger(__name__)
self._log.addHandler(logging.NullHandler())
# create console handler and set level to debug
has_console_handler = False
if len(self._log.handlers) > 0:
for handler in self._log.handlers:
if isinstance(handler, logging.StreamHandler):
has_console_handler = True
if not has_console_handler:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self._log.addHandler(ch)
def logger(self) -> logging.Logger:
return self._log
def set_logger(self, log: logging.Logger):
self._log = log
def connect(self):
self.__serial = serial.Serial(
port=self.__address,
baudrate=self.__baud_rate,
bytesize=self.__byte_size,
timeout=self.__timeout,
parity=self.__parity,
stopbits=self.__stopbits,
xonxoff=self.__xonxoff
)
time.sleep(0.1)
@property
def timeout(self):
return self.__timeout
@timeout.setter
def timeout(self, value: float):
value = abs(float(value))
self.__timeout = value
self.__serial.timeout = value
@property
def delay(self) -> float:
return self.__delay
@delay.setter
def delay(self, value):
value = float(value)
if value > 0:
self.__delay = value
def close(self):
try:
self._log.info(f'Closing serial connection to Arduino at {self.__address}.')
self.__serial.flush()
self.__serial.close()
except AttributeError as e:
if self._log is not None:
self._log.warning('Connection already closed')
def write(self, q: str):
self.__serial.write(f'{q}\r'.encode('utf-8'))
time.sleep(self.__delay)
def query(self, q: str) -> str:
self.write(f"{q}")
line = self.__serial.readline()
time.sleep(self.__delay)
return line.decode('utf-8').rstrip("\n").rstrip(" ")
def query_binary(self, q, packets: bool = False, size: int = 2):
data = bytearray()
self.write(f"{q}")
if packets:
raw_msg_len = self.__serial.read(4)
n = struct.unpack('<I', raw_msg_len)[0]
while len(data) < n:
packet = self.__serial.read(n - len(data))
if not packet:
return None
data.extend(packet)
else:
data = self.__serial.read(size)
self.__serial.reset_input_buffer()
self.__serial.flush()
return data
def __del__(self):
try:
self.close()
except SerialException as e:
self._log.error(e)
class DeflectionReader(ArduinoSerial):
def __init__(self, address: str):
super().__init__(address=address)
if not self.check_id():
raise SerialException(f"Could not find deflection pot in {address}.")
def check_id(self, attempt: int = 0) -> bool:
# time.sleep(0.25)
old_delay = self.delay
old_timeout = self.timeout
self.delay = 0.05
self.timeout = 0.05
check_id = self.query('i')
self.delay = old_delay
self.timeout = old_timeout
if check_id != 'DEFLECTION_POT':
if attempt <= 3:
attempt += 1
return self.check_id(attempt=attempt)
else:
return False
else:
return True
def get_reading(self, attempts=0) -> int:
res = self.query_binary('r', size=2)
        if res is None or len(res) < 2:
            attempts += 1
            if attempts < 3:
                self._log.warning(f'Failed reading position (attempt {attempts} of 3). Trying again...')
                return self.get_reading(attempts=attempts)
            else:
                self._log.warning(f'Failed reading position (attempt {attempts} of 3). Returning previous value...')
                return self._previous_val
adc = struct.unpack('<H', res)[0]
self._previous_val = adc
return adc
@property
def reading(self):
return self.get_reading()
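# Usage sketch: a minimal read loop, assuming the pot answers on the port below;
# the device path is hypothetical and host-dependent.
if __name__ == '__main__':
    reader = DeflectionReader('/dev/ttyUSB0')    # raises SerialException if no DEFLECTION_POT replies
    for _ in range(5):
        print(f'ADC reading: {reader.reading}')
    reader.close()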
|
erickmartinez/relozwall
|
instruments/position_pot.py
|
position_pot.py
|
py
| 4,911 |
python
|
en
|
code
| 0 |
github-code
|
50
|
32103077192
|
import firebase_admin
from firebase_admin import db
from datetime import datetime
from dotenv import load_dotenv
import os
import settings
load_dotenv()
DATABASE_URL = os.getenv('DATABASE_URL')
CREDENTIALS = os.getenv('CREDENTIALS')
class User():
def __init__(self, *args, **kwargs):
self.id=kwargs.get('id',None)
self.first_name=kwargs.get('first_name',None)
self.last_name=kwargs.get('last_name',None)
self.auth_date=kwargs.get('auth_date',None)
self.campo_id=kwargs.get('campo_id',None)
def __str__(self):
return f'Usuario: {self.first_name} {self.last_name}'
class FirebaseDB():
def __init__(self):
firebase_admin.initialize_app(firebase_admin.credentials.Certificate(CREDENTIALS), {'databaseURL':DATABASE_URL})
def authenticate(self, user_id):
_user_data = db.reference(f"user/{user_id}").get()
if _user_data:
_user_data.update({'id':user_id})
return User(**_user_data)
return None
def send_realtime_data(self, user_id, data):
"""
        Updates only the farm's (campo) daily information
"""
user = self.authenticate(user_id)
if user:
campo_id=user.campo_id
_diaria_data = db.reference(f"campo/{campo_id}/diaria/").get()
d = {}
if _diaria_data:
for k, v in _diaria_data.items():
valor = 0
try:
valor = int(v)
                    except (TypeError, ValueError):
pass
d[k] = valor
if isinstance(data, dict):
for key, value in data.items():
d[key] = d[key] + value
else:
                data['categoria'] = data.categoria.apply(lambda row: f'{row}s')    # pluralize: ovejas, carneros, corderos
                for index, row in data.iterrows():
                    if row.accion.upper() in settings.ACCIONES_AUMENTO:    # stock-increasing actions: buy, birth
                        d[row.categoria] = d[row.categoria] + row.cantidad
                    else:    # stock-decreasing actions: death, sale
                        d[row.categoria] = d[row.categoria] - row.cantidad
            # Replace negative values with 0
d2 = {}
for k, v in d.items():
d2[k] = v if v >= 0 else 0
db.reference(f'campo/{campo_id}/diaria/').set(d2)
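# Usage sketch: a minimal update, assuming a user record exists under user/<id>
# in the realtime database; the id and counts below are hypothetical.
if __name__ == '__main__':
    fdb = FirebaseDB()
    if fdb.authenticate('12345'):
        # add two ewes (ovejas) and one lamb (cordero) to the farm's daily tally
        fdb.send_realtime_data('12345', {'ovejas': 2, 'corderos': 1})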
|
Anele13/voice_notes
|
firebase.py
|
firebase.py
|
py
| 2,426 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25672403537
|
import copy
import numpy
def create_augmented_matrix():
array = numpy.empty(12).reshape(3, 4) # matrix (3 rows x 4 columns) (12 values)
array[0] = [1, 0, 2, 1]
array[1] = [2, -1, 3, -1]
array[2] = [4, 1, 8, 2]
return array # returns array
def create_matrix():
array = numpy.empty(9).reshape(3, 3) # matrix (3 rows x 3 columns) (9 values)
array[0] = [1, -1, 0]
array[1] = [-2, 2, -1]
array[2] = [0, 1, -2]
return array # returns array
def gauss_jordan(array):
rows = array.shape[0] # height
columns = array.shape[1] # width
    swap_index = 0  # index of swappable rows
    column_max = 0  # max value in column
    max_row = 0  # index of row w/ column max
for column in range(0, columns - 1): # once per column (excluding result column)
for row in range(swap_index, rows): # row swap test
            if abs(array[row][column]) > abs(column_max):  # max magnitude in column
                column_max = array[row][column]
max_row = row
if column_max != 0: # cannot divide by zero
array[max_row] = array[max_row] / column_max
if swap_index < rows: # row swap
temp_row = copy.deepcopy(array[swap_index])
array[swap_index] = array[max_row]
array[max_row] = temp_row
for row in range(0, rows): # matrix operation
if row != swap_index:
if swap_index < rows:
array[row] = array[row] - (array[row][column] * array[swap_index])
swap_index = swap_index + 1 # increment
column_max = 0 # reset
max_row = 0 # reset
return array # returns array
def matrix_inverse(array):
rows = array.shape[0] # height
columns = array.shape[1] # width
if rows != columns:
return "Matrix rows and columns must match"
augmented = numpy.empty(rows * (columns * 2)).reshape(rows, columns * 2) # matrix (rows x (columns*2))
new_rows = augmented.shape[0] # height
new_columns = augmented.shape[1] # width
for row in range(0, rows): # copies original array
for column in range(0, columns):
augmented[row][column] = array[row][column]
for row in range(0, new_rows): # adds identity matrix
for column in range(columns, new_columns):
if row == (column - rows):
augmented[row][column] = 1
else:
augmented[row][column] = 0
swap_index = 0 # index of swappable rows
column_max = 0 # max value in column
max_row = 0 # index of row w/ column max
for column in range(0, columns): # once per original columns
for row in range(swap_index, rows): # row swap test
if abs(augmented[row][column]) > abs(column_max): # max magnitude in column
column_max = augmented[row][column]
max_row = row
if column_max != 0: # cannot divide by zero
augmented[max_row] = augmented[max_row] / column_max
if swap_index < rows: # row swap
temp_row = copy.deepcopy(augmented[swap_index])
augmented[swap_index] = augmented[max_row]
augmented[max_row] = temp_row
for row in range(0, rows): # matrix operation
if row != swap_index:
if swap_index < rows:
augmented[row] = augmented[row] - (augmented[row][column] * augmented[swap_index])
swap_index = swap_index + 1 # increment
column_max = 0 # reset
max_row = 0 # reset
inverse = numpy.empty(rows * columns).reshape(rows, columns) # matrix (rows x columns)
for row in range(0, rows): # copies inverse array
for column in range(columns, new_columns):
inverse[row][column - columns] = augmented[row][column]
return inverse # returns array
def gaussian(array):
rows = array.shape[0] # height
columns = array.shape[1] # width
swap_index = 0 # index of swappable rows
column_max = 0 # max value in column
max_row = 0 # index of row w/ column max
for column in range(0, columns - 1): # once per column (excluding result column)
for row in range(swap_index, rows): # row swap test
if abs(array[row][column]) > abs(column_max): # max magnitude in column
column_max = array[row][column]
max_row = row
if swap_index < rows: # row swap
temp_row = copy.deepcopy(array[swap_index])
array[swap_index] = array[max_row]
array[max_row] = temp_row
for row in range(swap_index, rows): # matrix operation
if row != swap_index:
if swap_index < rows:
array[row] = array[row] - ((array[row][column] / column_max) * array[swap_index])
swap_index = swap_index + 1 # increment
column_max = 0 # reset
max_row = 0 # reset
# bootleg algebra code
# should make this scale
c = array[rows - 1][columns - 1] / array[rows - 1][columns - 2]
b = (array[rows - 2][columns - 1] - (c * array[rows - 2][columns - 2])) / array[rows - 2][columns - 3]
a = (array[rows - 3][columns - 1] - (c * array[rows - 3][columns - 2]) - (b * array[rows - 3][columns - 3])) / \
array[rows - 3][columns - 4]
return [array, a, b, c] # returns list
def gaussian_determinant(array):
rows = array.shape[0] # height
columns = array.shape[1] # width
swap_index = 0 # index of swappable rows
column_max = 0 # max value in column
max_row = 0 # index of row w/ column max
swaps = 0 # number of swaps
for column in range(0, columns): # once per column
for row in range(swap_index, rows): # row swap test
if abs(array[row][column]) > abs(column_max): # max magnitude in column
column_max = array[row][column]
max_row = row
if max_row != swap_index: # row swap
if swap_index < rows:
temp_row = copy.deepcopy(array[swap_index])
array[swap_index] = array[max_row]
array[max_row] = temp_row
swaps = swaps + 1
for row in range(swap_index, rows): # matrix operation
if row != swap_index:
if swap_index < rows:
array[row] = array[row] - ((array[row][column] / column_max) * array[swap_index])
swap_index = swap_index + 1 # increment
column_max = 0 # reset
max_row = 0 # reset
determinant = 1 # default multiplication value
for index in range(0, rows): # calculates determinant
determinant = determinant * array[index][index]
determinant = determinant * (-1) ** swaps
return [array, determinant] # returns list
def format_array(array): # formats array ".2f"
formatted = copy.deepcopy(array)
rows = formatted.shape[0] # height
columns = formatted.shape[1] # width
for row in range(0, rows):
for column in range(0, columns):
formatted[row][column] = round(formatted[row][column], 2)
return formatted
def main():
array1 = create_augmented_matrix()
print("Augmented Array")
print(array1)
array_gj = gauss_jordan(array1)
print("\nGauss-Jordan Elimination")
print(array_gj)
array2 = create_augmented_matrix()
array_g = gaussian(array2)
formatted_array_g = format_array(array_g[0])
print("\nGaussian Elimination")
print("A = " + str(array_g[1]) + "; B = " + str(array_g[2]) + "; C = " + str(array_g[3]))
print(formatted_array_g)
array3 = create_matrix()
print("\nArray")
print(array3)
array_mi = matrix_inverse(array3)
print("\nInverse using Gauss-Jordan")
print(array_mi)
array4 = create_matrix()
array_gd = gaussian_determinant(array4)
print("\nGaussian Determinant")
print("Determinant = " + str(array_gd[1]))
print(array_gd[0])
main()
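# Cross-check sketch: the hand-rolled routines above can be verified against
# numpy.linalg on the same matrices used in main().
def verify_against_numpy():
    a = create_matrix()
    print("numpy inverse:\n", numpy.linalg.inv(a))
    print("numpy determinant:", numpy.linalg.det(a))
    aug = create_augmented_matrix()
    # solve A x = b, taking the first three columns as A and the last as b
    print("numpy solution:", numpy.linalg.solve(aug[:, :3], aug[:, 3]))
# verify_against_numpy()  # uncomment to compare with main()'s output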
|
Aemxander/scientific-computing-course
|
Assignment 2/assignment 2.py
|
assignment 2.py
|
py
| 8,296 |
python
|
en
|
code
| 0 |
github-code
|
50
|
21277570393
|
'''Descript:
Ugly number is a number that only have factors 2, 3 and 5.
Design an algorithm to find the nth ugly number. The first 10 ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12...
Notice
Note that 1 is typically treated as an ugly number.
'''
import heapq
class Solution:
"""
@param n: An integer
    @return: the nth ugly number, as described above.
    Ugly number generation process:
1
2, 3, 5
4, 6, 10, 6, 9, 15, 10, 15, 25
"""
def nthUglyNumber(self, n):
# write your code here
if n <= 1:
return n
factors = [2, 3, 5]
ugly_numbers = factors + [1]
        heapq.heapify(ugly_numbers)    # heapify works in place and returns None
count = 0
while count < n - 1:
ugly_num = heapq.heappop(ugly_numbers)
count += 1
for factor in factors:
n_ugly_num = ugly_num * factor
if n_ugly_num not in ugly_numbers:
heapq.heappush(ugly_numbers, n_ugly_num)
return heapq.heappop(ugly_numbers)
'''Summary
Tool of choice: heap.
For "find the nth element" problems we generally use a heap. The heap can be kept at a fixed size
(say k) or left unbounded, and the two variants follow different templates:
- with a fixed heap of size k, a single pop of the heap top yields the kth element;
- with an unbounded heap, a while loop pops k - 1 times and one final pop returns the answer.
This problem extends the earlier Ugly Number problem: here we must find the nth ugly number. The
hints in the statement practically give the solution away: the ugly-number sequence splits into
three sublists:
(1) 1×2, 2×2, 3×2, 4×2, 5×2, …
(2) 1×3, 2×3, 3×3, 4×3, 5×3, …
(3) 1×5, 2×5, 3×5, 4×5, 5×5, …
Each sublist is the ugly-number sequence multiplied by 2, 3, and 5 respectively, and every new ugly
number is drawn from the already-generated sequence: at each step we take the current minimum across
the three lists and append it. Reference implementation:
def nthUglyNumber(self, n):
    # Boundary case: if n == 1, return 1, since 1 counts as an ugly number
    if n <= 1:
        return n
    # factors holds the prime factors of ugly numbers
    factors = [2, 3, 5]
    # The heap stores ugly numbers; seed it with the three factors plus 1
    # (note that the three factors are themselves ugly numbers)
    # Note: Python can sort tuples, ordering by the first element and then the second
    heap = factors + [1]
    import heapq
    heapq.heapify(heap)
    # Looping while n > 1 guarantees that one final pop after the loop is the answer
    while n > 1:
        # Pop the smallest ugly number, multiply it by each factor, and push the products
        uglyNum = heapq.heappop(heap)
        # Decrement n once per pop
        n -= 1
        for factor in factors:
            nUglyNum = factor * uglyNum
            # Important: skip duplicates so the heap only holds distinct values
            if nUglyNum not in heap:
                heapq.heappush(heap, nUglyNum)
    return heapq.heappop(heap)
'''
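# Sanity-check sketch: the first ten ugly numbers should come out as
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
if __name__ == '__main__':
    solution = Solution()
    print([solution.nthUglyNumber(i) for i in range(1, 11)])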
|
dragonforce2010/interview-algothims
|
none_ladder/4_Ugly_NumberII.py
|
4_Ugly_NumberII.py
|
py
| 3,450 |
python
|
zh
|
code
| 19 |
github-code
|
50
|
31795427348
|
from problems.independence import IndependenceProblem
from gui import BaseGUI
import time
def greedy_search(problem: IndependenceProblem, gui: BaseGUI):
"""The BEST-IN-GREEDY algorithm."""
# Step 1: Get elements sorted by costs
element_iterator = problem.get_sorted_elements()
# Step 2: Initialize the empty set (F in the slides)
independent_set = problem.get_empty_independence_set()
# Step 3: As long as the set remains independent, put elements into it
for element in element_iterator:
independent_set.add(element)
if not problem.is_independent(independent_set):
independent_set.remove(element)
else:
if gui.is_searching:
gui.set_and_animate_independence_set(independent_set)
if not gui.is_searching:
break
# Tell gui that search is over
gui.stop_search()
return independent_set
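# Smoke-test sketch with stand-in objects; the real IndependenceProblem and
# BaseGUI implementations live elsewhere in this repository, so the stubs
# below are hypothetical.
if __name__ == "__main__":
    class _StubProblem:
        def get_sorted_elements(self):
            return iter([3, 1, 4, 5])
        def get_empty_independence_set(self):
            return set()
        def is_independent(self, subset):
            return len(subset) <= 2    # toy independence system: at most two elements
    class _StubGUI:
        is_searching = True
        def set_and_animate_independence_set(self, subset):
            pass
        def stop_search(self):
            self.is_searching = False
    print(greedy_search(_StubProblem(), _StubGUI()))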
|
jonasgrebe/tu-opti-algo-project
|
algos/greedy.py
|
greedy.py
|
py
| 913 |
python
|
en
|
code
| 2 |
github-code
|
50
|
16761913042
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import scipy.optimize as spc
def OOR2(x, p1):
return p1/(x**2)
def PlotLightCollection(positions, LC_factors, axis=None):
'''For a given axis, plot the light collection as a function of
that axis (x,y,or z supported)'''
sns.set_style("whitegrid")
xkcd_colors = ['slate blue','black']
sns.set_palette(sns.xkcd_palette(xkcd_colors))
axis_dict = {'x':0, 'y':1, 'z':2}
for a in axis_dict:
if axis==a:
theaxis = axis_dict[a]
    the_positions = []
    for i in range(len(positions)):
        the_positions.append(positions[i][theaxis])
the_positions = np.array(the_positions)
LC_factors = np.array(LC_factors)
plt.plot(the_positions, LC_factors, linestyle='none',marker='o',markersize=7,
label="LC factor")
plt.legend(loc=1)
plt.show()
def PlotLightCollection_OnePMT(positions, LC_factors, axis=None):
'''For a given axis, plot the light collection as a function of
that axis (x,y,or z supported)'''
sns.set_style("whitegrid")
xkcd_colors = ['slate blue','black']
sns.set_palette(sns.xkcd_palette(xkcd_colors))
axis_dict = {'x':0, 'y':1, 'z':2}
for a in axis_dict:
if axis==a:
theaxis = axis_dict[a]
    the_positions = []
    for i in range(len(positions)):
        the_positions.append(positions[i][theaxis])
the_positions = np.array(the_positions)
the_distance = 8203.0 - the_positions
LC_factors = np.array(LC_factors)
plt.plot(the_distance, LC_factors, linestyle='none',marker='o',markersize=7,
label="LC factor")
popt, pcov = spc.curve_fit(OOR2, the_distance, LC_factors, p0=[0.01])
print("BEST FIT VALUES: " + str(popt))
print("PCOVARIANCE: " + str(pcov))
x = np.arange(min(the_distance),max(the_distance),
(max(the_distance)-min(the_distance))/100.0)
bfvals = OOR2(x, popt[0])
plt.plot(x, bfvals, linewidth=4,label=r'$A/r^{2}$ fit')
plt.legend(loc=1)
plt.show()
def ShowPositions(positions):
if len(positions)<=0:
print("No positions have been filled yet.")
return
else:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
        x, y, z = [], [], []
        for i in range(len(positions)):
            x.append(positions[i][0])
            y.append(positions[i][1])
            z.append(positions[i][2])
#X,Y = np.meshgrid(x, y)
ax.scatter(x,y, z,label='PMT positions')
ax.set_xlabel("X position (mm)")
ax.set_ylabel("Y position (mm)")
ax.set_zlabel("Z position (mm)")
plt.title("Distribution of positions in input array")
plt.legend()
plt.show()
def ContourMap_XYSlice(positions,light_factors,zrange=[-1000.0,1000.0],pmt_positions=None):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
    x, y, lf = [], [], []
    for i in range(len(positions)):
        if zrange[0] < positions[i][2] < zrange[1]:
            x.append(positions[i][0])
            y.append(positions[i][1])
            lf.append(light_factors[i])
    if pmt_positions is not None:
        px, py, pz = [], [], []
        for i in range(len(pmt_positions)):
            px.append(pmt_positions[i][0])
            py.append(pmt_positions[i][1])
            pz.append(pmt_positions[i][2])
        #X,Y = np.meshgrid(x, y)
        ax.scatter(px, py, pz, label='PMT positions')
    ax.plot_trisurf(x, y, lf, cmap=plt.cm.jet, linewidth=0.2, label='LC factor')
ax.set_xlabel("X position (mm)")
ax.set_ylabel("Y position (mm)")
ax.set_zlabel("LC Factor")
plt.title("Light collection factor through WATCHMAN fiducial volume\n"+\
"%s points in slice; avg. LC factor: %s\n"%(str(len(lf)),\
str(np.average(lf)))+"zrange (mm): %s"%(str(zrange)))
plt.legend()
plt.show()
def ColorMap(positions,light_factors):
if len(positions)<=0:
print("No positions have been filled yet.")
return
else:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
        x, y, z = [], [], []
        for i in range(len(positions)):
            x.append(positions[i][0])
            y.append(positions[i][1])
            z.append(positions[i][2])
        ax.scatter(x, y, z, c=np.array(light_factors), cmap=plt.cm.spring)
ax.set_xlabel("X position (mm)")
ax.set_ylabel("Y position (mm)")
ax.set_zlabel("Z position (mm)")
plt.title("Light collection factor through WATCHMAN fiducial volume\n"+\
"%s points presented"%(str(len(light_factors))))
plt.show()
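# Usage sketch: exercise ShowPositions with fabricated PMT positions; the
# coordinate range below is arbitrary.
if __name__ == '__main__':
    fake_positions = np.random.uniform(-8000.0, 8000.0, size=(50, 3))
    ShowPositions(list(fake_positions))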
|
pershint/WATCHSHAPES
|
lib/Plots.py
|
Plots.py
|
py
| 4,794 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38925027559
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
from scipy import integrate
class Wire:
'''
Implements an arbitrary shaped wire
'''
#coordz = np.array([])
'''Coordinates of the vertex of the wire in the form [X,Y,Z]'''
#I = 1
'''Complex current carried by the wire'''
def __init__(self):
'''
        By default initializes nothing
'''
return
def Set_Current(self, current):
'''Sets current in wire'''
self.I = current
return
def Create_Toroidal_Coil(self, R1, R2, N, step):
'''
Create_Toroidal_Coil( R1 , R2 , N , step )
        Creates a toroidal coil of major radius R1 and minor radius R2, with N
        turns and step size `step`.
        Initializes coordz.
'''
a = R1
b = R2
c = N
t = np.r_[0:2 * np.pi:step]
X = (a + b * np.sin(c * t)) * np.cos(t);
Y = (a + b * np.sin(c * t)) * np.sin(t);
Z = b * np.cos(c * t);
self.coordz = np.array([X, Y, Z])
return
def Create_Solenoid(self, R, N, l, step):
'''
Create_Solenoid(self, R , N , l , step )
        Creates a solenoid of length l and radius R, with N turns and step
        size `step`, along the z axis.
'''
a = R;
b = l / (2 * np.pi * N);
T = l / b;
t = np.r_[0:T:step]
X = a * np.cos(t);
Y = a * np.sin(t);
Z = b * t;
self.coordz = np.array([X, Y, Z])
return
def Create_Loop(self, center, radius, NOP, theta, Orientation='xy'):
'''
Create_Loop(self,center,radius,NOP)
        Creates a circle with center given by the vector CENTER and radius
        given by the scalar RADIUS. NOP is the number of points on the circle.
'''
t = np.linspace(0, 2 * np.pi, NOP)
if Orientation == 'xy':
X = center[0] + radius * np.sin(t)
Y = center[1] + radius * np.cos(t)
Z = np.zeros(NOP)
elif Orientation == 'xz':
X = center[0] + radius * np.sin(t)
Z = center[1] + radius * np.cos(t)
Y = np.zeros(NOP)
elif Orientation == 'yz':
Y = center[0] + radius * np.sin(t)
Z = center[1] + radius * np.cos(t)
X = np.zeros(NOP)
XYZ_add = self.Rotation_Z(np.array([X, Y, Z]), theta)
try:
self.coordz
except AttributeError:
self.coordz = XYZ_add
else:
XYZ = self.coordz
self.coordz = np.concatenate((XYZ, XYZ_add), axis=1)
return
def Create_D_Shape_coil(self, theta):
def create_d_shaped_coil(r_in, r_out, x_in):
#returns 1 branch of the d shaped electromagnetic coil with set inner and outer radii
#https://lss.fnal.gov/conf/C720919/p240.pdf
k = np.log(r_out/r_in)
def d_coil_func(x):
return(2*np.log(x)/(((k**2)-4*(np.log(x)**2))**0.5))
def calculate_y(x):
return integrate.quad(d_coil_func, r_in, x)
vect_y = np.vectorize(calculate_y)
return vect_y(x_in)[0]
def Rotation_Z(theta):
return np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
x = np.linspace(0.5,2,1000)
z = create_d_shaped_coil(1,4, x)
X = np.concatenate((x,np.flip(x),np.zeros(100)+0.5))
Z = np.concatenate((-z+2*z[-1],np.flip(z),np.linspace(z[0],-z[0]+2*z[-1],100))) - max(z)
Y = np.zeros(len(X))
self.coordz = Rotation_Z(theta) @ np.array([X,Y,Z])
return
def Transform_Shift(self, x_shift, y_shift, z_shift):
X = self.coordz[0] + x_shift
Y = self.coordz[1] + y_shift
Z = self.coordz[2] + z_shift
self.coordz = np.array([X, Y, Z])
return
def Rotation_Z(self, vect, theta):
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
return rotation_matrix @ vect
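# Usage sketch: build a 10-turn solenoid and inspect the generated vertices;
# the dimensions below are arbitrary.
if __name__ == '__main__':
    wire = Wire()
    wire.Set_Current(1.0)
    wire.Create_Solenoid(R=0.05, N=10, l=0.5, step=0.01)
    print(wire.coordz.shape)    # (3, n_points): rows are X, Y, Z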
|
arnaudbergeron/TokamakTrack
|
SimulationTokamak/WireShape.py
|
WireShape.py
|
py
| 4,345 |
python
|
en
|
code
| 1 |
github-code
|
50
|
34401125439
|
import psycopg2
import pandas as pd
def serialize_array(arr):
ret = " ".join(arr)
if not ret:
return "NULL"
return ret
def de_serialize_array(array_str):
return array_str.split(" ")
# connect to PostgreSQL database, get connection
conn = psycopg2.connect(
host="database",
dbname="postgres",
user="postgres",
password="postgres"
)
cursor = conn.cursor()
# Re-Initialize database upon application stop and start
drop_table = "DROP TABLE IF EXISTS course CASCADE"
cursor.execute(drop_table)
create_table = """
CREATE TABLE IF NOT EXISTS course(
id SERIAL PRIMARY KEY,
code VARCHAR(20) NOT NULL UNIQUE,
name TEXT,
division TEXT,
course_description TEXT,
department TEXT,
pre_requisites TEXT,
course_level TEXT,
utsc_breadth TEXT,
apsc_electives TEXT,
campus TEXT,
term TEXT,
activity TEXT,
last_updated TEXT,
exclusion TEXT,
utm_distribution TEXT,
corequisite TEXT,
recommended_preparation TEXT,
arts_and_science_breadth TEXT,
arts_and_science_distribution TEXT,
later_term_course_details TEXT,
course TEXT,
fase_available TEXT,
maybe_restricted TEXT,
majors_outcomes TEXT,
minors_outcomes TEXT,
ai_pre_reqs TEXT,
views INT
)
"""
# initialize course data in PostgreSQL database
cursor.execute(create_table)
# de-pickle course data
df = pd.read_pickle('resources/df_processed.pickle').set_index('Code')
# iterate through all courses (course code, course info)
for course_code, course_attributes in df.iterrows():
# storage for row to be inserted into database
current_row = []
#course code
current_row.append(str(course_code))
# course attributes
current_row.append(str(course_attributes["Name"]))
current_row.append(str(course_attributes["Division"]))
# handle course descriptions
course_desc = str(course_attributes["Course Description"])
current_row.append(str(course_attributes["Course Description"])) #course_desc.replace("'", "''"))
current_row.append(str(course_attributes["Department"]))
current_row.append(serialize_array(course_attributes["Pre-requisites"]))
current_row.append(str(course_attributes["Course Level"]))
current_row.append(str(course_attributes["UTSC Breadth"]))
current_row.append(str(course_attributes["APSC Electives"]))
current_row.append(str(course_attributes["Campus"]))
current_row.append(serialize_array(course_attributes["Term"]))
current_row.append("NULL") # Activity, NULL for now
current_row.append(str(course_attributes["Last updated"]))
current_row.append(serialize_array(course_attributes["Exclusion"]))
current_row.append(str(course_attributes["UTM Distribution"]))
current_row.append(serialize_array(course_attributes["Corequisite"]))
current_row.append(serialize_array(course_attributes["Recommended Preparation"]))
current_row.append(str(course_attributes["Arts and Science Breadth"]))
current_row.append(str(course_attributes["Arts and Science Distribution"]))
current_row.append(str(course_attributes["Later term course details"]))
current_row.append(str(course_attributes["Course"]))
current_row.append(str(course_attributes["FASEAvailable"]))
current_row.append(str(course_attributes["MaybeRestricted"]))
current_row.append(serialize_array(course_attributes["MajorsOutcomes"]))
current_row.append(serialize_array(course_attributes["MinorsOutcomes"]))
current_row.append(serialize_array(course_attributes["AIPreReqs"]))
# view count
current_row.append("0")
    # no manual quote-escaping needed: psycopg2 binds the parameters below
insert = """
INSERT INTO course (
code,
name,
division,
course_description,
department,
pre_requisites,
course_level,
utsc_breadth,
apsc_electives,
campus,
term,
activity,
last_updated,
exclusion,
utm_distribution,
corequisite,
recommended_preparation,
arts_and_science_breadth,
arts_and_science_distribution,
later_term_course_details,
course,
fase_available,
maybe_restricted,
majors_outcomes,
minors_outcomes,
ai_pre_reqs,
views
    ) VALUES (
        %s, %s, %s, %s, %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s, %s, %s, %s, %s
    )
    """
    # insert row into table, letting psycopg2 quote and bind the 27 values
    cursor.execute(insert, tuple(current_row))
cursor.execute("commit")
# close connection
cursor.close()
conn.close()
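# Verification sketch: re-open a connection and confirm how many course rows
# were loaded, using the same credentials as above.
def count_courses():
    check_conn = psycopg2.connect(host="database", dbname="postgres",
                                  user="postgres", password="postgres")
    check_cur = check_conn.cursor()
    check_cur.execute("SELECT COUNT(*) FROM course")
    print("courses loaded:", check_cur.fetchone()[0])
    check_cur.close()
    check_conn.close()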
|
ECE444-2021Fall/project1-education-pathways-group-11-sigmalab
|
education_pathways/db_init.py
|
db_init.py
|
py
| 5,490 |
python
|
en
|
code
| 2 |
github-code
|
50
|
74769111516
|
# RandomForestClassifier.py: random forest classifier module
import os, sys, time
import numpy as np
import math
from mpi4py import MPI
from DecisionTreeCartContinue import DecisionTreeCartContinue
from DecisionTreeCartDiscrete import DecisionTreeCartDiscrete
class RandomForestClassifier:
    '''
    : RandomForestClassifier: parallelized random forest classifier
    '''
def __init__(self, comm, main=0, n_estimator=10, continuity=True, oob_score=False, max_features='sqrt', min_impurity_decrease=None, max_depth=None, min_samples_leaf=1, min_samples_split=2):
        '''
        : __init__: random forest initialization
        : param comm: mpi4py.MPI.Intracomm, MPI communicator used for parallel training
        : param main: int, rank of the process that acts as the main process during training and prediction
        : param n_estimator: int, Bagging parameter, number of base classifiers/decision trees in the forest, default 10
        : param continuity: bool, Bagging parameter, whether the input dataset is continuous or discrete, default True
        : param oob_score: bool, Bagging parameter, whether to evaluate the model on out-of-bag samples; if enabled, the forest is iterated to find the best one, default False
        : param max_features: int/float/str, pruning parameter, number of features considered at each split; an int gives the exact count, a float gives the fraction, 'sqrt' means the square root of the feature count, 'log' means its base-2 logarithm
        : param min_impurity_decrease: float, pruning parameter, information-gain threshold for splitting; a split whose gain is below this value does not happen, default None
        : param max_depth: int, pruning parameter, maximum depth of each tree, default None
        : param min_samples_leaf: int, pruning parameter, minimum number of samples each child node must hold after a split, default 1
        : param min_samples_split: int, pruning parameter, minimum number of samples a node must hold to be split, default 2
        '''
        # 1. Bagging framework parameters
        self.n_estimator = n_estimator    # number of decision trees in the forest
        self.oob_score = oob_score        # whether to evaluate on out-of-bag samples
        self.continuity = continuity      # continuity of the input data
        # 2. Decision tree pruning parameters
        self.max_features = max_features  # number of features considered at each split
        self.min_impurity_decrease = min_impurity_decrease  # threshold on the split criterion (gain ratio, information gain, or Gini)
        self.max_depth = max_depth        # maximum tree depth
        self.min_samples_leaf = min_samples_leaf    # minimum samples in each child after a split
        self.min_samples_split = min_samples_split  # minimum samples a node needs to be split
        # 3. Basic parameters and data
        self.comm = comm        # MPI communicator used for parallel training and prediction
        self.main = main        # rank of the main process
        self.rank = comm.rank   # rank of the current process
        self.size = comm.size   # total number of processes
        self.treeroot = []      # list of decision tree root nodes
        self.oob_accuracy = []  # list of per-tree out-of-bag accuracies
        self.n_label = 0        # number of label classes seen during training
def fit(self, data=None, label=None):
        '''
        : fit: train the random forest model in parallel on the training set
        : param data: np.array, 2-D training data whose rows are samples and columns are features; the main process must supply it, helper processes use the default None
        : param label: np.array, 1-D training labels; the main process must supply it, helper processes use the default None
        '''
        # 1. The main process broadcasts the raw dataset and labels to the helper processes; because the
        #    forest may need to iterate, having the main process draw the bootstrap samples and then
        #    distribute them to the helpers actually performs poorly
        data = self.comm.bcast(data, root=self.main)      # broadcast/receive the training data
        label = self.comm.bcast(label, root=self.main)    # broadcast/receive the training labels
        # 2. Each process computes how many trees it must build: every helper process builds
        #    floor(self.n_estimator/self.size) trees, and the main process builds
        #    (self.n_estimator - (size-1)*floor(self.n_estimator/self.size)) trees
        if self.rank:
            n_tree = math.floor(self.n_estimator/self.size)    # number of trees this process trains
        else:
            n_tree = self.n_estimator - (self.size - 1)*math.floor(self.n_estimator/self.size)
        # 3. Training loop
        n_sample = label.size    # number of training samples
        self.n_label = np.unique(label).size
        for i in range(n_tree):
            # 3.1 Each process draws n_sample samples with replacement from the original data
            inbag_index = np.unique(np.random.randint(0, n_sample, size=n_sample))    # deduplicated in-bag sample indices
            if self.oob_score:
                outbag_index = np.setdiff1d(np.arange(n_sample), inbag_index)    # out-of-bag indices, generated only if OOB evaluation was requested
            # 3.2 Build and train a decision tree of the requested kind on the sampled data
            tree = DecisionTreeCartContinue() if self.continuity else DecisionTreeCartDiscrete()
            tree.train(data[inbag_index], label[inbag_index], self.min_impurity_decrease, self.max_depth, self.min_samples_leaf, self.min_samples_split, self.max_features)
            self.treeroot.append(tree)
            # 3.3 If OOB accuracy was requested, record it for this tree
            if self.oob_score:
                self.oob_accuracy.append(tree.score(data[outbag_index], label[outbag_index]))
return
def predict(self, data=None):
        '''
        : predict: predict in parallel on the test set and return the predicted labels
        : param data: np.array, 2-D test data whose rows are samples and columns are features; the main process must supply it, helper processes use the default None
        : return: np.array, 1-D predicted labels for the test set; the main process returns the result, helper processes return None
        '''
        # 1. The main process broadcasts the test data to the helper processes
        data = self.comm.bcast(data, root=self.main)    # broadcast/receive the test data
        # 2. Each process runs its local trees over the test samples
        label = []
        for tree in self.treeroot:
            temp = tree.predict(data)
            label.append(temp)
        label = np.array(label).T    # rows are samples, columns are this process's per-tree predictions
        label_counts = []
        for x in label:
            label_counts.append(np.bincount(x, minlength=self.n_label))
        label_counts = np.array(label_counts)
        # 3. The main process gathers the counts from every process and tallies the final prediction
        label_counts = self.comm.gather(label_counts, root=self.main)
        if self.rank != self.main:
            return None
        else:
            total = 0
            for x in label_counts:
                total += x
            res = np.array([np.argmax(x) for x in total])
            return res
def predict_proba(self, data=None):
        '''
        : predict_proba: predict in parallel on the test set and return the per-class probabilities
        : param data: np.array, 2-D test data whose rows are samples and columns are features; the main process must supply it, helper processes use the default None
        : return: np.array, 2-D result whose rows are samples and columns are the probabilities of each class
        '''
        # 1. The main process broadcasts the test data to the helper processes
        data = self.comm.bcast(data, root=self.main)    # broadcast/receive the test data
        # 2. Each process runs its local trees over the test samples
        label = []
        for tree in self.treeroot:
            temp = tree.predict(data)
            label.append(temp)
        label = np.array(label).T    # rows are samples, columns are this process's per-tree predictions
        label_counts = []
        for x in label:
            label_counts.append(np.bincount(x, minlength=self.n_label)/self.n_estimator)
        label_counts = np.array(label_counts)
        # 3. The main process gathers the counts from every process and tallies the final probabilities
        label_counts = self.comm.gather(label_counts, root=self.main)
        if self.rank != self.main:
            return None
        else:
            res = 0
            for x in label_counts:
                res += x
            return res
def score(self, data=None, label=None):
        '''
        : score: predict in parallel on the test set and return the prediction accuracy
        : param data: np.array, 2-D test data whose rows are samples and columns are features; the main process must supply it, helper processes use the default None
        : param label: np.array, 1-D test labels; the main process must supply it, helper processes use the default None
        : return: float, prediction accuracy
        '''
        predict = self.predict(data)
        if self.rank != self.main:
            return None
        else:
            accuracy = np.sum(predict == label)/label.size
            return accuracy
def get_oob_score(self):
        '''
        : get_oob_score: return the out-of-bag accuracies of the decision trees built by this process
        : return: np.array: out-of-bag accuracies of this process's trees, or None if the forest was created with oob_score=False
        '''
        if self.oob_score:
            return self.oob_accuracy
        else:
            return None
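# Driver sketch: a toy run, launched with e.g. `mpiexec -n 4 python RandomForestClassifier.py`;
# the random dataset below is fabricated and only rank 0 supplies it.
if __name__ == '__main__':
    world = MPI.COMM_WORLD
    forest = RandomForestClassifier(world, main=0, n_estimator=8, continuity=True)
    data = label = None
    if world.rank == 0:
        data = np.random.rand(100, 4)
        label = (data[:, 0] > 0.5).astype(int)
    forest.fit(data, label)
    accuracy = forest.score(data, label)
    if world.rank == 0:
        print('training accuracy:', accuracy)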
|
Happyxianyueveryday/mpi-random-forest
|
RandomForestClassifier/RandomForestClassifier.py
|
RandomForestClassifier.py
|
py
| 10,609 |
python
|
zh
|
code
| 1 |
github-code
|
50
|
7008365857
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
"""
    This table replaces the django auth User
"""
last_module_id = models.CharField(max_length=60, null=True, blank=True)
class Meta:
verbose_name = "User"
verbose_name_plural = "Users"
|
LP2-20162/DinningCashier-S.A.C_copia
|
dinning_Cashier_service/caja/usuario/models.py
|
models.py
|
py
| 362 |
python
|
en
|
code
| 0 |
github-code
|
50
|
20345531155
|
import numpy as np
from src.bbox import get_bbox
from src.util import get_radius, get_prob_arr    # assumption: get_prob_arr, used in set_prob_vector below, lives in src.util
class ClusterInfo():
"""
cluster info
"""
def __init__(self, in_a, in_b) -> None:
## indices array of points inside the cluster
self.in_a = in_a
self.in_b = in_b
self.n_pts_a = len(in_a)
self.n_pts_b = len(in_b)
self.n_pts = self.n_pts_a + self.n_pts_b
        ## Points matched to the target in the previous frame
        self.matched_a = 0
        ## Number of points within the fiducial volume
        self.matched_b = 0
## matching score
self.matched_score = 0
## Radius of the cluster
self.radius = 0
## Bounding box of this cluster
self.vertices = np.zeros((4,2))
self.width = self.length = 0
## Probability Array
self.prob_vector = np.zeros(16)
## Associated target: the entire cluster belongs to the target
## Association requirements:
# 1. >=50% of in_a points are assigned to the target in frame a
# 2. >=50% of in_b points are within anticipated fiducial box
self.associated_target = None
## List of affiliated targets
self.affiliated_list = []
## Category: 1 => pedestrian or cyclist; 2 => vehicles
self.category = 0
## assignment indicator:
# 0: ommit this cluster, don't do anything
# 1: append the entire cluster as a new object
# 2: segment the cluster into sub clusters and append the new sub clusters
# 3: assign the entire cluster to an existing target
# 4: assign the cluster to k affiliated targets
self.assign_indicator = 0
# Allocate the points
self.xy_arr, self.h_arr = None, None
def construct_h_arr(self, frm_a, frm_b):
"""
Calculate the height of the cluster
"""
self.h_arr = np.hstack([frm_a.hs[self.in_a], frm_b.hs[self.in_b]])
def set_radius(self, frm_a, frm_b):
"""
Calculate the radius
"""
self.xy_arr = np.vstack([frm_a.pos[self.in_a, :2], frm_b.pos[self.in_b, :2]])
self.radius = get_radius(self.xy_arr)
def bkg_check(self, frm_a, frm_b, cfg):
"""
Return the bool whether this cluster is a background cluster:
if not background, it will set the radius and points for the cluster
"""
self.construct_h_arr(frm_a, frm_b)
h_min, h_max = self.h_arr.min(), self.h_arr.max()
if h_max > min(h_min, cfg["global_dz"]) + cfg["height_cut"] - cfg["h_grid_size"]:
# the top point is too high
return True
if h_max < h_min + cfg["global_dz"]:
# the height is too low
return True
if h_min > cfg["lidar_height"]:
# the cluster is too high above
return True
self.set_radius(frm_a, frm_b)
return self.radius > cfg["max_sig_radius"]
def matcher(self, frm_a, frm_b, target):
"""
Set the matched_a and matched_b, which are used to
determine whether this cluster needs to be segmented
"""
matched_a = (frm_a.sn_arr[self.in_a] == target.sn).sum()
matched_b = 0
if len(self.in_b) > 0:
# Fetch the points in frame b
xy_b = frm_b.pos[self.in_b, :2]
# Append the center position
xy_b = np.vstack([xy_b, xy_b.mean(axis=0)])
matched_b = (target.in_fiducial(xy_b)).sum()
score = (matched_a + 1) * matched_b
if score > self.matched_score:
self.matched_score = score
self.matched_a, self.matched_b = matched_a, matched_b
return matched_a, matched_b
def set_prob_vector(self, model, cfg):
"""
Set the prob vector for the cluster
"""
points = np.c_[self.xy_arr, self.h_arr]
# Set bounding box
bbox = get_bbox(points[:, :2])
self.vertices = bbox.vertices
self.width, self.length = bbox.width, bbox.length
# convert the mm to m
self.prob_vector[:len(model['mu'])] = get_prob_arr(points, model, cfg)
# set the category
if self.prob_vector[:2].sum() > 0.5:
# Pedestrian or Cyclist
self.category = 1
elif self.prob_vector[2:].sum() > 0.5:
self.category = 2
def sn_label(self, cid, model, cfg):
"""
Label the points in the cluster with proper SN id
"""
        if (len(self.affiliated_list) == 1) and (
            not self.associated_target
        ) and self.affiliated_list[0].cid == cid:
self.associated_target = self.affiliated_list[0]
if self.associated_target:
self.assign_indicator = 3
return None
if len(self.affiliated_list)==0:
# No affiliated targets
# Calculate the probability of this cluster
self.set_prob_vector(model, cfg)
if self.prob_vector.sum() > 0.04:
# Append the new object
self.assign_indicator = 1
        elif (self.n_pts > cfg["min_segment_npts"]) and (self.n_pts > cfg["min_segment_density"] * self.radius**2):
# Segement this cluster into smaller sub clusters and append
self.assign_indicator = 2
elif len(self.affiliated_list) > 1:
self.assign_indicator = 4
|
ZukSkyWalker/sniper_cuda
|
src/cluster_info.py
|
cluster_info.py
|
py
| 4,583 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11075605514
|
from __future__ import annotations
import contextlib
import inspect
import logging
import os
import re
from asyncio import iscoroutinefunction
from datetime import timedelta
from fnmatch import fnmatch
from importlib import import_module
from typing import Any, Callable, Sequence
from asgiref.sync import async_to_sync
from channels.db import database_sync_to_async
from django.db.models import ManyToManyField, ManyToOneRel, prefetch_related_objects
from django.db.models.base import Model
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse
from django.template import engines
from django.utils import timezone
from django.utils.encoding import smart_str
from django.views import View
from reactpy.core.layout import Layout
from reactpy.types import ComponentConstructor
from reactpy_django.exceptions import (
ComponentDoesNotExistError,
ComponentParamError,
ViewDoesNotExistError,
)
_logger = logging.getLogger(__name__)
_component_tag = r"(?P<tag>component)"
_component_path = r"(?P<path>\"[^\"'\s]+\"|'[^\"'\s]+')"
_component_kwargs = r"(?P<kwargs>[\s\S]*?)"
COMMENT_REGEX = re.compile(r"<!--[\s\S]*?-->")
COMPONENT_REGEX = re.compile(
r"{%\s*"
+ _component_tag
+ r"\s*"
+ _component_path
+ r"\s*"
+ _component_kwargs
+ r"\s*%}"
)
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
async def render_view(
view: Callable | View,
request: HttpRequest,
args: Sequence,
kwargs: dict,
) -> HttpResponse:
"""Ingests a Django view (class or function) and returns an HTTP response object."""
# Convert class-based view to function-based view
if getattr(view, "as_view", None):
view = view.as_view() # type: ignore[union-attr]
# Async function view
if iscoroutinefunction(view):
response = await view(request, *args, **kwargs)
# Sync function view
else:
response = await database_sync_to_async(view)(request, *args, **kwargs)
# TemplateView
if getattr(response, "render", None):
response = await database_sync_to_async(response.render)()
return response
def register_component(component: ComponentConstructor | str):
"""Adds a component to the list of known registered components.
Args:
component: The component to register. Can be a component function or dotted path to a component.
"""
from reactpy_django.config import (
REACTPY_FAILED_COMPONENTS,
REACTPY_REGISTERED_COMPONENTS,
)
dotted_path = (
component if isinstance(component, str) else generate_obj_name(component)
)
try:
REACTPY_REGISTERED_COMPONENTS[dotted_path] = import_dotted_path(dotted_path)
except AttributeError as e:
REACTPY_FAILED_COMPONENTS.add(dotted_path)
raise ComponentDoesNotExistError(
f"Error while fetching '{dotted_path}'. {(str(e).capitalize())}."
) from e
def register_iframe(view: Callable | View | str):
"""Registers a view to be used as an iframe component.
Args:
view: The view to register. Can be a function or class based view, or a dotted path to a view.
"""
from reactpy_django.config import REACTPY_REGISTERED_IFRAME_VIEWS
if hasattr(view, "view_class"):
view = view.view_class
dotted_path = view if isinstance(view, str) else generate_obj_name(view)
try:
REACTPY_REGISTERED_IFRAME_VIEWS[dotted_path] = import_dotted_path(dotted_path)
except AttributeError as e:
raise ViewDoesNotExistError(
f"Error while fetching '{dotted_path}'. {(str(e).capitalize())}."
) from e
def import_dotted_path(dotted_path: str) -> Callable:
"""Imports a dotted path and returns the callable."""
module_name, component_name = dotted_path.rsplit(".", 1)
try:
module = import_module(module_name)
except ImportError as error:
raise RuntimeError(
f"Failed to import {module_name!r} while loading {component_name!r}"
) from error
return getattr(module, component_name)
class RootComponentFinder:
"""Searches Django templates to find and register all root components.
    This should only be `run` once on startup to maintain synchronization during multiprocessing.
"""
def run(self):
"""Registers all ReactPy components found within Django templates."""
# Get all template folder paths
paths = self.get_paths()
# Get all HTML template files
templates = self.get_templates(paths)
# Get all components
components = self.get_components(templates)
# Register all components
self.register_components(components)
def get_loaders(self):
"""Obtains currently configured template loaders."""
template_source_loaders = []
for e in engines.all():
if hasattr(e, "engine"):
template_source_loaders.extend(
e.engine.get_template_loaders(e.engine.loaders)
)
loaders = []
for loader in template_source_loaders:
if hasattr(loader, "loaders"):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def get_paths(self) -> set[str]:
"""Obtains a set of all template directories."""
paths: set[str] = set()
for loader in self.get_loaders():
with contextlib.suppress(ImportError, AttributeError, TypeError):
module = import_module(loader.__module__)
get_template_sources = getattr(module, "get_template_sources", None)
if get_template_sources is None:
get_template_sources = loader.get_template_sources
paths.update(smart_str(origin) for origin in get_template_sources(""))
return paths
def get_templates(self, paths: set[str]) -> set[str]:
"""Obtains a set of all HTML template paths."""
extensions = [".html"]
templates: set[str] = set()
for path in paths:
for root, _, files in os.walk(path, followlinks=False):
templates.update(
os.path.join(root, name)
for name in files
if not name.startswith(".")
and any(fnmatch(name, f"*{glob}") for glob in extensions)
)
return templates
def get_components(self, templates: set[str]) -> set[str]:
"""Obtains a set of all ReactPy components by parsing HTML templates."""
components: set[str] = set()
for template in templates:
with contextlib.suppress(Exception):
with open(template, "r", encoding="utf-8") as template_file:
clean_template = COMMENT_REGEX.sub("", template_file.read())
regex_iterable = COMPONENT_REGEX.finditer(clean_template)
component_paths = [
match.group("path").replace('"', "").replace("'", "")
for match in regex_iterable
]
components.update(component_paths)
if not components:
_logger.warning(
"\033[93m"
"ReactPy did not find any components! "
"You are either not using any ReactPy components, "
"using the template tag incorrectly, "
"or your HTML templates are not registered with Django."
"\033[0m"
)
return components
def register_components(self, components: set[str]) -> None:
"""Registers all ReactPy components in an iterable."""
if components:
_logger.debug("Auto-detected ReactPy root components:")
for component in components:
try:
_logger.debug("\t+ %s", component)
register_component(component)
except Exception:
_logger.exception(
"\033[91m"
"ReactPy failed to register component '%s'!\n"
"This component path may not be valid, "
"or an exception may have occurred while importing.\n"
"See the traceback below for more information."
"\033[0m",
component,
)
def generate_obj_name(obj: Any) -> str:
"""Makes a best effort to create a name for an object.
Useful for JSON serialization of Python objects."""
# First attempt: Dunder methods
if hasattr(obj, "__module__"):
if hasattr(obj, "__name__"):
return f"{obj.__module__}.{obj.__name__}"
if hasattr(obj, "__class__") and hasattr(obj.__class__, "__name__"):
return f"{obj.__module__}.{obj.__class__.__name__}"
# Second attempt: String representation
with contextlib.suppress(Exception):
return str(obj)
# Fallback: Empty string
return ""
def django_query_postprocessor(
data: QuerySet | Model, many_to_many: bool = True, many_to_one: bool = True
) -> QuerySet | Model:
"""Recursively fetch all fields within a `Model` or `QuerySet` to ensure they are not performed lazily.
Behaviors can be modified through `QueryOptions` within your `use_query` hook.
Args:
data: The `Model` or `QuerySet` to recursively fetch fields from.
Keyword Args:
many_to_many: Whether or not to recursively fetch `ManyToManyField` relationships.
many_to_one: Whether or not to recursively fetch `ForeignKey` relationships.
Returns:
The `Model` or `QuerySet` with all fields fetched.
"""
# `QuerySet`, which is an iterable of `Model`/`QuerySet` instances
# https://github.com/typeddjango/django-stubs/issues/704
if isinstance(data, QuerySet): # type: ignore[misc]
for model in data:
django_query_postprocessor(
model,
many_to_many=many_to_many,
many_to_one=many_to_one,
)
# `Model` instances
elif isinstance(data, Model):
prefetch_fields: list[str] = []
for field in data._meta.get_fields():
# Force the query to execute
getattr(data, field.name, None)
if many_to_one and type(field) == ManyToOneRel: # noqa: #E721
prefetch_fields.append(field.related_name or f"{field.name}_set")
elif many_to_many and isinstance(field, ManyToManyField):
prefetch_fields.append(field.name)
if prefetch_fields:
prefetch_related_objects([data], *prefetch_fields)
for field_str in prefetch_fields:
django_query_postprocessor(
getattr(data, field_str).get_queryset(),
many_to_many=many_to_many,
many_to_one=many_to_one,
)
# Unrecognized type
else:
raise TypeError(
f"Django query postprocessor expected a Model or QuerySet, got {data!r}.\n"
"One of the following may have occurred:\n"
" - You are using a non-Django ORM.\n"
" - You are attempting to use `use_query` to fetch non-ORM data.\n\n"
"If these situations seem correct, you may want to consider disabling the postprocessor via `QueryOptions`."
)
return data
def validate_component_args(func, *args, **kwargs):
"""
Validate whether a set of args/kwargs would work on the given function.
Raises `ComponentParamError` if the args/kwargs are invalid.
"""
signature = inspect.signature(func)
try:
signature.bind(*args, **kwargs)
except TypeError as e:
name = generate_obj_name(func)
raise ComponentParamError(
f"Invalid args for '{name}'. {str(e).capitalize()}."
) from e
def create_cache_key(*args):
"""Creates a cache key string that starts with `reactpy_django` contains
all *args separated by `:`."""
if not args:
raise ValueError("At least one argument is required to create a cache key.")
return f"reactpy_django:{':'.join(str(arg) for arg in args)}"
def delete_expired_sessions(immediate: bool = False):
"""Deletes expired component sessions from the database.
As a performance optimization, this is only run once every REACTPY_SESSION_MAX_AGE seconds.
"""
from .config import REACTPY_DEBUG_MODE, REACTPY_SESSION_MAX_AGE
from .models import ComponentSession, Config
config = Config.load()
start_time = timezone.now()
cleaned_at = config.cleaned_at
clean_needed_by = cleaned_at + timedelta(seconds=REACTPY_SESSION_MAX_AGE)
# Delete expired component parameters
if immediate or timezone.now() >= clean_needed_by:
expiration_date = timezone.now() - timedelta(seconds=REACTPY_SESSION_MAX_AGE)
ComponentSession.objects.filter(last_accessed__lte=expiration_date).delete()
config.cleaned_at = timezone.now()
config.save()
# Check if cleaning took abnormally long
if REACTPY_DEBUG_MODE:
clean_duration = timezone.now() - start_time
if clean_duration.total_seconds() > 1:
_logger.warning(
"ReactPy has taken %s seconds to clean up expired component sessions. "
"This may indicate a performance issue with your system, cache, or database.",
clean_duration.total_seconds(),
)
class SyncLayout(Layout):
"""Sync adapter for ReactPy's `Layout`. Allows it to be used in Django template tags.
This can be removed when Django supports async template tags.
"""
def __enter__(self):
async_to_sync(self.__aenter__)()
return self
def __exit__(self, *_):
async_to_sync(self.__aexit__)(*_)
def render(self):
return async_to_sync(super().render)()
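# Self-check sketch: create_cache_key simply namespaces its arguments under
# "reactpy_django", so a quick assertion documents the format.
if __name__ == "__main__":
    assert create_cache_key("session", "abc123") == "reactpy_django:session:abc123"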
|
reactive-python/reactpy-django
|
src/reactpy_django/utils.py
|
utils.py
|
py
| 13,946 |
python
|
en
|
code
| 248 |
github-code
|
50
|
40779293015
|
# Copyright (c) 2018 The CommerceBlock Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from __future__ import print_function
import sys
import tkinter
from tkinter import *
from tkinter.ttk import *
from tkinter import filedialog
import time
import datetime
from cb_idcheck import cb_onfido
from pprint import pprint
import csv
import collections
from cb_idcheck.statusbar import statusbar
import argparse
import os
from cb_idcheck.idcheck_config import idcheck_config
import numpy as np
class idcheck:
def str2bool(self, v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args(self, argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--gui', required=False, type=self.str2bool, nargs='?', const=True, help="Use a data entry graphical user interface? Default=False", default=False)
parser.add_argument('--create_check', required=False, type=self.str2bool, nargs='?', const=True, help="Create a check? If False, create an applicant without creating a check. Default=True", default=True)
parser.add_argument('--token', required=False, type=str, help="Onfido API token. Default=$IDCHECK_API_TOKEN", default=self.token)
parser.add_argument('--keys', required=False, type=str, help="A file containing a list of the applicant's public keys, as generated by the Commerceblock Ocean wallet.", default=None)
parser.add_argument('--first_name', required=False, type=str, help="Applicant's first name.", default="")
parser.add_argument('--last_name', required=False, type=str, help="Applicant's last name. Default=None", default="")
parser.add_argument('--email', required=False, type=str, help="Applicant's email address. Default=None", default="")
parser.add_argument('--dob_year', required=False, type=str, help="Applicant's year of birth: YYYY. Default=0001", default="0001")
parser.add_argument('--dob_month', required=False, type=str, help="Applicant's month of birth: MM. Default=01", default="01")
parser.add_argument('--dob_day', required=False, type=str, help="Applicant's day of birth: DD. Default=01", default="01")
parser.add_argument('--idDocType', required=False, type=str, help="ID document type. One of: passport, national_identity_card, driving_licence, uk_biometric_residence_permit, tax_id, voter_id. Default=passport", default="passport")
parser.add_argument('--idDocSide1', required=False, type=str, help="Side1 of ID document as a path to a file containing a jpg or png formatted image. Default=None", default=None)
parser.add_argument('--idDocSide2', required=False, type=str, help="Side2 of ID document, if document is 2-sided as a path to a file containing a jpg or png formatted image. Default=None", default=None)
parser.add_argument('--photo', required=False, type=str, help="Live photo of applicant as a path to a file containing a jpg or png formatted image. Default=None", default=None)
args = parser.parse_args(argv)
self.create_check = args.create_check
self.idcheck_token=args.token
self.id_api=cb_onfido.cb_onfido(token=self.idcheck_token)
self.api_instance=self.id_api.api_instance
self.cfg=idcheck_config(self.id_api.onfido.Check(type='express'))
self.gui = args.gui
self.setApplicant(args.first_name, args.last_name, args.dob_year, args.dob_month, args.dob_day, args.email)
self.setIDDocument(args.idDocType, args.idDocSide1, args.idDocSide2)
self.setPhoto(args.photo)
self.importKeys(args.keys)
def __init__(self, token=os.environ.get('IDCHECK_API_TOKEN', None), master=None):
self.token=token
self.addresses=[]
self.master=master
self.title="CommerceBlock ID check"
self.keys=[]
self.progress_value=0
        # 1 if both sides of the document are required, 0 otherwise.
self.docTypeSides={"passport": 0,
"national_identity_card":1,
"driving_licence":1,
"uk_biometric_residence_permit":1,
"tax_id":0,
"voter_id":1
}
self.docTypes=list(self.docTypeSides.keys())
def run(self):
frameStatus = Frame(self.master)
frameStatus.pack(side=BOTTOM, fill=X)
self.status=statusbar(frameStatus)
frameProgress = Frame(self.master)
frameProgress.pack(side=BOTTOM, fill=X)
self.progress=Progressbar(frameProgress, orient='horizontal', mode='indeterminate', value=self.progress_value)
self.progress.pack(side=LEFT, fill=BOTH, expand=1)
frameTitle = Frame(self.master)
frameTitle.pack()
self.listboxTitle =Listbox(frameTitle, selectmode=EXTENDED, exportselection=0, height=1)
self.listboxTitle.delete(0,END)
size=0
for item in ["Miss", "Mr", "Mrs", "Ms"]:
self.listboxTitle.insert(END,item)
size=size+1
self.listboxTitle.selection_set(1)
self.listboxTitle.config(height=size)
self.listboxTitle.pack(side=LEFT)
labelTitle = Label(frameTitle, text='Title')
labelTitle.pack(side=LEFT)
frameFirstName = Frame(self.master)
frameFirstName.pack()
self.entryFirstName = Entry(frameFirstName)
self.entryFirstName.pack(side=LEFT)
self.entryFirstName.insert(0,"John")
labelFirstName = Label(frameFirstName, text='First name')
labelFirstName.pack(side=LEFT)
frameMiddleName = Frame(self.master)
frameMiddleName.pack()
self.entryMiddleName = Entry(frameMiddleName)
self.entryMiddleName.pack(side=LEFT)
self.entryMiddleName.insert(0,"Edward")
labelMiddleName = Label(frameMiddleName, text='Middle name')
labelMiddleName.pack(side=LEFT)
frameLastName = Frame(self.master)
frameLastName.pack()
self.entryLastName = Entry(frameLastName)
self.entryLastName.pack(side=LEFT)
self.entryLastName.insert(0,"Smith")
labelLastName = Label(frameLastName, text='Last name')
labelLastName.pack(side=LEFT)
frameEmail = Frame(self.master)
frameEmail.pack()
self.entryEmail = Entry(frameEmail)
self.entryEmail.pack(side=LEFT)
self.entryEmail.insert(0,"[email protected]")
labelEmail = Label(frameEmail, text='email')
labelEmail.pack(side=LEFT)
frameGender = Frame(self.master)
frameGender.pack()
self.listboxGender = Listbox(frameGender, exportselection=0)
self.listboxGender.delete(0,END)
size=0
for item in ["male", "female"]:
self.listboxGender.insert(END,item)
size=size+1
self.listboxGender.selection_set(0)
self.listboxGender.config(height=size)
self.listboxGender.pack(side=LEFT)
labelGender = Label(frameGender, text='Gender')
labelGender.pack(side=LEFT)
frameDOB = Frame(self.master)
frameDOB.pack()
self.entryDay = Entry(frameDOB, width=2)
self.entryDay.pack(side=LEFT)
self.entryDay.insert(0,24)
self.entryMonth = Entry(frameDOB, width=2)
self.entryMonth.pack(side=LEFT)
self.entryMonth.insert(0,12)
self.entryYear = Entry(frameDOB, width=4)
self.entryYear.pack(side=LEFT)
self.entryYear.insert(0,1975)
labelDOB = Label(frameDOB, text='Date of birth: DD MM YYYY')
labelDOB.pack(side=LEFT)
        # An entry box for each side of the ID document
frameIDDoc1 = Frame(self.master)
frameIDDoc1.pack()
self.entryIDDoc1 = Entry(frameIDDoc1, width=25)
self.entryIDDoc1.pack(side=LEFT)
self.entryIDDoc1.insert(0,"/Users/lawrence/Projects/ocean_idcheck/ticketFront.jpg")
buttonIDDocFileOpen1 = Button(frameIDDoc1, text='ID document front', command=self.openIDDocFile1)
buttonIDDocFileOpen1.pack(side=LEFT)
frameIDDoc2 = Frame(self.master)
frameIDDoc2.pack()
self.entryIDDoc2 = Entry(frameIDDoc2, width=25)
self.entryIDDoc2.pack(side=LEFT)
self.entryIDDoc2.insert(0,"/Users/lawrence/Projects/ocean_idcheck/ticketBack.jpg")
buttonIDDocFileOpen2 = Button(frameIDDoc2, text='ID document back', command=self.openIDDocFile2)
buttonIDDocFileOpen2.pack(side=LEFT)
frameIDDocType = Frame(self.master)
frameIDDocType.pack()
self.listboxIDDocType = Listbox(frameIDDocType, exportselection=0)
self.listboxIDDocType.delete(0,END)
size=0
for item in self.docTypes:
self.listboxIDDocType.insert(END,item)
size=size+1
self.listboxIDDocType.selection_set(0)
self.listboxIDDocType.config(height=size)
self.listboxIDDocType.pack(side=LEFT)
labelIDDocType = Label(frameIDDocType, text='ID document type')
labelIDDocType.pack(side=LEFT)
framePhoto = Frame(self.master)
framePhoto.pack()
self.entryPhoto = Entry(framePhoto, width=25)
self.entryPhoto.pack(side=LEFT)
self.entryPhoto.insert(0,"/Users/lawrence/Projects/ocean_idcheck/testPicture.png")
buttonPhotoFileOpen = Button(framePhoto, text='Live photo', command=self.openPhotoFile)
buttonPhotoFileOpen.pack(side=LEFT)
# labelAddress = Label(self.master, text='Address')
# labelAddress.pack()
# frameBuildingNo = Frame(self.master)
# frameBuildingNo.pack()
# self.entryBuildingNo = Entry(frameBuildingNo)
# self.entryBuildingNo.pack(side=LEFT)
# self.entryBuildingNo.insert(0,"10")
# labelBuildingNo = Label(frameBuildingNo, text='Building number')
# labelBuildingNo.pack(side=LEFT)
# frameStreet = Frame(self.master)
# frameStreet.pack()
# self.entryStreet = Entry(frameStreet)
# self.entryStreet.pack(side=LEFT)
# self.entryStreet.insert(0,"Main Street")
# labelStreet = Label(frameStreet, text='Street')
# labelStreet.pack(side=LEFT)
# frameTown = Frame(self.master)
# frameTown.pack()
# self.entryTown = Entry(frameTown)
# self.entryTown.pack(side=LEFT)
# self.entryTown.insert(0,"London")
# labelTown = Label(frameTown, text='Town')
# labelTown.pack(side=LEFT)
# framePostcode = Frame(self.master)
# framePostcode.pack()
# self.entryPostcode = Entry(framePostcode)
# self.entryPostcode.pack(side=LEFT)
# self.entryPostcode.delete(0,END)
# self.entryPostcode.insert(0,"SW4 6EH")
# labelPostcode = Label(framePostcode, text='Postcode')
# labelPostcode.pack(side=LEFT)
# frameCountry = Frame(self.master)
# frameCountry.pack()
# self.entryCountry = Entry(frameCountry)
# self.entryCountry.pack(side=LEFT)
# self.entryCountry.delete(0,END)
# self.entryCountry.insert(0,"GBR")
# labelCountry = Label(frameCountry, text='Country')
# labelCountry.pack(side=LEFT)
frameKeyFile = Frame(self.master)
frameKeyFile.pack()
self.entryKeyFile = Entry(frameKeyFile, width=25)
self.entryKeyFile.pack(side=LEFT)
self.entryKeyFile.insert(0,"/Users/lawrence/Projects/ocean-demo/kycfile.dat")
buttonKeyFileOpen = Button(frameKeyFile, text='Key file', command=self.openKeyFile)
buttonKeyFileOpen.pack(side=LEFT)
self.status.pack(side=BOTTOM, fill=X)
frameSubmit = Frame(self.master)
frameSubmit.pack(side=BOTTOM, fill=X)
buttonSubmit = Button(frameSubmit, text='Submit ID check', command=self.submitFromGUI)
buttonSubmit.pack(side=RIGHT)
#Enter applicant data
def setApplicant(self, first_name, last_name, dob_year, dob_month, dob_day, email):
self.applicant=self.id_api.onfido.Applicant(
country='GBR', #This is the jurisdiction where the ID check takes place, not the applicant's home country.
first_name=first_name,
last_name=last_name,
dob=datetime.date(year=int(dob_year),month=int(dob_month),day=int(dob_day)),
addresses=self.addresses,
email=email
)
#Enter address data
# def setAddress(self, flat_number="", building_number="", building_name="", street="", sub_street="", town="", postcode, country):
# self.applicant=self.id_api.onfido.Applicant(
# country='GBR', #This is the jurisdiction where the ID check takes place, not the applicant's home country.
# first_name=first_name,
# last_name=last_name,
# dob=datetime.date(year=int(dob_year),month=int(dob_month),day=int(dob_day)),
# addresses=self.addresses
# )
#Fill applicant data from GUI
def fillApplicant(self):
self.setApplicant(first_name = self.entryFirstName.get(),
last_name = self.entryLastName.get(),
dob_year=self.entryYear.get(),
dob_month=self.entryMonth.get(),
dob_day=self.entryDay.get(),
email=self.entryEmail.get())
# def fillAddress(self):
# self.address.building_number=self.entryBuildingNo.get()
# self.address.street=self.entryStreet.get()
# self.address.town=self.entryTown.get()
# self.address.postcode=self.entryPostcode.get()
# self.address.country=self.entryCountry.get()
# self.applicant.addresses=[self.address]
def setIDDocument(self, idDocType="passport", idDocSide1File="", idDocSide2File=""):
self.idDocType=idDocType
        self.idDoc2Sided=(self.docTypeSides[self.idDocType] == 1)
self.idDocSide1File=idDocSide1File
self.idDocSide2File=idDocSide2File
def fillIDDocument(self):
self.setIDDocument(idDocType=self.docTypes[self.listboxIDDocType.curselection()[0]],
idDocSide1File=self.entryIDDoc1.get(),
idDocSide2File=self.entryIDDoc2.get())
def uploadIDDocument(self):
self.printStatus("Uploading id document...")
api_response = []
api_response.append(self.api_instance.upload_document(self.applicant.id, self.idDocType, side="front", file=self.idDocSide1File))
if (self.idDoc2Sided==True):
api_response.append(self.api_instance.upload_document(self.applicant.id, self.idDocType, side="back", file=self.idDocSide2File))
self.printStatus("...id document upload complete.")
return api_response
def setPhoto(self, photoFile):
self.photoFile=photoFile
def fillPhoto(self):
self.setPhoto(photoFile=self.entryPhoto.get())
def uploadPhoto(self):
self.printStatus("Uploading live photo...")
api_response = self.api_instance.upload_live_photo(applicant_id=self.applicant.id, file=self.photoFile, advanced_validation=True)
self.printStatus("...live photo upload complete.")
return api_response
def printStatus(self, msg):
if self.gui is True:
self.status.set(msg)
print(msg)
def openphoto(self, entry):
fileOpened = filedialog.askopenfilename(initialdir = "/", title = "Select file", filetypes = (("jpg files","*.jpg"),("png files","*.png"),("pdf files","*.pdf")))
entry.delete(0,END)
entry.insert(0,fileOpened)
def openIDDocFile1(self):
self.openphoto(self.entryIDDoc1)
def openIDDocFile2(self):
self.openphoto(self.entryIDDoc2)
def openPhotoFile(self):
self.openphoto(self.entryPhoto)
def openKeyFile(self,entry=None):
        if entry is None:
entry=self.entryKeyFile
fileOpened = filedialog.askopenfilename(initialdir = "/", title = "Select key file")
entry.delete(0,END)
entry.insert(0,fileOpened)
def fillKeys(self):
self.importKeys(keyFile=self.entryKeyFile.get())
def importKeys(self, keyFile):
if keyFile is None:
return
with open(keyFile,'rt') as csvfile:
myDialect = csv.excel
myDialect.delimiter=' '
dictReader1 = csv.DictReader(filter(lambda row: row[0]!='#', csvfile), fieldnames=['onboard_pub_key', 'user_onboard_pub_key', 'nbytes'],dialect=myDialect)
for row in dictReader1:
if(row['nbytes']):
self.kyc_header=row
# pprint(self.kyc_header)
break
with open(keyFile,'rt') as csvfile:
dictReader2 = csv.DictReader(filter(lambda row: row[0]!='#', csvfile), fieldnames=['addrdata_encrypted'],dialect=myDialect)
for row in dictReader2:
addrdata_encrypted=row['addrdata_encrypted']
nbytes=int(self.kyc_header['nbytes'])
if(len(addrdata_encrypted) == nbytes):
self.kyc_addrdata=addrdata_encrypted
break
self.keys=[]
self.keys.append("onboard_pub_key:"+str(self.kyc_header['onboard_pub_key']))
self.keys.append("user_onboard_pub_key:"+str(self.kyc_header['user_onboard_pub_key']))
self.keys.append("nbytes:"+self.kyc_header['nbytes'])
#Split the long addrdata string into smaller substrings 100 chars each
n=100
kyc_addrdata_split=[self.kyc_addrdata[i:i+n] for i in range(0, len(self.kyc_addrdata), n)]
for kyc_addrdata in kyc_addrdata_split:
#print(kyc_addrdata)
self.keys.append("add:"+kyc_addrdata)
self.cfg.check.tags=self.keys
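    # Key-file layout assumed by importKeys (inferred from the parsing above,
    # not an official spec): a space-delimited header row
    #   <onboard_pub_key> <user_onboard_pub_key> <nbytes>
    # followed by rows whose first field is encrypted address data; the row
    # whose length equals <nbytes> is taken as the payload. Lines starting
    # with '#' are treated as comments.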
def submitFromGUI(self):
self.fillDataFromGUI()
self.createApplicant()
if self.create_check is True:
self.submit()
def fillDataFromGUI(self):
self.fillApplicant()
# self.fillAddress()
self.fillIDDocument()
self.fillPhoto()
self.fillKeys()
    def createApplicant(self):
        if self.gui is True:
            self.progress.start()
        self.printStatus("Creating applicant...")
        api_response = self.api_instance.create_applicant(self.applicant)
        self.applicant.id=api_response.id
        self.printStatus("Created applicant with id: " + self.applicant.id)
        if self.gui is True:
            self.progress.stop()
        return self.applicant.id
def submit(self):
if self.gui is True:
self.progress.start()
self.printStatus("Submitting...")
try:
api_response=self.uploadIDDocument()
api_response=self.uploadPhoto()
api_response=self.api_instance.create_check(self.applicant.id, self.cfg.check)
self.printStatus("Submission complete.")
time.sleep(1)
self.master.quit()
except cb_onfido.ApiException as e:
pprint(e.body)
self.printStatus("Error: " + e.body)
if self.gui is True:
self.progress.stop()
    def __str__(self):
        return ("Applicant:\n" + str(self.applicant)
                + "\nCheck:\n" + str(self.cfg.check))
if __name__ == "__main__":
from cb_idcheck import idcheck
idc=idcheck.idcheck()
idc.parse_args()
if idc.gui is True:
root = tkinter.Tk()
idc.master=root
root.title(idc.title)
idc.run()
root.mainloop()
else:
idc.createApplicant()
if idc.create_check is True:
idc.submit()
| commerceblock/cb_idcheck | cb_idcheck/idcheck.py | idcheck.py | py | 20,179 | python | en | code | 1 | github-code | 50 |
31100584211 |
"""Unit tests for the job_offers_optimal_buckets module."""
import typing
from typing import Iterable
import unittest
import pandas
from bob_emploi.data_analysis.modeling import job_offers_optimal_buckets
class _TestCase(typing.NamedTuple):
name: str
offers: Iterable[int]
expected: Iterable[str]
class BucketsTestCase(unittest.TestCase):
"""Tests for the function computing optimal buckets."""
def test_apply_bucketize(self) -> None:
"""Basic usage of apply_bucketize."""
# List of tuple of (experience, expected_labels).
experiences_expected_labels = [
_TestCase(
name='All offers expect 1 year of experience',
offers=[12 for x in range(20)],
expected=['[0, 999[' for x in range(20)]),
_TestCase(
name='All offers expect no experience',
offers=[0 for x in range(20)],
expected=['[0, 999[' for x in range(20)]),
_TestCase(
name='Half expect 1 year, half expect 3 years of experience',
offers=[12 for x in range(10)] + [36 for x in range(10)],
expected=['[0, 24[' for x in range(10)] + ['[24, 999[' for x in range(10)]),
_TestCase(
name='All offers expect more than 8 years of experience',
offers=[100 for x in range(20)],
expected=['[0, 999[' for x in range(20)]),
]
for test in experiences_expected_labels:
bucketize = job_offers_optimal_buckets.create_bucketizer(
job_offers_optimal_buckets.OPTIMAL_BUCKETS)
actual = bucketize(pandas.DataFrame({
'rome_id': ['A' for x in test.offers],
'annual_minimum_salary': [20000 for x in test.offers],
'experience_min_duration': test.offers,
}))
self.assertEqual(test.expected, actual.exp_bucket.tolist(), msg=test.name)
if __name__ == '__main__':
unittest.main()
| bayesimpact/bob-emploi | data_analysis/modeling/test/job_offers_optimal_bucket_test.py | job_offers_optimal_bucket_test.py | py | 2,032 | python | en | code | 139 | github-code | 50 |
35864071104 |
from __future__ import print_function, division
import psycopg2
from psycopg2.extras import Json
from psycopg2.extensions import TransactionRollbackError
import code
# {{{ database setup
SCHEMA = ["""
CREATE TYPE job_state AS ENUM (
'waiting',
'running',
'error',
'complete');
""",
"""
CREATE TABLE run (
id SERIAL PRIMARY KEY,
-- filled on generation
creation_time TIMESTAMP DEFAULT current_timestamp,
creation_machine_name VARCHAR(255),
run_class VARCHAR(255),
run_properties JSONB,
-- updated throughout
state job_state,
-- filled on run
env_properties JSONB,
state_time TIMESTAMP,
state_machine_name VARCHAR(255),
results JSONB
);
"""]
def try_create_schema(db_conn):
with db_conn:
with db_conn.cursor() as cur:
try:
cur.execute(SCHEMA[0])
except psycopg2.ProgrammingError as e:
if "already exists" in str(e):
return
else:
raise
for s in SCHEMA[1:]:
cur.execute(s)
def get_db_connection(create_schema):
import os
db_host = os.environ.get("TUNE_DBHOST", None)
db_name = os.environ.get("TUNE_DBNAME", "tune_db")
import getpass
db_user = os.environ.get("TUNE_DBUSER", getpass.getuser())
db_password = os.environ.get("TUNE_DBPASSWORD")
db_conn = psycopg2.connect(
host=db_host, dbname=db_name, user=db_user,
password=db_password, sslmode="require")
if create_schema:
try_create_schema(db_conn)
db_conn.set_session(isolation_level="serializable")
return db_conn
# }}}
def get_git_rev(module=None):
if module:
from importlib import import_module
mod = import_module(module)
from os.path import dirname
cwd = dirname(mod.__file__)
else:
from os import getcwd
cwd = getcwd()
from subprocess import check_output
return check_output(
["git", "rev-parse", "--short", "HEAD"],
cwd=cwd).strip().decode()
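# Illustrative calls (the returned short hash depends on the checkout):
#   get_git_rev("pyopencl")  # rev of the directory containing that module
#   get_git_rev()            # rev of the current working directory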
class UnableToRun(Exception):
"""A temporary run failure. Will not mark the job as permanently failed."""
class RunBase(object):
@classmethod
def enumerate_runs(cls, options):
raise NotImplementedError()
@classmethod
def get_env_properties(cls, run_props):
import sys
return {
"python_version": sys.version
}
@classmethod
def run(cls, run_props):
pass
class ClassNotFoundError(RuntimeError):
pass
def import_class(name):
components = name.split('.')
if len(components) < 2:
# need at least one module plus class name
raise ClassNotFoundError(name)
module_name = ".".join(components[:-1])
try:
mod = __import__(module_name)
except ImportError:
raise ClassNotFoundError(name)
for comp in components[1:]:
try:
mod = getattr(mod, comp)
except AttributeError:
raise ClassNotFoundError(name)
return mod
# {{{ cl utilities
def enumerate_distinct_cl_devices(exclude_platforms=[]):
import pyopencl as cl
seen = set()
for plat in cl.get_platforms():
if any(
excl.lower() in plat.name.lower()
or
excl.lower() in plat.vendor.lower()
for excl in exclude_platforms):
continue
for dev in plat.get_devices():
pd = (plat.name, dev.name)
if pd in seen:
continue
seen.add(pd)
yield {
"cl_platform": pd[0],
"cl_device": pd[1],
}
class CLDeviceNotFound(UnableToRun):
pass
def get_cl_device(run_props):
import pyopencl as cl
for plat in cl.get_platforms():
if plat.name == run_props["cl_platform"]:
for dev in plat.get_devices():
if dev.name == run_props["cl_device"]:
return dev
raise CLDeviceNotFound(str(run_props["cl_platform"]) + ", "
+ str(run_props["cl_device"]))
def get_cl_properties(dev):
plat = dev.platform
return {
"pyopencl_git_rev": get_git_rev("pyopencl"),
"cl_platform_vendor": plat.vendor,
"cl_platform_version": plat.version,
"cl_device_name": dev.name,
"cl_device_vendor": dev.vendor,
"cl_device_version": dev.version,
"cl_device_extensions": dev.extensions,
"cl_device_address_bits": dev.address_bits,
}
# }}}
def parse_filters(filter_args):
filters = []
filter_kwargs = {}
for f in filter_args:
f = f.strip()
        op_ind = max(f.find("~"), f.find("="))
        if op_ind < 0:
            raise ValueError("invalid filter: %s" % f)
        op = f[op_ind]
fname = f[:op_ind]
fval = f[op_ind+1:]
if fname in [
"id",
"run_class",
"creation_machine_name",
]:
lhs = "text(%s)" % fname
else:
lhs = "run_properties->>'%s'" % fname
from random import choice
f_kwarg_name = fname
while f_kwarg_name in filter_kwargs:
f_kwarg_name += choice("0123456789")
if op == "~":
filters.append(
# case insensitive regex
lhs + " ~* " +
"%%(%s)s" % f_kwarg_name)
filter_kwargs[f_kwarg_name] = ".*" + fval + ".*"
elif op == "=":
filters.append(lhs + "=" + "%%(%s)s" % f_kwarg_name)
filter_kwargs[f_kwarg_name] = fval
else:
raise ValueError("invalid operand")
return filters, filter_kwargs
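# Example of the produced mapping (illustrative values):
#   parse_filters(["run_class~tune", "n=4"]) ->
#     (["text(run_class) ~* %(run_class)s", "run_properties->>'n'=%(n)s"],
#      {"run_class": ".*tune.*", "n": "4"})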
# {{{ enumerate
def batch_up(n, iterator):
    batch = []
    for i in iterator:
        batch.append(i)
        if len(batch) >= n:
            yield batch
            batch = []  # start a fresh list so already-yielded batches stay intact
    if batch:
        yield batch
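# Example: list(batch_up(2, [1, 2, 3])) == [[1, 2], [3]]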
def limit_iterator(max_count, it):
count = 0
for item in it:
yield item
count += 1
if max_count is not None and count >= max_count:
return
def enumerate_runs(args):
db_conn = get_db_connection(create_schema=True)
run_class = import_class(args.run_class)
from socket import gethostname
host = gethostname()
enum_options = {}
if args.options:
for s in args.options:
s = s.strip()
equal_ind = s.find("=")
if equal_ind < 0:
raise ValueError("invalid enum argument: %s" % s)
aname = s[:equal_ind]
aval = s[equal_ind+1:]
enum_options[aname] = aval
def get_enum_iterator():
it = run_class.enumerate_runs(enum_options)
it = limit_iterator(args.limit, it)
return it
total_count = 0
print("counting...")
for ijob, run_props in enumerate(get_enum_iterator()):
if ijob % 10000 == 0 and ijob:
print("%d jobs, still counting..." % ijob)
total_count += 1
print("creating %d jobs..." % total_count)
def add_args(run_props):
run_props = run_props.copy()
if args.tags:
run_props["tags"] = args.tags
if enum_options:
run_props["enum_options"] = enum_options
return run_props
with db_conn:
with db_conn.cursor() as cur:
from pytools import ProgressBar
pb = ProgressBar("enumerate jobs", total_count)
batch_size = 20
count = 0
for ibatch, batch in enumerate(batch_up(
batch_size, (
(host, args.run_class, Json(add_args(run_props)))
for run_props in get_enum_iterator()))):
cur.executemany("INSERT INTO run ("
"creation_machine_name,"
"run_class,"
"run_properties,"
"state) values (%s,%s,%s,'waiting');",
batch)
pb.progress(len(batch))
count += len(batch)
pb.finished()
print("%d jobs created" % count)
# }}}
def reset_running(args):
db_conn = get_db_connection(create_schema=True)
filters = [
("state = 'running'"),
]
filter_kwargs = {}
if args.filter:
f, fk = parse_filters(args.filter)
filters.extend(f)
filter_kwargs.update(fk)
where_clause = " AND ".join(filters)
with db_conn:
with db_conn.cursor() as cur:
cur.execute(
"UPDATE run SET state = 'waiting' "
"WHERE " + where_clause + ";",
filter_kwargs)
print("%d jobs reset" % cur.rowcount)
# {{{ run
def run(args):
db_conn = get_db_connection(create_schema=True)
from socket import gethostname
host = gethostname()
import sys
filters = [("state = 'waiting'")]
filter_kwargs = {}
if args.filter:
f, fk = parse_filters(args.filter)
filters.extend(f)
filter_kwargs.update(fk)
where_clause = " AND ".join(filters)
quit_flag = False
while not quit_flag:
try:
# Start transaction for atomic state update.
with db_conn:
with db_conn.cursor() as cur:
cur.execute(
"SELECT id, run_class, run_properties FROM run "
"WHERE " + where_clause + " " +
"OFFSET floor(random()*("
" SELECT COUNT(*) FROM run "
" WHERE " + where_clause + " " +
")) LIMIT 1",
filter_kwargs)
rows = list(cur)
if not rows:
break
(id_, run_class, run_props), = rows
if not args.dry_run:
cur.execute(
"UPDATE run SET state = 'running' WHERE id = %s;",
(id_,))
except TransactionRollbackError:
if args.verbose:
print("Retrying job retrieval...")
continue
if args.verbose:
print(75*"=")
print(id_, run_class, run_props)
print(75*"-")
env_properties = None
run_class = import_class(run_class)
try:
env_properties = run_class.get_env_properties(run_props)
result = run_class.run(run_props)
state = "complete"
except UnableToRun:
state = "waiting"
result = None
if args.verbose:
print(75*"-")
print("-> unable to run")
from traceback import print_exc
print_exc()
except KeyboardInterrupt:
state = "waiting"
result = None
disposition_msg = "interrupted (will be retried)"
quit_flag = True
except Exception as e:
from traceback import format_exc
tb = format_exc()
if args.retry_on_error:
state = "waiting"
result = None
disposition_msg = "error (will be retried)"
else:
state = "error"
result = {
"error": type(e).__name__,
"error_value": str(e),
"traceback": tb,
}
disposition_msg = "error (permanent)"
if args.verbose:
print(75*"-")
print("->", disposition_msg)
from traceback import print_exc
print_exc()
else:
if args.verbose:
print("->", state)
print(" ", result)
print(" ", env_properties)
if not args.dry_run:
while True:
try:
# Start transaction. Otherwise we'll implicitly start a
# transaction that contains the rest of our run.
with db_conn:
with db_conn.cursor() as cur:
if state != "waiting" or (
state == "error" and not args.stop_on_error):
cur.execute(
"UPDATE run "
"SET (state, env_properties, "
" state_time, state_machine_name, results) "
"= (%(new_state)s, %(env_properties)s, "
" current_timestamp, %(host)s, %(result)s) "
"WHERE id = %(id)s AND state = 'running';",
{"id": id_,
"env_properties": Json(env_properties),
"host": host,
"result": Json(result),
"new_state": state})
else:
cur.execute(
"UPDATE run SET state = 'waiting' "
"WHERE id = %(id)s AND state = 'running';",
{"id": id_})
except TransactionRollbackError:
if args.verbose:
print("Retrying job result submission...")
else:
break
if args.stop_on_error and state == "error":
print(tb, file=sys.stderr)
break
# }}}
# {{{ shell
def table_from_cursor(cursor):
from pytools import Table
if cursor.description is None:
return None
tbl = Table()
tbl.add_row([column[0] for column in cursor.description])
for row in cursor:
tbl.add_row(row)
return tbl
def mangle_query(qry):
import re
qry, _ = re.subn(r"rp\.([a-zA-Z_0-9]+)", r"(run_properties->>'\1')", qry)
qry, _ = re.subn(r"ep\.([a-zA-Z_0-9]+)", r"(env_properties->>'\1')", qry)
qry, _ = re.subn(r"res\.([a-zA-Z_0-9]+)", r"(results->>'\1')", qry)
qry, _ = re.subn(r"rp\.\.([a-zA-Z_0-9]+)", r"(run_properties->'\1')", qry)
qry, _ = re.subn(r"ep\.\.([a-zA-Z_0-9]+)", r"(env_properties->'\1')", qry)
qry, _ = re.subn(r"res\.\.([a-zA-Z_0-9]+)", r"(results->'\1')", qry)
return qry
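# Example of the shorthand expansion (illustrative query):
#   mangle_query("select rp.alpha, res.err from run")
#   -> "select (run_properties->>'alpha'), (results->>'err') from run"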
def make_disttune_symbols(db_conn):
def q(qry, *arg_dicts, **extra_kwargs):
args = {}
args.update(extra_kwargs)
for d in arg_dicts:
args.update(d)
cur = db_conn.cursor()
cur.execute(mangle_query(qry), args)
return cur
return {
"__name__": "__console__",
"__doc__": None,
"db_conn": db_conn,
"q": q,
"p": lambda qry: print(table_from_cursor(q(qry))),
"table_from_cursor": table_from_cursor,
"mangle_query": mangle_query,
}
class DisttuneConsole(code.InteractiveConsole):
def __init__(self, db_conn):
self.db_conn = db_conn
code.InteractiveConsole.__init__(self,
make_disttune_symbols(db_conn))
try:
import numpy # noqa
self.runsource("import numpy as np")
except ImportError:
pass
try:
import matplotlib.pyplot # noqa
self.runsource("import matplotlib.pyplot as pt")
except ImportError:
pass
except RuntimeError:
pass
try:
import readline
import rlcompleter # noqa
have_readline = True
except ImportError:
have_readline = False
if have_readline:
import os
import atexit
readline.set_history_length(-1)
histfile = os.path.join(os.environ["HOME"], ".disttunehist")
if os.access(histfile, os.R_OK):
readline.read_history_file(histfile)
atexit.register(readline.write_history_file, histfile)
readline.parse_and_bind("tab: complete")
self.last_push_result = False
def push(self, cmdline):
if cmdline.startswith("."):
try:
self.execute_magic(cmdline)
            except Exception:
import traceback
traceback.print_exc()
else:
self.last_push_result = code.InteractiveConsole.push(self, cmdline)
return self.last_push_result
def execute_magic(self, cmdline):
cmd_end = cmdline.find(" ")
if cmd_end == -1:
cmd = cmdline[1:]
args = ""
else:
cmd = cmdline[1:cmd_end]
args = cmdline[cmd_end+1:]
if cmd == "help":
print("""
Commands:
.help show this help message
.q SQL execute a (potentially mangled) query
Available Python symbols:
db_conn: the database
q(query_str): get database cursor for query_str
dbprint(cursor): print result of cursor
table_from_cursor(cursor)
""")
elif cmd == "q":
with self.db_conn:
with self.db_conn.cursor() as cur:
cur.execute(mangle_query(args))
tbl = table_from_cursor(cur)
if tbl is not None:
print(tbl)
print("%d rows" % cur.rowcount)
else:
print("invalid magic command")
def console(args):
db_conn = get_db_connection(create_schema=False)
import sys
cons = DisttuneConsole(db_conn)
cons.interact("Disttune running on Python %s\n"
"Copyright (c) Andreas Kloeckner 2015\n"
"Run .help to see help" % sys.version)
def script(args):
db_conn = get_db_connection(create_schema=False)
from os.path import abspath, dirname
scriptdir = dirname(abspath(args.script))
import sys
sys.path.append(scriptdir)
namespace = make_disttune_symbols(db_conn)
with open(args.script, "rt") as s:
script_contents = s.read()
exec(compile(script_contents, args.script, 'exec'), namespace)
# }}}
def main():
import argparse
parser = argparse.ArgumentParser()
subp = parser.add_subparsers()
parser_enum = subp.add_parser("enum")
parser_enum.add_argument("run_class")
parser_enum.add_argument("--options", metavar="KEY=VAL", nargs="*",
help="specify options to be passed to enumerate_runs()")
parser_enum.add_argument("--tags", metavar="TAG", nargs="*")
parser_enum.add_argument("--limit", metavar="COUNT", type=int,
help="create at most COUNT jobs")
parser_enum.set_defaults(func=enumerate_runs)
parser_reset_running = subp.add_parser("reset-running")
parser_reset_running.add_argument(
"--filter", metavar="prop=val or prop~val", nargs="*")
parser_reset_running.set_defaults(func=reset_running)
parser_run = subp.add_parser("run")
parser_run.add_argument("--stop-on-error",
help="stop execution on exceptions", action="store_true")
parser_run.add_argument("--retry-on-error",
help="if execution fails with an error, return run to 'waiting' status",
action="store_true")
parser_run.add_argument("-n", "--dry-run",
help="do not modify database", action="store_true")
parser_run.add_argument("-v", "--verbose", action="store_true")
parser_run.add_argument(
"--filter", metavar="prop=val or prop~val", nargs="*")
parser_run.set_defaults(func=run)
parser_console = subp.add_parser("console")
parser_console.set_defaults(func=console)
parser_script = subp.add_parser("script")
parser_script.add_argument("script", metavar="SCRIPT.PY")
parser_script.add_argument("script_args", metavar="ARG",
nargs="*")
parser_script.set_defaults(func=script)
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_usage()
import sys
sys.exit(1)
args.func(args)
# vim: foldmethod=marker
| inducer/disttune | disttune/__init__.py | __init__.py | py | 20,589 | python | en | code | 0 | github-code | 50 |
40807516385 |
import math
import matplotlib.pyplot as plt
import numpy
lamb_1 = 0.4211 #
lamb_2 = 0.7371 #
M_0 = 15 #
sigma_Sqr_0 = 3 #
alfa_0 = 0.15 #
epsilon = 0.004 #
A1 = 1.68 # 1.7
A2 = .32 # 0.32
Ns = 15 #
lamb_3 = 0
S = 6 # 5 or 6
N = 200
def rand(l1, l2):
l3 = (l1 * l2 * 1e8) // 100
l3 = l3 % 10000
l3 = l3 / 10000
return l3
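# One illustrative step of this mid-product generator (subject to float
# rounding): rand(0.4211, 0.7371):
#   0.4211*0.7371*1e8 = 31039281 -> //100 = 310392 -> % 10000 = 392
#   -> /10000 = 0.0392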
Middle = lambda arr: sum(arr) / len(arr)
def Dispersion(arr):
_sum = 0
for i in arr:
_sum += (i - Middle(arr)) ** 2
return _sum / len(arr)
def Process(k):
i = k
sum2 = 0
while i < k + Ns:
sum2 += rand_arr[i] * math.pow((sigma_Sqr_0 / (alfa_0 * A2 * Dispersion(rand_arr))), 0.5) * A1 * math.exp(
-A2 * alfa_0 * (i - k))
i += 1
return (sum2 / Ns) + M_0
def Corell(z, S):
p = 0
sum3 = 0
for p in range(len(z) - S):
sum3 += (z[p] - Middle(z)) * (z[p + S] - Middle(z))
p += 1
return sum3 / p
def Corell2(S, alpha):
return Dispersion(randProcessArr) * math.exp(-alpha * S)
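# Corell2 evaluates the exponential correlation model being fitted below:
#   R(S) = D * exp(-alpha * S),
# where D is the sample variance of the generated process; alfa_f is later
# chosen by a simple grid search so the model tracks the sample correlations.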
def GetRandomArr():
arr = []
i = 0
while i < N - Ns:
arr.append(Process(i))
i += 1
return arr
rand_arr = []
Corr_arr = []
Corr_arr_ = []
alfa = []
randProcessArr = []
oldA = 0
oldM = 0
# generate the base pseudo-random series
for i in range(N - 1):
rand_arr.append(lamb_1 - 0.5)
lamb_3 = rand(lamb_1, lamb_2)
lamb_1 = lamb_2
lamb_2 = lamb_3
i = 0
# build a realization of the random process
randProcessArr = GetRandomArr()
oldS = Dispersion(randProcessArr)
dd = abs(Dispersion(randProcessArr) - sigma_Sqr_0)
h = 0.1
# sample correlation-function values
i = 0
while i < S:
Corr_arr.insert(i, Corell(randProcessArr, i))
i += 1
alfa_f = 0
Fi = 10000
i = 0
while Fi > epsilon:
Fi = 0
Corr_arr_.clear()
while i < S:
Fi += (Corell2(i,alfa_f) - Corr_arr[i]) ** 2
Corr_arr_.insert(i, Corell2(i,alfa_f))
i += 1
alfa_f += 0.01
Fi = Fi / S
i = 0
alfa_f -= 0.01
print("Мат. ожидания ряда = ", Middle(rand_arr))
print("Дисперсия ряда = ", Dispersion(rand_arr))
print("Мат. ожидание = ", Middle(randProcessArr))
print("Дисперсия = ", Dispersion(randProcessArr))
print("Параметр аппроксимации функции = ", alfa_f)
# graph
TN = numpy.arange(0, N - Ns)
TS = numpy.arange(0, S)
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_ylabel('Random process value')
ax1.set_xlabel('N')
ax1.plot(TN, randProcessArr)
ax2.set_ylabel('Correlation function value')
ax2.set_xlabel('S')
ax2.plot(TS, Corr_arr, color="blue", marker=".")
ax2.plot(TS, Corr_arr_, color="red", marker=".")
plt.show()
| XYphrodite/MandMADD | 6.py | 6.py | py | 2,628 | python | en | code | 0 | github-code | 50 |
11043979180 |
from odoo.addons.point_of_sale.tests.test_frontend import TestPointOfSaleHttpCommon
from odoo.tests import Form, tagged
@tagged("post_install", "-at_install")
class TestUi(TestPointOfSaleHttpCommon):
def setUp(self):
super().setUp()
self.promo_programs = self.env["coupon.program"]
# code promo program -> discount on specific products
self.code_promo_program = self.env["coupon.program"].create(
{
"name": "Promo Code Program - Discount on Specific Products",
"program_type": "promotion_program",
"promo_code_usage": "code_needed",
"promo_code": "promocode",
"discount_apply_on": "specific_products",
"discount_percentage": 50,
"discount_specific_product_ids": (
self.whiteboard_pen | self.magnetic_board | self.desk_organizer
).ids,
}
)
self.promo_programs |= self.code_promo_program
# auto promo program on current order
# -> discount on cheapest product
self.auto_promo_program_current = self.env["coupon.program"].create(
{
"name": "Auto Promo Program - Cheapest Product",
"program_type": "promotion_program",
"promo_code_usage": "no_code_needed",
"discount_apply_on": "cheapest_product",
"discount_percentage": 90,
}
)
self.promo_programs |= self.auto_promo_program_current
# auto promo program on next order
# -> discount on order (global discount)
self.auto_promo_program_next = self.env["coupon.program"].create(
{
"name": "Auto Promo Program - Global Discount",
"program_type": "promotion_program",
"promo_code_usage": "no_code_needed",
"promo_applicability": "on_next_order",
"discount_apply_on": "on_order",
"discount_percentage": 10,
}
)
self.promo_programs |= self.auto_promo_program_next
# coupon program -> free product
self.coupon_program = self.env["coupon.program"].create(
{
"name": "Coupon Program - Buy 3 Take 2 Free Product",
"program_type": "coupon_program",
"rule_products_domain": "[('name', 'ilike', 'Desk Organizer')]",
"reward_type": "product",
"rule_min_quantity": 3,
"reward_product_id": self.desk_organizer.id,
"reward_product_quantity": 2,
}
)
# Create coupons for the coupon program and change the code
# to be able to use them in the frontend tour.
self.env["coupon.generate.wizard"].with_context(
{"active_id": self.coupon_program.id}
).create({"nbr_coupons": 4}).generate_coupon()
(
self.coupon1,
self.coupon2,
self.coupon3,
self.coupon4,
) = self.coupon_program.coupon_ids
self.coupon1.write({"code": "1234"})
self.coupon2.write({"code": "5678"})
self.coupon3.write({"code": "1357"})
self.coupon4.write({"code": "2468"})
def test_pos_coupon_tour_basic(self):
"""PoS Coupon Basic Tour"""
# Set the programs to the pos config.
# Remove fiscal position and pricelist.
with Form(self.main_pos_config) as pos_config:
pos_config.tax_regime_selection = False
pos_config.use_pricelist = False
pos_config.pricelist_id = self.env["product.pricelist"].create(
{"name": "PoS Default Pricelist",}
)
pos_config.use_coupon_programs = True
pos_config.coupon_program_ids.add(self.coupon_program)
for promo_program in self.promo_programs:
pos_config.promo_program_ids.add(promo_program)
self.main_pos_config.open_session_cb(check_coa=False)
##
# Tour Part 1
# This part will generate coupons for `auto_promo_program_next`
# that will be used in the second part of the tour.
#
self.start_tour(
"/pos/web?config_id=%d" % self.main_pos_config.id,
"PosCouponTour1",
login="accountman",
)
# check coupon usage
self.assertEqual(
self.coupon1.state, "used", msg="`1234` coupon should have been used."
)
self.assertEqual(
self.coupon2.state,
"new",
msg="`5678` coupon code is used but was eventually freed.",
)
# check pos_order_count in each program
self.assertEqual(self.auto_promo_program_current.pos_order_count, 3)
self.assertEqual(self.auto_promo_program_next.pos_order_count, 0)
self.assertEqual(self.code_promo_program.pos_order_count, 1)
self.assertEqual(self.coupon_program.pos_order_count, 1)
# check number of generated coupons
self.assertEqual(len(self.auto_promo_program_next.coupon_ids), 5)
# check number of orders in the session
pos_session = self.main_pos_config.current_session_id
self.assertEqual(
len(pos_session.order_ids), 5, msg="5 orders were made in tour part1."
)
##
# Tour Part 2
# The coupons generated in the first part will be used in this tour.
#
# Manually set the code for some `auto_promo_program_next` coupons
# to be able to use them in defining the part2 tour.
(
promo_coupon1,
promo_coupon2,
promo_coupon3,
promo_coupon4,
*_,
) = self.auto_promo_program_next.coupon_ids
promo_coupon1.write({"code": "123456"})
promo_coupon2.write({"code": "345678"})
promo_coupon3.write({"code": "567890"})
promo_coupon4.write({"code": "098765"})
# use here the generated coupon
self.start_tour(
"/pos/web?config_id=%d" % self.main_pos_config.id,
"PosCouponTour2",
login="accountman",
)
self.assertEqual(self.coupon4.state, "new")
self.assertEqual(promo_coupon4.state, "new")
# check pos_order_count in each program
self.assertEqual(self.auto_promo_program_current.pos_order_count, 5)
self.assertEqual(self.auto_promo_program_next.pos_order_count, 2)
self.assertEqual(self.code_promo_program.pos_order_count, 2)
self.assertEqual(self.coupon_program.pos_order_count, 3)
| anhjean/beanbakery_v15 | addons/pos_coupon/tests/test_frontend.py | test_frontend.py | py | 6,664 | python | en | code | 5 | github-code | 50 |
74434592795 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 18 16:07:23 2022
@author: Yu Jen
"""
import pandas_datareader as DR
import pandas as pd
import dateutil.parser as psr
from pandas_datareader._utils import RemoteDataError
import matplotlib.pyplot as plt
import sys
plt.rcParams['font.sans-serif']=['Arial Unicode MS']
plt.rcParams['axes.unicode_minus']=False
stockdata=pd.read_csv('TaiwanStockID.csv',index_col=0,squeeze=True).to_dict()
stock=input('Please enter a Taiwan stock name or ticker code: ')
startDate = psr.parse(input("Please enter the start date: "))
endDate=psr.parse(input("Please enter the end date: "))
if stock.isdigit()==True:
word=str(stock) + ".tw"
try:
data=DR.DataReader(word,'yahoo',startDate.date(),endDate.date())
    except RemoteDataError:
        print("Invalid stock ticker")
        sys.exit()
    except ValueError:
        print('The start date must be earlier than the end date')
        sys.exit()
newdic={v:k for k, v in stockdata.items()}
stock=newdic [int(stock)]
elif stock.isdigit()==False:
word=stockdata[stock]
word=str(word)+".tw"
try:
data=DR.DataReader(word,'yahoo',startDate.date(),endDate.date())
    except RemoteDataError:
        print("Invalid stock ticker")
        sys.exit()
    except ValueError:
        print('The start date must be earlier than the end date')
        sys.exit()
close_price=data["Close"]
pic1=close_price.plot(label='Close')
pic2=close_price.rolling(window=20).mean().plot(label="20MA")
pic3=close_price.rolling(window=60).mean().plot(label="60MA")
plt.title(str(stock)+str(startDate.date())+'~'+str(endDate.date())+' Close')
plt.xlabel("Date")
plt.ylabel("Price")
plt.show()
| yjchen0228/python_project | 台灣股票市場線圖製作/台灣股市線圖製作.py | 台灣股市線圖製作.py | py | 1,759 | python | en | code | 0 | github-code | 50 |
11013272830 |
from odoo import models, fields, api
class PackageType(models.Model):
_inherit = 'stock.package.type'
shipper_package_code = fields.Char('Carrier Code')
package_carrier_type = fields.Selection([('none', 'No carrier integration')], string='Carrier', default='none')
@api.onchange('package_carrier_type')
def _onchange_carrier_type(self):
carrier_id = self.env['delivery.carrier'].search([('delivery_type', '=', self.package_carrier_type)], limit=1)
if carrier_id:
self.shipper_package_code = carrier_id._get_default_custom_package_code()
else:
self.shipper_package_code = False
| anhjean/beanbakery_v15 | addons/delivery/models/stock_package_type.py | stock_package_type.py | py | 649 | python | en | code | 5 | github-code | 50 |
31178623803 |
from django.shortcuts import render
from products.models import Product
from .models import Cart, CartDetail, Order, OrderDetail
# Create your views here.
def add_to_cart(request):
if request.method == "POST" :
product_id = request.POST['product_id']
quantity = request.POST['quantity']
product = Product.objects.get(id=product_id)
cart = Cart.objects.get(user=request.user, status='inprogress')
cart_detail, created = CartDetail.objects.get_or_create(
            cart=cart,
product=product
)
cart_detail.quantity = int(quantity)
cart_detail.price = product.price
cart_detail.total = int(quantity) * product.price
cart_detail.save()
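        # NOTE: a Django view must return an HttpResponse; a minimal,
        # illustrative completion (the 'cart' URL name is an assumption):
        #     return redirect('cart')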
| pythone-developer/Ecomerce-Website | orders/views.py | views.py | py | 746 | python | en | code | 1 | github-code | 50 |
26636042324 |
# Load scikit-learn's bundled example datasets
from sklearn.datasets import load_iris,load_boston,fetch_20newsgroups
# Feature engineering
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import StandardScaler
# Train/test splitting
from sklearn.model_selection import train_test_split
# Grid search for better hyperparameters
from sklearn.model_selection import GridSearchCV
# Machine-learning algorithms
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression,Ridge,LogisticRegression
# Evaluate predictions
from sklearn.metrics import classification_report,mean_squared_error
# Persist trained models
from sklearn.externals import joblib
import pandas as pd
import numpy as np
class SimpleMachineLearning(object):
def __init__(self,data=None,data_path=None,function=None):
self.data = data
self.data_path = data_path
self.func = self.get_function(function)
def get_function(self,function):
if hasattr(self,function):
func = getattr(self,function)
return func
raise Exception("no such function")
def run_function(self):
res = self.func()
return res
def knncls(self):
df = pd.read_csv(self.data_path)
        df = df.query("x > 1.0 & x < 1.25 & y > 2.5 & y < 2.75")
time_value = pd.to_datetime(df['time'], unit="s")
time_obj = pd.DatetimeIndex(time_value)
df['day'] = time_obj.day
df['hour'] = time_obj.hour
df['weekday'] = time_obj.weekday
df = df.drop(["time",],axis=1)
places_count = df.groupby(by="place_id").count()
selected_places = places_count.loc[places_count["row_id"]>2].reset_index()
df = df.loc[df["place_id"].isin(selected_places["place_id"])]
targ = df['place_id']
feat = df.drop(['place_id'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(feat, targ, test_size=0.2)
std_obj = StandardScaler()
x_train = std_obj.fit_transform(x_train)
x_test = std_obj.transform(x_test)
knn = KNeighborsClassifier()
param = {"n_neighbors": [2, 5, 10]}
gs_obj = GridSearchCV(knn,param,cv=5)
gs_obj.fit(x_train,y_train)
accuracy = gs_obj.score(x_test, y_test)
best_score = gs_obj.best_score_
best_estimator = gs_obj.best_estimator_
cv_res = gs_obj.cv_results_
res = {"accuracy":accuracy,"best_score":best_score,"best_estimator":best_estimator,"cv_res":cv_res,"type":"classifier"}
return res
def naviebayes(self):
news = fetch_20newsgroups(subset='all')
# tf = TfidfVectorizer()
# data = tf.fit_transform(news.data).toarray()
# print(type(list(data)))
# print(len(data),len(data[0]))
# print(len(news.data),len(news.data[0]))
# x_train, x_test, y_train, y_test = train_test_split(list(data), news.target, test_size=0.2)
x_train, x_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.2)
# print("news_type: ",type(x_train),type(y_train))
tf = TfidfVectorizer()
x_train = tf.fit_transform(x_train)
x_test = tf.transform(x_test)
mlt = MultinomialNB(alpha=1.0)
mlt.fit(x_train, y_train)
y_predict = mlt.predict(x_test)
accuracy = mlt.score(x_test,y_test)
c_rep = classification_report(y_test,y_predict,target_names=news.target_names)
res = {"accuracy":accuracy,"c_rep":c_rep,"type":"classifier"}
return res
def d_tree(self):
df = pd.read_csv("data_source/titanic.txt")
        # Drop rows with no embarkation port
        df.drop(labels=df[df["embarked"].isna()].index,axis=0,inplace=True)
        # Fill missing ages with the mean age
        df["age"].fillna(df["age"].mean(),inplace=True)
        # Extract the features and the target
        feat = df[['pclass', 'age', 'sex','embarked']]
        targ = df['survived']
        # Split into train and test sets
        x_train,x_test,y_train,y_test = train_test_split(feat,targ,test_size=0.2)
        # One-hot encode the features
        dict_vec = DictVectorizer(sparse=False)
        x_train = dict_vec.fit_transform(x_train.to_dict(orient="records"))
        x_test = dict_vec.transform(x_test.to_dict(orient="records"))
        # Predict with a decision tree
        d_tree_obj = DecisionTreeClassifier(max_depth=10)
        d_tree_obj.fit(x_train,y_train)
        y_predict = d_tree_obj.predict(x_test)
        accuracy = d_tree_obj.score(x_test,y_test)
        c_rep = classification_report(y_test,y_predict,target_names=["died","survived"])
res = {"accuracy":accuracy,"c_rep":c_rep,"type":"classifier"}
return res
def rf_pre(self):
df = pd.read_csv("data_source/titanic.txt")
        # Drop rows with no embarkation port
        df.drop(labels=df[df["embarked"].isna()].index,axis=0,inplace=True)
        # Fill missing ages with the mean age
        df["age"].fillna(df["age"].mean(),inplace=True)
        # Extract the features and the target
        feat = df[['pclass', 'age', 'sex','embarked']]
        targ = df['survived']
        # Split into train and test sets
        x_train,x_test,y_train,y_test = train_test_split(feat,targ,test_size=0.2)
        # One-hot encode the features
        dict_vec = DictVectorizer(sparse=False)
        x_train = dict_vec.fit_transform(x_train.to_dict(orient="records"))
        x_test = dict_vec.transform(x_test.to_dict(orient="records"))
        rf_obj = RandomForestClassifier(n_jobs=-1)
        params = {"n_estimators": [120, 200, 300, 500, 800, 1200], "max_depth": [5, 8, 15, 25, 30]}
        gs = GridSearchCV(estimator=rf_obj,param_grid=params,cv=5)
        gs.fit(x_train,y_train)
        y_predict = gs.predict(x_test)
        accuracy = gs.score(x_test,y_test)
        c_rep = classification_report(y_test,y_predict,target_names=["died","survived"])
        # print("Best hyperparameters:",gs.best_params_)
        # print("Best score:",gs.best_score_)
        '''
        Best hyperparameters: {'max_depth': 5, 'n_estimators': 800}
        Best score: 0.8185975609756098
'''
res = {"accuracy":accuracy,"c_rep":c_rep,"type":"classifier"}
return res
def my_linear(self):
hp = load_boston()
x_train,x_test,y_train,y_test = train_test_split(hp.data,hp.target,test_size=0.2)
# print(y_test)
x_std = StandardScaler()
y_std = StandardScaler()
x_train = x_std.fit_transform(x_train)
x_test = x_std.transform(x_test)
y_train = y_std.fit_transform(y_train.reshape(-1,1))
y_test = y_std.transform(y_test.reshape(-1,1))
lr_obj = LinearRegression()
lr_obj.fit(x_train,y_train)
lr_pre = y_std.inverse_transform(lr_obj.predict(x_test))
msr = mean_squared_error(y_std.inverse_transform(y_test),lr_pre)
mycoef = lr_obj.coef_
res = {"pre":lr_pre,"msr":msr,"mycoef":mycoef,"type":"Regression"}
'''
27.601277917691508
****************************************************************************************************
[[-0.10995107 0.11950577 0.00806897 0.08103766 -0.18413522 0.3481043
-0.05114878 -0.34999744 0.26324756 -0.23371828 -0.21594368 0.08605247
-0.34542951]]
'''
return res
def my_ridge(self):
hp = load_boston()
x_train,x_test,y_train,y_test = train_test_split(hp.data,hp.target,test_size=0.2)
# print(y_test)
x_std = StandardScaler()
y_std = StandardScaler()
x_train = x_std.fit_transform(x_train)
x_test = x_std.transform(x_test)
y_train = y_std.fit_transform(y_train.reshape(-1,1))
y_test = y_std.transform(y_test.reshape(-1,1))
rd_obj = Ridge()
rd_obj.fit(x_train,y_train)
rd_pre = y_std.inverse_transform(rd_obj.predict(x_test))
msr = mean_squared_error(y_std.inverse_transform(y_test),rd_pre)
mycoef = rd_obj.coef_
res = {"pre":rd_pre,"msr":msr,"mycoef":mycoef,"type":"Regression"}
'''
23.374692941047943
****************************************************************************************************
[[-0.12411777 0.08261566 -0.00672291 0.10267077 -0.19931229 0.34224458
-0.03708516 -0.30653053 0.26792711 -0.19554446 -0.21336845 0.07825969
-0.34234288]]
'''
return res
def my_logistic(self):
bc = pd.read_csv(
"data_source/breast-cancer-wisconsin.data",
names=[
'Sample code number','Clump Thickness','Uniformity of Cell Size',
'Uniformity of Cell Shape','Marginal Adhesion',
'Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin',
'Normal Nucleoli','Mitoses','Class'
])
# print(bc)
bc.replace(to_replace="?",value=np.nan,inplace=True)
bc.dropna(inplace=True)
target = pd.Series(bc.iloc[:,10])
x_train,x_test,y_train,y_test = train_test_split(bc.iloc[:,1:10],target,test_size=0.2)
# print(x_test)
# print(y_test)
st_obj = StandardScaler()
x_train = st_obj.fit_transform(x_train)
x_test = st_obj.transform(x_test)
# lr_obj = LogisticRegression(C=1.0)
# lr_obj.fit(x_train,y_train)
# joblib.dump(lr_obj,"model_results/lrobj.pkl")
        # Load the trained model instead of retraining before every prediction
lr_obj = joblib.load('model_results/lrobj.pkl')
lr_pre = lr_obj.predict(x_test)
accuracy = lr_obj.score(x_test,y_test)
bset_weights = lr_obj.coef_
        c_rep = classification_report(y_test,lr_pre,labels=[2,4],target_names=["benign", "malignant"])
res = {"accuracy":accuracy,"c_rep":c_rep,"bset_weights":bset_weights,"type":"classifier"}
'''
0.9927007299270073
****************************************************************************************************
precision recall f1-score support
          benign       1.00      0.99      0.99        93
       malignant       0.98      1.00      0.99        44
micro avg 0.99 0.99 0.99 137
macro avg 0.99 0.99 0.99 137
weighted avg 0.99 0.99 0.99 137
'''
return res
if __name__ == '__main__':
sml_obj = SimpleMachineLearning(function="my_logistic")
res = sml_obj.run_function()
if res.get("type") == "classifier":
print(res.get("accuracy"))
print("*"*100)
print(res.get("c_rep"))
elif res.get("type") == "Regression":
print(res.get("msr"))
print("*"*100)
print(res.get("mycoef"))
print("*"*100)
print(res.get("pre"))
| PeterZhangxing/codewars | AI/machine_learning/knn_nby_class.py | knn_nby_class.py | py | 11,224 | python | en | code | 0 | github-code | 50 |
12485265076 |
import time
import random
leoeaten = 0
daddyeaten = 0
mummyeaten = 0
minutes = random.randint(0,5)
if minutes != 0:
if minutes > 1:
print("They have just over", minutes, "minutes!")
else:
print("They have just over", minutes, "minute!")
def after():
    global leoeaten
    global daddyeaten
    global mummyeaten
    time.sleep(random.randint(50,60))
    leoeaten = leoeaten + 1
    print("Leo ate a sweet!")
    time.sleep(random.randint(0,10))
    mummyeaten = mummyeaten + 1
    print("Mummy ate one too!")
    time.sleep(random.randint(0,5))
    daddyeaten = daddyeaten + 1
    print("So did Daddy!")
for x in range(minutes):
    after()
print("Daddy ate", daddyeaten, "sweets!")
print("Mummy ate", mummyeaten, "sweets!")
print("Leo ate", leoeaten, "sweets!")
| tjrobinson/LeoPython | pygamezero/food.py | food.py | py | 920 | python | en | code | 1 | github-code | 50 |
31519995563 |
import os
import re
from string import punctuation
class Data():
'''
Import and parse data for training word-prediction
Arguments:
filepath: path to file
Raises:
FileNotFoundError if filepath not found
'''
def __init__(self, filepath):
self.filepath = filepath
if not os.path.isfile(self.filepath):
raise FileNotFoundError('Could not find %s' % self.filepath)
self.f = open(self.filepath, 'r', encoding='latin')
self.word_map = { '':0 } # 0 is null word
self.char_map = { '':0 } # 0 is null char
self.word_map_index = 1
self.char_map_index = 1
def parse(self, column=2, min_sen_len=5, num_max=None):
sentances = []
for line in self.f.readlines()[:num_max]:
# convert to array; tab deliminated
line = line.rstrip().split('\t')
# get tweet from column
try:
text = line[column]
except IndexError:
continue
#remove urls
text = self.__remove_urls(text)
# resolve hashtags and mentions
text = self.__remove_hashtags(text)
text = self.__remove_mentions(text)
# split text into sentances (not naive)
sentances.append(self.__split_into_sentences(text, min_sen_len))
sentances = [item for sublist in sentances for item in sublist]
return sentances
def build_training_data(self, sentances, num_prev_words=2, char_token_len=None):
'''
Take in a list of sentances and return the training data required to train model.
Arguments:
sentances: list of sentances to train on
Optional Arguments:
num_prev_words (default: 2): context lookback distance, uses NULL if no word exists
char_token_len (defualt: None): manually define the max character token length
'''
data = []
expected_words = []
if char_token_len == None:
self.char_token_len = self.__get_max_char_token_len(sentances)
else:
self.char_token_len = char_token_len
for s in sentances:
split = s.split(' ')
for i, word in enumerate(split):
# assign id to word based on encounter
try:
self.word_map[word]
                except KeyError:
self.word_map.update({ word:self.word_map_index })
self.word_map_index+=1
prev_words = []
# get indices of previous words and iterate through them to process them.
for prev_index in [ i - j for j in reversed(range(1, num_prev_words + 1)) ]:
if prev_index < 0:
prev_words.append(self.word_map[''])
else:
prev_words.append(self.word_map[split[prev_index]])
# skip trying to predict words that are too long.
if len(word) >= self.char_token_len:
continue
seqs = self.transform_event([prev_words, self.sequence(word)]) # generate training data sequences from word sequences.
data += seqs # add new seqs to data
expected_words += [self.word_map[word] for _ in range(len(seqs))] # populate enough words to match the number of sequences populated.
# data.append([prev_words, self.sequence(word), self.word_map[word]])
assert(len(self.word_map) == self.word_map_index) # make sure nothing went wrong with indexing words
assert(len(data) == len(expected_words))
# implicit data ordering
return (data, expected_words)
def transform_event(self, event):
'''
Intermediary function used to transform a word event into many training samples.
Arguments:
event: list of [prev_words, self.sequence(word)]
Returns:
List of structure: [[prev_words, seq1], [prev_words, seq2], ...]
'''
data = []
for seq in event[1]:
data.append([event[0], seq])
return data
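    # Illustrative shape (values made up): with event == [[3, 7], [s0, s1]],
    # transform_event(event) -> [[[3, 7], s0], [[3, 7], s1]], i.e. one
    # training sample per character-prefix sequence of the target word.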
def sequence(self, word, tokenize=True):
        ''' Return a list of all prefix sequences of a word.
        > d.sequence('hello', tokenize=False)
        ['', 'h', 'he', 'hel', 'hell', 'hello']
        > d.sequence('hello', tokenize=True)
        [[0, 0, ...], [1, 0, ...], [1, 2, 0, ...], ...]  # zero-padded lists of char ids
        '''
if tokenize:
a = [0 for _ in range(self.char_token_len)]
l = [a.copy()]
for i, c in enumerate(word):
# assign id to character encounter
try:
self.char_map[c]
                except KeyError:
self.char_map.update({ c:self.char_map_index })
self.char_map_index+=1
try:
a[i] = self.char_map[c]
except IndexError: # out of range (likely user defined max char len)
break
l.append(a.copy())
else:
s = ''
l = ['']
for c in word:
s += c
l.append(s)
return l
def __get_max_char_token_len(self, sentances):
m = 0
mword = ''
msen = ''
for sentance in sentances:
for word in sentance.split(' '):
if len(word) > m:
msen = sentance
m = len(word)
mword = word
return m
    def __remove_hashtags(self, text):
        return ' '.join(filter(lambda x:x[0]!='#', text.split()))
    def __remove_mentions(self, text):
        return ' '.join(filter(lambda x:x[0]!='@', text.split()))
def __split_into_sentences(self, text, min_sen_len):
# using this spliting algorithm becuase of the sheer amount of edge cases
# credit: https://stackoverflow.com/a/31505798
caps = "([A-Z])"
prefixes = "(Mr|St|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|Mt)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov|me|edu)"
text = " " + text + " "
text = text.replace("\n"," ")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + caps + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(caps + "[.]" + caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + caps + "[.]"," \\1<prd>",text)
if "”" in text: text = text.replace(".”","”.")
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
text = text.replace("?","?<stop>")
text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [re.sub(r'-|/|\\', ' ', s.strip(r'.|!|?').strip().replace(',','')) for s in sentences if len(s.split()) >= min_sen_len]
return sentences
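    # Hedged illustration (not part of the original class): the splitter first
    # protects abbreviation periods with the <prd> placeholder, then splits on
    # sentence-ending punctuation. Assuming min_sen_len=2:
    #   self.__split_into_sentences("Dr. Smith went home. He slept.", 2)
    #   -> ['Dr. Smith went home', 'He slept']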
def __remove_urls(self, text):
# source: https://stackoverflow.com/a/11332543
return re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', text)
if __name__ == '__main__':
d = Data(r'F:\Datasets\twitter_cikm_2010\training_set_tweets.txt')
sentances = d.parse(min_sen_len=2, num_max=10000)
td = d.build_training_data(sentances, char_token_len=15)
for i in range(len(td[0])):
print(td[0][i], td[1][i])
|
Coopss/word-prediction
|
data.py
|
data.py
|
py
| 8,289 |
python
|
en
|
code
| 1 |
github-code
|
50
|
72538868634
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import logging
import os
from typing import Text
from tfx.components import CsvExampleGen
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.components import StatisticsGen # Step 3
from tfx.components import SchemaGen # Step 3
from tfx.components import ExampleValidator # Step 3
# from tfx.components import Transform # Step 4
# from tfx.proto import trainer_pb2 # Step 5
# from tfx.components import Trainer # Step 5
# from tfx.proto import evaluator_pb2 # Step 6
# from tfx.components import Evaluator # Step 6
# from tfx.proto import pusher_pb2 # Step 7
# from tfx.components import ModelValidator # Step 7
# from tfx.components import Pusher # Step 7
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'taxi'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'PycharmProjects', 'airflow_tfx')
_data_root = os.path.join(_taxi_root, 'data', 'taxi_data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'dags', 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(_taxi_root, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
'schedule_interval': None,
'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
module_file: Text, serving_model_dir: Text,
metadata_path: Text,
direct_num_workers: int) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
# pylint: disable=line-too-long
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Step 3
# pylint: enable=line-too-long
# Generates schema based on statistics files.
infer_schema = SchemaGen( # Step 3
statistics=statistics_gen.outputs['statistics'], # Step 3
infer_feature_shape=False) # Step 3
# Performs anomaly detection based on statistics and data schema.
validate_stats = ExampleValidator( # Step 3
statistics=statistics_gen.outputs['statistics'], # Step 3
schema=infer_schema.outputs['schema']) # Step 3
# Performs transformations and feature engineering in training and serving.
# transform = Transform( # Step 4
# examples=example_gen.outputs['examples'], # Step 4
# schema=infer_schema.outputs['schema'], # Step 4
# module_file=module_file) # Step 4
# Uses user-provided Python function that implements a model using TF-Learn.
# trainer = Trainer( # Step 5
# module_file=module_file, # Step 5
# transformed_examples=transform.outputs['transformed_examples'], # Step 5
# schema=infer_schema.outputs['schema'], # Step 5
# transform_graph=transform.outputs['transform_graph'], # Step 5
# train_args=trainer_pb2.TrainArgs(num_steps=10000), # Step 5
# eval_args=trainer_pb2.EvalArgs(num_steps=5000)) # Step 5
    # Uses TFMA to compute evaluation statistics over features of a model.
# model_analyzer = Evaluator( # Step 6
# examples=example_gen.outputs['examples'], # Step 6
# model=trainer.outputs['model'], # Step 6
# feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[ # Step 6
# evaluator_pb2.SingleSlicingSpec( # Step 6
# column_for_slicing=['trip_start_hour']) # Step 6
# ])) # Step 6
# Performs quality validation of a candidate model (compared to a baseline).
# model_validator = ModelValidator( # Step 7
# examples=example_gen.outputs['examples'], # Step 7
# model=trainer.outputs['model']) # Step 7
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
# pusher = Pusher( # Step 7
# model=trainer.outputs['model'], # Step 7
# model_blessing=model_validator.outputs['blessing'], # Step 7
# push_destination=pusher_pb2.PushDestination( # Step 7
# filesystem=pusher_pb2.PushDestination.Filesystem( # Step 7
# base_directory=_serving_model_dir))) # Step 7
return pipeline.Pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
components=[
example_gen,
statistics_gen, infer_schema, validate_stats, # Step 3
# transform, # Step 4
# trainer, # Step 5
# model_analyzer, # Step 6
# model_validator, pusher # Step 7
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
beam_pipeline_args=['--direct_num_workers=%d' % direct_num_workers]
)
# 'DAG' below need to be kept for Airflow to detect dag.
DAG = AirflowDagRunner(_airflow_config).run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
module_file=_module_file,
serving_model_dir=_serving_model_dir,
metadata_path=_metadata_path,
        # 0 means auto-detect based on the number of CPUs available at
        # execution time.
direct_num_workers=0))
|
guravtanvi/Big-Data-Systems-and-Int-Analytics-INFO-7245
|
Labs/Lab8-Airflow_tfx/dags/taxi_pipeline.py
|
taxi_pipeline.py
|
py
| 6,540 |
python
|
en
|
code
| 6 |
github-code
|
50
|
38622676937
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
from itertools import repeat
from matplotlib import pyplot as plt
from skimage.io import imread
import networkx as nx
import os
from tqdm import tqdm
from skimage.transform import resize
from Melanoma_cellgraph_globalfeats_functions import getGraphFeats
def plot_graph(points, edges, image=None):
if image is not None:
plt.imshow(image)
for node1, node2 in edges:
x1, y1 = points[node1]
x2, y2 = points[node2]
plt.plot([x1, x2], [y1, y2], "k.-", ms=3)
plt.gca().set_aspect('equal')
plt.show()
def get_edge_list(points, distance):
edge_list_1st_row = []
edge_list_2nd_row = []
dist_list = []
for i in range(len(points)):
dist = np.sqrt((points[i,0] - points[:,0])**2 + (points[i,1] - points[:,1])**2)
dist = dist.reshape(-1,1)
#print(dist)
x = np.where(dist<=distance)
node_index = list(x[0])
dist_list.append(np.average(dist[node_index]))
edge_list_2nd_row.append(node_index)
edge_list_1st_row.extend(repeat(i,len(node_index)))
edge_list_2nd_row = [item for sublist in edge_list_2nd_row for item in sublist]
edge_list = [edge_list_1st_row, edge_list_2nd_row]
edge_list = np.array(edge_list)
dist_list = np.array(dist_list)
dist_list = dist_list.reshape(-1,1)
return edge_list, dist_list
def full_edge_list(C):
distance = 64.0
edges, _ = get_edge_list(C, distance)
full_edge = np.array(edges).T
return full_edge
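# Hedged illustration (not part of the original script): every node whose
# Euclidean distance is <= the threshold becomes a neighbour, self-loops
# included since dist == 0 always passes the test. For two points 5 apart:
#   edges, dists = get_edge_list(np.array([[0.0, 0.0], [3.0, 4.0]]), 10.0)
#   edges -> array([[0, 0, 1, 1], [0, 1, 0, 1]]); dists -> [[2.5], [2.5]]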
def get_graph(edge_list):
G = nx.Graph()
G.add_edges_from(edge_list)
return G
def get_pointlists():
order = list(range(24)) + [25, 26, 27, 30, 24, 28, 29] + list(range(31, 150))
with open("C:/Users/TheNa/Desktop/properties.csv", "r") as infile:
image_num = 0
next(infile)
line = infile.readline().strip().split(",")
while len(line):
points = []
while line[0] == str(order[image_num]):
points.append([float(line[2]), float(line[3])])
line = infile.readline().split(",")
yield points
image_num += 1
IMG_WIDTH = 7680
IMG_HEIGHT = 4608
IMG_CHANNELS = 3
CRC_PATH = 'C:/Users/TheNa/Desktop/CRC_Dataset'
crc_ids = os.listdir(CRC_PATH)
DISPLAY_IMGS = False
START_AT = 107
crc_images = []
if DISPLAY_IMGS:
print("\nReading images...")
for n, id_ in tqdm(enumerate(crc_ids), total=len(crc_ids)):
img = imread(CRC_PATH + "/" + id_)[:,:,:IMG_CHANNELS]
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
crc_images.append(img)
points_generator = get_pointlists()
for i in range(START_AT):
next(points_generator)
with open("C:/Users/TheNa/Desktop/features.csv", "a") as outfile:
for i in tqdm(range(len(crc_ids)-START_AT)):
print(f"\nReading points for image {START_AT+i}...")
points = np.array(next(points_generator))
print(f"\tRead in {len(points)} points!")
if len(points) < 100:
print("Point reading error:", points)
break
print("Finding edges...")
edges = full_edge_list(points)
print("Generating graph...")
graph = get_graph(edges)
print("Calculating features...")
f = getGraphFeats(graph, library=0, bool_spect=True)
f = f.reshape(1,-1)[0]
print("Saving...")
out = ""
for feat in f:
out += str(feat) + ","
out = out[:-1] + "\n"
outfile.write(out)
outfile.flush()
plt.plot([1], [1])
plt.show()
|
TheNamor/masters
|
UNet/generate_feats.py
|
generate_feats.py
|
py
| 3,614 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22419658231
|
import hashlib
import sys
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import functions as f
from pyspark.sql import types as types
def create_tables():
spark.sql(
"""create table airbnb_dv.sat_hosts(
sat_id string,
sk_host string,
host_url string,
host_name string,
response_time_id string,
valid_from timestamp,
valid_to timestamp,
layer string)"""
)
spark.sql(
"""create table airbnb_dv.sat_listings(
sat_id string,
sk_listing string,
listing_url string,
name string,
location_id int,
valid_from timestamp,
valid_to timestamp,
layer string)"""
)
spark.sql(
"""create table airbnb_dv.sat_reviews(
sat_id string,
sk_review string,
review_date string,
comments string,
valid_from timestamp,
valid_to timestamp,
layer string)"""
)
spark.sql(
"""create table airbnb_dv.sat_users(
sat_id string,
sk_users string,
user_name string,
comments string,
country_id int,
valid_from timestamp,
valid_to timestamp,
layer string)"""
)
get_hash = f.udf(
lambda a, b, c: hashlib.sha256(
str(a).encode("utf-8") + str(b).encode("utf-8") + str(c).encode("utf-8")
).hexdigest()
)
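# Hedged illustration (not part of the original job): the UDF simply hashes the
# concatenated utf-8 bytes of its three arguments, so a surrogate key such as
# sk_host for host_id=42 and source_id=1 equals
#   hashlib.sha256(b"42" + b" host " + b"1").hexdigest()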
def get_sat_hosts(hosts):
# ideally you'd store all the informational cols in a sat,
    # but I'll only store some of them out of convenience
# since they don't hold any logical importance
sat = (
hosts.select(
"host_id",
"host_url",
"host_name",
f.col("host_response_time"),
"valid_as_of",
)
.withColumn("source_system_name", f.lit(source_system_name))
.cache()
)
sat = sat.join(
ref_source_systems, on=["source_system_name"], how="left"
).withColumnRenamed("id", "source_id")
sat = (
sat.join(ref_response_times, on=["host_response_time"], how="left")
.withColumnRenamed("id", "response_time_id")
.cache()
)
sat = sat.fillna(0, subset=["response_time_id"])
sat = (
sat.withColumn(
"sk_host", get_hash(f.col("host_id"), f.lit(" host "), f.col("source_id"))
)
.withColumnRenamed("valid_as_of", "valid_from_")
.withColumn("valid_to_", f.lit(None))
.withColumn("layer", f.lit("processed"))
.withColumn(
"sat_id", get_hash(f.col("sk_host"), f.lit(" sat "), f.lit(" host "))
)
)
sat = sat.withColumn("valid_to", sat.valid_to_.cast(types.TimestampType()))
    # the source valid_from column mostly contains strings, so set it manually
sat = sat.withColumn("valid_from", f.lit("7-9-2022 12:00:00.0"))
sat = sat.select(
"sat_id",
"sk_host",
"host_url",
"host_name",
"response_time_id",
"valid_from",
"valid_to",
"layer",
)
return sat
def get_sat_listings(listings):
# ideally you'd store all the informational cols in a sat,
    # but I'll only store some of them out of convenience
# since they don't hold any logical importance
sat = (
listings.withColumnRenamed("id", "listing_id")
.select(
"listing_id",
"listing_url",
"name",
f.col("neighbourhood").alias("location_name"),
"valid_as_of",
)
.withColumn("source_system_name", f.lit(source_system_name))
.cache()
)
sat = sat.join(
ref_source_systems, on=["source_system_name"], how="left"
).withColumnRenamed("id", "source_id")
sat = sat.join(ref_locations, on=["location_name"], how="left").withColumnRenamed(
"id", "location_id"
)
sat = sat.fillna(0, subset=["location_id"])
sat = (
sat.withColumn(
"sk_listing",
get_hash(f.col("listing_id"), f.lit(" listing "), f.col("source_id")),
)
.withColumnRenamed("valid_as_of", "valid_from_")
.withColumn("valid_to_", f.lit(None))
.withColumn("layer", f.lit("processed"))
.withColumn(
"sat_id", get_hash(f.col("sk_listing"), f.lit(" sat "), f.lit(" listing "))
)
)
sat = sat.withColumn("valid_to", sat.valid_to_.cast(types.TimestampType()))
    # the source valid_from column mostly contains strings, so set it manually
sat = sat.withColumn("valid_from", f.lit("7-9-2022 12:00:00.0"))
sat = sat.select(
"sat_id",
"sk_listing",
"listing_url",
"name",
"location_id",
"valid_from",
"valid_to",
"layer",
)
return sat
def fill_hosts():
df = get_sat_hosts(hosts)
df.write.mode("overwrite").saveAsTable("airbnb_dv.sat_hosts")
def fill_listings():
df = get_sat_listings(listings)
df.write.mode("overwrite").saveAsTable("airbnb_dv.sat_hosts")
def create_sats():
create_tables()
fill_hosts()
fill_listings()
if __name__ == "__main__":
app_name = "hive_init"
conf = SparkConf()
hdfs_host = "hdfs://namenode:8020"
conf.set("hive.metastore.uris", "http://hive-metastore:9083")
conf.set("spark.kerberos.access.hadoopFileSystem", hdfs_host)
conf.set("spark.sql.warehouse.dir", f"{hdfs_host}/user/hive/warehouse")
conf.set("hive.metastore.warehouse.dir", f"{hdfs_host}/user/hive/warehouse")
conf.set("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
conf.setMaster("local[*]")
spark = (
SparkSession.builder.appName(app_name)
.config(conf=conf)
.enableHiveSupport()
.getOrCreate()
)
existing_hosts_path = "/airflow/data/csv/hosts_existing_7_Sep_2022.csv"
existing_listings_path = "/airflow/data/csv/listings_existing_7_Sep_2022.csv"
hosts = spark.read.option("header", True).csv(existing_hosts_path)
listings = spark.read.option("header", True).csv(existing_listings_path)
source_system_name = sys.argv[1]
ref_source_systems = spark.sql("select * from airbnb_dv.ref_source_systems")
ref_response_times = spark.sql("select * from airbnb_dv.ref_response_times")
ref_locations = spark.sql("select * from airbnb_dv.ref_listing_locations")
create_sats()
|
tmspacechimp/Data-Engineering
|
Final Project/AirflowEnv/jobs/create_sats.py
|
create_sats.py
|
py
| 6,818 |
python
|
en
|
code
| 0 |
github-code
|
50
|
6317452733
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' COMMAND FUNCTION '''
def get_additional_info() -> List[Dict]:
    alerts = demisto.context().get('Core', {}).get('OriginalAlert')
    if not alerts:
        raise DemistoException('Original Alert is not configured in context')
    # index only after the existence check, otherwise the subscript raises
    # TypeError before the intended DemistoException
    alerts = alerts[0]
    if not isinstance(alerts, list):
        alerts = [alerts]
results = []
for alert in alerts:
if alert == {}:
continue
if isinstance(alert, list):
alert = tuple(alert)
alert_event = alert.get('event')
res = {'Identity Name': alert_event.get('identity_name'),
'Identity Type': alert_event.get('identity_type'),
'Access Key ID': alert_event.get('identity_invoked_by_uuid'),
'Identity Invoke Type': alert_event.get('identity_invoked_by_type'),
'Identity Invoke Sub Type': alert_event.get('identity_invoked_by_sub_type')}
results.append(res)
return results
''' MAIN FUNCTION '''
def main():
try:
        results = get_additional_info()
command_results = CommandResults(
readable_output=tableToMarkdown('Cloud Identity', results,
headers=list(results[0].keys()) if results else None))
return_results(command_results)
except Exception as ex:
return_error(f'Failed to execute XCloudIdentitiesWidget. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
demisto/content
|
Packs/CloudIncidentResponse/Scripts/XCloudIdentitiesWidget/XCloudIdentitiesWidget.py
|
XCloudIdentitiesWidget.py
|
py
| 1,561 |
python
|
en
|
code
| 1,023 |
github-code
|
50
|
16034967559
|
user = int(input('up to which number do you want to print Fibonacci numbers: '))
def fib(inp):
"""print fibonacci series till nth fibo number (n-user input)"""
a, b = 0, 1
count = 0
while count <= inp:
print(a, end=' ')
a, b = b, a + b
count += 1
def fib_2(inp):
"""Prints fibonacci series till asked number"""
a, b = 0, 1
    while a < inp:  # note: no count variable; the loop is bounded by the value itself
print(a, end=' ')
a, b = b, a + b
print()
fib(user)
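# Hedged illustration (not part of the original script): the two helpers read
# their argument differently - fib(n) prints the first n+1 Fibonacci numbers,
# while fib_2(n) prints every Fibonacci number strictly below n:
#   fib(5)   -> 0 1 1 2 3 5
#   fib_2(5) -> 0 1 1 2 3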
|
divya-raichura/Dr-Python
|
CODES/functions/functions_1/_10_fibo.py
|
_10_fibo.py
|
py
| 506 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40774354058
|
import mt_tkinter as tk
class InfoUI:
@staticmethod
def is_num(string):
        try:
            float(string)
            return True
        except (TypeError, ValueError):
            return False
def __init__(self, main_ui):
self.main_ui = main_ui
self.pop_up_box(main_ui.root)
def input_number(self):
if self.is_num(self.unit_entry.get().strip()) and self.is_num(self.offset_entry.get().strip()):
if float(self.unit_entry.get().strip()) > 0 and float(self.offset_entry.get().strip()) > 0 :
self.root.withdraw()
self.root.quit()
return
def get_unit(self):
return float(self.unit_entry.get().strip())
def get_offset(self):
beats_first_bar = float(self.offset_entry.get().strip())
unit = float(self.unit_entry.get().strip())
offset = unit - beats_first_bar
while offset < 0:
offset += unit
return offset
def pop_up_box(self, main_form):
self.root = tk.Toplevel(master = main_form)
self.root.title('')
self.root.protocol('WM_DELETE_WINDOW', self.input_number)
frame = tk.Frame(master = self.root)
frame.grid(padx = 8, pady = 4)
tk.Label(master = frame,
text = 'How many beats per chord?' + '\n' + 'Please input a positive number.'
).grid(row = 0, column = 0)
self.unit_entry = tk.Entry(master = frame, width = 21,
textvariable = tk.StringVar(frame, value='2'))
self.unit_entry.grid(row = 1, column = 0)
tk.Label(master = frame,
text = 'How many beats in the first bar?' + '\n' + 'Please input a positive number.'
).grid(row = 2, column = 0)
self.offset_entry = tk.Entry(master = frame, width = 21,
textvariable = tk.StringVar(frame, value='2'))
self.offset_entry.grid(row = 3, column = 0)
button = tk.Button(master = frame, text = 'OK', width = 19,
command = self.input_number)
button.grid(row = 4, column = 0)
self.main_ui.center_window(self.root)
self.root.resizable(False, False)
self.unit_entry.focus()
self.root.mainloop()
return
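    # Hedged illustration (not part of the original class): with 4 beats per
    # chord and 1 beat in the first bar, get_offset() returns 4 - 1 = 3, i.e.
    # playback starts three beats into the first chord unit.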
|
hongyu19930808/Emotional-Equalizer
|
source/ui_info.py
|
ui_info.py
|
py
| 2,359 |
python
|
en
|
code
| 6 |
github-code
|
50
|
1223945914
|
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter(name="addclass")
def add_class(field, css):
old_css = field.field.widget.attrs.get("class", None)
if old_css:
        css = old_css + " " + css  # separate classes with a space so both apply
return field.as_widget(attrs={"class": css})
@register.filter(name="upto")
@stringfilter
def upto(value, delimiter=" "):
return value.split(delimiter)[0]
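# Hedged template usage sketch (not part of the original module); the form and
# object names are illustrative only:
#   {% load task_filters %}
#   {{ form.title|addclass:"form-control" }}  {# merged with any existing class #}
#   {{ task.description|upto:"." }}           {# text up to the first period #}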
|
thomasherzog/DeltaX
|
core/templatetags/task_filters.py
|
task_filters.py
|
py
| 443 |
python
|
en
|
code
| 0 |
github-code
|
50
|
24598195082
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.urls import reverse, reverse_lazy
from django.db.models import Q, Count
from django.views.generic import (
ListView,
CreateView,
DetailView,
UpdateView,
DeleteView,
)
from pathlib import Path
import os
from django.views.generic.edit import FormMixin
from .models import Product, Question, Answer
from .forms import ProductForm, QuestionForm, AnswerForm
from massive_homework.settings import BASE_DIR
# Create your views here.
### PRODUCTS VIEWS ###
class ProductListView(ListView):
queryset = Product.objects.all()
class ProductCreateView(CreateView):
model = Product
form_class = ProductForm
exclude = ["user"]
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
success_url = reverse_lazy("showcase_app:index")
class ProductDetailView(FormMixin, DetailView):
model = Product
path_input_image = Product.images
path_input_image_abs = f"{BASE_DIR}/{path_input_image}"
form_class = QuestionForm
def get_context_data(self, **kwargs):
question = (
Question.objects.annotate(number_of_answers=Count("answer"))
.filter(~Q(archived=True), product_id=self.get_object())
.order_by("created_at")
)
context = super(ProductDetailView, self).get_context_data(**kwargs)
context["form"] = QuestionForm(
initial={"user_id": self.request.user.pk, "product": self.object}
)
context["question"] = question
return context
def post(self, request, *args, **kwargs):
new_question = Question(
user=request.user,
header=request.POST.get("header"),
body=request.POST.get("body"),
product_id=self.get_object(),
)
new_question.save()
return self.get(self, request, *args, **kwargs)
class ProductUpdateView(UpdateView):
template_name_suffix = "_update_form"
model = Product
form_class = ProductForm
def get_success_url(self):
return reverse("showcase_app:product", kwargs={"pk": self.object.pk})
class ProductDeleteView(DeleteView):
success_url = reverse_lazy("showcase_app:index")
queryset = Product.objects.filter(~Q(archived=True)).all()
### QUESTION VIEWS ###
class QuestionDetailView(FormMixin, DetailView):
model = Question
form_class = AnswerForm
def get_context_data(self, **kwargs):
answer = Answer.objects.filter(question_id=self.get_object()).order_by("id")
context = super(QuestionDetailView, self).get_context_data(**kwargs)
context["form"] = AnswerForm(
initial={"user_id": self.request.user.pk, "question": self.object}
)
context["answer"] = answer
return context
# @login_required
def post(self, request, *args, **kwargs):
new_answer = Answer(
user=request.user,
body=request.POST.get("body"),
question_id=self.get_object().id,
)
new_answer.save()
return self.get(self, request, *args, **kwargs)
class QuestionDeleteView(DeleteView):
queryset = Question.objects.filter(~Q(archived=True)).all()
def get_success_url(self):
return reverse("showcase_app:product", kwargs={"pk": self.object.product.pk})
class QuestionListView(ListView):
model = Question
def get_queryset(self, **kwargs):
qs = super().get_queryset(**kwargs)
return qs.filter(product_id=self.kwargs["pk"])
class AnswerDeleteView(DeleteView):
queryset = Answer.objects.filter(~Q(archived=True)).all()
def get_success_url(self):
return reverse("showcase_app:question", kwargs={"pk": self.object.question.pk})
|
Lalkeen/massive_homework
|
showcase_app/views.py
|
views.py
|
py
| 3,843 |
python
|
en
|
code
| 0 |
github-code
|
50
|
24769604688
|
from brushes.brush import Brush
class StrokeBrush(Brush):
def __init__(self):
super().__init__(False)
def use(self, canvas_view, mouse_state, current_color):
# Keep it here in case we want to add extra functionality to base class
super().use(canvas_view, mouse_state, current_color)
canvas_view.draw_line(
mouse_state["canvas_start_pos"],
canvas_view.get_canvas_point(
mouse_state["current_pos"]
),
current_color,
0,
# Do not transform to canvas relative coordinates as they've already been transformed
transform_to_canvas_relative_coordinates=False
)
|
lycayan18/magicapixel
|
brushes/stroke_brush.py
|
stroke_brush.py
|
py
| 704 |
python
|
en
|
code
| 0 |
github-code
|
50
|
12790437394
|
#!/usr/bin/env python
# coding=utf-8
"""Variant on standard library's cmd with extra features.
To use, simply import cmd2.Cmd instead of cmd.Cmd; use precisely as though you
were using the standard library's cmd, while enjoying the extra features.
Searchable command history (commands: "history")
Load commands from file, save to file, edit commands in file
Multi-line commands
Special-character shortcut commands (beyond cmd's "@" and "!")
Settable environment parameters
Parsing commands with `argparse` argument parsers (flags)
Redirection to file with >, >>; input from file with <
Easy transcript-based testing of applications (see examples/example.py)
Bash-style ``select`` available
Note that redirection with > and | will only work if `self.poutput()`
is used in place of `print`.
- Catherine Devlin, Jan 03 2008 - catherinedevlin.blogspot.com
Git repository on GitHub at https://github.com/python-cmd2/cmd2
"""
import argparse
import atexit
import cmd
import codecs
import collections
import copy
import datetime
import functools
import glob
import io
import optparse
import os
import platform
import re
import shlex
import signal
import six
import sys
import tempfile
import traceback
import unittest
from code import InteractiveConsole
try:
from enum34 import Enum
except ImportError:
from enum import Enum
import pyparsing
import pyperclip
# Collection is a container that is sized and iterable
# It was introduced in Python 3.6. We will try to import it, otherwise use our implementation
try:
from collections.abc import Collection, Iterable
except ImportError:
if six.PY3:
from collections.abc import Sized, Iterable, Container
else:
from collections import Sized, Iterable, Container
# noinspection PyAbstractClass
class Collection(Sized, Iterable, Container):
__slots__ = ()
# noinspection PyPep8Naming
@classmethod
def __subclasshook__(cls, C):
if cls is Collection:
if any("__len__" in B.__dict__ for B in C.__mro__) and \
any("__iter__" in B.__dict__ for B in C.__mro__) and \
any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
# Newer versions of pyperclip are released as a single file, but older versions had a more complicated structure
try:
from pyperclip.exceptions import PyperclipException
except ImportError:
# noinspection PyUnresolvedReferences
from pyperclip import PyperclipException
# next(it) gets next item of iterator it. This is a replacement for calling it.next() in Python 2 and next(it) in Py3
from six import next
# Possible types for text data. This is basestring() in Python 2 and str in Python 3.
from six import string_types
# Used for sm.input: raw_input() for Python 2 or input() for Python 3
import six.moves as sm
# itertools.zip() for Python 2 or zip() for Python 3 - produces an iterator in both cases
from six.moves import zip
# If using Python 2.7, try to use the subprocess32 package backported from Python 3.2 due to various improvements
# NOTE: The feature to pipe output to a shell command won't work correctly in Python 2.7 without this
try:
# noinspection PyPackageRequirements
import subprocess32 as subprocess
except ImportError:
import subprocess
# Python 3.4 and earlier require contextlib2 for temporarily redirecting stderr and stdout
if sys.version_info < (3, 5):
from contextlib2 import redirect_stdout, redirect_stderr
else:
from contextlib import redirect_stdout, redirect_stderr
if six.PY3:
from io import StringIO # Python3
else:
from io import BytesIO as StringIO # Python2
# Detect whether IPython is installed to determine if the built-in "ipy" command should be included
ipython_available = True
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
from IPython import embed
except ImportError:
ipython_available = False
# Prefer statically linked gnureadline if available (for macOS compatibility due to issues with libedit)
try:
import gnureadline as readline
except ImportError:
# Try to import readline, but allow failure for convenience in Windows unit testing
# Note: If this actually fails, you should install readline on Linux or Mac or pyreadline on Windows
try:
# noinspection PyUnresolvedReferences
import readline
except ImportError:
pass
# Check what implementation of readline we are using
class RlType(Enum):
GNU = 1
PYREADLINE = 2
NONE = 3
rl_type = RlType.NONE
if 'pyreadline' in sys.modules:
rl_type = RlType.PYREADLINE
# Save the original pyreadline display completion function since we need to override it and restore it
# noinspection PyProtectedMember
orig_pyreadline_display = readline.rl.mode._display_completions
############################################################################################################
# pyreadline is incomplete in terms of the Python readline API. Add the missing functions we need.
############################################################################################################
# readline.redisplay()
try:
getattr(readline, 'redisplay')
except AttributeError:
# noinspection PyProtectedMember
readline.redisplay = readline.rl.mode._update_line
# readline.remove_history_item()
try:
getattr(readline, 'remove_history_item')
except AttributeError:
# noinspection PyProtectedMember
def pyreadline_remove_history_item(pos):
"""
An implementation of remove_history_item() for pyreadline
:param pos: The 0-based position in history to remove
"""
# Save of the current location of the history cursor
saved_cursor = readline.rl.mode._history.history_cursor
# Delete the history item
del (readline.rl.mode._history.history[pos])
# Update the cursor if needed
if saved_cursor > pos:
readline.rl.mode._history.history_cursor -= 1
readline.remove_history_item = pyreadline_remove_history_item
elif 'gnureadline' in sys.modules or 'readline' in sys.modules:
# We don't support libedit
if 'libedit' not in readline.__doc__:
rl_type = RlType.GNU
# We need wcswidth to calculate display width of tab completions
from wcwidth import wcswidth
# Load the readline lib so we can make changes to it
import ctypes
readline_lib = ctypes.CDLL(readline.__file__)
rl_basic_quote_characters = ctypes.c_char_p.in_dll(readline_lib, "rl_basic_quote_characters")
orig_rl_basic_quotes = ctypes.cast(rl_basic_quote_characters, ctypes.c_void_p).value
if rl_type == RlType.NONE:
rl_warning = "Readline features including tab completion have been disabled since no \n" \
"supported version of readline was found. To resolve this, install \n" \
"pyreadline on Windows or gnureadline on Mac.\n\n"
sys.stderr.write(rl_warning)
else:
# Used by rlcompleter in Python console loaded by py command
orig_rl_delims = readline.get_completer_delims()
# BrokenPipeError and FileNotFoundError exist only in Python 3. Use IOError for Python 2.
if six.PY3:
BROKEN_PIPE_ERROR = BrokenPipeError
FILE_NOT_FOUND_ERROR = FileNotFoundError
else:
BROKEN_PIPE_ERROR = FILE_NOT_FOUND_ERROR = IOError
# On some systems, pyperclip will import gtk for its clipboard functionality.
# The following code is a workaround for gtk interfering with printing from a background
# thread while the CLI thread is blocking in raw_input() in Python 2 on Linux.
if six.PY2 and sys.platform.startswith('lin'):
try:
# noinspection PyUnresolvedReferences
import gtk
gtk.set_interactive(0)
except ImportError:
pass
__version__ = '0.8.8'
# Pyparsing enablePackrat() can greatly speed up parsing, but problems have been seen in Python 3 in the past
pyparsing.ParserElement.enablePackrat()
# Override the default whitespace chars in Pyparsing so that newlines are not treated as whitespace
pyparsing.ParserElement.setDefaultWhitespaceChars(' \t')
# The next 3 variables and associated setter functions effect how arguments are parsed for decorated commands
# which use one of the decorators such as @with_argument_list or @with_argparser
# The defaults are sane and maximize ease of use for new applications based on cmd2.
# To maximize backwards compatibility, we recommend setting USE_ARG_LIST to "False"
# Use POSIX or Non-POSIX (Windows) rules for splitting a command-line string into a list of arguments via shlex.split()
POSIX_SHLEX = False
# Strip outer quotes for convenience if POSIX_SHLEX = False
STRIP_QUOTES_FOR_NON_POSIX = True
# For @options commands, pass a list of argument strings instead of a single argument string to the do_* methods
USE_ARG_LIST = True
# Used for tab completion and word breaks. Do not change.
QUOTES = ['"', "'"]
REDIRECTION_CHARS = ['|', '<', '>']
# optional attribute, when tagged on a function, allows cmd2 to categorize commands
HELP_CATEGORY = 'help_category'
HELP_SUMMARY = 'help_summary'
def categorize(func, category):
"""Categorize a function.
The help command output will group this function under the specified category heading
:param func: Union[Callable, Iterable] - function to categorize
:param category: str - category to put it in
"""
if isinstance(func, Iterable):
for item in func:
setattr(item, HELP_CATEGORY, category)
else:
setattr(func, HELP_CATEGORY, category)
def set_posix_shlex(val):
""" Allows user of cmd2 to choose between POSIX and non-POSIX splitting of args for decorated commands.
:param val: bool - True => POSIX, False => Non-POSIX
"""
global POSIX_SHLEX
POSIX_SHLEX = val
def set_strip_quotes(val):
""" Allows user of cmd2 to choose whether to automatically strip outer-quotes when POSIX_SHLEX is False.
:param val: bool - True => strip quotes on args for decorated commands if POSIX_SHLEX is False.
"""
global STRIP_QUOTES_FOR_NON_POSIX
STRIP_QUOTES_FOR_NON_POSIX = val
def set_use_arg_list(val):
""" Allows user of cmd2 to choose between passing @options commands an argument string or list of arg strings.
:param val: bool - True => arg is a list of strings, False => arg is a string (for @options commands)
"""
global USE_ARG_LIST
USE_ARG_LIST = val
class OptionParser(optparse.OptionParser):
"""Subclass of optparse.OptionParser which stores a reference to the do_* method it is parsing options for.
Used mostly for getting access to the do_* method's docstring when printing help.
"""
def __init__(self):
# Call super class constructor. Need to do it in this way for Python 2 and 3 compatibility
optparse.OptionParser.__init__(self)
# The do_* method this class is parsing options for. Used for accessing docstring help.
self._func = None
def exit(self, status=0, msg=None):
"""Called at the end of showing help when either -h is used to show help or when bad arguments are provided.
We override exit so it doesn't automatically exit the application.
"""
if self.values is not None:
self.values._exit = True
if msg:
print(msg)
def print_help(self, *args, **kwargs):
"""Called when optparse encounters either -h or --help or bad arguments. It prints help for options.
We override it so that before the standard optparse help, it prints the do_* method docstring, if available.
"""
if self._func.__doc__:
print(self._func.__doc__)
optparse.OptionParser.print_help(self, *args, **kwargs)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
raise optparse.OptParseError(msg)
def remaining_args(opts_plus_args, arg_list):
""" Preserves the spacing originally in the arguments after the removal of options.
:param opts_plus_args: str - original argument string, including options
:param arg_list: List[str] - list of strings containing the non-option arguments
:return: str - non-option arguments as a single string, with original spacing preserved
"""
    pattern = r'\s+'.join(re.escape(a) for a in arg_list) + r'\s*$'
match_obj = re.search(pattern, opts_plus_args)
try:
remaining = opts_plus_args[match_obj.start():]
except AttributeError:
# Don't preserve spacing, but at least we don't crash and we do preserve args and their order
remaining = ' '.join(arg_list)
return remaining
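# Hedged illustration (not part of the original module): the regex search keeps
# whatever whitespace originally separated the non-option arguments:
#   remaining_args('-q src   dest', ['src', 'dest']) -> 'src   dest'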
def _which(editor):
try:
editor_path = subprocess.check_output(['which', editor], stderr=subprocess.STDOUT).strip()
if six.PY3:
editor_path = editor_path.decode()
except subprocess.CalledProcessError:
editor_path = None
return editor_path
def strip_quotes(arg):
""" Strip outer quotes from a string.
Applies to both single and double quotes.
:param arg: str - string to strip outer quotes from
:return str - same string with potentially outer quotes stripped
"""
quote_chars = '"' + "'"
if len(arg) > 1 and arg[0] == arg[-1] and arg[0] in quote_chars:
arg = arg[1:-1]
return arg
def parse_quoted_string(cmdline):
"""Parse a quoted string into a list of arguments."""
if isinstance(cmdline, list):
# arguments are already a list, return the list we were passed
lexed_arglist = cmdline
else:
# Use shlex to split the command line into a list of arguments based on shell rules
lexed_arglist = shlex.split(cmdline, posix=POSIX_SHLEX)
# If not using POSIX shlex, make sure to strip off outer quotes for convenience
if not POSIX_SHLEX and STRIP_QUOTES_FOR_NON_POSIX:
temp_arglist = []
for arg in lexed_arglist:
temp_arglist.append(strip_quotes(arg))
lexed_arglist = temp_arglist
return lexed_arglist
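# Hedged illustration (not part of the original module), using the module
# defaults POSIX_SHLEX=False and STRIP_QUOTES_FOR_NON_POSIX=True:
#   parse_quoted_string('tag "hello world"') -> ['tag', 'hello world']
# With POSIX_SHLEX=True the outer quotes are consumed by shlex itself; the two
# modes mainly differ on escapes such as backslashes.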
def with_category(category):
"""A decorator to apply a category to a command function"""
def cat_decorator(func):
categorize(func, category)
return func
return cat_decorator
def with_argument_list(func):
"""A decorator to alter the arguments passed to a do_* cmd2
method. Default passes a string of whatever the user typed.
With this decorator, the decorated method will receive a list
of arguments parsed from user input using shlex.split()."""
@functools.wraps(func)
def cmd_wrapper(self, cmdline):
lexed_arglist = parse_quoted_string(cmdline)
return func(self, lexed_arglist)
cmd_wrapper.__doc__ = func.__doc__
return cmd_wrapper
def with_argparser_and_unknown_args(argparser):
"""A decorator to alter a cmd2 method to populate its ``args`` argument by parsing arguments with the given
instance of argparse.ArgumentParser, but also returning unknown args as a list.
:param argparser: argparse.ArgumentParser - given instance of ArgumentParser
:return: function that gets passed parsed args and a list of unknown args
"""
# noinspection PyProtectedMember
def arg_decorator(func):
@functools.wraps(func)
def cmd_wrapper(instance, cmdline):
lexed_arglist = parse_quoted_string(cmdline)
args, unknown = argparser.parse_known_args(lexed_arglist)
return func(instance, args, unknown)
# argparser defaults the program name to sys.argv[0]
# we want it to be the name of our command
argparser.prog = func.__name__[3:]
# If the description has not been set, then use the method docstring if one exists
if argparser.description is None and func.__doc__:
argparser.description = func.__doc__
if func.__doc__:
setattr(cmd_wrapper, HELP_SUMMARY, func.__doc__)
cmd_wrapper.__doc__ = argparser.format_help()
# Mark this function as having an argparse ArgumentParser (used by do_help)
cmd_wrapper.__dict__['has_parser'] = True
# If there are subcommands, store their names in a list to support tab-completion of subcommand names
if argparser._subparsers is not None:
# Key is subcommand name and value is completer function
subcommands = collections.OrderedDict()
# Get all subcommands and check if they have completer functions
for name, parser in argparser._subparsers._group_actions[0]._name_parser_map.items():
if 'completer' in parser._defaults:
completer = parser._defaults['completer']
else:
completer = None
subcommands[name] = completer
cmd_wrapper.__dict__['subcommands'] = subcommands
return cmd_wrapper
return arg_decorator
def with_argparser(argparser):
"""A decorator to alter a cmd2 method to populate its ``args`` argument by parsing arguments
with the given instance of argparse.ArgumentParser.
:param argparser: argparse.ArgumentParser - given instance of ArgumentParser
:return: function that gets passed parsed args
"""
# noinspection PyProtectedMember
def arg_decorator(func):
@functools.wraps(func)
def cmd_wrapper(instance, cmdline):
lexed_arglist = parse_quoted_string(cmdline)
args = argparser.parse_args(lexed_arglist)
return func(instance, args)
# argparser defaults the program name to sys.argv[0]
# we want it to be the name of our command
argparser.prog = func.__name__[3:]
# If the description has not been set, then use the method docstring if one exists
if argparser.description is None and func.__doc__:
argparser.description = func.__doc__
if func.__doc__:
setattr(cmd_wrapper, HELP_SUMMARY, func.__doc__)
cmd_wrapper.__doc__ = argparser.format_help()
# Mark this function as having an argparse ArgumentParser (used by do_help)
cmd_wrapper.__dict__['has_parser'] = True
# If there are subcommands, store their names in a list to support tab-completion of subcommand names
if argparser._subparsers is not None:
# Key is subcommand name and value is completer function
subcommands = collections.OrderedDict()
# Get all subcommands and check if they have completer functions
for name, parser in argparser._subparsers._group_actions[0]._name_parser_map.items():
if 'completer' in parser._defaults:
completer = parser._defaults['completer']
else:
completer = None
subcommands[name] = completer
cmd_wrapper.__dict__['subcommands'] = subcommands
return cmd_wrapper
return arg_decorator
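# Hedged usage sketch (not part of the original module): a do_* method wired up
# with @with_argparser receives an argparse.Namespace instead of raw text. The
# command name and flags below are illustrative only:
#   speak_parser = argparse.ArgumentParser()
#   speak_parser.add_argument('-s', '--shout', action='store_true')
#   speak_parser.add_argument('words', nargs='+')
#
#   class App(Cmd):
#       @with_argparser(speak_parser)
#       def do_speak(self, args):
#           text = ' '.join(args.words)
#           self.poutput(text.upper() if args.shout else text)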
def options(option_list, arg_desc="arg"):
"""Used as a decorator and passed a list of optparse-style options,
alters a cmd2 method to populate its ``opts`` argument from its
raw text argument.
Example: transform
def do_something(self, arg):
into
@options([make_option('-q', '--quick', action="store_true",
help="Makes things fast")],
"source dest")
def do_something(self, arg, opts):
if opts.quick:
self.fast_button = True
"""
if not isinstance(option_list, list):
# If passed a single option instead of a list of options, convert it to a list with one option
option_list = [option_list]
def option_setup(func):
"""Decorator function which modifies on of the do_* methods that use the @options decorator.
:param func: do_* method which uses the @options decorator
:return: modified version of the do_* method
"""
option_parser = OptionParser()
for option in option_list:
option_parser.add_option(option)
# Allow reasonable help for commands defined with @options and an empty list of options
if len(option_list) > 0:
option_parser.set_usage("%s [options] %s" % (func.__name__[3:], arg_desc))
else:
option_parser.set_usage("%s %s" % (func.__name__[3:], arg_desc))
option_parser._func = func
@functools.wraps(func)
def new_func(instance, arg):
"""For @options commands this replaces the actual do_* methods in the instance __dict__.
First it does all of the option/argument parsing. Then it calls the underlying do_* method.
:param instance: cmd2.Cmd2 derived class application instance
:param arg: str - command-line arguments provided to the command
:return: bool - returns whatever the result of calling the underlying do_* method would be
"""
try:
# Use shlex to split the command line into a list of arguments based on shell rules
opts, new_arglist = option_parser.parse_args(shlex.split(arg, posix=POSIX_SHLEX))
# If not using POSIX shlex, make sure to strip off outer quotes for convenience
if not POSIX_SHLEX and STRIP_QUOTES_FOR_NON_POSIX:
temp_arglist = []
for arg in new_arglist:
temp_arglist.append(strip_quotes(arg))
new_arglist = temp_arglist
# Also strip off outer quotes on string option values
for key, val in opts.__dict__.items():
if isinstance(val, str):
opts.__dict__[key] = strip_quotes(val)
# Must find the remaining args in the original argument list, but
# mustn't include the command itself
# if hasattr(arg, 'parsed') and new_arglist[0] == arg.parsed.command:
# new_arglist = new_arglist[1:]
if USE_ARG_LIST:
arg = new_arglist
else:
new_args = remaining_args(arg, new_arglist)
if isinstance(arg, ParsedString):
arg = arg.with_args_replaced(new_args)
else:
arg = new_args
except optparse.OptParseError as e:
print(e)
option_parser.print_help()
return
if hasattr(opts, '_exit'):
return None
result = func(instance, arg, opts)
return result
new_func.__doc__ = '%s%s' % (func.__doc__ + '\n' if func.__doc__ else '', option_parser.format_help())
return new_func
return option_setup
# Can we access the clipboard? Should always be true on Windows and Mac, but only sometimes on Linux
# noinspection PyUnresolvedReferences
try:
# Get the version of the pyperclip module as a float
pyperclip_ver = float('.'.join(pyperclip.__version__.split('.')[:2]))
# The extraneous output bug in pyperclip on Linux using xclip was fixed in more recent versions of pyperclip
if sys.platform.startswith('linux') and pyperclip_ver < 1.6:
# Avoid extraneous output to stderr from xclip when clipboard is empty at cost of overwriting clipboard contents
pyperclip.copy('')
else:
# Try getting the contents of the clipboard
_ = pyperclip.paste()
except PyperclipException:
can_clip = False
else:
can_clip = True
def get_paste_buffer():
"""Get the contents of the clipboard / paste buffer.
:return: str - contents of the clipboard
"""
pb_str = pyperclip.paste()
# If value returned from the clipboard is unicode and this is Python 2, convert to a "normal" Python 2 string first
if six.PY2 and not isinstance(pb_str, str):
import unicodedata
pb_str = unicodedata.normalize('NFKD', pb_str).encode('ascii', 'ignore')
return pb_str
def write_to_paste_buffer(txt):
"""Copy text to the clipboard / paste buffer.
:param txt: str - text to copy to the clipboard
"""
pyperclip.copy(txt)
class ParsedString(str):
"""Subclass of str which also stores a pyparsing.ParseResults object containing structured parse results."""
    # pyparsing.ParseResults - structured parse results, to provide multiple means of access to the parsed data
parsed = None
# Function which did the parsing
parser = None
def full_parsed_statement(self):
"""Used to reconstruct the full parsed statement when a command isn't recognized."""
new = ParsedString('%s %s' % (self.parsed.command, self.parsed.args))
new.parsed = self.parsed
new.parser = self.parser
return new
def with_args_replaced(self, newargs):
"""Used for @options commands when USE_ARG_LIST is False.
It helps figure out what the args are after removing options.
"""
new = ParsedString(newargs)
new.parsed = self.parsed
new.parser = self.parser
new.parsed['args'] = newargs
new.parsed.statement['args'] = newargs
return new
def replace_with_file_contents(fname):
"""Action to perform when successfully matching parse element definition for inputFrom parser.
:param fname: str - filename
:return: str - contents of file "fname"
"""
try:
# Any outer quotes are not part of the filename
unquoted_file = strip_quotes(fname[0])
with open(os.path.expanduser(unquoted_file)) as source_file:
result = source_file.read()
except IOError:
result = '< %s' % fname[0] # wasn't a file after all
    # TODO: If pyparsing input parser logic gets fixed to support empty file, add support to get from paste buffer
return result
class EmbeddedConsoleExit(SystemExit):
"""Custom exception class for use with the py command."""
pass
class EmptyStatement(Exception):
"""Custom exception class for handling behavior when the user just presses <Enter>."""
pass
# Regular expression to match ANSI escape codes
ANSI_ESCAPE_RE = re.compile(r'\x1b[^m]*m')
def strip_ansi(text):
"""Strip ANSI escape codes from a string.
:param text: str - a string which may contain ANSI escape codes
:return: str - the same string with any ANSI escape codes removed
"""
return ANSI_ESCAPE_RE.sub('', text)
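# Hedged illustration (not part of the original module):
#   strip_ansi('\x1b[31mred\x1b[0m plain') -> 'red plain'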
def _pop_readline_history(clear_history=True):
"""Returns a copy of readline's history and optionally clears it (default)"""
# noinspection PyArgumentList
if rl_type == RlType.NONE:
return []
history = [
readline.get_history_item(i)
for i in range(1, 1 + readline.get_current_history_length())
]
if clear_history:
readline.clear_history()
return history
def _push_readline_history(history, clear_history=True):
"""Restores readline's history and optionally clears it first (default)"""
if rl_type != RlType.NONE:
if clear_history:
readline.clear_history()
for line in history:
readline.add_history(line)
def _complete_from_cmd(cmd_obj, text, line, begidx, endidx):
"""Complete as though the user was typing inside cmd's cmdloop()"""
from itertools import takewhile
command_subcommand_params = line.split(None, 3)
if len(command_subcommand_params) < (3 if text else 2):
n = len(command_subcommand_params[0])
n += sum(1 for _ in takewhile(str.isspace, line[n:]))
return cmd_obj.completenames(text, line[n:], begidx - n, endidx - n)
command, subcommand = command_subcommand_params[:2]
n = len(command) + sum(1 for _ in takewhile(str.isspace, line))
cfun = getattr(cmd_obj, 'complete_' + subcommand, cmd_obj.complete)
return cfun(text, line[n:], begidx - n, endidx - n)
class AddSubmenu(object):
"""Conveniently add a submenu (Cmd-like class) to a Cmd
e.g. given "class SubMenu(Cmd): ..." then
@AddSubmenu(SubMenu(), 'sub')
class MyCmd(cmd.Cmd):
....
will have the following effects:
1. 'sub' will interactively enter the cmdloop of a SubMenu instance
2. 'sub cmd args' will call do_cmd(args) in a SubMenu instance
3. 'sub ... [TAB]' will have the same behavior as [TAB] in a SubMenu cmdloop
i.e., autocompletion works the way you think it should
4. 'help sub [cmd]' will print SubMenu's help (calls its do_help())
"""
class _Nonexistent(object):
"""
Used to mark missing attributes.
Disable __dict__ creation since this class does nothing
"""
        __slots__ = ()
def __init__(self,
submenu,
command,
aliases=(),
reformat_prompt="{super_prompt}>> {sub_prompt}",
shared_attributes=None,
require_predefined_shares=True,
create_subclass=False,
preserve_shares=False,
persistent_history_file=None
):
"""Set up the class decorator
submenu (Cmd): Instance of something cmd.Cmd-like
command (str): The command the user types to access the SubMenu instance
aliases (iterable): More commands that will behave like "command"
reformat_prompt (str): Format str or None to disable
if it's a string, it should contain one or more of:
{super_prompt}: The current cmd's prompt
{command}: The command in the current cmd with which it was called
{sub_prompt}: The subordinate cmd's original prompt
            the default is "{super_prompt}>> {sub_prompt}"
shared_attributes (dict): dict of the form {'subordinate_attr': 'parent_attr'}
the attributes are copied to the submenu at the last moment; the submenu's
attributes are backed up before this and restored afterward
require_predefined_shares: The shared attributes above must be independently
defined in the subordinate Cmd (default: True)
create_subclass: put the modifications in a subclass rather than modifying
the existing class (default: False)
"""
self.submenu = submenu
self.command = command
self.aliases = aliases
if persistent_history_file:
self.persistent_history_file = os.path.expanduser(persistent_history_file)
else:
self.persistent_history_file = None
if reformat_prompt is not None and not isinstance(reformat_prompt, str):
raise ValueError("reformat_prompt should be either a format string or None")
self.reformat_prompt = reformat_prompt
self.shared_attributes = {} if shared_attributes is None else shared_attributes
if require_predefined_shares:
for attr in self.shared_attributes.keys():
if not hasattr(submenu, attr):
raise AttributeError("The shared attribute '{attr}' is not defined in {cmd}. Either define {attr} "
"in {cmd} or set require_predefined_shares=False."
.format(cmd=submenu.__class__.__name__, attr=attr))
self.create_subclass = create_subclass
self.preserve_shares = preserve_shares
def _get_original_attributes(self):
return {
attr: getattr(self.submenu, attr, AddSubmenu._Nonexistent)
for attr in self.shared_attributes.keys()
}
def _copy_in_shared_attrs(self, parent_cmd):
for sub_attr, par_attr in self.shared_attributes.items():
setattr(self.submenu, sub_attr, getattr(parent_cmd, par_attr))
def _copy_out_shared_attrs(self, parent_cmd, original_attributes):
if self.preserve_shares:
for sub_attr, par_attr in self.shared_attributes.items():
setattr(parent_cmd, par_attr, getattr(self.submenu, sub_attr))
else:
for attr, value in original_attributes.items():
if attr is not AddSubmenu._Nonexistent:
setattr(self.submenu, attr, value)
else:
delattr(self.submenu, attr)
def __call__(self, cmd_obj):
"""Creates a subclass of Cmd wherein the given submenu can be accessed via the given command"""
def enter_submenu(parent_cmd, line):
"""
This function will be bound to do_<submenu> and will change the scope of the CLI to that of the
submenu.
"""
submenu = self.submenu
original_attributes = self._get_original_attributes()
history = _pop_readline_history()
if self.persistent_history_file and rl_type != RlType.NONE:
try:
readline.read_history_file(self.persistent_history_file)
except FILE_NOT_FOUND_ERROR:
pass
try:
# copy over any shared attributes
self._copy_in_shared_attrs(parent_cmd)
if line.parsed.args:
# Remove the menu argument and execute the command in the submenu
line = submenu.parser_manager.parsed(line.parsed.args)
submenu.precmd(line)
ret = submenu.onecmd(line)
submenu.postcmd(ret, line)
else:
if self.reformat_prompt is not None:
prompt = submenu.prompt
submenu.prompt = self.reformat_prompt.format(
super_prompt=parent_cmd.prompt,
command=self.command,
sub_prompt=prompt,
)
submenu.cmdloop()
if self.reformat_prompt is not None:
# noinspection PyUnboundLocalVariable
self.submenu.prompt = prompt
finally:
# copy back original attributes
self._copy_out_shared_attrs(parent_cmd, original_attributes)
# write submenu history
if self.persistent_history_file and rl_type != RlType.NONE:
readline.write_history_file(self.persistent_history_file)
# reset main app history before exit
_push_readline_history(history)
def complete_submenu(_self, text, line, begidx, endidx):
"""
This function will be bound to complete_<submenu> and will perform the complete commands of the submenu.
"""
submenu = self.submenu
original_attributes = self._get_original_attributes()
try:
# copy over any shared attributes
self._copy_in_shared_attrs(_self)
# Reset the submenu's tab completion parameters
submenu.allow_appended_space = True
submenu.allow_closing_quote = True
submenu.display_matches = []
submenu.matches_delimited = False
return _complete_from_cmd(submenu, text, line, begidx, endidx)
finally:
# copy back original attributes
self._copy_out_shared_attrs(_self, original_attributes)
# Pass the submenu's tab completion parameters back up to the menu that called complete()
_self.allow_appended_space = submenu.allow_appended_space
_self.allow_closing_quote = submenu.allow_closing_quote
_self.display_matches = copy.copy(submenu.display_matches)
_self.matches_delimited = submenu.matches_delimited
original_do_help = cmd_obj.do_help
original_complete_help = cmd_obj.complete_help
def help_submenu(_self, line):
"""
This function will be bound to help_<submenu> and will call the help commands of the submenu.
"""
tokens = line.split(None, 1)
if tokens and (tokens[0] == self.command or tokens[0] in self.aliases):
self.submenu.do_help(tokens[1] if len(tokens) == 2 else '')
else:
original_do_help(_self, line)
def _complete_submenu_help(_self, text, line, begidx, endidx):
"""autocomplete to match help_submenu()'s behavior"""
tokens = line.split(None, 1)
            if len(tokens) == 2 and (
                    tokens[1].startswith(self.command)
                    or any(tokens[1].startswith(alias) for alias in self.aliases)
            ):
return self.submenu.complete_help(
text,
tokens[1],
begidx - line.index(tokens[1]),
endidx - line.index(tokens[1]),
)
else:
return original_complete_help(_self, text, line, begidx, endidx)
if self.create_subclass:
class _Cmd(cmd_obj):
do_help = help_submenu
complete_help = _complete_submenu_help
else:
_Cmd = cmd_obj
_Cmd.do_help = help_submenu
_Cmd.complete_help = _complete_submenu_help
# Create bindings in the parent command to the submenus commands.
setattr(_Cmd, 'do_' + self.command, enter_submenu)
setattr(_Cmd, 'complete_' + self.command, complete_submenu)
# Create additional bindings for aliases
for _alias in self.aliases:
setattr(_Cmd, 'do_' + _alias, enter_submenu)
setattr(_Cmd, 'complete_' + _alias, complete_submenu)
return _Cmd
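# A minimal usage sketch for the submenu decorator built above (illustrative
# only; the decorator's name, assumed here to be AddSubmenu, is defined earlier
# in this module, and SubApp/MainApp are hypothetical):
#
#     sub_app = SubApp()                       # any cmd2.Cmd subclass instance
#
#     @AddSubmenu(sub_app, command='sub', aliases=('s',))
#     class MainApp(Cmd):
#         """Typing 'sub' at the MainApp prompt enters sub_app's cmdloop."""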
class Cmd(cmd.Cmd):
"""An easy but powerful framework for writing line-oriented command interpreters.
Extends the Python Standard Library’s cmd package by adding many useful features
to the out-of-the-box configuration.
Line-oriented command interpreters are often useful for test harnesses, internal tools, and rapid prototypes.
"""
# Attributes used to configure the ParserManager (all are not dynamically settable at runtime)
blankLinesAllowed = False
commentGrammars = pyparsing.Or([pyparsing.pythonStyleComment, pyparsing.cStyleComment])
commentInProgress = pyparsing.Literal('/*') + pyparsing.SkipTo(pyparsing.stringEnd ^ '*/')
legalChars = u'!#$%.:?@_-' + pyparsing.alphanums + pyparsing.alphas8bit
multilineCommands = []
prefixParser = pyparsing.Empty()
redirector = '>' # for sending output to file
shortcuts = {'?': 'help', '!': 'shell', '@': 'load', '@@': '_relative_load'}
aliases = dict()
terminators = [';'] # make sure your terminators are not in legalChars!
# Attributes which are NOT dynamically settable at runtime
allow_cli_args = True # Should arguments passed on the command-line be processed as commands?
allow_redirection = True # Should output redirection and pipes be allowed
default_to_shell = False # Attempt to run unrecognized commands as shell commands
quit_on_sigint = False # Quit the loop on interrupt instead of just resetting prompt
reserved_words = []
# Attributes which ARE dynamically settable at runtime
colors = (platform.system() != 'Windows')
continuation_prompt = '> '
debug = False
echo = False
editor = os.environ.get('EDITOR')
if not editor:
if sys.platform[:3] == 'win':
editor = 'notepad'
else:
# Favor command-line editors first so we don't leave the terminal to edit
for editor in ['vim', 'vi', 'emacs', 'nano', 'pico', 'gedit', 'kate', 'subl', 'geany', 'atom']:
if _which(editor):
break
feedback_to_output = False # Do not include nonessentials in >, | output by default (things like timing)
locals_in_py = True
quiet = False # Do not suppress nonessential output
timing = False # Prints elapsed time for each command
# To make an attribute settable with the "do_set" command, add it to this ...
# This starts out as a dictionary but gets converted to an OrderedDict sorted alphabetically by key
settable = {'colors': 'Colorized output (*nix only)',
'continuation_prompt': 'On 2nd+ line of input',
'debug': 'Show full error stack on error',
'echo': 'Echo command issued into output',
'editor': 'Program used by ``edit``',
'feedback_to_output': 'Include nonessentials in `|`, `>` results',
'locals_in_py': 'Allow access to your application in py via self',
'prompt': 'The prompt issued to solicit input',
'quiet': "Don't print nonessential feedback",
'timing': 'Report execution times'}
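# A minimal configuration sketch (names hypothetical): parser-related class
# attributes such as multilineCommands and shortcuts must be set before the
# constructor runs, since the ParserManager is built from them in __init__;
# new settable parameters can be registered afterwards:
#
#     class MyApp(Cmd):
#         multilineCommands = ['orate']
#         shortcuts = {'?': 'help', '!': 'shell', '&': 'speak'}
#
#         def __init__(self):
#             Cmd.__init__(self)
#             self.settable['verbosity'] = 'How chatty the app is'
#             self.verbosity = 1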
def __init__(self, completekey='tab', stdin=None, stdout=None, persistent_history_file='',
persistent_history_length=1000, startup_script=None, use_ipython=False, transcript_files=None):
"""An easy but powerful framework for writing line-oriented command interpreters, extends Python's cmd package.
:param completekey: str - (optional) readline name of a completion key, default to Tab
:param stdin: (optional) alternate input file object, if not specified, sys.stdin is used
:param stdout: (optional) alternate output file object, if not specified, sys.stdout is used
:param persistent_history_file: str - (optional) file path to load a persistent readline history from
:param persistent_history_length: int - (optional) max number of lines which will be written to the history file
:param startup_script: str - (optional) file path to a script to load and execute at startup
:param use_ipython: (optional) should the "ipy" command be included for an embedded IPython shell
:param transcript_files: str - (optional) allows running transcript tests when allow_cli_args is False
"""
# If use_ipython is False, make sure the do_ipy() method doesn't exist
if not use_ipython:
try:
del Cmd.do_ipy
except AttributeError:
pass
# If persistent readline history is enabled, then read history from file and register to write to file at exit
if persistent_history_file and rl_type != RlType.NONE:
persistent_history_file = os.path.expanduser(persistent_history_file)
try:
readline.read_history_file(persistent_history_file)
# default history len is -1 (infinite), which may grow unruly
readline.set_history_length(persistent_history_length)
except FILE_NOT_FOUND_ERROR:
pass
atexit.register(readline.write_history_file, persistent_history_file)
# Call super class constructor. Need to do it in this way for Python 2 and 3 compatibility
cmd.Cmd.__init__(self, completekey=completekey, stdin=stdin, stdout=stdout)
# Commands to exclude from the help menu and tab completion
self.hidden_commands = ['eof', 'eos', '_relative_load']
# Commands to exclude from the history command
self.exclude_from_history = '''history edit eof eos'''.split()
self._finalize_app_parameters()
self.initial_stdout = sys.stdout
self.history = History()
self.pystate = {}
self.py_history = []
self.keywords = self.reserved_words + [fname[3:] for fname in dir(self) if fname.startswith('do_')]
self.parser_manager = ParserManager(redirector=self.redirector, terminators=self.terminators,
multilineCommands=self.multilineCommands,
legalChars=self.legalChars, commentGrammars=self.commentGrammars,
commentInProgress=self.commentInProgress,
blankLinesAllowed=self.blankLinesAllowed, prefixParser=self.prefixParser,
preparse=self.preparse, postparse=self.postparse, aliases=self.aliases,
shortcuts=self.shortcuts)
self._transcript_files = transcript_files
# Used to enable the ability for a Python script to quit the application
self._should_quit = False
# True if running inside a Python script or interactive console, False otherwise
self._in_py = False
# Stores results from the last command run to enable usage of results in a Python script or interactive console
# Built-in commands don't make use of this. It is purely there for user-defined commands and convenience.
self._last_result = None
# Used to save state during a redirection
self.kept_state = None
self.kept_sys = None
# Codes used for exit conditions
self._STOP_AND_EXIT = True # cmd convention
self._colorcodes = {'bold': {True: '\x1b[1m', False: '\x1b[22m'},
'cyan': {True: '\x1b[36m', False: '\x1b[39m'},
'blue': {True: '\x1b[34m', False: '\x1b[39m'},
'red': {True: '\x1b[31m', False: '\x1b[39m'},
'magenta': {True: '\x1b[35m', False: '\x1b[39m'},
'green': {True: '\x1b[32m', False: '\x1b[39m'},
'underline': {True: '\x1b[4m', False: '\x1b[24m'},
'yellow': {True: '\x1b[33m', False: '\x1b[39m'}}
# Used by the load command to store the current script dir as a LIFO queue to support the _relative_load command
self._script_dir = []
# Used when piping command output to a shell command
self.pipe_proc = None
# Used by complete() for readline tab completion
self.completion_matches = []
# Used to keep track of whether we are redirecting or piping output
self.redirecting = False
# If this string is non-empty, then this warning message will print if a broken pipe error occurs while printing
self.broken_pipe_warning = ''
# If a startup script is provided, then add it in the queue to load
if startup_script is not None:
startup_script = os.path.expanduser(startup_script)
if os.path.exists(startup_script) and os.path.getsize(startup_script) > 0:
self.cmdqueue.append('load {}'.format(startup_script))
############################################################################################################
# The following variables are used by tab-completion functions. They are reset each time complete() is run
# using reset_completion_defaults() and it is up to completer functions to set them before returning results.
############################################################################################################
# If true and a single match is returned to complete(), then a space will be appended
# if the match appears at the end of the line
self.allow_appended_space = True
# If true and a single match is returned to complete(), then a closing quote
# will be added if there is an unmatched opening quote
self.allow_closing_quote = True
# If the tab-completion suggestions should be displayed in a way that is different from the actual match values,
# then place those results in this list. The full matches still must be returned from your completer function.
# For an example, look at path_complete() which uses this to show only the basename of paths as the
# suggestions. delimiter_complete() also populates this list.
self.display_matches = []
# Used by functions like path_complete() and delimiter_complete() to properly
# quote matches that are completed in a delimited fashion
self.matches_delimited = False
# ----- Methods related to presenting output to the user -----
@property
def visible_prompt(self):
"""Read-only property to get the visible prompt with any ANSI escape codes stripped.
Used by transcript testing to make it easier and more reliable when users are doing things like coloring the
prompt using ANSI color codes.
:return: str - prompt stripped of any ANSI escape codes
"""
return strip_ansi(self.prompt)
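# Illustrative example (not executed): if self.prompt is the ANSI-colored
# string '\x1b[32mapp> \x1b[39m', visible_prompt evaluates to 'app> ',
# which is what transcript tests compare against.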
def _finalize_app_parameters(self):
self.commentGrammars.ignore(pyparsing.quotedString).setParseAction(lambda x: '')
# noinspection PyUnresolvedReferences
self.shortcuts = sorted(self.shortcuts.items(), reverse=True)
# Make sure settable parameters are sorted alphabetically by key
self.settable = collections.OrderedDict(sorted(self.settable.items(), key=lambda t: t[0]))
def poutput(self, msg, end='\n'):
"""Convenient shortcut for self.stdout.write(); by default adds newline to end if not already present.
Also handles BrokenPipeError exceptions for when a command's output has been piped to another process and
that process terminates before the cmd2 command is finished executing.
:param msg: str - message to print to current stdout - anything convertible to a str with '{}'.format() is OK
:param end: str - string appended after the end of the message if not already present, default a newline
"""
if msg is not None and msg != '':
try:
msg_str = '{}'.format(msg)
self.stdout.write(msg_str)
if not msg_str.endswith(end):
self.stdout.write(end)
except BROKEN_PIPE_ERROR:
# This occurs if a command's output is being piped to another process and that process closes before the
# command is finished. If you would like your application to print a warning message, then set the
# broken_pipe_warning attribute to the message you want printed.
if self.broken_pipe_warning:
sys.stderr.write(self.broken_pipe_warning)
def perror(self, errmsg, exception_type=None, traceback_war=True):
""" Print error message to sys.stderr and if debug is true, print an exception Traceback if one exists.
:param errmsg: str - error message to print out
:param exception_type: str - (optional) type of exception which precipitated this error message
:param traceback_war: bool - (optional) if True, print a message to let user know they can enable debug
:return:
"""
if self.debug:
traceback.print_exc()
if exception_type is None:
err = self.colorize("ERROR: {}\n".format(errmsg), 'red')
sys.stderr.write(err)
else:
err = "EXCEPTION of type '{}' occurred with message: '{}'\n".format(exception_type, errmsg)
sys.stderr.write(self.colorize(err, 'red'))
if traceback_war:
war = "To enable full traceback, run the following command: 'set debug true'\n"
sys.stderr.write(self.colorize(war, 'yellow'))
def pfeedback(self, msg):
"""For printing nonessential feedback. Can be silenced with `quiet`.
Inclusion in redirected output is controlled by `feedback_to_output`."""
if not self.quiet:
if self.feedback_to_output:
self.poutput(msg)
else:
sys.stderr.write("{}\n".format(msg))
def ppaged(self, msg, end='\n'):
"""Print output using a pager if it would go off screen and stdout isn't currently being redirected.
Never uses a pager inside a script (Python or text), when output is being redirected or piped, or when
stdout or stdin is not a fully functional terminal.
:param msg: str - message to print to current stdout - anything convertible to a str with '{}'.format() is OK
:param end: str - string appended after the end of the message if not already present, default a newline
"""
if msg is not None and msg != '':
try:
msg_str = '{}'.format(msg)
if not msg_str.endswith(end):
msg_str += end
# Attempt to detect if we are not running within a fully functional terminal.
# Don't try to use the pager when being run by a continuous integration system like Jenkins + pexpect.
functional_terminal = False
if self.stdin.isatty() and self.stdout.isatty():
if sys.platform.startswith('win') or os.environ.get('TERM') is not None:
functional_terminal = True
# Don't attempt to use a pager that can block if redirecting or running a script (either text or Python)
# Also only attempt to use a pager if actually running in a real fully functional terminal
if functional_terminal and not self.redirecting and not self._in_py and not self._script_dir:
if sys.platform.startswith('win'):
pager_cmd = 'more'
else:
# Here is the meaning of the various flags we are using with the less command:
# -S causes lines longer than the screen width to be chopped (truncated) rather than wrapped
# -R causes ANSI "color" escape sequences to be output in raw form (i.e. colors are displayed)
# -X disables sending the termcap initialization and deinitialization strings to the terminal
# -F causes less to automatically exit if the entire file can be displayed on the first screen
pager_cmd = 'less -SRXF'
self.pipe_proc = subprocess.Popen(pager_cmd, shell=True, stdin=subprocess.PIPE)
try:
self.pipe_proc.stdin.write(msg_str.encode('utf-8', 'replace'))
self.pipe_proc.stdin.close()
except (IOError, KeyboardInterrupt):
pass
# Less doesn't respect ^C, but catches it for its own UI purposes (aborting search etc. inside less)
while True:
try:
self.pipe_proc.wait()
except KeyboardInterrupt:
pass
else:
break
self.pipe_proc = None
else:
self.stdout.write(msg_str)
except BROKEN_PIPE_ERROR:
# This occurs if a command's output is being piped to another process and that process closes before the
# command is finished. If you would like your application to print a warning message, then set the
# broken_pipe_warning attribute to the message you want printed.
if self.broken_pipe_warning:
sys.stderr.write(self.broken_pipe_warning)
def colorize(self, val, color):
"""Given a string (``val``), returns that string wrapped in UNIX-style
special characters that turn on (and then off) text color and style.
If the ``colors`` environment parameter is ``False``, or the application
is running on Windows, will return ``val`` unchanged.
``color`` should be one of the supported strings (or styles):
red/blue/green/cyan/magenta, bold, underline"""
if self.colors and (self.stdout == self.initial_stdout):
return self._colorcodes[color][True] + val + self._colorcodes[color][False]
return val
def get_subcommands(self, command):
"""
Returns a list of a command's subcommand names if they exist
:param command: the command we are querying
:return: A subcommand list or None
"""
subcommand_names = None
# Check if this is a valid command
funcname = self._func_named(command)
if funcname:
# Check to see if this function was decorated with an argparse ArgumentParser
func = getattr(self, funcname)
subcommands = func.__dict__.get('subcommands', None)
if subcommands is not None:
subcommand_names = subcommands.keys()
return subcommand_names
def get_subcommand_completer(self, command, subcommand):
"""
Returns a subcommand's tab completion function if one exists
:param command: command which owns the subcommand
:param subcommand: the subcommand we are querying
:return: A completer or None
"""
completer = None
# Check if this is a valid command
funcname = self._func_named(command)
if funcname:
# Check to see if this function was decorated with an argparse ArgumentParser
func = getattr(self, funcname)
subcommands = func.__dict__.get('subcommands', None)
if subcommands is not None:
completer = subcommands[subcommand]
return completer
# ----- Methods related to tab completion -----
def reset_completion_defaults(self):
"""
Resets tab completion settings
Needs to be called each time readline runs tab completion
"""
self.allow_appended_space = True
self.allow_closing_quote = True
self.display_matches = []
self.matches_delimited = False
if rl_type == RlType.GNU:
readline.set_completion_display_matches_hook(self._display_matches_gnu_readline)
elif rl_type == RlType.PYREADLINE:
readline.rl.mode._display_completions = self._display_matches_pyreadline
def tokens_for_completion(self, line, begidx, endidx):
"""
Used by tab completion functions to get all tokens through the one being completed
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:return: A 2 item tuple where the items are
On Success
tokens: list of unquoted tokens
this is generally the list needed for tab completion functions
raw_tokens: list of tokens with any quotes preserved
this can be used to know if a token was quoted or is missing a closing quote
Both lists are guaranteed to have at least 1 item
The last item in both lists is the token being tab completed
On Failure
Both items are None
"""
unclosed_quote = ''
quotes_to_try = copy.copy(QUOTES)
tmp_line = line[:endidx]
tmp_endidx = endidx
# Parse the line into tokens
while True:
try:
# Use non-POSIX parsing to keep the quotes around the tokens
initial_tokens = shlex.split(tmp_line[:tmp_endidx], posix=False)
# If the cursor is at an empty token outside of a quoted string,
# then that is the token being completed. Add it to the list.
if not unclosed_quote and begidx == tmp_endidx:
initial_tokens.append('')
break
except ValueError:
# ValueError can be caused by missing closing quote
if not quotes_to_try:
# Since we have no more quotes to try, something else
# is causing the parsing error. Return None since
# this means the line is malformed.
return None, None
# Add a closing quote and try to parse again
unclosed_quote = quotes_to_try[0]
quotes_to_try = quotes_to_try[1:]
tmp_line = line[:endidx]
tmp_line += unclosed_quote
tmp_endidx = endidx + 1
if self.allow_redirection:
# Since redirection is enabled, we need to treat redirection characters (|, <, >)
# as word breaks when they are in unquoted strings. Go through each token
# and further split them on these characters. Each run of redirect characters
# is treated as a single token.
raw_tokens = []
for cur_initial_token in initial_tokens:
# Save tokens up to 1 character in length or quoted tokens. No need to parse these.
if len(cur_initial_token) <= 1 or cur_initial_token[0] in QUOTES:
raw_tokens.append(cur_initial_token)
continue
# Iterate over each character in this token
cur_index = 0
cur_char = cur_initial_token[cur_index]
# Keep track of the token we are building
cur_raw_token = ''
while True:
if cur_char not in REDIRECTION_CHARS:
# Keep appending to cur_raw_token until we hit a redirect char
while cur_char not in REDIRECTION_CHARS:
cur_raw_token += cur_char
cur_index += 1
if cur_index < len(cur_initial_token):
cur_char = cur_initial_token[cur_index]
else:
break
else:
redirect_char = cur_char
# Keep appending to cur_raw_token until we hit something other than redirect_char
while cur_char == redirect_char:
cur_raw_token += cur_char
cur_index += 1
if cur_index < len(cur_initial_token):
cur_char = cur_initial_token[cur_index]
else:
break
# Save the current token
raw_tokens.append(cur_raw_token)
cur_raw_token = ''
# Check if we've viewed all characters
if cur_index >= len(cur_initial_token):
break
else:
raw_tokens = initial_tokens
# Save the unquoted tokens
tokens = [strip_quotes(cur_token) for cur_token in raw_tokens]
# If the token being completed had an unclosed quote, we need
# to remove the closing quote that was added in order for it
# to match what was on the command line.
if unclosed_quote:
raw_tokens[-1] = raw_tokens[-1][:-1]
return tokens, raw_tokens
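# Illustrative example (not executed): for the in-progress input line
# 'load "my docs/f' with the cursor at the end, this method returns
# (['load', 'my docs/f'], ['load', '"my docs/f']) -- unquoted tokens first,
# then the raw tokens with the unclosed opening quote preserved.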
# noinspection PyUnusedLocal
@staticmethod
def basic_complete(text, line, begidx, endidx, match_against):
"""
Performs tab completion against a list
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:param match_against: Collection - the list being matched against
:return: List[str] - a list of possible tab completions
"""
return [cur_match for cur_match in match_against if cur_match.startswith(text)]
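# Illustrative sketch of a command completer built on basic_complete()
# (do_color and COLOR_CHOICES are hypothetical):
#
#     COLOR_CHOICES = ['red', 'green', 'blue']
#
#     def complete_color(self, text, line, begidx, endidx):
#         return self.basic_complete(text, line, begidx, endidx, COLOR_CHOICES)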
def delimiter_complete(self, text, line, begidx, endidx, match_against, delimiter):
"""
Performs tab completion against a list but each match is split on a delimiter and only
the portion of the match being tab completed is shown as the completion suggestions.
This is useful if you match against strings that are hierarchical in nature and have a
common delimiter.
An easy way to illustrate this concept is path completion since paths are just directories/files
delimited by a slash. If you are tab completing items in /home/user you don't get the following
as suggestions:
/home/user/file.txt /home/user/program.c
/home/user/maps/ /home/user/cmd2.py
Instead you are shown:
file.txt program.c
maps/ cmd2.py
For a large set of data, this can be visually more pleasing and easier to search.
Another example would be strings formatted with the following syntax: company::department::name
In this case the delimiter would be :: and the user could easily narrow down what they are looking
for if they were only shown suggestions in the category they are at in the string.
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:param match_against: Collection - the list being matched against
:param delimiter: str - what delimits each portion of the matches (ex: paths are delimited by a slash)
:return: List[str] - a list of possible tab completions
"""
matches = self.basic_complete(text, line, begidx, endidx, match_against)
# Display only the portion of the match that's being completed based on delimiter
if matches:
# Set this to True for proper quoting of matches with spaces
self.matches_delimited = True
# Get the common beginning for the matches
common_prefix = os.path.commonprefix(matches)
prefix_tokens = common_prefix.split(delimiter)
# Calculate what portion of the match we are completing
display_token_index = 0
if prefix_tokens:
display_token_index = len(prefix_tokens) - 1
# Get this portion for each match and store them in self.display_matches
for cur_match in matches:
match_tokens = cur_match.split(delimiter)
display_token = match_tokens[display_token_index]
if not display_token:
display_token = delimiter
self.display_matches.append(display_token)
return matches
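# Illustrative sketch: completing hierarchical names like
# 'company::department::person' one level at a time (ITEMS and the lookup
# command are hypothetical):
#
#     ITEMS = ['acme::sales::alice', 'acme::sales::bob', 'acme::eng::carol']
#
#     def complete_lookup(self, text, line, begidx, endidx):
#         return self.delimiter_complete(text, line, begidx, endidx, ITEMS, '::')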
def flag_based_complete(self, text, line, begidx, endidx, flag_dict, all_else=None):
"""
Tab completes based on a particular flag preceding the token being completed
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:param flag_dict: dict - dictionary whose structure is the following:
keys - flags (ex: -c, --create) that result in tab completion for the next
argument in the command line
values - there are two types of values
1. iterable list of strings to match against (dictionaries, lists, etc.)
2. function that performs tab completion (ex: path_complete)
:param all_else: Collection or function - an optional parameter for tab completing any token that isn't preceded
by a flag in flag_dict
:return: List[str] - a list of possible tab completions
"""
# Get all tokens through the one being completed
tokens, _ = self.tokens_for_completion(line, begidx, endidx)
if tokens is None:
return []
completions_matches = []
match_against = all_else
# Must have at least 2 args for a flag to precede the token being completed
if len(tokens) > 1:
flag = tokens[-2]
if flag in flag_dict:
match_against = flag_dict[flag]
# Perform tab completion using a Collection
if isinstance(match_against, Collection):
completions_matches = self.basic_complete(text, line, begidx, endidx, match_against)
# Perform tab completion using a function
elif callable(match_against):
completions_matches = match_against(text, line, begidx, endidx)
return completions_matches
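# Illustrative sketch of a flag_dict (the render command and its flags are
# hypothetical): a callable completes paths after -f/--file, while a list
# completes fixed choices after -c/--color:
#
#     def complete_render(self, text, line, begidx, endidx):
#         flag_dict = {'-f': self.path_complete,
#                      '--file': self.path_complete,
#                      '-c': ['red', 'green', 'blue'],
#                      '--color': ['red', 'green', 'blue']}
#         return self.flag_based_complete(text, line, begidx, endidx, flag_dict)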
def index_based_complete(self, text, line, begidx, endidx, index_dict, all_else=None):
"""
Tab completes based on a fixed position in the input string
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:param index_dict: dict - dictionary whose structure is the following:
keys - 0-based token indexes into command line that determine which tokens
perform tab completion
values - there are two types of values
1. iterable list of strings to match against (dictionaries, lists, etc.)
2. function that performs tab completion (ex: path_complete)
:param all_else: Collection or function - an optional parameter for tab completing any token that isn't at an
index in index_dict
:return: List[str] - a list of possible tab completions
"""
# Get all tokens through the one being completed
tokens, _ = self.tokens_for_completion(line, begidx, endidx)
if tokens is None:
return []
matches = []
# Get the index of the token being completed
index = len(tokens) - 1
# Check if token is at an index in the dictionary
if index in index_dict:
match_against = index_dict[index]
else:
match_against = all_else
# Perform tab completion using a Collection
if isinstance(match_against, Collection):
matches = self.basic_complete(text, line, begidx, endidx, match_against)
# Perform tab completion using a function
elif callable(match_against):
matches = match_against(text, line, begidx, endidx)
return matches
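# Illustrative sketch of an index_dict (the convert command is hypothetical):
# token 1 completes against fixed formats, everything later against paths:
#
#     def complete_convert(self, text, line, begidx, endidx):
#         index_dict = {1: ['json', 'yaml', 'csv']}
#         return self.index_based_complete(text, line, begidx, endidx,
#                                          index_dict, all_else=self.path_complete)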
# noinspection PyUnusedLocal
def path_complete(self, text, line, begidx, endidx, dir_exe_only=False, dir_only=False):
"""Performs completion of local file system paths
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:param dir_exe_only: bool - only return directories and executables, not non-executable files
:param dir_only: bool - only return directories
:return: List[str] - a list of possible tab completions
"""
# Used to complete ~ and ~user strings
def complete_users():
# We are returning ~user strings that resolve to directories,
# so don't append a space or quote in the case of a single result.
self.allow_appended_space = False
self.allow_closing_quote = False
users = []
# Windows lacks the pwd module so we can't get a list of users.
# Instead we will add a slash once the user enters text that
# resolves to an existing home directory.
if sys.platform.startswith('win'):
expanded_path = os.path.expanduser(text)
if os.path.isdir(expanded_path):
users.append(text + os.path.sep)
else:
import pwd
# Iterate through a list of users from the password database
for cur_pw in pwd.getpwall():
# Check if the user has an existing home dir
if os.path.isdir(cur_pw.pw_dir):
# Add a ~ to the user to match against text
cur_user = '~' + cur_pw.pw_name
if cur_user.startswith(text):
if add_trailing_sep_if_dir:
cur_user += os.path.sep
users.append(cur_user)
return users
# Determine if a trailing separator should be appended to directory completions
add_trailing_sep_if_dir = False
if endidx == len(line) or (endidx < len(line) and line[endidx] != os.path.sep):
add_trailing_sep_if_dir = True
# Used to replace cwd in the final results
cwd = os.getcwd()
cwd_added = False
# Used to replace expanded user path in final result
orig_tilde_path = ''
expanded_tilde_path = ''
# If the search text is blank, then search in the CWD for *
if not text:
search_str = os.path.join(os.getcwd(), '*')
cwd_added = True
else:
# Purposely don't match any path containing wildcards - what we are doing is complicated enough!
wildcards = ['*', '?']
for wildcard in wildcards:
if wildcard in text:
return []
# Start the search string
search_str = text + '*'
# Handle tilde expansion and completion
if text.startswith('~'):
sep_index = text.find(os.path.sep, 1)
# If there is no slash, then the user is still completing the user after the tilde
if sep_index == -1:
return complete_users()
# Otherwise expand the user dir
else:
search_str = os.path.expanduser(search_str)
# Get what we need to restore the original tilde path later
orig_tilde_path = text[:sep_index]
expanded_tilde_path = os.path.expanduser(orig_tilde_path)
# If the search text does not have a directory, then use the cwd
elif not os.path.dirname(text):
search_str = os.path.join(os.getcwd(), search_str)
cwd_added = True
# Set this to True for proper quoting of paths with spaces
self.matches_delimited = True
# Find all matching path completions
matches = glob.glob(search_str)
# Filter based on type
if dir_exe_only:
matches = [c for c in matches if os.path.isdir(c) or os.access(c, os.X_OK)]
elif dir_only:
matches = [c for c in matches if os.path.isdir(c)]
# Don't append a space or closing quote to directory
if len(matches) == 1 and os.path.isdir(matches[0]):
self.allow_appended_space = False
self.allow_closing_quote = False
# Build display_matches and add a slash to directories
for index, cur_match in enumerate(matches):
# Display only the basename of this path in the tab-completion suggestions
self.display_matches.append(os.path.basename(cur_match))
# Add a separator after directories if the next character isn't already a separator
if os.path.isdir(cur_match) and add_trailing_sep_if_dir:
matches[index] += os.path.sep
self.display_matches[index] += os.path.sep
# Remove cwd if it was added to match the text readline expects
if cwd_added:
matches = [cur_path.replace(cwd + os.path.sep, '', 1) for cur_path in matches]
# Restore the tilde string if we expanded one to match the text readline expects
if expanded_tilde_path:
matches = [cur_path.replace(expanded_tilde_path, orig_tilde_path, 1) for cur_path in matches]
return matches
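# Illustrative sketch: delegating a command's completion to path_complete()
# (the open command is hypothetical):
#
#     def complete_open(self, text, line, begidx, endidx):
#         return self.path_complete(text, line, begidx, endidx)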
@staticmethod
def get_exes_in_path(starts_with):
"""
Returns names of executables in a user's path
:param starts_with: str - what the exes should start with. Leave blank for all exes in path.
:return: List[str] - a list of matching exe names
"""
# Purposely don't match any executable containing wildcards
wildcards = ['*', '?']
for wildcard in wildcards:
if wildcard in starts_with:
return []
# Get a list of every directory in the PATH environment variable and ignore symbolic links
paths = [p for p in os.getenv('PATH').split(os.path.pathsep) if not os.path.islink(p)]
# Use a set to store exe names since there can be duplicates
exes_set = set()
# Find every executable file in the user's path that matches the pattern
for path in paths:
full_path = os.path.join(path, starts_with)
matches = [f for f in glob.glob(full_path + '*') if os.path.isfile(f) and os.access(f, os.X_OK)]
for match in matches:
exes_set.add(os.path.basename(match))
return list(exes_set)
def shell_cmd_complete(self, text, line, begidx, endidx, complete_blank=False):
"""Performs completion of executables either in a user's path or a given path
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:param complete_blank: bool - If True, then a blank will complete all shell commands in a user's path
If False, then no completion is performed
Defaults to False to match Bash shell behavior
:return: List[str] - a list of possible tab completions
"""
# Don't tab complete anything if no shell command has been started
if not complete_blank and not text:
return []
# If there are no path characters in the search text, then do shell command completion in the user's path
if not text.startswith('~') and os.path.sep not in text:
return self.get_exes_in_path(text)
# Otherwise look for executables in the given path
else:
return self.path_complete(text, line, begidx, endidx, dir_exe_only=True)
def _redirect_complete(self, text, line, begidx, endidx, compfunc):
"""
Called by complete() as the first tab completion function for all commands
It determines if it should tab complete for redirection (|, <, >, >>) or use the
completer function for the current command
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:param compfunc: Callable - the completer function for the current command
this will be called if we aren't completing for redirection
:return: List[str] - a list of possible tab completions
"""
if self.allow_redirection:
# Get all tokens through the one being completed. We want the raw tokens
# so we can tell if redirection strings are quoted and ignore them.
_, raw_tokens = self.tokens_for_completion(line, begidx, endidx)
if raw_tokens is None:
return []
if len(raw_tokens) > 1:
# Build a list of all redirection tokens
all_redirects = REDIRECTION_CHARS + ['>>']
# Check if there are redirection strings prior to the token being completed
seen_pipe = False
has_redirection = False
for cur_token in raw_tokens[:-1]:
if cur_token in all_redirects:
has_redirection = True
if cur_token == '|':
seen_pipe = True
# Get token prior to the one being completed
prior_token = raw_tokens[-2]
# If a pipe is right before the token being completed, complete a shell command as the piped process
if prior_token == '|':
return self.shell_cmd_complete(text, line, begidx, endidx)
# Otherwise do path completion either as files to redirectors or arguments to the piped process
elif prior_token in all_redirects or seen_pipe:
return self.path_complete(text, line, begidx, endidx)
# If there were redirection strings anywhere on the command line, then we
# are no longer tab completing for the current command
elif has_redirection:
return []
# Call the command's completer function
return compfunc(text, line, begidx, endidx)
@staticmethod
def _pad_matches_to_display(matches_to_display):
"""
Adds padding to the matches being displayed as tab completion suggestions.
The default padding of readline/pyreadline is small and not visually appealing,
especially if matches have spaces. It appears very squished together.
:param matches_to_display: the matches being padded
:return: the padded matches and length of padding that was added
"""
if rl_type == RlType.GNU:
# Add 2 to the padding of 2 that readline uses for a total of 4.
padding = 2 * ' '
elif rl_type == RlType.PYREADLINE:
# Add 3 to the padding of 1 that pyreadline uses for a total of 4.
padding = 3 * ' '
else:
return matches_to_display, 0
return [cur_match + padding for cur_match in matches_to_display], len(padding)
def _display_matches_gnu_readline(self, substitution, matches, longest_match_length):
"""
Prints a match list using GNU readline's rl_display_match_list()
This exists to print self.display_matches if it has data. Otherwise, matches is printed.
:param substitution: str - the substitution written to the command line
:param matches: list[str] - the tab completion matches to display
:param longest_match_length: int - longest printed length of the matches
"""
if rl_type == RlType.GNU:
# Check if we should show display_matches
if self.display_matches:
matches_to_display = self.display_matches
# Recalculate longest_match_length for display_matches
longest_match_length = 0
for cur_match in matches_to_display:
cur_length = wcswidth(cur_match)
if cur_length > longest_match_length:
longest_match_length = cur_length
else:
matches_to_display = matches
# Add padding for visual appeal
matches_to_display, padding_length = self._pad_matches_to_display(matches_to_display)
longest_match_length += padding_length
# We will use readline's display function (rl_display_match_list()), so we
# need to encode our string as bytes to place in a C array.
if six.PY3:
encoded_substitution = bytes(substitution, encoding='utf-8')
encoded_matches = [bytes(cur_match, encoding='utf-8') for cur_match in matches_to_display]
else:
encoded_substitution = bytes(substitution)
encoded_matches = [bytes(cur_match) for cur_match in matches_to_display]
# rl_display_match_list() expects matches to be in argv format where
# substitution is the first element, followed by the matches, and then a NULL.
# noinspection PyCallingNonCallable,PyTypeChecker
strings_array = (ctypes.c_char_p * (1 + len(encoded_matches) + 1))()
# Copy in the encoded strings and add a NULL to the end
strings_array[0] = encoded_substitution
strings_array[1:-1] = encoded_matches
strings_array[-1] = None
# Call readline's display function
# rl_display_match_list(strings_array, number of completion matches, longest match length)
readline_lib.rl_display_match_list(strings_array, len(encoded_matches), longest_match_length)
# rl_forced_update_display() is the proper way to redraw the prompt and line, but we
# have to use ctypes to do it since Python's readline API does not wrap the function
readline_lib.rl_forced_update_display()
# Since we updated the display, readline asks that rl_display_fixed be set for efficiency
display_fixed = ctypes.c_int.in_dll(readline_lib, "rl_display_fixed")
display_fixed.value = 1
def _display_matches_pyreadline(self, matches):
"""
Prints a match list using pyreadline's _display_completions()
This exists to print self.display_matches if it has data. Otherwise, matches is printed.
:param matches: list[str] - the tab completion matches to display
"""
if rl_type == RlType.PYREADLINE:
# Check if we should show display_matches
if self.display_matches:
matches_to_display = self.display_matches
else:
matches_to_display = matches
# Add padding for visual appeal
matches_to_display, _ = self._pad_matches_to_display(matches_to_display)
# Display the matches
orig_pyreadline_display(matches_to_display)
# ----- Methods which override stuff in cmd -----
def complete(self, text, state):
"""Override of command method which returns the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
This method gets called directly by readline because it is set as the tab-completion function.
This completer function is called as complete(text, state), for state in 0, 1, 2, …, until it returns a
non-string value. It should return the next possible completion starting with text.
:param text: str - the current word that user is typing
:param state: int - non-negative integer
"""
if state == 0 and rl_type != RlType.NONE:
unclosed_quote = ''
self.reset_completion_defaults()
# lstrip the original line
orig_line = readline.get_line_buffer()
line = orig_line.lstrip()
stripped = len(orig_line) - len(line)
# Calculate new indexes for the stripped line. If the cursor is at a position before the end of a
# line of spaces, then the following math could result in negative indexes. Enforce a max of 0.
begidx = max(readline.get_begidx() - stripped, 0)
endidx = max(readline.get_endidx() - stripped, 0)
# Shortcuts are not word break characters when tab completing. Therefore shortcuts become part
# of the text variable if there isn't a word break, like a space, after it. We need to remove it
# from text and update the indexes. This only applies if we are at the beginning of the line.
shortcut_to_restore = ''
if begidx == 0:
for (shortcut, expansion) in self.shortcuts:
if text.startswith(shortcut):
# Save the shortcut to restore later
shortcut_to_restore = shortcut
# Adjust text and where it begins
text = text[len(shortcut_to_restore):]
begidx += len(shortcut_to_restore)
break
# If begidx is greater than 0, then we are no longer completing the command
if begidx > 0:
# Parse the command line
command, args, expanded_line = self.parseline(line)
# We overwrote line with a properly formatted but fully stripped version
# Restore the end spaces since line is only supposed to be lstripped when
# passed to completer functions according to Python docs
rstripped_len = len(line) - len(line.rstrip())
expanded_line += ' ' * rstripped_len
# Fix the index values if expanded_line has a different size than line
if len(expanded_line) != len(line):
diff = len(expanded_line) - len(line)
begidx += diff
endidx += diff
# Overwrite line to pass into completers
line = expanded_line
# Get all tokens through the one being completed
tokens, raw_tokens = self.tokens_for_completion(line, begidx, endidx)
# Either there was a parsing error or we are still completing the command token.
# The latter can happen if default_to_shell is True and parseline()
# assumed something like " or ' was a command.
if tokens is None or len(tokens) == 1:
self.completion_matches = []
return None
# Text we need to remove from completions later
text_to_remove = ''
# Get the token being completed with any opening quote preserved
raw_completion_token = raw_tokens[-1]
# Check if the token being completed has an opening quote
if raw_completion_token and raw_completion_token[0] in QUOTES:
# Since the token is still being completed, we know the opening quote is unclosed
unclosed_quote = raw_completion_token[0]
# readline still performs word breaks after a quote. Therefore something like quoted search
# text with a space would have resulted in begidx pointing to the middle of the token
# we want to complete. Figure out where that token actually begins and save the beginning
# portion of it that was not part of the text readline gave us. We will remove it from the
# completions later since readline expects them to start with the original text.
actual_begidx = line[:endidx].rfind(tokens[-1])
if actual_begidx != begidx:
text_to_remove = line[actual_begidx:begidx]
# Adjust text and where it begins so the completer routines
# get unbroken search text to complete on.
text = text_to_remove + text
begidx = actual_begidx
# Check if a valid command was entered
if command in self.get_all_commands():
# Get the completer function for this command
try:
compfunc = getattr(self, 'complete_' + command)
except AttributeError:
compfunc = self.completedefault
subcommands = self.get_subcommands(command)
if subcommands is not None:
# Since there are subcommands, then try completing those if the cursor is in
# the token at index 1, otherwise default to using compfunc
index_dict = {1: subcommands}
compfunc = functools.partial(self.index_based_complete,
index_dict=index_dict,
all_else=compfunc)
# A valid command was not entered
else:
# Check if this command should be run as a shell command
if self.default_to_shell and command in self.get_exes_in_path(command):
compfunc = self.path_complete
else:
compfunc = self.completedefault
# Attempt tab completion for redirection first, and if that isn't occurring,
# call the completer function for the current command
self.completion_matches = self._redirect_complete(text, line, begidx, endidx, compfunc)
if self.completion_matches:
# Eliminate duplicates
matches_set = set(self.completion_matches)
self.completion_matches = list(matches_set)
display_matches_set = set(self.display_matches)
self.display_matches = list(display_matches_set)
if not self.display_matches:
# Since self.display_matches is empty, set it to self.completion_matches
# before we alter them. That way the suggestions will reflect how we parsed
# the token being completed and not how readline did.
self.display_matches = copy.copy(self.completion_matches)
# Check if we need to add an opening quote
if not unclosed_quote:
add_quote = False
# This is the tab completion text that will appear on the command line.
common_prefix = os.path.commonprefix(self.completion_matches)
if self.matches_delimited:
# Check if any portion of the display matches appears in the tab completion
display_prefix = os.path.commonprefix(self.display_matches)
# For delimited matches, we check what appears before the display
# matches (common_prefix) as well as the display matches themselves.
if (' ' in common_prefix) or (display_prefix and ' ' in ''.join(self.display_matches)):
add_quote = True
# If there is a tab completion and any match has a space, then add an opening quote
elif common_prefix and ' ' in ''.join(self.completion_matches):
add_quote = True
if add_quote:
# Figure out what kind of quote to add and save it as the unclosed_quote
if '"' in ''.join(self.completion_matches):
unclosed_quote = "'"
else:
unclosed_quote = '"'
self.completion_matches = [unclosed_quote + match for match in self.completion_matches]
# Check if we need to remove text from the beginning of tab completions
elif text_to_remove:
self.completion_matches = \
[m.replace(text_to_remove, '', 1) for m in self.completion_matches]
# Check if we need to restore a shortcut in the tab completions
# so it doesn't get erased from the command line
if shortcut_to_restore:
self.completion_matches = \
[shortcut_to_restore + match for match in self.completion_matches]
else:
# Complete token against aliases and command names
alias_names = set(self.aliases.keys())
visible_commands = set(self.get_visible_commands())
strs_to_match = list(alias_names | visible_commands)
self.completion_matches = self.basic_complete(text, line, begidx, endidx, strs_to_match)
# Handle single result
if len(self.completion_matches) == 1:
str_to_append = ''
# Add a closing quote if needed and allowed
if self.allow_closing_quote and unclosed_quote:
str_to_append += unclosed_quote
# If we are at the end of the line, then add a space if allowed
if self.allow_appended_space and endidx == len(line):
str_to_append += ' '
self.completion_matches[0] += str_to_append
# Otherwise sort matches
elif self.completion_matches:
self.completion_matches.sort()
self.display_matches.sort()
try:
return self.completion_matches[state]
except IndexError:
return None
def get_all_commands(self):
"""
Returns a list of all commands
"""
return [cur_name[3:] for cur_name in self.get_names() if cur_name.startswith('do_')]
def get_visible_commands(self):
"""
Returns a list of commands that have not been hidden
"""
commands = self.get_all_commands()
# Remove the hidden commands
for name in self.hidden_commands:
if name in commands:
commands.remove(name)
return commands
def get_help_topics(self):
""" Returns a list of help topics """
return [name[5:] for name in self.get_names() if name.startswith('help_')]
def complete_help(self, text, line, begidx, endidx):
"""
Override of parent class method to handle tab completing subcommands and not showing hidden commands
Returns a list of possible tab completions
"""
# The command is the token at index 1 in the command line
cmd_index = 1
# The subcommand is the token at index 2 in the command line
subcmd_index = 2
# Get all tokens through the one being completed
tokens, _ = self.tokens_for_completion(line, begidx, endidx)
if tokens is None:
return []
matches = []
# Get the index of the token being completed
index = len(tokens) - 1
# Check if we are completing a command or help topic
if index == cmd_index:
# Complete token against topics and visible commands
topics = set(self.get_help_topics())
visible_commands = set(self.get_visible_commands())
strs_to_match = list(topics | visible_commands)
matches = self.basic_complete(text, line, begidx, endidx, strs_to_match)
# Check if we are completing a subcommand
elif index == subcmd_index:
# Match subcommands if any exist
command = tokens[cmd_index]
matches = self.basic_complete(text, line, begidx, endidx, self.get_subcommands(command))
return matches
# noinspection PyUnusedLocal
def sigint_handler(self, signum, frame):
"""Signal handler for SIGINTs which typically come from Ctrl-C events.
If you need custom SIGINT behavior, then override this function.
:param signum: int - signal number
:param frame
"""
# Save copy of pipe_proc since it could theoretically change while this is running
pipe_proc = self.pipe_proc
if pipe_proc is not None:
pipe_proc.terminate()
# Re-raise a KeyboardInterrupt so other parts of the code can catch it
raise KeyboardInterrupt("Got a keyboard interrupt")
def preloop(self):
""""Hook method executed once when the cmdloop() method is called."""
# Register a default SIGINT signal handler for Ctrl+C
signal.signal(signal.SIGINT, self.sigint_handler)
def precmd(self, statement):
"""Hook method executed just before the command is processed by ``onecmd()`` and after adding it to the history.
:param statement: ParsedString - subclass of str which also contains pyparsing ParseResults instance
:return: ParsedString - a potentially modified version of the input ParsedString statement
"""
return statement
# ----- Methods which are cmd2-specific lifecycle hooks which are not present in cmd -----
# noinspection PyMethodMayBeStatic
def preparse(self, raw):
"""Hook method executed just before the command line is interpreted, but after the input prompt is generated.
:param raw: str - raw command line input
:return: str - potentially modified raw command line input
"""
return raw
# noinspection PyMethodMayBeStatic
def postparse(self, parse_result):
"""Hook that runs immediately after parsing the command-line but before ``parsed()`` returns a ParsedString.
:param parse_result: pyparsing.ParseResults - parsing results output by the pyparsing parser
:return: pyparsing.ParseResults - potentially modified ParseResults object
"""
return parse_result
# noinspection PyMethodMayBeStatic
def postparsing_precmd(self, statement):
"""This runs after parsing the command-line, but before anything else; even before adding cmd to history.
NOTE: This runs before precmd() and prior to any potential output redirection or piping.
If you wish to fatally fail this command and exit the application entirely, set stop = True.
If you wish to just fail this command you can do so by raising an exception:
- raise EmptyStatement - will silently fail and do nothing
- raise <AnyOtherException> - will fail and print an error message
:param statement: - the parsed command-line statement
:return: (bool, statement) - (stop, statement) containing a potentially modified version of the statement
"""
stop = False
return stop, statement
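# Illustrative override sketch (self.locked and the unlock command are
# hypothetical): print a message, then skip the command by raising
# EmptyStatement, which the command loop swallows silently:
#
#     def postparsing_precmd(self, statement):
#         if self.locked and statement.parsed.command != 'unlock':
#             self.perror('Application is locked', traceback_war=False)
#             raise EmptyStatement()
#         return False, statement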
# noinspection PyMethodMayBeStatic
def postparsing_postcmd(self, stop):
"""This runs after everything else, including after postcmd().
It even runs when an empty line is entered. Thus, if you need to do something like update the prompt due
to notifications from a background thread, then this is the method you want to override to do it.
:param stop: bool - True implies the entire application should exit.
:return: bool - True implies the entire application should exit.
"""
if not sys.platform.startswith('win'):
# Fix those annoying problems that occur with terminal programs like "less" when you pipe to them
if self.stdin.isatty():
proc = subprocess.Popen(shlex.split('stty sane'))
proc.communicate()
return stop
def parseline(self, line):
"""Parse the line into a command name and a string containing the arguments.
NOTE: This is an override of a parent class method. It is only used by other parent class methods. But
we do need to override it here so that the additional shortcuts present in cmd2 get properly expanded for
purposes of tab completion.
Used for command tab completion. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
:param line: str - line read by readline
:return: (str, str, str) - tuple containing (command, args, line)
"""
line = line.strip()
if not line:
# Deal with empty line or all whitespace line
return None, None, line
# Make a copy of aliases so we can edit it
tmp_aliases = list(self.aliases.keys())
keep_expanding = len(tmp_aliases) > 0
# Expand aliases
while keep_expanding:
for cur_alias in tmp_aliases:
keep_expanding = False
if line == cur_alias or line.startswith(cur_alias + ' '):
line = line.replace(cur_alias, self.aliases[cur_alias], 1)
# Do not expand the same alias more than once
tmp_aliases.remove(cur_alias)
keep_expanding = len(tmp_aliases) > 0
break
# Expand command shortcut to its full command name
for (shortcut, expansion) in self.shortcuts:
if line.startswith(shortcut):
# If the next character after the shortcut isn't a space, then insert one
shortcut_len = len(shortcut)
if len(line) == shortcut_len or line[shortcut_len] != ' ':
expansion += ' '
# Expand the shortcut
line = line.replace(shortcut, expansion, 1)
break
i, n = 0, len(line)
# If we are allowing shell commands, then allow any character in the command
if self.default_to_shell:
while i < n and line[i] != ' ':
i += 1
# Otherwise only allow those in identchars
else:
while i < n and line[i] in self.identchars:
i += 1
command, arg = line[:i], line[i:].strip()
return command, arg, line
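# Illustrative example (not executed): with the default shortcuts,
# parseline('?foo') expands '?' to 'help' and returns
# ('help', 'foo', 'help foo').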
def onecmd_plus_hooks(self, line):
"""Top-level function called by cmdloop() to handle parsing a line and running the command and all of its hooks.
:param line: str - line of text read from input
:return: bool - True if cmdloop() should exit, False otherwise
"""
stop = 0
try:
statement = self._complete_statement(line)
(stop, statement) = self.postparsing_precmd(statement)
if stop:
return self.postparsing_postcmd(stop)
try:
if self.allow_redirection:
self._redirect_output(statement)
timestart = datetime.datetime.now()
statement = self.precmd(statement)
stop = self.onecmd(statement)
stop = self.postcmd(stop, statement)
if self.timing:
self.pfeedback('Elapsed: %s' % str(datetime.datetime.now() - timestart))
finally:
if self.allow_redirection and self.redirecting:
self._restore_output(statement)
except EmptyStatement:
pass
except ValueError as ex:
# If shlex.split failed on syntax, let the user know what's going on
self.perror("Invalid syntax: {}".format(ex), traceback_war=False)
except Exception as ex:
self.perror(ex, type(ex).__name__)
finally:
return self.postparsing_postcmd(stop)
def runcmds_plus_hooks(self, cmds):
"""Convenience method to run multiple commands by onecmd_plus_hooks.
This method adds the given cmds to the command queue and processes the
queue until completion or an error causes it to abort. Scripts that are
loaded will have their commands added to the queue. Scripts may even
load other scripts recursively. This means, however, that you should not
use this method if there is a running cmdloop or some other event-loop.
This method is only intended to be used in "one-off" scenarios.
NOTE: You may need this method even if you only have one command. If
that command is a load, then you will need this command to fully process
all the subsequent commands that are loaded from the script file. This
is an improvement over onecmd_plus_hooks, which expects to be used
inside of a command loop which does the processing of loaded commands.
Example: cmd_obj.runcmds_plus_hooks(['load myscript.txt'])
:param cmds: list - Command strings suitable for onecmd_plus_hooks.
:return: bool - True implies the entire application should exit.
"""
stop = False
self.cmdqueue = list(cmds) + self.cmdqueue
try:
while self.cmdqueue and not stop:
line = self.cmdqueue.pop(0)
if self.echo and line != 'eos':
self.poutput('{}{}'.format(self.prompt, line))
stop = self.onecmd_plus_hooks(line)
finally:
# Clear out the command queue and script directory stack, just in
# case we hit an error and they were not completed.
self.cmdqueue = []
self._script_dir = []
# NOTE: placing this return here inside the finally block will
# swallow exceptions. This is consistent with what is done in
# onecmd_plus_hooks and _cmdloop, although it may not be
# necessary/desired here.
return stop
def _complete_statement(self, line):
"""Keep accepting lines of input until the command is complete."""
if not line or (not pyparsing.Or(self.commentGrammars).setParseAction(lambda x: '').transformString(line)):
raise EmptyStatement()
statement = self.parser_manager.parsed(line)
while statement.parsed.multilineCommand and (statement.parsed.terminator == ''):
statement = '%s\n%s' % (statement.parsed.raw,
self.pseudo_raw_input(self.continuation_prompt))
statement = self.parser_manager.parsed(statement)
if not statement.parsed.command:
raise EmptyStatement()
return statement
def _redirect_output(self, statement):
"""Handles output redirection for >, >>, and |.
:param statement: ParsedString - subclass of str which also contains pyparsing ParseResults instance
"""
if statement.parsed.pipeTo:
self.kept_state = Statekeeper(self, ('stdout',))
# Create a pipe with read and write sides
read_fd, write_fd = os.pipe()
# Make sure that self.poutput() expects unicode strings in Python 3 and byte strings in Python 2
write_mode = 'w'
read_mode = 'r'
if six.PY2:
write_mode = 'wb'
read_mode = 'rb'
# Open each side of the pipe and set stdout accordingly
# noinspection PyTypeChecker
self.stdout = io.open(write_fd, write_mode)
self.redirecting = True
# noinspection PyTypeChecker
subproc_stdin = io.open(read_fd, read_mode)
# We want Popen to raise an exception if it fails to open the process. Thus we don't set shell to True.
try:
self.pipe_proc = subprocess.Popen(shlex.split(statement.parsed.pipeTo), stdin=subproc_stdin)
except Exception as ex:
# Restore stdout to what it was and close the pipe
self.stdout.close()
subproc_stdin.close()
self.pipe_proc = None
self.kept_state.restore()
self.kept_state = None
self.redirecting = False
# Re-raise the exception
raise ex
elif statement.parsed.output:
if (not statement.parsed.outputTo) and (not can_clip):
raise EnvironmentError('Cannot redirect to paste buffer; install ``xclip`` and re-run to enable')
self.kept_state = Statekeeper(self, ('stdout',))
self.kept_sys = Statekeeper(sys, ('stdout',))
self.redirecting = True
if statement.parsed.outputTo:
mode = 'w'
if statement.parsed.output == 2 * self.redirector:
mode = 'a'
try:
sys.stdout = self.stdout = open(os.path.expanduser(statement.parsed.outputTo), mode)
except (FILE_NOT_FOUND_ERROR, IOError) as ex:
self.perror('Not Redirecting because - {}'.format(ex), traceback_war=False)
self.redirecting = False
else:
sys.stdout = self.stdout = tempfile.TemporaryFile(mode="w+")
if statement.parsed.output == '>>':
self.poutput(get_paste_buffer())
def _restore_output(self, statement):
"""Handles restoring state after output redirection as well as the actual pipe operation if present.
:param statement: ParsedString - subclass of str which also contains pyparsing ParseResults instance
"""
# If we have redirected output to a file or the clipboard or piped it to a shell command, then restore state
if self.kept_state is not None:
# If we redirected output to the clipboard
if statement.parsed.output and not statement.parsed.outputTo:
self.stdout.seek(0)
write_to_paste_buffer(self.stdout.read())
try:
# Close the file or pipe that stdout was redirected to
self.stdout.close()
except BROKEN_PIPE_ERROR:
pass
finally:
# Restore self.stdout
self.kept_state.restore()
self.kept_state = None
# If we were piping output to a shell command, then close the subprocess the shell command was running in
if self.pipe_proc is not None:
self.pipe_proc.communicate()
self.pipe_proc = None
# Restore sys.stdout if need be
if self.kept_sys is not None:
self.kept_sys.restore()
self.kept_sys = None
self.redirecting = False
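# From the user's point of view, the redirection machinery above mimics a
# shell. A sketch of the supported forms (the command name `greet` is
# hypothetical):
#
#     greet > out.txt      # overwrite out.txt with the command's output
#     greet >> out.txt     # append the output to out.txt
#     greet >              # copy the output to the paste buffer (needs xclip on Linux)
#     greet | grep hello   # pipe the output through a shell command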
def _func_named(self, arg):
"""Gets the method name associated with a given command.
:param arg: str - command to look up method name which implements it
:return: str - method name which implements the given command
"""
result = None
target = 'do_' + arg
if target in dir(self):
result = target
return result
def onecmd(self, line):
""" This executes the actual do_* method for a command.
If the command provided doesn't exist, then it executes _default() instead.
:param line: ParsedString - subclass of string including the pyparsing ParseResults
:return: bool - a flag indicating whether the interpretation of commands should stop
"""
statement = self.parser_manager.parsed(line)
funcname = self._func_named(statement.parsed.command)
if not funcname:
return self.default(statement)
# Since we have a valid command store it in the history
if statement.parsed.command not in self.exclude_from_history:
self.history.append(statement.parsed.raw)
try:
func = getattr(self, funcname)
except AttributeError:
return self.default(statement)
stop = func(statement)
return stop
def default(self, statement):
"""Executed when the command given isn't a recognized command implemented by a do_* method.
:param statement: ParsedString - subclass of string including the pyparsing ParseResults
:return:
"""
arg = statement.full_parsed_statement()
if self.default_to_shell:
result = os.system(arg)
# If os.system() succeeded, then don't print warning about unknown command
if not result:
return
# Print out a message stating this is an unknown command
self.poutput('*** Unknown syntax: {}\n'.format(arg))
@staticmethod
def _surround_ansi_escapes(prompt, start="\x01", end="\x02"):
"""Overcome bug in GNU Readline in relation to calculation of prompt length in presence of ANSI escape codes.
:param prompt: str - original prompt
:param start: str - start code to tell GNU Readline about beginning of invisible characters
:param end: str - end code to tell GNU Readline about end of invisible characters
:return: str - prompt safe to pass to GNU Readline
"""
# Windows terminals don't use ANSI escape codes and Windows readline isn't based on GNU Readline
if sys.platform == "win32":
return prompt
escaped = False
result = ""
for c in prompt:
if c == "\x1b" and not escaped:
result += start + c
escaped = True
elif c.isalpha() and escaped:
result += c + end
escaped = False
else:
result += c
return result
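# A sketch of what the wrapping above produces (the escape codes shown are
# plain ANSI red/reset, nothing cmd2-specific):
#
#     colored = '\x1b[31m(Cmd)\x1b[0m '
#     safe = Cmd._surround_ansi_escapes(colored)
#     # On non-Windows platforms:
#     # safe == '\x01\x1b[31m\x02(Cmd)\x01\x1b[0m\x02 '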
def pseudo_raw_input(self, prompt):
"""
began life as a copy of cmd's cmdloop; like raw_input but
- accounts for changed stdin, stdout
- if input is a pipe (instead of a tty), look at self.echo
to decide whether to print the prompt and the input
"""
# Deal with the vagaries of readline and ANSI escape codes
safe_prompt = self._surround_ansi_escapes(prompt)
if self.use_rawinput:
try:
if sys.stdin.isatty():
line = sm.input(safe_prompt)
else:
line = sm.input()
if self.echo:
sys.stdout.write('{}{}\n'.format(safe_prompt, line))
except EOFError:
line = 'eof'
else:
if self.stdin.isatty():
# on a tty, print the prompt first, then read the line
self.poutput(safe_prompt, end='')
self.stdout.flush()
line = self.stdin.readline()
if len(line) == 0:
line = 'eof'
else:
# we are reading from a pipe, read the line to see if there is
# anything there, if so, then decide whether to print the
# prompt or not
line = self.stdin.readline()
if len(line):
# we read something, output the prompt and the something
if self.echo:
self.poutput('{}{}'.format(safe_prompt, line))
else:
line = 'eof'
return line.strip()
def _cmdloop(self):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
This serves the same role as cmd.cmdloop().
:return: bool - True implies the entire application should exit.
"""
# An almost perfect copy from Cmd; however, the pseudo_raw_input portion
# has been split out so that it can be called separately
if self.use_rawinput and self.completekey and rl_type != RlType.NONE:
# Set up readline for our tab completion needs
if rl_type == RlType.GNU:
# Set GNU readline's rl_basic_quote_characters to NULL so it won't automatically add a closing quote
# We don't need to worry about setting rl_completion_suppress_quote since we never declared
# rl_completer_quote_characters.
old_basic_quotes = ctypes.cast(rl_basic_quote_characters, ctypes.c_void_p).value
rl_basic_quote_characters.value = None
old_completer = readline.get_completer()
readline.set_completer(self.complete)
# Break words on whitespace and quotes when tab completing
completer_delims = " \t\n" + ''.join(QUOTES)
if self.allow_redirection:
# If redirection is allowed, then break words on those characters too
completer_delims += ''.join(REDIRECTION_CHARS)
old_delims = readline.get_completer_delims()
readline.set_completer_delims(completer_delims)
# Enable tab completion
readline.parse_and_bind(self.completekey + ": complete")
stop = None
try:
while not stop:
if self.cmdqueue:
# Run command out of cmdqueue if nonempty (populated by load command or commands at invocation)
line = self.cmdqueue.pop(0)
if self.echo and line != 'eos':
self.poutput('{}{}'.format(self.prompt, line))
else:
# Otherwise, read a command from stdin
if not self.quit_on_sigint:
try:
line = self.pseudo_raw_input(self.prompt)
except KeyboardInterrupt:
self.poutput('^C')
line = ''
else:
line = self.pseudo_raw_input(self.prompt)
# Run the command along with all associated pre and post hooks
stop = self.onecmd_plus_hooks(line)
finally:
if self.use_rawinput and self.completekey and rl_type != RlType.NONE:
# Restore what we changed in readline
readline.set_completer(old_completer)
readline.set_completer_delims(old_delims)
if rl_type == RlType.GNU:
readline.set_completion_display_matches_hook(None)
rl_basic_quote_characters.value = old_basic_quotes
elif rl_type == RlType.PYREADLINE:
readline.rl.mode._display_completions = orig_pyreadline_display
# Need to set empty list this way because Python 2 doesn't support the clear() method on lists
self.cmdqueue = []
self._script_dir = []
return stop
@with_argument_list
def do_alias(self, arglist):
"""Define or display aliases
Usage: alias [name] | [<name> <value>]
Where:
name - name of the alias being looked up, added, or replaced
value - what the alias will be resolved to (if adding or replacing)
this can contain spaces and does not need to be quoted
Without arguments, 'alias' prints a list of all aliases in a reusable form which
can be outputted to a startup_script to preserve aliases across sessions.
With one argument, 'alias' shows the value of the specified alias.
Example: alias ls (Prints the value of the alias called 'ls' if it exists)
With two or more arguments, 'alias' creates or replaces an alias.
Example: alias ls !ls -lF
If you want to use redirection or pipes in the alias, then either quote the tokens with these
characters or quote the entire alias value.
Examples:
alias save_results print_results ">" out.txt
alias save_results print_results "> out.txt"
alias save_results "print_results > out.txt"
"""
# If no args were given, then print a list of current aliases
if not arglist:
for cur_alias in self.aliases:
self.poutput("alias {} {}".format(cur_alias, self.aliases[cur_alias]))
# The user is looking up an alias
elif len(arglist) == 1:
name = arglist[0]
if name in self.aliases:
self.poutput("alias {} {}".format(name, self.aliases[name]))
else:
self.perror("Alias {!r} not found".format(name), traceback_war=False)
# The user is creating an alias
else:
name = arglist[0]
value = ' '.join(arglist[1:])
# Check for a valid name
for cur_char in name:
if cur_char not in self.identchars:
self.perror("Alias names can only contain the following characters: {}".format(self.identchars),
traceback_war=False)
return
# Set the alias
self.aliases[name] = value
self.poutput("Alias {!r} created".format(name))
def complete_alias(self, text, line, begidx, endidx):
""" Tab completion for alias """
alias_names = set(self.aliases.keys())
visible_commands = set(self.get_visible_commands())
index_dict = \
{
1: alias_names,
2: list(alias_names | visible_commands)
}
return self.index_based_complete(text, line, begidx, endidx, index_dict, self.path_complete)
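# A note on the index_dict convention used above: keys are 1-based token
# positions on the command line, values are either candidate collections or
# completer methods, and the trailing argument is the fallback completer.
# A sketch for a hypothetical do_greet command that takes a style then a path:
#
#     def complete_greet(self, text, line, begidx, endidx):
#         index_dict = {1: ['formal', 'casual']}
#         return self.index_based_complete(text, line, begidx, endidx,
#                                          index_dict, self.path_complete)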
@with_argument_list
def do_unalias(self, arglist):
"""Unsets aliases
Usage: unalias [-a] name [name ...]
Where:
name - name of the alias being unset
Options:
-a remove all alias definitions
"""
if not arglist:
self.do_help('unalias')
if '-a' in arglist:
self.aliases.clear()
self.poutput("All aliases cleared")
else:
# Get rid of duplicates
arglist = list(set(arglist))
for cur_arg in arglist:
if cur_arg in self.aliases:
del self.aliases[cur_arg]
self.poutput("Alias {!r} cleared".format(cur_arg))
else:
self.perror("Alias {!r} does not exist".format(cur_arg), traceback_war=False)
def complete_unalias(self, text, line, begidx, endidx):
""" Tab completion for unalias """
return self.basic_complete(text, line, begidx, endidx, self.aliases)
@with_argument_list
def do_help(self, arglist):
"""List available commands with "help" or detailed help with "help cmd"."""
if not arglist or (len(arglist) == 1 and arglist[0] in ('--verbose', '-v')):
verbose = len(arglist) == 1 and arglist[0] in ('--verbose', '-v')
self._help_menu(verbose)
else:
# Getting help for a specific command
funcname = self._func_named(arglist[0])
if funcname:
# Check to see if this function was decorated with an argparse ArgumentParser
func = getattr(self, funcname)
if func.__dict__.get('has_parser', False):
# Function has an argparser, so get help based on all the arguments in case there are sub-commands
new_arglist = arglist[1:]
new_arglist.append('-h')
# Temporarily redirect all argparse output to both sys.stdout and sys.stderr to self.stdout
with redirect_stdout(self.stdout):
with redirect_stderr(self.stdout):
func(new_arglist)
else:
# No special behavior needed, delegate to cmd base class do_help()
cmd.Cmd.do_help(self, funcname[3:])
else:
# This could be a help topic
cmd.Cmd.do_help(self, arglist[0])
def _help_menu(self, verbose=False):
"""Show a list of commands which help can be displayed for.
"""
# Get a sorted list of help topics
help_topics = self.get_help_topics()
help_topics.sort()
# Get a sorted list of visible command names
visible_commands = self.get_visible_commands()
visible_commands.sort()
cmds_doc = []
cmds_undoc = []
cmds_cats = {}
for command in visible_commands:
if command in help_topics or getattr(self, self._func_named(command)).__doc__:
if command in help_topics:
help_topics.remove(command)
if hasattr(getattr(self, self._func_named(command)), HELP_CATEGORY):
category = getattr(getattr(self, self._func_named(command)), HELP_CATEGORY)
cmds_cats.setdefault(category, [])
cmds_cats[category].append(command)
else:
cmds_doc.append(command)
else:
cmds_undoc.append(command)
if len(cmds_cats) == 0:
# No categories found, fall back to standard behavior
self.poutput("{}\n".format(str(self.doc_leader)))
self._print_topics(self.doc_header, cmds_doc, verbose)
else:
# Categories found, Organize all commands by category
self.poutput('{}\n'.format(str(self.doc_leader)))
self.poutput('{}\n\n'.format(str(self.doc_header)))
for category in sorted(cmds_cats.keys()):
self._print_topics(category, cmds_cats[category], verbose)
self._print_topics('Other', cmds_doc, verbose)
self.print_topics(self.misc_header, help_topics, 15, 80)
self.print_topics(self.undoc_header, cmds_undoc, 15, 80)
def _print_topics(self, header, cmds, verbose):
"""Customized version of print_topics that can switch between verbose or traditional output"""
if cmds:
if not verbose:
self.print_topics(header, cmds, 15, 80)
else:
self.stdout.write('{}\n'.format(str(header)))
widest = 0
# measure the commands
for command in cmds:
width = len(command)
if width > widest:
widest = width
# add a 4-space pad
widest += 4
if widest < 20:
widest = 20
if self.ruler:
self.stdout.write('{:{ruler}<{width}}\n'.format('', ruler=self.ruler, width=80))
for command in cmds:
# Try to get the documentation string
try:
# first see if there's a help function implemented
func = getattr(self, 'help_' + command)
except AttributeError:
# Couldn't find a help function
try:
# Now see if help_summary has been set
doc = getattr(self, self._func_named(command)).help_summary
except AttributeError:
# Last, try to directly access the function's doc-string
doc = getattr(self, self._func_named(command)).__doc__
else:
# we found the help function
result = StringIO()
# try to redirect system stdout
with redirect_stdout(result):
# save our internal stdout
stdout_orig = self.stdout
try:
# redirect our internal stdout
self.stdout = result
func()
finally:
# restore internal stdout
self.stdout = stdout_orig
doc = result.getvalue()
# Attempt to locate the first documentation block
doc_block = []
found_first = False
for doc_line in doc.splitlines():
if len(doc_line.strip()) > 0:
doc_block.append(doc_line.strip())
found_first = True
else:
if found_first:
break
for doc_line in doc_block:
self.stdout.write('{: <{col_width}}{doc}\n'.format(command,
col_width=widest,
doc=doc_line))
command = ''
self.stdout.write("\n")
def do_shortcuts(self, _):
"""Lists shortcuts (aliases) available."""
result = "\n".join('%s: %s' % (sc[0], sc[1]) for sc in sorted(self.shortcuts))
self.poutput("Shortcuts for other commands:\n{}\n".format(result))
def do_eof(self, _):
"""Called when <Ctrl>-D is pressed."""
# End of script should not exit app, but <Ctrl>-D should.
print('') # Required for clearing line when exiting submenu
return self._STOP_AND_EXIT
def do_quit(self, _):
"""Exits this application."""
self._should_quit = True
return self._STOP_AND_EXIT
def select(self, opts, prompt='Your choice? '):
"""Presents a numbered menu to the user. Modelled after
the bash shell's SELECT. Returns the item chosen.
Argument ``opts`` can be:
| a single string -> will be split into one-word options
| a list of strings -> will be offered as options
| a list of tuples -> interpreted as (value, text), so
that the return value can differ from
the text advertised to the user """
local_opts = opts
if isinstance(opts, string_types):
local_opts = list(zip(opts.split(), opts.split()))
fulloptions = []
for opt in local_opts:
if isinstance(opt, string_types):
fulloptions.append((opt, opt))
else:
try:
fulloptions.append((opt[0], opt[1]))
except IndexError:
fulloptions.append((opt[0], opt[0]))
for (idx, (value, text)) in enumerate(fulloptions):
self.poutput(' %2d. %s\n' % (idx + 1, text))
while True:
response = sm.input(prompt)
if rl_type != RlType.NONE:
hlen = readline.get_current_history_length()
if hlen >= 1 and response != '':
readline.remove_history_item(hlen - 1)
try:
response = int(response)
result = fulloptions[response - 1][0]
break
except (ValueError, IndexError):
self.poutput("{!r} isn't a valid choice. Pick a number between 1 and {}:\n".format(response,
len(fulloptions)))
return result
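# A minimal sketch of calling select() from inside a command (the do_eat
# command here is hypothetical):
#
#     def do_eat(self, arg):
#         sauce = self.select('sweet salty', 'Sauce? ')
#         meal = self.select([('bacon', 'Bacon'), ('eggs', 'Eggs')], 'Meal? ')
#         self.poutput('{} with {} sauce'.format(meal, sauce))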
def cmdenvironment(self):
"""Get a summary report of read-only settings which the user cannot modify at runtime.
:return: str - summary report of read-only settings which the user cannot modify at runtime
"""
read_only_settings = """
Commands may be terminated with: {}
Arguments at invocation allowed: {}
Output redirection and pipes allowed: {}
Parsing of @options commands:
Shell lexer mode for command argument splitting: {}
Strip Quotes after splitting arguments: {}
Argument type: {}
""".format(str(self.terminators), self.allow_cli_args, self.allow_redirection,
"POSIX" if POSIX_SHLEX else "non-POSIX",
"True" if STRIP_QUOTES_FOR_NON_POSIX and not POSIX_SHLEX else "False",
"List of argument strings" if USE_ARG_LIST else "string of space-separated arguments")
return read_only_settings
def show(self, args, parameter):
"""Show the current value of one or more settable parameters (helper used by do_set)."""
param = ''
if parameter:
param = parameter.strip().lower()
result = {}
maxlen = 0
for p in self.settable:
if (not param) or p.startswith(param):
result[p] = '%s: %s' % (p, str(getattr(self, p)))
maxlen = max(maxlen, len(result[p]))
if result:
for p in sorted(result):
if args.long:
self.poutput('{} # {}'.format(result[p].ljust(maxlen), self.settable[p]))
else:
self.poutput(result[p])
# If user has requested to see all settings, also show read-only settings
if args.all:
self.poutput('\nRead only settings:{}'.format(self.cmdenvironment()))
else:
raise LookupError("Parameter '%s' not supported (type 'show' for list of parameters)." % param)
set_parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
set_parser.add_argument('-a', '--all', action='store_true', help='display read-only settings as well')
set_parser.add_argument('-l', '--long', action='store_true', help='describe function of parameter')
set_parser.add_argument('settable', nargs='*', help='[param_name] [value]')
@with_argparser(set_parser)
def do_set(self, args):
"""Sets a settable parameter or shows current settings of parameters.
Accepts abbreviated parameter names so long as there is no ambiguity.
Call without arguments for a list of settable parameters with their values.
"""
try:
param_name, val = args.settable
val = val.strip()
param_name = param_name.strip().lower()
if param_name not in self.settable:
hits = [p for p in self.settable if p.startswith(param_name)]
if len(hits) == 1:
param_name = hits[0]
else:
return self.show(args, param_name)
current_val = getattr(self, param_name)
if (val[0] == val[-1]) and val[0] in ("'", '"'):
val = val[1:-1]
else:
val = cast(current_val, val)
setattr(self, param_name, val)
self.poutput('%s - was: %s\nnow: %s\n' % (param_name, current_val, val))
if current_val != val:
try:
onchange_hook = getattr(self, '_onchange_%s' % param_name)
onchange_hook(old=current_val, new=val)
except AttributeError:
pass
except (ValueError, AttributeError):
param = ''
if args.settable:
param = args.settable[0]
self.show(args, param)
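# Sketch of the optional change hook referenced above: if the application
# defines a method named _onchange_<param>, do_set calls it with the old and
# new values after a successful change (the parameter name below is
# hypothetical):
#
#     def _onchange_timing(self, old, new):
#         self.poutput('timing changed from {} to {}'.format(old, new))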
def do_shell(self, command):
"""Execute a command as if at the OS prompt.
Usage: shell <command> [arguments]"""
try:
# Use non-POSIX parsing to keep the quotes around the tokens
tokens = shlex.split(command, posix=False)
except ValueError as err:
self.perror(err, traceback_war=False)
return
# Support expanding ~ in quoted paths
for index, _ in enumerate(tokens):
if tokens[index]:
# Check if the token is quoted. Since shlex.split() passed, there isn't
# an unclosed quote, so we only need to check the first character.
first_char = tokens[index][0]
if first_char in QUOTES:
tokens[index] = strip_quotes(tokens[index])
tokens[index] = os.path.expanduser(tokens[index])
# Restore the quotes
if first_char in QUOTES:
tokens[index] = first_char + tokens[index] + first_char
expanded_command = ' '.join(tokens)
proc = subprocess.Popen(expanded_command, stdout=self.stdout, shell=True)
proc.communicate()
def complete_shell(self, text, line, begidx, endidx):
"""Handles tab completion of executable commands and local file system paths for the shell command
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:return: List[str] - a list of possible tab completions
"""
index_dict = {1: self.shell_cmd_complete}
return self.index_based_complete(text, line, begidx, endidx, index_dict, self.path_complete)
def cmd_with_subs_completer(self, text, line, begidx, endidx):
"""
This is a function provided for convenience to those who want an easy way to add
tab completion to functions that implement subcommands. By setting this as the
completer of the base command function, the correct completer for the chosen subcommand
will be called.
The use of this function requires assigning a completer function to the subcommand's parser
Example:
A command called print has a subcommand called 'names' that needs a tab completer
When you create the parser for names, include the completer function in the parser's defaults.
names_parser.set_defaults(func=print_names, completer=complete_print_names)
To make sure the names completer gets called, set the completer for the print function
in a similar fashion to what follows.
complete_print = cmd2.Cmd.cmd_with_subs_completer
When the subcommand's completer is called, this function will have stripped off all content from the
beginning of the command line before the subcommand, meaning the line parameter always starts with the
subcommand name and the index parameters reflect this change.
For instance, the command "print names -d 2" becomes "names -d 2"
begidx and endidx are incremented accordingly
:param text: str - the string prefix we are attempting to match (all returned matches must begin with it)
:param line: str - the current input line with leading whitespace removed
:param begidx: int - the beginning index of the prefix text
:param endidx: int - the ending index of the prefix text
:return: List[str] - a list of possible tab completions
"""
# The command is the token at index 0 in the command line
cmd_index = 0
# The subcommand is the token at index 1 in the command line
subcmd_index = 1
# Get all tokens through the one being completed
tokens, _ = self.tokens_for_completion(line, begidx, endidx)
if tokens is None:
return []
matches = []
# Get the index of the token being completed
index = len(tokens) - 1
# If the token being completed is past the subcommand name, then do subcommand specific tab-completion
if index > subcmd_index:
# Get the command name
command = tokens[cmd_index]
# Get the subcommand name
subcommand = tokens[subcmd_index]
# Find the offset into line where the subcommand name begins
subcmd_start = 0
for cur_index in range(0, subcmd_index + 1):
cur_token = tokens[cur_index]
subcmd_start = line.find(cur_token, subcmd_start)
if cur_index != subcmd_index:
subcmd_start += len(cur_token)
# Strip off everything before subcommand name
orig_line = line
line = line[subcmd_start:]
# Update the indexes
diff = len(orig_line) - len(line)
begidx -= diff
endidx -= diff
# Call the subcommand specific completer if it exists
compfunc = self.get_subcommand_completer(command, subcommand)
if compfunc is not None:
matches = compfunc(self, text, line, begidx, endidx)
return matches
@staticmethod
def _reset_py_display():
"""
Resets the dynamic objects in the sys module that the py and ipy consoles fight over.
When a Python console starts it adopts certain display settings if they've already been set.
If an ipy console has previously been run, then py uses its settings and ends up looking
like an ipy console in terms of prompt and exception text. This method forces the Python
console to create its own display settings since they won't exist.
IPython does not have this problem since it always overwrites the display settings when it
is run. Therefore this method only needs to be called before creating a Python console.
"""
# Delete any prompts that have been set
attributes = ['ps1', 'ps2', 'ps3']
for cur_attr in attributes:
try:
del sys.__dict__[cur_attr]
except KeyError:
pass
# Reset functions
sys.displayhook = sys.__displayhook__
sys.excepthook = sys.__excepthook__
def do_py(self, arg):
"""
Invoke python command, shell, or script
py <command>: Executes a Python command.
py: Enters interactive Python mode.
End with ``Ctrl-D`` (Unix) / ``Ctrl-Z`` (Windows), ``quit()``, or ``exit()``.
Non-python commands can be issued with ``cmd("your command")``.
Run python code from external script files with ``run("script.py")``
"""
if self._in_py:
self.perror("Recursively entering interactive Python consoles is not allowed.", traceback_war=False)
return
self._in_py = True
# noinspection PyBroadException
try:
self.pystate['self'] = self
arg = arg.strip()
# Support the run command even if called prior to invoking an interactive interpreter
def run(filename):
"""Run a Python script file in the interactive console.
:param filename: str - filename of *.py script file to run
"""
try:
with open(filename) as f:
interp.runcode(f.read())
except IOError as e:
self.perror(e)
def onecmd_plus_hooks(cmd_plus_args):
"""Run a cmd2.Cmd command from a Python script or the interactive Python console.
:param cmd_plus_args: str - command line including command and arguments to run
:return: bool - True if cmdloop() should exit once leaving the interactive Python console
"""
return self.onecmd_plus_hooks(cmd_plus_args + '\n')
self.pystate['run'] = run
self.pystate['cmd'] = onecmd_plus_hooks
localvars = (self.locals_in_py and self.pystate) or {}
interp = InteractiveConsole(locals=localvars)
interp.runcode('import sys, os;sys.path.insert(0, os.getcwd())')
if arg:
interp.runcode(arg)
# If there are no args, then we will open an interactive Python console
else:
# noinspection PyShadowingBuiltins
def quit():
"""Function callable from the interactive Python console to exit that environment"""
raise EmbeddedConsoleExit
self.pystate['quit'] = quit
self.pystate['exit'] = quit
# Set up readline for Python console
if rl_type != RlType.NONE:
# Save cmd2 history
saved_cmd2_history = []
for i in range(1, readline.get_current_history_length() + 1):
saved_cmd2_history.append(readline.get_history_item(i))
readline.clear_history()
# Restore py's history
for item in self.py_history:
readline.add_history(item)
if self.use_rawinput and self.completekey:
# Set up tab completion for the Python console
# rlcompleter relies on the default settings of the Python readline module
if rl_type == RlType.GNU:
old_basic_quotes = ctypes.cast(rl_basic_quote_characters, ctypes.c_void_p).value
rl_basic_quote_characters.value = orig_rl_basic_quotes
if 'gnureadline' in sys.modules:
# rlcompleter imports readline by name, so it won't use gnureadline
# Force rlcompleter to use gnureadline instead so it has our settings and history
saved_readline = None
if 'readline' in sys.modules:
saved_readline = sys.modules['readline']
sys.modules['readline'] = sys.modules['gnureadline']
old_delims = readline.get_completer_delims()
readline.set_completer_delims(orig_rl_delims)
# rlcompleter will not need cmd2's custom display function
# This will be restored by cmd2 the next time complete() is called
if rl_type == RlType.GNU:
readline.set_completion_display_matches_hook(None)
elif rl_type == RlType.PYREADLINE:
readline.rl.mode._display_completions = self._display_matches_pyreadline
# Save off the current completer and set a new one in the Python console
# Make sure it tab completes from its locals() dictionary
old_completer = readline.get_completer()
interp.runcode("from rlcompleter import Completer")
interp.runcode("import readline")
interp.runcode("readline.set_completer(Completer(locals()).complete)")
# Set up sys module for the Python console
self._reset_py_display()
keepstate = Statekeeper(sys, ('stdin', 'stdout'))
sys.stdout = self.stdout
sys.stdin = self.stdin
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
try:
interp.interact(banner="Python {} on {}\n{}\n({})\n{}".format(sys.version, sys.platform,
cprt, self.__class__.__name__,
self.do_py.__doc__))
except EmbeddedConsoleExit:
pass
finally:
keepstate.restore()
# Set up readline for cmd2
if rl_type != RlType.NONE:
# Save py's history
del self.py_history[:]
for i in range(1, readline.get_current_history_length() + 1):
self.py_history.append(readline.get_history_item(i))
readline.clear_history()
# Restore cmd2's history
for item in saved_cmd2_history:
readline.add_history(item)
if self.use_rawinput and self.completekey:
# Restore cmd2's tab completion settings
readline.set_completer(old_completer)
readline.set_completer_delims(old_delims)
if rl_type == RlType.GNU:
rl_basic_quote_characters.value = old_basic_quotes
if 'gnureadline' in sys.modules:
# Restore what the readline module pointed to
if saved_readline is None:
del (sys.modules['readline'])
else:
sys.modules['readline'] = saved_readline
except Exception:
pass
finally:
self._in_py = False
return self._should_quit
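# Examples of the py command in action (a sketch of the user-facing behavior;
# cmd() and run() are available when locals_in_py is enabled, as set up above):
#
#     (Cmd) py print(2 + 2)      # run a single Python statement
#     (Cmd) py                   # drop into the interactive console, where:
#     >>> cmd("help")            #   runs a cmd2 command
#     >>> run("script.py")       #   executes a Python script file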
@with_argument_list
def do_pyscript(self, arglist):
"""\nRuns a python script file inside the console
Usage: pyscript <script_path> [script_arguments]
Console commands can be executed inside this script with cmd("your command")
However, you cannot run nested "py" or "pyscript" commands from within this script
Paths or arguments that contain spaces must be enclosed in quotes
"""
if not arglist:
self.perror("pyscript command requires at least 1 argument ...", traceback_war=False)
self.do_help('pyscript')
return
# Get the absolute path of the script
script_path = os.path.expanduser(arglist[0])
# Save current command line arguments
orig_args = sys.argv
# Overwrite sys.argv to allow the script to take command line arguments
sys.argv = [script_path]
sys.argv.extend(arglist[1:])
# Run the script - use repr formatting to escape things which need to be escaped to prevent issues on Windows
self.do_py("run({!r})".format(script_path))
# Restore command line arguments to original state
sys.argv = orig_args
# Enable tab-completion for pyscript command
def complete_pyscript(self, text, line, begidx, endidx):
index_dict = {1: self.path_complete}
return self.index_based_complete(text, line, begidx, endidx, index_dict)
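# Sketch of a script usable with pyscript (the file name and its contents are
# hypothetical; cmd() is injected into the py environment as shown in do_py):
#
#     # myscript.py -- sys.argv is rewritten so argv[0] is the script path
#     import sys
#     cmd('help')
#     print('script args:', sys.argv[1:])
#
# Invoked from the prompt as:  pyscript myscript.py arg1 "arg 2"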
# Only include the do_ipy() method if IPython is available on the system
if ipython_available:
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def do_ipy(self, arg):
"""Enters an interactive IPython shell.
Run python code from external files with ``run filename.py``
End with ``Ctrl-D`` (Unix) / ``Ctrl-Z`` (Windows), ``quit()``, or ``exit()``.
"""
banner = 'Entering an embedded IPython shell. Type quit() or <Ctrl>-d to exit ...'
exit_msg = 'Leaving IPython, back to {}'.format(sys.argv[0])
embed(banner1=banner, exit_msg=exit_msg)
history_parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
history_parser_group = history_parser.add_mutually_exclusive_group()
history_parser_group.add_argument('-r', '--run', action='store_true', help='run selected history items')
history_parser_group.add_argument('-e', '--edit', action='store_true',
help='edit and then run selected history items')
history_parser_group.add_argument('-s', '--script', action='store_true', help='script format; no separation lines')
history_parser_group.add_argument('-o', '--output-file', metavar='FILE', help='output commands to a script file')
history_parser_group.add_argument('-t', '--transcript', help='output commands and results to a transcript file')
_history_arg_help = """empty all history items
a one history item by number
a..b, a:b, a:, ..b items by indices (inclusive)
[string] items containing string
/regex/ items matching regular expression"""
history_parser.add_argument('arg', nargs='?', help=_history_arg_help)
@with_argparser(history_parser)
def do_history(self, args):
"""View, run, edit, and save previously entered commands."""
# If an argument was supplied, then retrieve partial contents of the history
cowardly_refuse_to_run = False
if args.arg:
# If a character indicating a slice is present, retrieve
# a slice of the history
arg = args.arg
if '..' in arg or ':' in arg:
try:
# Get a slice of history
history = self.history.span(arg)
except IndexError:
history = self.history.get(arg)
else:
# Get item(s) from history by index or string search
history = self.history.get(arg)
else:
# If no arg given, then retrieve the entire history
cowardly_refuse_to_run = True
# Get a copy of the history so it doesn't get mutated while we are using it
history = self.history[:]
if args.run:
if cowardly_refuse_to_run:
self.perror("Cowardly refusing to run all previously entered commands.", traceback_war=False)
self.perror("If this is what you want to do, specify '1:' as the range of history.",
traceback_war=False)
else:
for runme in history:
self.pfeedback(runme)
if runme:
self.onecmd_plus_hooks(runme)
elif args.edit:
fd, fname = tempfile.mkstemp(suffix='.txt', text=True)
with os.fdopen(fd, 'w') as fobj:
for command in history:
fobj.write('{}\n'.format(command))
try:
os.system('"{}" "{}"'.format(self.editor, fname))
self.do_load(fname)
except Exception:
raise
finally:
os.remove(fname)
elif args.output_file:
try:
with open(os.path.expanduser(args.output_file), 'w') as fobj:
for command in history:
fobj.write('{}\n'.format(command))
plural = 's' if len(history) > 1 else ''
self.pfeedback('{} command{} saved to {}'.format(len(history), plural, args.output_file))
except Exception as e:
self.perror('Saving {!r} - {}'.format(args.output_file, e), traceback_war=False)
elif args.transcript:
self._generate_transcript(history, args.transcript)
else:
# Display the history items retrieved
for hi in history:
if args.script:
self.poutput(hi)
else:
self.poutput(hi.pr())
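# Example invocations of the history command (a sketch):
#
#     (Cmd) history                      # list all history items
#     (Cmd) history 2                    # show item 2
#     (Cmd) history -r 1:3               # re-run items 1 through 3
#     (Cmd) history -o saved.txt /load/  # save items containing 'load' to a file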
def _generate_transcript(self, history, transcript_file):
"""Generate a transcript file from a given history of commands."""
# Save the current echo state, and turn it off. We inject commands into the
# output using a different mechanism
import io
saved_echo = self.echo
self.echo = False
# Redirect stdout to the transcript file
saved_self_stdout = self.stdout
# The problem with supporting regular expressions in transcripts
# is that they shouldn't be processed in the command, just the output.
# In addition, when we generate a transcript, any slashes in the output
# are not really intended to indicate regular expressions, so they should
# be escaped.
#
# We have to jump through some hoops here in order to catch the commands
# separately from the output and escape the slashes in the output.
transcript = ''
for history_item in history:
# build the command, complete with prompts. When we replay
# the transcript, we look for the prompts to separate
# the command from the output
first = True
command = ''
for line in history_item.splitlines():
if first:
command += '{}{}\n'.format(self.prompt, line)
first = False
else:
command += '{}{}\n'.format(self.continuation_prompt, line)
transcript += command
# create a new string buffer and set it to stdout to catch the output
# of the command
membuf = io.StringIO()
self.stdout = membuf
# then run the command and let the output go into our buffer
self.onecmd_plus_hooks(history_item)
# rewind the buffer to the beginning
membuf.seek(0)
# get the output out of the buffer
output = membuf.read()
# and add the regex-escaped output to the transcript
transcript += output.replace('/', '\\/')
# Restore stdout to its original state
self.stdout = saved_self_stdout
# Set echo back to its original state
self.echo = saved_echo
# finally, we can write the transcript out to the file
try:
with open(transcript_file, 'w') as fout:
fout.write(transcript)
except (FILE_NOT_FOUND_ERROR, IOError) as ex:
self.perror('Failed to save transcript: {}'.format(ex), traceback_war=False)
else:
# and let the user know what we did
if len(history) > 1:
plural = 'commands and their outputs'
else:
plural = 'command and its output'
msg = '{} {} saved to transcript file {!r}'
self.pfeedback(msg.format(len(history), plural, transcript_file))
@with_argument_list
def do_edit(self, arglist):
"""Edit a file in a text editor.
Usage: edit [file_path]
Where:
* file_path - path to a file to open in editor
The editor used is determined by the ``editor`` settable parameter.
"set editor (program-name)" to change or set the EDITOR environment variable.
"""
if not self.editor:
raise EnvironmentError("Please use 'set editor' to specify your text editing program of choice.")
filename = arglist[0] if arglist else ''
if filename:
os.system('"{}" "{}"'.format(self.editor, filename))
else:
os.system('"{}"'.format(self.editor))
# Enable tab-completion for edit command
def complete_edit(self, text, line, begidx, endidx):
index_dict = {1: self.path_complete}
return self.index_based_complete(text, line, begidx, endidx, index_dict)
@property
def _current_script_dir(self):
"""Accessor to get the current script directory from the _script_dir LIFO queue."""
if self._script_dir:
return self._script_dir[-1]
else:
return None
@with_argument_list
def do__relative_load(self, arglist):
"""Runs commands in script file that is encoded as either ASCII or UTF-8 text.
Usage: _relative_load <file_path>
optional argument:
file_path a file path pointing to a script
Script should contain one command per line, just like the command would be typed in the console.
If this is called from within an already-running script, the filename will be interpreted
relative to the already-running script's directory.
NOTE: This command is intended to only be used within text file scripts.
"""
# If arg is None or arg is an empty string this is an error
if not arglist:
self.perror('_relative_load command requires a file path:', traceback_war=False)
return
file_path = arglist[0].strip()
# NOTE: Relative path is an absolute path, it is just relative to the current script directory
relative_path = os.path.join(self._current_script_dir or '', file_path)
self.do_load(relative_path)
def do_eos(self, _):
"""Handles cleanup when a script has finished executing."""
if self._script_dir:
self._script_dir.pop()
@with_argument_list
def do_load(self, arglist):
"""Runs commands in script file that is encoded as either ASCII or UTF-8 text.
Usage: load <file_path>
* file_path - a file path pointing to a script
Script should contain one command per line, just like the command would be typed in the console.
"""
# If arg is None or arg is an empty string this is an error
if not arglist:
self.perror('load command requires a file path:', traceback_war=False)
return
file_path = arglist[0].strip()
expanded_path = os.path.abspath(os.path.expanduser(file_path))
# Make sure expanded_path points to a file
if not os.path.isfile(expanded_path):
self.perror('{} does not exist or is not a file'.format(expanded_path), traceback_war=False)
return
# Make sure the file is not empty
if os.path.getsize(expanded_path) == 0:
self.perror('{} is empty'.format(expanded_path), traceback_war=False)
return
# Make sure the file is ASCII or UTF-8 encoded text
if not self.is_text_file(expanded_path):
self.perror('{} is not an ASCII or UTF-8 encoded text file'.format(expanded_path), traceback_war=False)
return
try:
# Read all lines of the script and insert into the head of the
# command queue. Add an "end of script (eos)" command to cleanup the
# self._script_dir list when done. Specify file encoding in Python
# 3, but Python 2 doesn't allow that argument to open().
kwargs = {'encoding': 'utf-8'} if six.PY3 else {}
with open(expanded_path, **kwargs) as target:
self.cmdqueue = target.read().splitlines() + ['eos'] + self.cmdqueue
except IOError as e:
self.perror('Problem accessing script from {}:\n{}'.format(expanded_path, e))
return
self._script_dir.append(os.path.dirname(expanded_path))
# Enable tab-completion for load command
def complete_load(self, text, line, begidx, endidx):
index_dict = {1: self.path_complete}
return self.index_based_complete(text, line, begidx, endidx, index_dict)
@staticmethod
def is_text_file(file_path):
"""
Returns whether a file contains only ASCII or UTF-8 encoded text.
:param file_path: path to the file being checked
"""
expanded_path = os.path.abspath(os.path.expanduser(file_path.strip()))
valid_text_file = False
# Check if the file is ASCII
try:
with codecs.open(expanded_path, encoding='ascii', errors='strict') as f:
# Make sure the file has at least one line of text
# noinspection PyUnusedLocal
if sum(1 for line in f) > 0:
valid_text_file = True
except IOError:
pass
except UnicodeDecodeError:
# The file is not ASCII. Check if it is UTF-8.
try:
with codecs.open(expanded_path, encoding='utf-8', errors='strict') as f:
# Make sure the file has at least one line of text
# noinspection PyUnusedLocal
if sum(1 for line in f) > 0:
valid_text_file = True
except IOError:
pass
except UnicodeDecodeError:
# Not UTF-8
pass
return valid_text_file
def run_transcript_tests(self, callargs):
"""Runs transcript tests for provided file(s).
This is called when either -t is provided on the command line or the transcript_files argument is provided
during construction of the cmd2.Cmd instance.
:param callargs: List[str] - list of transcript test file names
"""
class TestMyAppCase(Cmd2TestCase):
cmdapp = self
self.__class__.testfiles = callargs
sys.argv = [sys.argv[0]] # the --test argument upsets unittest.main()
testcase = TestMyAppCase()
runner = unittest.TextTestRunner()
runner.run(testcase)
def cmdloop(self, intro=None):
"""This is an outer wrapper around _cmdloop() which deals with extra features provided by cmd2.
_cmdloop() provides the main loop equivalent to cmd.cmdloop(). This is a wrapper around that which deals with
the following extra features provided by cmd2:
- commands at invocation
- transcript testing
- intro banner
:param intro: str - if provided this overrides self.intro and serves as the intro banner printed once at start
"""
if self.allow_cli_args:
parser = optparse.OptionParser()
parser.add_option('-t', '--test', dest='test',
action="store_true",
help='Test against transcript(s) in FILE (wildcards OK)')
(callopts, callargs) = parser.parse_args()
# If transcript testing was called for, use other arguments as transcript files
if callopts.test:
self._transcript_files = callargs
# If commands were supplied at invocation, then add them to the command queue
if callargs:
self.cmdqueue.extend(callargs)
# Always run the preloop first
self.preloop()
# If transcript-based regression testing was requested, then do that instead of the main loop
if self._transcript_files is not None:
self.run_transcript_tests(self._transcript_files)
else:
# If an intro was supplied in the method call, allow it to override the default
if intro is not None:
self.intro = intro
# Print the intro, if there is one, right after the preloop
if self.intro is not None:
self.poutput(str(self.intro) + "\n")
# And then call _cmdloop() to enter the main loop
self._cmdloop()
# Run the postloop() no matter what
self.postloop()
# noinspection PyPep8Naming
class ParserManager:
"""
Class which encapsulates all of the pyparsing parser functionality for cmd2 in a single location.
"""
def __init__(self, redirector, terminators, multilineCommands, legalChars, commentGrammars, commentInProgress,
blankLinesAllowed, prefixParser, preparse, postparse, aliases, shortcuts):
"""Creates and uses parsers for user input according to app's parameters."""
self.commentGrammars = commentGrammars
self.preparse = preparse
self.postparse = postparse
self.aliases = aliases
self.shortcuts = shortcuts
self.main_parser = self._build_main_parser(redirector=redirector, terminators=terminators,
multilineCommands=multilineCommands, legalChars=legalChars,
commentInProgress=commentInProgress,
blankLinesAllowed=blankLinesAllowed, prefixParser=prefixParser)
self.input_source_parser = self._build_input_source_parser(legalChars=legalChars,
commentInProgress=commentInProgress)
def _build_main_parser(self, redirector, terminators, multilineCommands, legalChars, commentInProgress,
blankLinesAllowed, prefixParser):
"""Builds a PyParsing parser for interpreting user commands."""
# Build several parsing components that are eventually compiled into overall parser
output_destination_parser = (pyparsing.Literal(redirector * 2) |
(pyparsing.WordStart() + redirector) |
pyparsing.Regex('[^=]' + redirector))('output')
terminator_parser = pyparsing.Or(
[(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in terminators])('terminator')
string_end = pyparsing.stringEnd ^ '\nEOF'
multilineCommand = pyparsing.Or(
[pyparsing.Keyword(c, caseless=False) for c in multilineCommands])('multilineCommand')
oneline_command = (~multilineCommand + pyparsing.Word(legalChars))('command')
pipe = pyparsing.Keyword('|', identChars='|')
do_not_parse = self.commentGrammars | commentInProgress | pyparsing.quotedString
after_elements = \
pyparsing.Optional(pipe + pyparsing.SkipTo(output_destination_parser ^ string_end,
ignore=do_not_parse)('pipeTo')) + \
pyparsing.Optional(output_destination_parser +
pyparsing.SkipTo(string_end, ignore=do_not_parse).
setParseAction(lambda x: strip_quotes(x[0].strip()))('outputTo'))
multilineCommand.setParseAction(lambda x: x[0])
oneline_command.setParseAction(lambda x: x[0])
if blankLinesAllowed:
blankLineTerminationParser = pyparsing.NoMatch()
else:
blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')
blankLineTerminator.setResultsName('terminator')
blankLineTerminationParser = ((multilineCommand ^ oneline_command) +
pyparsing.SkipTo(blankLineTerminator, ignore=do_not_parse).setParseAction(
lambda x: x[0].strip())('args') + blankLineTerminator)('statement')
multilineParser = (((multilineCommand ^ oneline_command) +
pyparsing.SkipTo(terminator_parser,
ignore=do_not_parse).setParseAction(lambda x: x[0].strip())('args') +
terminator_parser)('statement') +
pyparsing.SkipTo(output_destination_parser ^ pipe ^ string_end,
ignore=do_not_parse).setParseAction(lambda x: x[0].strip())('suffix') +
after_elements)
multilineParser.ignore(commentInProgress)
singleLineParser = ((oneline_command +
pyparsing.SkipTo(terminator_parser ^ string_end ^ pipe ^ output_destination_parser,
ignore=do_not_parse).setParseAction(
lambda x: x[0].strip())('args'))('statement') +
pyparsing.Optional(terminator_parser) + after_elements)
blankLineTerminationParser = blankLineTerminationParser.setResultsName('statement')
parser = prefixParser + (
string_end |
multilineParser |
singleLineParser |
blankLineTerminationParser |
multilineCommand + pyparsing.SkipTo(string_end, ignore=do_not_parse)
)
parser.ignore(self.commentGrammars)
return parser
@staticmethod
def _build_input_source_parser(legalChars, commentInProgress):
"""Builds a PyParsing parser for alternate user input sources (from file, pipe, etc.)"""
input_mark = pyparsing.Literal('<')
input_mark.setParseAction(lambda x: '')
# Also allow spaces, slashes, and quotes
file_name = pyparsing.Word(legalChars + ' /\\"\'')
input_from = file_name('inputFrom')
input_from.setParseAction(replace_with_file_contents)
# a not-entirely-satisfactory way of distinguishing < as in "import from" from <
# as in "lesser than"
inputParser = input_mark + pyparsing.Optional(input_from) + pyparsing.Optional('>') + \
pyparsing.Optional(file_name) + (pyparsing.stringEnd | '|')
inputParser.ignore(commentInProgress)
return inputParser
def parsed(self, raw):
""" This function is where the actual parsing of each line occurs.
:param raw: str - the line of text as it was entered
:return: ParsedString - custom subclass of str with extra attributes
"""
if isinstance(raw, ParsedString):
p = raw
else:
# preparse is an overridable hook; default makes no changes
s = self.preparse(raw)
s = self.input_source_parser.transformString(s.lstrip())
s = self.commentGrammars.transformString(s)
# Make a copy of aliases so we can edit it
tmp_aliases = list(self.aliases.keys())
keep_expanding = len(tmp_aliases) > 0
# Expand aliases
while keep_expanding:
for cur_alias in tmp_aliases:
keep_expanding = False
if s == cur_alias or s.startswith(cur_alias + ' '):
s = s.replace(cur_alias, self.aliases[cur_alias], 1)
# Do not expand the same alias more than once
tmp_aliases.remove(cur_alias)
keep_expanding = len(tmp_aliases) > 0
break
# Expand command shortcut to its full command name
for (shortcut, expansion) in self.shortcuts:
if s.startswith(shortcut):
# If the next character after the shortcut isn't a space, then insert one
shortcut_len = len(shortcut)
if len(s) == shortcut_len or s[shortcut_len] != ' ':
expansion += ' '
# Expand the shortcut
s = s.replace(shortcut, expansion, 1)
break
try:
result = self.main_parser.parseString(s)
except pyparsing.ParseException:
# If we have a parsing failure, treat it is an empty command and move to next prompt
result = self.main_parser.parseString('')
result['raw'] = raw
result['command'] = result.multilineCommand or result.command
result = self.postparse(result)
p = ParsedString(result.args)
p.parsed = result
p.parser = self.parsed
return p
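# A sketch of what parsed() yields for a typical line (attribute names come
# from the results names assigned in _build_main_parser; the pipeTo text is
# shown approximately):
#
#     p = parser_manager.parsed('speak hello | grep h')
#     str(p)               # the args as a ParsedString: 'hello'
#     p.parsed.command     # 'speak'
#     p.parsed.args        # 'hello'
#     p.parsed.pipeTo      # the text to pipe through: ' grep h'
#     p.parsed.raw         # the original input line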
class HistoryItem(str):
"""Class used to represent an item in the History list.
Thin wrapper around str class which adds a custom format for printing. It
also keeps track of its index in the list as well as a lowercase
representation of itself for convenience/efficiency.
"""
listformat = '-------------------------[{}]\n{}\n'
# noinspection PyUnusedLocal
def __init__(self, instr):
str.__init__(self)
self.lowercase = self.lower()
self.idx = None
def pr(self):
"""Represent a HistoryItem in a pretty fashion suitable for printing.
:return: str - pretty print string version of a HistoryItem
"""
return self.listformat.format(self.idx, str(self).rstrip())
class History(list):
""" A list of HistoryItems that knows how to respond to user requests. """
# noinspection PyMethodMayBeStatic
def _zero_based_index(self, onebased):
result = onebased
if result > 0:
result -= 1
return result
def _to_index(self, raw):
if raw:
result = self._zero_based_index(int(raw))
else:
result = None
return result
spanpattern = re.compile(r'^\s*(?P<start>-?\d+)?\s*(?P<separator>:|(\.{2,}))?\s*(?P<end>-?\d+)?\s*$')
def span(self, raw):
"""Parses the input string search for a span pattern and if if found, returns a slice from the History list.
:param raw: str - string potentially containing a span of the forms a..b, a:b, a:, ..b
:return: List[HistoryItem] - slice from the History list
"""
if raw.lower() in ('*', '-', 'all'):
raw = ':'
results = self.spanpattern.search(raw)
if not results:
raise IndexError
if not results.group('separator'):
return [self[self._to_index(results.group('start'))]]
start = self._to_index(results.group('start')) or 0 # Ensure start is not None
end = self._to_index(results.group('end'))
reverse = False
if end is not None:
if end < start:
(start, end) = (end, start)
reverse = True
end += 1
result = self[start:end]
if reverse:
result.reverse()
return result
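# Examples of span strings accepted above (a sketch; indices are 1-based and
# inclusive):
#
#     history.span('2..4')   # items 2 through 4
#     history.span('3:')     # item 3 through the end
#     history.span(':2')     # items 1 and 2
#     history.span('*')      # everything ('-' and 'all' behave the same way)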
rangePattern = re.compile(r'^\s*(?P<start>[\d]+)?\s*-\s*(?P<end>[\d]+)?\s*$')
def append(self, new):
"""Append a HistoryItem to end of the History list
:param new: str - command line to convert to HistoryItem and add to the end of the History list
"""
new = HistoryItem(new)
list.append(self, new)
new.idx = len(self)
def get(self, getme=None):
"""Get an item or items from the History list using 1-based indexing.
:param getme: int or str - item(s) to get - either an integer index or string to search for
:return: List[str] - list of HistoryItems matching the retrieval criteria
"""
if not getme:
return self
try:
getme = int(getme)
if getme < 0:
return self[:(-1 * getme)]
else:
return [self[getme - 1]]
except IndexError:
return []
except ValueError:
range_result = self.rangePattern.search(getme)
if range_result:
start = range_result.group('start') or None
end = range_result.group('end') or None
if start:
start = int(start) - 1
if end:
end = int(end)
return self[start:end]
# noinspection PyUnresolvedReferences
getme = getme.strip()
if getme.startswith(r'/') and getme.endswith(r'/'):
finder = re.compile(getme[1:-1], re.DOTALL | re.MULTILINE | re.IGNORECASE)
def isin(hi):
"""Listcomp filter function for doing a regular expression search of History.
:param hi: HistoryItem
:return: bool - True if search matches
"""
return finder.search(hi)
else:
def isin(hi):
"""Listcomp filter function for doing a case-insensitive string search of History.
:param hi: HistoryItem
:return: bool - True if search matches
"""
return getme.lower() in hi.lowercase
return [itm for itm in self if isin(itm)]
def cast(current, new):
"""Tries to force a new value into the same type as the current when trying to set the value for a parameter.
:param current: current value for the parameter, type varies
:param new: str - new value
:return: new value with same type as current, or the current value if there was an error casting
"""
typ = type(current)
if typ == bool:
try:
return bool(int(new))
except (ValueError, TypeError):
pass
try:
new = new.lower()
except AttributeError:
pass
if (new == 'on') or (new[:1] in ('y', 't')):
return True
if (new == 'off') or (new[:1] in ('n', 'f')):
return False
else:
try:
return typ(new)
except (ValueError, TypeError):
pass
print("Problem setting parameter (now %s) to %s; incorrect type?" % (current, new))
return current
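# Sketch of cast() behavior:
#
#     cast(True, 'off')   # -> False (bool coercion understands on/off/y/n/t/f)
#     cast(True, '0')     # -> False (integer strings also coerce to bool)
#     cast(5, '10')       # -> 10    (cast with the current value's type)
#     cast(5, 'ten')      # -> 5     (failed cast warns and keeps current value)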
class Statekeeper(object):
"""Class used to save and restore state during load and py commands as well as when redirecting output or pipes."""
def __init__(self, obj, attribs):
"""Use the instance attributes as a generic key-value store to copy instance attributes from outer object.
:param obj: instance of cmd2.Cmd derived class (your application instance)
:param attribs: Tuple[str] - tuple of strings listing attributes of obj to save a copy of
"""
self.obj = obj
self.attribs = attribs
if self.obj:
self._save()
def _save(self):
"""Create copies of attributes from self.obj inside this Statekeeper instance."""
for attrib in self.attribs:
setattr(self, attrib, getattr(self.obj, attrib))
def restore(self):
"""Overwrite attributes in self.obj with the saved values stored in this Statekeeper instance."""
if self.obj:
for attrib in self.attribs:
setattr(self.obj, attrib, getattr(self, attrib))
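# Minimal sketch of the save/restore pattern Statekeeper supports throughout
# this module (assumes `app` is a cmd2.Cmd instance and `buf` is any
# file-like object):
#
#     kept = Statekeeper(app, ('stdout',))   # snapshot app.stdout
#     app.stdout = buf                       # temporarily repoint it
#     # ... do work that writes to app.stdout ...
#     kept.restore()                         # put the original back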
class OutputTrap(object):
"""Instantiate an OutputTrap to divert/capture ALL stdout output. For use in transcript testing."""
def __init__(self):
self.contents = ''
def write(self, txt):
"""Add text to the internal contents.
:param txt: str
"""
self.contents += txt
def read(self):
"""Read from the internal contents and then clear them out.
:return: str - text from the internal contents
"""
result = self.contents
self.contents = ''
return result
class Cmd2TestCase(unittest.TestCase):
"""Subclass this, setting CmdApp, to make a unittest.TestCase class
that will execute the commands in a transcript file and expect the results shown.
See example.py"""
cmdapp = None
def fetchTranscripts(self):
self.transcripts = {}
for fileset in self.cmdapp.testfiles:
for fname in glob.glob(fileset):
tfile = open(fname)
self.transcripts[fname] = iter(tfile.readlines())
tfile.close()
if not len(self.transcripts):
raise Exception("No test files found - nothing to test.")
def setUp(self):
if self.cmdapp:
self.fetchTranscripts()
# Trap stdout
self._orig_stdout = self.cmdapp.stdout
self.cmdapp.stdout = OutputTrap()
def runTest(self): # was testall
if self.cmdapp:
its = sorted(self.transcripts.items())
for (fname, transcript) in its:
self._test_transcript(fname, transcript)
def _test_transcript(self, fname, transcript):
line_num = 0
finished = False
line = strip_ansi(next(transcript))
line_num += 1
while not finished:
# Scroll forward to where actual commands begin
while not line.startswith(self.cmdapp.visible_prompt):
try:
line = strip_ansi(next(transcript))
except StopIteration:
finished = True
break
line_num += 1
command = [line[len(self.cmdapp.visible_prompt):]]
line = next(transcript)
# Read the entirety of a multi-line command
while line.startswith(self.cmdapp.continuation_prompt):
command.append(line[len(self.cmdapp.continuation_prompt):])
try:
line = next(transcript)
except StopIteration:
                    raise StopIteration(
                        'Transcript broke off while reading command beginning at line {} with\n{}'.format(
                            line_num, command[0])
                    )
line_num += 1
command = ''.join(command)
# Send the command into the application and capture the resulting output
# TODO: Should we get the return value and act if stop == True?
self.cmdapp.onecmd_plus_hooks(command)
result = self.cmdapp.stdout.read()
# Read the expected result from transcript
if strip_ansi(line).startswith(self.cmdapp.visible_prompt):
message = '\nFile {}, line {}\nCommand was:\n{}\nExpected: (nothing)\nGot:\n{}\n'.format(
fname, line_num, command, result)
                self.assertTrue(not result.strip(), message)
continue
expected = []
while not strip_ansi(line).startswith(self.cmdapp.visible_prompt):
expected.append(line)
try:
line = next(transcript)
except StopIteration:
finished = True
break
line_num += 1
expected = ''.join(expected)
# transform the expected text into a valid regular expression
expected = self._transform_transcript_expected(expected)
message = '\nFile {}, line {}\nCommand was:\n{}\nExpected:\n{}\nGot:\n{}\n'.format(
fname, line_num, command, expected, result)
self.assertTrue(re.match(expected, result, re.MULTILINE | re.DOTALL), message)
def _transform_transcript_expected(self, s):
"""parse the string with slashed regexes into a valid regex
Given a string like:
Match a 10 digit phone number: /\d{3}-\d{3}-\d{4}/
Turn it into a valid regular expression which matches the literal text
of the string and the regular expression. We have to remove the slashes
because they differentiate between plain text and a regular expression.
Unless the slashes are escaped, in which case they are interpreted as
plain text, or there is only one slash, which is treated as plain text
also.
Check the tests in tests/test_transcript.py to see all the edge
cases.
"""
regex = ''
start = 0
while True:
(regex, first_slash_pos, start) = self._escaped_find(regex, s, start, False)
if first_slash_pos == -1:
# no more slashes, add the rest of the string and bail
regex += re.escape(s[start:])
break
else:
# there is a slash, add everything we have found so far
# add stuff before the first slash as plain text
regex += re.escape(s[start:first_slash_pos])
start = first_slash_pos+1
# and go find the next one
(regex, second_slash_pos, start) = self._escaped_find(regex, s, start, True)
if second_slash_pos > 0:
# add everything between the slashes (but not the slashes)
# as a regular expression
regex += s[start:second_slash_pos]
                    # and change where we start looking for slashes on the
                    # next turn through the loop
start = second_slash_pos + 1
else:
# No closing slash, we have to add the first slash,
# and the rest of the text
regex += re.escape(s[start-1:])
break
return regex
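    # Example of the transformation (hedged sketch, mirroring the docstring above):
    #   'phone: /\d{3}-\d{4}/ found'
    # becomes re.escape('phone: ') + r'\d{3}-\d{4}' + re.escape(' found')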
@staticmethod
def _escaped_find(regex, s, start, in_regex):
"""
Find the next slash in {s} after {start} that is not preceded by a backslash.
If we find an escaped slash, add everything up to and including it to regex,
updating {start}. {start} therefore serves two purposes, tells us where to start
looking for the next thing, and also tells us where in {s} we have already
added things to {regex}
{in_regex} specifies whether we are currently searching in a regex, we behave
differently if we are or if we aren't.
"""
while True:
pos = s.find('/', start)
if pos == -1:
# no match, return to caller
break
elif pos == 0:
# slash at the beginning of the string, so it can't be
# escaped. We found it.
break
else:
                # check if the slash is preceded by a backslash
if s[pos-1:pos] == '\\':
# it is.
if in_regex:
# add everything up to the backslash as a
# regular expression
regex += s[start:pos-1]
# skip the backslash, and add the slash
regex += s[pos]
else:
# add everything up to the backslash as escaped
# plain text
regex += re.escape(s[start:pos-1])
# and then add the slash as escaped
# plain text
regex += re.escape(s[pos])
# update start to show we have handled everything
# before it
start = pos+1
# and continue to look
else:
# slash is not escaped, this is what we are looking for
break
return regex, pos, start
def tearDown(self):
if self.cmdapp:
# Restore stdout
self.cmdapp.stdout = self._orig_stdout
def namedtuple_with_two_defaults(typename, field_names, default_values=('', '')):
"""Wrapper around namedtuple which lets you treat the last value as optional.
:param typename: str - type name for the Named tuple
:param field_names: List[str] or space-separated string of field names
:param default_values: (optional) 2-element tuple containing the default values for last 2 parameters in named tuple
Defaults to an empty string for both of them
:return: namedtuple type
"""
T = collections.namedtuple(typename, field_names)
# noinspection PyUnresolvedReferences
T.__new__.__defaults__ = default_values
return T
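# Minimal usage sketch (hypothetical tuple type, not part of cmd2):
#   Point = namedtuple_with_two_defaults('Point', ['out', 'err', 'war'])
#   Point('data')          -> Point(out='data', err='', war='')
#   Point('data', 'oops')  -> Point(out='data', err='oops', war='')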
class CmdResult(namedtuple_with_two_defaults('CmdResult', ['out', 'err', 'war'])):
"""Derive a class to store results from a named tuple so we can tweak dunder methods for convenience.
This is provided as a convenience and an example for one possible way for end users to store results in
the self._last_result attribute of cmd2.Cmd class instances. See the "python_scripting.py" example for how it can
be used to enable conditional control flow.
Named tuple attributes
----------------------
out - this is intended to store normal output data from the command and can be of any type that makes sense
err: str - (optional) this is intended to store an error message and it being non-empty indicates there was an error
Defaults to an empty string
war: str - (optional) this is intended to store a warning message which isn't quite an error, but of note
Defaults to an empty string.
NOTE: Named tuples are immutable. So the contents are there for access, not for modification.
"""
def __bool__(self):
"""If err is an empty string, treat the result as a success; otherwise treat it as a failure."""
return not self.err
def __nonzero__(self):
"""Python 2 uses this method for determining Truthiness"""
return self.__bool__()
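# Usage sketch for CmdResult truthiness (follows from __bool__ above):
#   CmdResult('payload')           -> truthy  (err defaults to '')
#   CmdResult('', err='not found') -> falsy   (non-empty err means failure)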
if __name__ == '__main__':
# If run as the main application, simply start a bare-bones cmd2 application with only built-in functionality.
# Set "use_ipython" to True to include the ipy command if IPython is installed, which supports advanced interactive
# debugging of your application via introspection on self.
app = Cmd(use_ipython=False)
app.cmdloop()
|
michaelhenkel/contrail-tripleo-docu
|
.tox/docs/lib/python2.7/site-packages/cmd2.py
|
cmd2.py
|
py
| 197,211 |
python
|
en
|
code
| 2 |
github-code
|
50
|
17027919186
|
#!/usr/bin/env python3
"""Script to send Icinga2 notifications to Discord channel via webhook"""
import sys
import argparse
import urllib.parse
import requests
parser = argparse.ArgumentParser(
prog = 'Icinga2 Discord Notification',
description = 'Script to send Icinga2 notifications to Discord channel via webhook',
)
parser.add_argument('-v', dest = 'verbose', action = 'count', default = 0)
parser.add_argument('-r', dest = 'discord_url', metavar = 'Discord webhook url', required = True)
parser.add_argument('-i', dest = 'icinga2_url', metavar = 'Icinga2 web url')
parser.add_argument('-t', dest = 'notification_type', metavar = 'Notification type', required = True)
parser.add_argument('-b', dest = 'notification_author', metavar = 'Notification author', nargs = '?', default = '')
parser.add_argument('-c', dest = 'notification_comment', metavar = 'Notification comment', nargs = '?', default = '')
parser.add_argument('-d', dest = 'notification_timestamp', metavar = 'Notification timestamp', required = True)
parser.add_argument('-x', dest = 'notification_notes', metavar = 'Notification notes', nargs = '?', default = '')
parser.add_argument('-s', dest = 'check_state', metavar = 'Host/Service state', required = True)
parser.add_argument('-o', dest = 'check_output', metavar = 'Host/Service output', required = True)
parser.add_argument('-l', dest = 'host_name', metavar = 'Host name', required = True)
parser.add_argument('-n', dest = 'host_display_name', metavar = 'Host display name', required = True)
parser.add_argument('-4', dest = 'host_address', metavar = 'Host IPv4 address', nargs = '?', default = '')
parser.add_argument('-6', dest = 'host_address6', metavar = 'Host IPv6 address', nargs = '?', default = '')
parser.add_argument('-e', dest = 'service_name', metavar = 'Service name', nargs = '?')
parser.add_argument('-u', dest = 'service_display_name', metavar = 'Service display name', nargs = '?')
args = parser.parse_args()
# Dict
NOTIFICATION_VARS = {
'check_type': 'host',
'discord_username': 'Icinga2 Monitoring',
'embed_color': 0,
'embed_title': f'[{args.notification_type} Notification] ',
'embed_fields': [],
}
# Is check host or service check?
if args.service_name is not None:
NOTIFICATION_VARS['check_type'] = 'service'
# Embed color based on state
if args.check_state in ('UP', 'OK'):
#006400
NOTIFICATION_VARS['embed_color'] = 25600
elif args.check_state == 'WARNING':
#B96500
NOTIFICATION_VARS['embed_color'] = 12150016
elif args.check_state in ('DOWN', 'CRITICAL'):
#8B0000
NOTIFICATION_VARS['embed_color'] = 9109504
elif args.check_state == 'UNKNOWN':
#800080
NOTIFICATION_VARS['embed_color'] = 8388736
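# The decimal values above are just the hex color codes, e.g. int("006400", 16) == 25600.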
# Embed title
if NOTIFICATION_VARS['check_type'] == 'host':
NOTIFICATION_VARS['embed_title'] += f'Host {args.host_display_name}'
else:
NOTIFICATION_VARS['embed_title'] += f'Service {args.service_display_name} on {args.host_display_name}'
NOTIFICATION_VARS['embed_title'] += f' - {args.check_state}'
# Embed fields
NOTIFICATION_VARS['embed_fields'].append({
'name': 'Hostname',
'value': args.host_name,
})
if args.host_address != '':
NOTIFICATION_VARS['embed_fields'].append({
'name': 'IPv4 address',
'value': args.host_address,
})
if args.host_address6 != '':
NOTIFICATION_VARS['embed_fields'].append({
'name': 'IPv6 address',
'value': args.host_address6,
})
NOTIFICATION_VARS['embed_fields'].append({
'name': 'Notification date',
'value': args.notification_timestamp,
})
if args.notification_comment != '':
embed_comment = args.notification_comment
if args.notification_author != '':
embed_comment += f' ({args.notification_author})'
NOTIFICATION_VARS['embed_fields'].append({
'name': 'Comment',
'value': embed_comment,
})
if args.notification_notes != '':
NOTIFICATION_VARS['embed_fields'].append({
'name': 'Notes',
'value': args.notification_notes,
})
if args.icinga2_url is not None:
args.icinga2_url = args.icinga2_url.rstrip('/')
args.icinga2_url += '/monitoring/'
if NOTIFICATION_VARS['check_type'] == 'host':
args.icinga2_url += \
f'host/show?host={urllib.parse.quote(args.host_name)}'
else:
args.icinga2_url += \
f'service/show?host={urllib.parse.quote(args.host_name)}' \
f'&service={urllib.parse.quote(args.service_name)}'
NOTIFICATION_VARS['embed_fields'].append({
'name': 'Icinga2 web',
'value': args.icinga2_url,
})
# Request
req_data = {
'username': NOTIFICATION_VARS['discord_username'],
'embeds': [{
'title': NOTIFICATION_VARS['embed_title'],
'color': NOTIFICATION_VARS['embed_color'],
'author': {
'name': NOTIFICATION_VARS['discord_username'],
},
'description': args.check_output,
'fields': NOTIFICATION_VARS['embed_fields'],
}],
}
if args.verbose >= 1:
print(req_data)
try:
res = requests.post(args.discord_url, json = req_data, timeout = 10)
if args.verbose >= 1:
print(res.text)
except requests.exceptions.RequestException as e:
raise SystemExit(e) from e
sys.exit(0)
|
Dennis14e/monitoring-discord-webhook
|
notification.py
|
notification.py
|
py
| 5,258 |
python
|
en
|
code
| 1 |
github-code
|
50
|
18854870234
|
import requests
from typing import Self
import xmltodict
import urllib.parse
import base64
import jinja2
PLAYREADY_SYSID = "9A04F07998404286AB92E65BE0885F95"
WIDEVINE_SYSID = "EDEF8BA979D64ACEA3C827DCD51D21ED"
#only works for 1 audio 1 video
DRM_TEMPLATE= """<?xml version="1.0" encoding="UTF-8" ?>
<GPACDRM type="CENC AES-CTR">
<!-- Widevine -->
<DRMInfo type="pssh" version="0">
<BS ID128="{{ widevine_id }}"/>
<BS ID128="0x{{ iv }}"/>
<BS data64="{{ widevine_pssh }}"/>
</DRMInfo>
<!-- Playready -->
<DRMInfo type="pssh" version="0">
<BS ID128="{{ playready_id }}"/>
<BS ID128="0x{{ iv }}"/>
<BS data64="{{ playready_pssh }}"/>
</DRMInfo>
<CrypTrack trackID="1" IsEncrypted="1" first_IV="0x{{ iv }}" saiSavedBox="senc">
<key KID="0x{{ kid }}" value="0x{{ secret_key}}"/>
</CrypTrack>
<CrypTrack trackID="2" IsEncrypted="1" first_IV="0x{{ iv }}" saiSavedBox="senc">
<key KID="0x{{ kid }}" value="0x{{ secret_key}}"/>
</CrypTrack>
</GPACDRM>
"""
class EZDrm:
def __init__(self, url: str, kid: str, contentId: str, user: str, password: str):
self.url = url
self.kid = kid
self.contentId = contentId
self.user = user
self.password = password
def fetch(self) -> Self:
params = {
"c": self.contentId,
"u": self.user,
"p": self.password,
"k": self.kid,
}
url = "{url}?{params}".format(
url=self.url, params=urllib.parse.urlencode(params)
)
r = requests.get(
"{url}?{params}".format(
url=self.url, params=urllib.parse.urlencode(params)
),
stream=True,
)
# decompress if we are receiving GZip
r.raw.decode_content = True
self.content_dict = xmltodict.parse(r.raw)
return self
def parseKeys(self) -> Self:
root = self.content_dict["cpix:CPIX"]
self.contentId = root["@contentId"]
self.version = root["@version"]
contentKey = root["cpix:ContentKeyList"]["cpix:ContentKey"]
self.explicitIV = (
base64.b64decode(contentKey["@explicitIV"].encode("ascii")).hex().upper()
)
secret = contentKey["cpix:Data"]["pskc:Secret"]["pskc:PlainValue"]
self.secretKey = base64.b64decode(secret.encode("ascii")).hex().upper()
self.encryption = contentKey["@commonEncryptionScheme"]
self.psshList = dict(
[
(
x["@systemId"].upper().replace("-", ""),
x["cpix:PSSH"]
)
for x in filter(
lambda attr: True if "cpix:PSSH" in attr else False,
root["cpix:DRMSystemList"]["cpix:DRMSystem"],
)
]
)
self.widevine_pssh = (
self.psshList[WIDEVINE_SYSID] if WIDEVINE_SYSID in self.psshList else None
)
self.playready_pssh = (
self.psshList[PLAYREADY_SYSID] if PLAYREADY_SYSID in self.psshList else None
)
return self
def writeDRMXML(self):
environment = jinja2.Environment()
template = environment.from_string(DRM_TEMPLATE)
output = template.render(
kid=self.kid.replace('-', '').upper(),
secret_key = self.secretKey,
iv =self.explicitIV,
playready_id=PLAYREADY_SYSID,
widevine_id=WIDEVINE_SYSID,
widevine_pssh= self.widevine_pssh ,
playready_pssh=self.playready_pssh
)
print(output)
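# Minimal usage sketch (the endpoint URL and credentials are placeholders; kept
# as comments so importing this module stays side-effect free):
# drm = EZDrm(url="https://example.com/cpix", kid="<kid>", contentId="<cid>",
#             user="<user>", password="<password>")
# drm.fetch().parseKeys().writeDRMXML()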
|
RodolpheFouquet/drm_scripts
|
ezdrm.py
|
ezdrm.py
|
py
| 3,626 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17784782852
|
#!/usr/bin/python3
# go to 'targeturl' looking for 'searchterm' and return all of the values within
# 'delim' immediately following the beginning of 'searchterm'
import urllib.request
import os
# Crawl url, look for searchTerm, grab thing within delim, put it in txtFile
def crawl(url, pageBegin, pageEnd, searchTerm, delim, squash, txtFile):
# temp- we now have decent files to play with
# return ('x', 'x')
multi = True
multiQuery = ''
# clear text file
try:
os.remove(txtFile)
except: pass
pageCount = 0
findCount = 0
try:
while multi:
pageCount += 1
print('Going to: ' + url+multiQuery)
response = urllib.request.urlopen(url+multiQuery)
html = str(response.read())
#print('ERR:' + html[0:10])
#if tolower(html[0:9]) == "http error":
# print('Error getting page:' + html[0:15])
# exit()
# PAGEBEGIN TO PAGEEND
# Make it just the sweet nectar within pageBegin and pageEnd
startHtml = html.find(pageBegin)
endHtml = html.find(pageEnd)
html = html[startHtml:endHtml]
# SQUASH
# remove problematic tags and strings
if squash:
for squish in squash:
html = html.replace(squish, '')
# MULTI
# If the category spans multiple pages, cry
            multi = html.find('pagefrom=')
            if multi >= 0:
                # we need this link for the next time around
                startMulti = html.find('=', multi) + 1
                endMulti = html.find('"', startMulti + 1)
                multiQuery = html[startMulti:endMulti]
                multi = True
            else:
                multi = False
# PROCESS HTML and save
foundList = []
saveFile = open(txtFile, 'a')
while True:
startFind = html.find(searchTerm) + len(searchTerm)
startFound = html.find(delim[0], startFind)
endFound = html.find(delim[1], startFound + 1)
found = html[startFound + 1 : endFound]
html = html[endFound:]
if found:
findCount += 1
foundList.append(found)
else:
foundTxt = '\n'.join(foundList) + '\n'
saveFile.write(foundTxt)
                    saveFile.close()
break
return (str(findCount), str(pageCount))
except Exception as e:
print(str(e))
return (0, 0)
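# Minimal usage sketch of crawl() (hypothetical wiki URL and markers):
# found, pages = crawl('https://example.org/wiki/Category:Foo',
#                      '<ul>', '</ul>', 'title=', ('"', '"'),
#                      ['&nbsp;'], 'results.txt')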
def cleanResults(dirtyFilename, specialRules, replace, ignoreList):
try:
readFile = open(dirtyFilename, 'r')
except Exception as e:
print(str(e))
return
resultList = []
for line in readFile:
resultList.append(line.strip('\n'))
# Round 1 for specialRules
cleanList = []
for rule in specialRules:
# print(rule) # debug
for txt in resultList:
if rule == 'caps':
txt = txt.upper()
elif rule == 'lower':
txt = txt.lower()
elif rule == 'firstLower':
txt = txt[0].lower() + txt[1:]
cleanList.append(txt)
# Round 2, replicants and ignorables
    if cleanList:
resultList = cleanList
cleanList = []
for txt in resultList:
# Assume they took a bath
dirty = ''
if txt in cleanList:
# She's a replicant
print(str(cleanList.index(txt)) + ' : ' + txt)
dirty = ' a replicant'
if txt.lower() in ignoreList:
dirty = ' in the ignoreList'
if dirty == '':
cleanList.append(txt)
else:
pass
# print('Removed: ' + txt + ' because it was' + dirty)
# Round 3, replacements
    if cleanList:
resultList = cleanList
if replace[0]:
cleanList = []
for txt in resultList:
txt = txt.replace(replace[0], replace[1])
cleanList.append(txt)
    readFile.close()
resultTxt = '\n'.join(cleanList) + '\n'
writeFile = open(dirtyFilename, 'w')
writeFile.write(resultTxt)
    writeFile.close()
# return number, first and last
return (str(len(resultList)), cleanList[0], cleanList[-1])
if __name__ == "__main__":
print('The main file is "buildit.py" Run that instead.')
|
PumpkinPai/lslvimazing-builder
|
spidey.py
|
spidey.py
|
py
| 4,506 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28395539037
|
import os
import pickle
from multiprocessing import Pool, Manager
from pathlib import Path
from typing import Optional, List, Tuple, Union, Any
import numpy as np
from PIL import Image
from detectron2.structures import BoxMode
from lofarnn.data.cutouts import convert_to_valid_color, augment_image_and_bboxes
from lofarnn.models.dataloaders.utils import get_lotss_objects
from lofarnn.utils.common import create_coco_style_directory_structure, split_data
def make_single_coco_annotation_set(
image_names: List[Path],
record_list: List[Any],
set_number: int,
image_destination_dir: Optional[str],
multiple_bboxes: bool = True,
resize: Optional[Union[Tuple[int], int]] = None,
rotation: Optional[Union[List[float], float]] = None,
convert: bool = True,
bands: List[str] = (
"iFApMag",
"w1Mag",
"gFApMag",
"rFApMag",
"zFApMag",
"yFApMag",
"w2Mag",
"w3Mag",
"w4Mag",
),
precomputed_proposals: bool = False,
normalize: bool = True,
):
"""
For use with multiprocessing, goes through and does one rotation for the COCO annotations
:param image_names: Image names to load and use for generating dataset
:param record_list: Array to add the sources to
    :param set_number: index of the augmentation copy being generated; selects the rotation angle and suffixes the output filename
:param image_destination_dir: The destination directory of the images
:param resize: What to resize to
:param rotation: How much to rotate
:param convert: Whether to convert to PNG and normalize to between 0 and 255 for all channels
:param bands: Whether to use all 10 channels, or just radio, iband, W1 band
:param precomputed_proposals: Whether to create precomputed proposals
:param normalize: Whether to normalize input data between 0 and 1
:return:
"""
for i, image_name in enumerate(image_names):
# Get image dimensions and insert them in a python dict
if convert:
image_dest_filename = os.path.join(
image_destination_dir, image_name.stem + f".{set_number}.png"
)
else:
            if rotation is not None and np.any(rotation):
image_dest_filename = os.path.join(
image_destination_dir, image_name.stem + f".{set_number}.npy"
)
else:
image_dest_filename = os.path.join(
image_destination_dir, image_name.stem + f".npy"
)
(image, cutouts, proposal_boxes, wcs) = np.load(
image_name, allow_pickle=True
) # mmap_mode might allow faster read
image = np.nan_to_num(image)
# Change order to H,W,C for imgaug
if rotation is not None:
if isinstance(rotation, (list, tuple, np.ndarray)):
(image, cutouts, proposal_boxes,) = augment_image_and_bboxes(
image,
cutouts=cutouts,
proposal_boxes=proposal_boxes,
angle=rotation[set_number],
new_size=resize,
)
else:
(image, cutouts, proposal_boxes,) = augment_image_and_bboxes(
image,
cutouts=cutouts,
proposal_boxes=proposal_boxes,
angle=np.random.uniform(-rotation, rotation),
new_size=resize,
)
else:
# Need this to convert the bbox coordinates into the correct format
(image, cutouts, proposal_boxes,) = augment_image_and_bboxes(
image,
cutouts=cutouts,
proposal_boxes=proposal_boxes,
angle=0,
new_size=resize,
verbose=False,
)
width, height, depth = np.shape(image)
if depth != len(bands):
continue
# First R (Radio) channel
image[:, :, 0] = convert_to_valid_color(
image[:, :, 0],
clip=True,
lower_clip=0.0,
upper_clip=1000,
normalize=normalize,
scaling=None,
)
for layer in range(1, image.shape[2]):
image[:, :, layer] = convert_to_valid_color(
image[:, :, layer],
clip=True,
lower_clip=10.0,
upper_clip=28.0,
normalize=normalize,
scaling=None,
)
if not os.path.exists(os.path.join(image_dest_filename)):
if convert:
image = np.nan_to_num(image)
image = (255.0 * image).astype(np.uint8)
# If converting, only take the first three layers, generally Radio, i band, W1 band
pil_im = Image.fromarray(image[:, :, :3], "RGB")
pil_im.save(image_dest_filename)
else:
image = np.nan_to_num(image)
np.save(image_dest_filename, image) # Save to the final destination
if bands:
rec_depth = 10
else:
rec_depth = 3
record = {
"file_name": image_dest_filename,
"image_id": i,
"height": height,
"width": width,
"depth": rec_depth,
}
# Insert bounding boxes and their corresponding classes
objs = []
# check if there is no optical source
try:
if len(cutouts) > 0: # There is an optical source
if not multiple_bboxes:
cutouts = [
cutouts[0]
] # Only take the first one, the main optical source
for source_num, bbox in enumerate(cutouts):
assert float(bbox[2]) >= float(bbox[0])
assert float(bbox[3]) >= float(bbox[1])
if bbox[4] == "Other Optical Source":
category_id = 0
else:
category_id = 0
obj = {
"bbox": [
float(bbox[0]),
float(bbox[1]),
float(bbox[2]),
float(bbox[3]),
],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": category_id, # For Optical Source
"iscrowd": 0,
}
objs.append(obj)
except Exception as e:
print(e)
print("No Optical source found")
if precomputed_proposals:
record["proposal_boxes"] = proposal_boxes
record["proposal_objectness_logits"] = np.ones(len(proposal_boxes))
record["proposal_bbox_mode"] = BoxMode.XYXY_ABS
record["annotations"] = objs
record_list.append(record)
def create_coco_annotations(
image_names: Union[List[Path], List[str]],
image_destination_dir: Optional[str],
json_dir: str = "",
json_name: str = "json_data.pkl",
multiple_bboxes: bool = True,
resize: Optional[Union[Tuple[int], int]] = None,
rotation: Optional[Union[List[float], float]] = None,
convert: bool = True,
bands: List[str] = (
"iFApMag",
"w1Mag",
"gFApMag",
"rFApMag",
"zFApMag",
"yFApMag",
"w2Mag",
"w3Mag",
"w4Mag",
),
precomputed_proposals: bool = False,
normalize: bool = True,
rotation_names: Optional[List[str]] = None,
verbose: bool = False,
):
"""
Creates the annotations for the COCO-style dataset from the npy files available, and saves the images in the correct
directory
:param image_names: Image names, i.e., the source names
:param image_destination_dir: The directory the images will end up in
:param json_dir: The directory where to put the JSON annotation file
:param json_name: The name of the JSON file
:param multiple_bboxes: Whether to use multiple bounding boxes, or only the first, for
example, to only use the main source Optical source, or include others that fall within the
defined area
    :param rotation: Whether to rotate the images or not. If given as a tuple, its length sets the number of rotated
    copies made per image; if a single float, three evenly spaced rotations between 0 and 180 degrees are generated
:param convert: Whether to convert to PNG files (default), or leave them as NPY files
:return:
"""
if rotation is not None:
if type(rotation) == tuple:
num_copies = len(rotation)
else:
num_copies = 3
else:
num_copies = 1
if rotation_names is not None:
# Rotate these specific sources ~ 2.5 times more (generally multicomponent ones)
extra_rotates = []
t = []
for i, name in enumerate(image_names):
print(name.stem)
print(rotation_names)
if name.stem in rotation_names:
extra_rotates.append(i)
t.append(rotation_names)
print(f"Matched Names: {t}")
print(f"Indicies: {extra_rotates} out of {image_names}")
extra_rotates = np.asarray(extra_rotates)
image_names = np.asarray(image_names)
extra_names = image_names[extra_rotates]
        mask = np.ones(len(image_names), dtype=bool)
mask[extra_rotates] = 0
single_names = image_names[mask]
else:
single_names = image_names
extra_names = []
print(f"Extra Names: {len(extra_names)}")
print(f"Single Names: {len(single_names)}")
print(f"Extra Names: {extra_names}")
print(f"Single Names: {single_names}")
# List to store single dict for each image
dataset_dicts = []
if num_copies > 1:
manager = Manager()
pool = Pool(processes=os.cpu_count())
L = manager.list()
rotation = np.linspace(0, 180, num_copies)
[
pool.apply_async(
make_single_coco_annotation_set,
args=[
single_names,
L,
m,
image_destination_dir,
multiple_bboxes,
resize,
rotation,
convert,
bands,
precomputed_proposals,
normalize,
],
)
for m in range(num_copies)
]
print(len(L))
# Now do the same for the extra copies, but with more rotations, ~2.5 to equal out multi and single comp sources
num_multi_copies = int(np.ceil(num_copies * 2.5))
print(f"Num Multi Copies: {num_multi_copies}")
multi_rotation = np.linspace(0, 180, num_multi_copies)
[
pool.apply_async(
make_single_coco_annotation_set,
args=[
extra_names,
L,
m,
image_destination_dir,
multiple_bboxes,
resize,
multi_rotation,
convert,
bands,
precomputed_proposals,
normalize,
],
)
for m in range(num_multi_copies)
]
pool.close()
pool.join()
print(len(L))
print(f"Length of L: {len(L)}")
for element in L:
dataset_dicts.append(element)
print(f"Length of Dataset Dict: {len(dataset_dicts)}")
# Write all image dictionaries to file as one json
json_path = os.path.join(json_dir, json_name)
with open(json_path, "wb") as outfile:
pickle.dump(dataset_dicts, outfile)
if verbose:
print(f"COCO annotation file created in '{json_dir}'.\n")
        return 0  # return here so the single-process path below is not executed as well
# Iterate over all cutouts and their objects (which contain bounding boxes and class labels)
for m in range(num_copies):
make_single_coco_annotation_set(
image_names=image_names,
record_list=dataset_dicts,
set_number=m,
image_destination_dir=image_destination_dir,
multiple_bboxes=multiple_bboxes,
resize=resize,
rotation=rotation,
convert=convert,
bands=bands,
precomputed_proposals=precomputed_proposals,
normalize=normalize,
)
# Write all image dictionaries to file as one json
json_path = os.path.join(json_dir, json_name)
with open(json_path, "wb") as outfile:
pickle.dump(dataset_dicts, outfile)
if verbose:
print(f"COCO annotation file created in '{json_dir}'.\n")
def create_coco_dataset(
root_directory: str,
multiple_bboxes: bool = False,
split_fraction: float = 0.2,
resize: Optional[Union[Tuple[int], int]] = None,
rotation: Optional[Union[List[float], float]] = None,
convert: bool = True,
bands: List[str] = (
"iFApMag",
"w1Mag",
"gFApMag",
"rFApMag",
"zFApMag",
"yFApMag",
"w2Mag",
"w3Mag",
"w4Mag",
),
precomputed_proposals: bool = False,
normalize: bool = True,
subset: str = "",
multi_rotate_only: Optional[str] = None,
verbose: bool = False,
):
"""
Create COCO directory structure, if it doesn't already exist, split the image data, and save it to the correct
directories, and create the COCO annotation file to be loaded into Detectron2, or other similar models
    :param split_fraction: Fraction of the data for the test set. The validation set is rolled into the test set.
:param root_directory: root directory for the COCO dataset
:param multiple_bboxes: Whether to include multiple bounding boxes, or only the main source
:param resize: Image size to resize to, or None if not resizing
:param convert: Whether to convert npy files to png, or to keep them in the original format, useful for SourceMapper
:param verbose: Whether to print more data to stdout or not
:param subset: Whether to limit ones to only the fluxlimit sources, if not empty, should be path to list of source filepaths to use
:return:
"""
(
all_directory,
train_directory,
val_directory,
test_directory,
annotations_directory,
) = create_coco_style_directory_structure(root_directory, verbose=verbose)
# Gather data from all_directory
data_split = split_data(
all_directory, val_split=split_fraction, test_split=split_fraction
)
if subset:
# Keep only those already in the subset
subset = np.load(subset, allow_pickle=True)
for d in ["train", "test", "val"]:
data_split[d] = data_split[d][np.isin(data_split[d], subset)]
annotations_directory = os.path.join(annotations_directory, "subset")
if multi_rotate_only:
l_objects = get_lotss_objects(multi_rotate_only, False)
# Get all multicomponent sources
l_objects = l_objects[l_objects["LGZ_Assoc"] > 1]
multi_names = l_objects["Source_Name"].data
else:
multi_names = None
create_coco_annotations(
data_split["train"],
json_dir=annotations_directory,
image_destination_dir=train_directory,
json_name=f"json_train_prop{precomputed_proposals}_all{bands}_multi{multiple_bboxes}_norm{normalize}.pkl",
multiple_bboxes=multiple_bboxes,
resize=resize,
rotation=rotation,
convert=convert,
normalize=normalize,
bands=bands,
precomputed_proposals=precomputed_proposals,
rotation_names=multi_names,
verbose=verbose,
)
create_coco_annotations(
data_split["train"],
json_dir=annotations_directory,
image_destination_dir=train_directory,
json_name=f"json_train_test_prop{precomputed_proposals}_all{bands}_multi{multiple_bboxes}_norm{normalize}.pkl",
multiple_bboxes=multiple_bboxes,
resize=resize,
rotation=None,
convert=convert,
normalize=normalize,
bands=bands,
precomputed_proposals=precomputed_proposals,
rotation_names=None,
verbose=verbose,
)
if len(data_split["val"]) > 0:
create_coco_annotations(
data_split["val"],
json_dir=annotations_directory,
image_destination_dir=val_directory,
json_name=f"json_val_prop{precomputed_proposals}_all{bands}_multi{multiple_bboxes}_norm{normalize}.pkl",
multiple_bboxes=multiple_bboxes,
resize=resize,
rotation=None,
convert=convert,
normalize=normalize,
bands=bands,
precomputed_proposals=precomputed_proposals,
rotation_names=None,
verbose=verbose,
)
create_coco_annotations(
data_split["test"],
json_dir=annotations_directory,
image_destination_dir=test_directory,
json_name=f"json_test_prop{precomputed_proposals}_all{bands}_multi{multiple_bboxes}_norm{normalize}.pkl",
multiple_bboxes=multiple_bboxes,
resize=resize,
rotation=None,
convert=convert,
normalize=normalize,
bands=bands,
precomputed_proposals=precomputed_proposals,
verbose=verbose,
)
|
jacobbieker/lofarnn
|
lofarnn/utils/coco.py
|
coco.py
|
py
| 17,530 |
python
|
en
|
code
| 1 |
github-code
|
50
|
5050699532
|
while True:
n = input()
if n == '0':
break
check_list1 = ""
check_list2 = ""
n_len = len(n)
part_point = n_len // 2
if n_len % 2 == 1:
n = n[:part_point] + n[part_point+1:] #12321 => 1221
    # this loop slices backwards from the split point down to index 0
for i in range(part_point -1, -1, -1):
check_list1 += n[i]
    # this loop slices from the split point to the end of the string
for i in range(part_point, len(n)):
check_list2 += n[i]
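    # e.g. n = '12321' -> trimmed to '1221'; check_list1 = '21', check_list2 = '21' -> "yes"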
if check_list1 == check_list2:
print("yes")
else:
print("no")
|
jihunhan98/Baekjoon
|
1259.py
|
1259.py
|
py
| 559 |
python
|
ko
|
code
| 0 |
github-code
|
50
|
24634017890
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
# iterative
def isSymmetric(self, root: TreeNode) -> bool:
stack = [(root.left, root.right)]
while len(stack):
left, right = stack.pop()
if (not left) or (not right):
if left != right:
return False
else:
if left.val != right.val:
return False
stack.append((left.left, right.right))
stack.append((left.right, right.left))
return True
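    # NOTE: the second definition below reuses the name isSymmetric, so it replaces
    # the iterative version above at class-creation time; rename one to keep both callable.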
# recursive
def isSymmetric(self, root: TreeNode) -> bool:
def compare(left: TreeNode, right: TreeNode) -> bool:
if (not left) or (not right):
return left == right
return left.val == right.val \
and compare(left.left, right.right) \
and compare(left.right, right.left)
return compare(root.left, root.right)
|
toh995/leetcode-practice
|
medium/0101_symmetric_tree.py
|
0101_symmetric_tree.py
|
py
| 1,129 |
python
|
en
|
code
| 0 |
github-code
|
50
|
71268256475
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
#Get data
train = pd.read_csv('/home/bhanuchander/course/Learn_MachineLearning/data/csv/titanic/train.csv')
test = pd.read_csv('/home/bhanuchander/course/Learn_MachineLearning/data/csv/titanic/test.csv')
train.drop(["Name", "Ticket", "Cabin"], axis=1, inplace=True)
test.drop(["Name", "Ticket", "Cabin"], axis=1, inplace=True)
train.head()
one_hot_train = pd.get_dummies(train)
one_hot_test = pd.get_dummies(test)
# First five rows from train dataset
one_hot_train.head()
one_hot_train['Age'].fillna(one_hot_train['Age'].mean(), inplace=True)
one_hot_test['Age'].fillna(one_hot_test['Age'].mean(), inplace=True)
one_hot_train.isnull().sum()
one_hot_test.isnull().sum().sort_values(ascending=False)
# Fill the null Fare values with the mean of all Fares
one_hot_test['Fare'].fillna(one_hot_test['Fare'].mean(), inplace=True)
one_hot_test.isnull().sum().sort_values(ascending=False)
feature = one_hot_train.drop('Survived', axis=1)
target = one_hot_train['Survived']
# Model creation (RandomForestClassifier was already imported above)
rf = RandomForestClassifier(random_state=1, criterion='gini', max_depth=10, n_estimators=50, n_jobs=-1)
rf.fit(feature, target)
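# A possible next step (hedged sketch: assumes the one-hot test columns line up
# with the training features after the identical preprocessing above):
# predictions = rf.predict(one_hot_test)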
|
Bhanuchander210/Learn_MachineLearning
|
Problems/titanic_prediction/mytest.py
|
mytest.py
|
py
| 1,243 |
python
|
en
|
code
| 1 |
github-code
|
50
|
29810645821
|
# getaddrinfo() takes several arguments to filter the result list.
# The host and port values given in the example are required arguments.
# The optional arguments are family, socktype, proto, and flags. The family, socktype,
# and proto values should be 0 or one of the constants defined by socket.
import socket
def get_constants(prefix):
"""Create a dictionary mapping socket module constants to their names."""
return dict( (getattr(socket, n), n)
for n in dir(socket)
if n.startswith(prefix)
)
families = get_constants('AF_')
types = get_constants('SOCK_')
protocols = get_constants('IPPROTO_')
for response in socket.getaddrinfo('www.ibm.com', 'http',
socket.AF_INET, # family
socket.SOCK_STREAM, # socktype
socket.IPPROTO_TCP, # protocol
socket.AI_CANONNAME, # flags
):
# Unpack the response tuple
family, socktype, proto, canonname, sockaddr = response
print ('Family :', families[family])
print ('Type :', types[socktype])
print ('Protocol :', protocols[proto])
print ('Canonical name:', canonname)
print ('Socket address:', sockaddr)
print ('---')
|
profjuarezbarbosajr/python-networking
|
addressing_protocols_socket_types/server_address_lookup_optional_args.py
|
server_address_lookup_optional_args.py
|
py
| 1,365 |
python
|
en
|
code
| 0 |
github-code
|
50
|
3913206133
|
"""A vectorized code to compute multiple trajectories at simultaneously"""
from functools import partial
from jax import jvp, vmap, jit
import jax.numpy as jnp
import numpy as np
from ode_solvers import rk4_solver as solver
from testbed_models import L96
import os
import json
forcing=8.0
# these parameters now give you the model, which is only a function of x, the state vector
model=partial(L96,forcing=8.0)
model_dim=40
num_initials=1
# Generate intial conditions from a gaussian with variance =epsilon
#epsilon=0.0001
#X_o=np.random.multivariate_normal(np.zeros(model_dim),epsilon*np.eye(model_dim),size=num_initials).T
# Or load a file containing initial conditions( like multiple ensembles)
with open('L96/L_96_I_x0_on_attractor_dim_40,forcing_8.json') as jsonfile:
param=json.load(jsonfile)
#path to initials
X_o=param["initial"]
T_start=0.0
T_stop=500.0
dt=0.05
dt_solver=0.01
iters_dt=int(dt/dt_solver)
Trajs=np.zeros((int((T_stop-T_start)/dt),model_dim,num_initials))
@jit
def model_vectorized(X_):
"model vectorization using vmap, we can compute multiple forward passes"
# The second argument below will take the RHS of dynamical equation.
    f_X = vmap(model, in_axes=1, out_axes=1)(X_)
return f_X
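# Shape convention: X_ is (model_dim, num_initials); vmap maps the model over
# axis 1, so every column evolves as an independent trajectory.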
# fix the rhs of your ode and the runge-kutta time step in the solver using partial function
my_solver=jit(partial(solver,rhs_function=model_vectorized,time_step=dt_solver))
@jit
def integrate_forward(X_):
#forecast_ensemble=lax.fori_loop(0,iters_delta_t,my_solver,last_analysis_ensemble)
X=X_
for i in range(iters_dt):
X=my_solver(x_initial=X)
return X
if (num_initials==1):
X_now=np.expand_dims(X_o,axis=1)
else:
X_now=X_o
Trajs[0]=X_now
for i in range(1,Trajs.shape[0]):
X_next=integrate_forward(X_now)
Trajs[i]=X_next
X_now=X_next
if (num_initials==1):
Trajs=np.squeeze(Trajs,axis=-1)
os.chdir('L96')
#Save the solution
np.save('Multiple_trajectories_N={}_gap={}_ti={}_tf={}_dt_{}_dt_solver={}.npy'.format(num_initials,dt,T_start,T_stop,dt,dt_solver),Trajs)
print('job done')
|
Mr-Markovian/Covariant_lyapunov_Vectors
|
codes/assimilation/L96/multiple_trajectories.py
|
multiple_trajectories.py
|
py
| 2,053 |
python
|
en
|
code
| 1 |
github-code
|
50
|
17781918084
|
# %%
import copy
import einops
import numpy as np
# import streamlit as st
import torch as t
from funcs import (
cal_score_read_my,
cal_score_weight_probe,
cal_score_write_unemb,
load_board_seq,
load_model,
neuron_and_blank_my_emb,
one_hot,
plot_probe_outputs,
relu,
state_stack_to_one_hot,
plot_cosine_sim,
select_board_states,
yield_similar_boards,
yield_extended_boards,
get_act_patch_mlp_post,
patching_metric,
)
from othello_world.mechanistic_interpretability.mech_interp_othello_utils import (
OthelloBoardState,
string_to_label,
to_int,
plot_single_board,
)
import os
from pathlib import Path
import pandas as pd
import plotly.express as px
# import streamlit as st
import torch as t
from jaxtyping import Float
import transformer_lens.utils as utils
from torch import Tensor
from tqdm.notebook import tqdm
from transformer_lens.hook_points import HookPoint
from typing import List, Optional, Tuple, Union, Callable
working_dir = Path(f"{os.getcwd()}").resolve()
device = t.device("cuda" if t.cuda.is_available() else "cpu")
from othello_world.mechanistic_interpretability.mech_interp_othello_utils import (
OthelloBoardState,
int_to_label,
plot_board,
plot_board_log_probs,
plot_single_board,
string_to_label,
to_int,
to_string,
)
# %%
t.set_grad_enabled(False)
stoi_indices = [i for i in range(64) if i not in [27, 28, 35, 36]]
# Define our rows, and the function that converts an index into a (row, column) label, e.g. `E2`
alpha = "ABCDEFGH"
device = "cuda" if t.cuda.is_available() else "cpu"
model = load_model()
board_seqs_int, board_seqs_string = load_board_seq(
"othello_world/mechanistic_interpretability/"
)
num_games = 50
# shape: [50, 60] = [50 games, 60 moves each]
focus_games_int = board_seqs_int[:num_games]
focus_games_string = board_seqs_string[:num_games]
focus_states = np.zeros((num_games, 60, 8, 8), dtype=np.float32)
focus_valid_moves = t.zeros((num_games, 60, 64), dtype=t.float32)
for i in range(num_games):
board = OthelloBoardState()
for j in range(60):
board.umpire(focus_games_string[i, j].item())
focus_states[i, j] = board.state
focus_valid_moves[i, j] = one_hot(board.get_valid_moves())
focus_logits, focus_cache = model.run_with_cache(focus_games_int[:, :-1].to(device))
OTHELLO_ROOT = "./othello_world"
OTHELLO_MECHINT_ROOT = OTHELLO_ROOT + "/mechanistic_interpretability"
full_linear_probe = t.load(
OTHELLO_MECHINT_ROOT + "/main_linear_probe.pth", map_location=device
)
rows = 8
cols = 8
options = 3
assert full_linear_probe.shape == (3, model.cfg.d_model, rows, cols, options)
black_to_play_index = 0
white_to_play_index = 1
blank_index = 0
their_index = 1
my_index = 2
# Creating values for linear probe (converting the "black/white to play" notation into "me/them to play")
linear_probe = t.zeros(model.cfg.d_model, rows, cols, options, device=device)
linear_probe[..., blank_index] = 0.5 * (
full_linear_probe[black_to_play_index, ..., 0]
+ full_linear_probe[white_to_play_index, ..., 0]
)
linear_probe[..., their_index] = 0.5 * (
full_linear_probe[black_to_play_index, ..., 1]
+ full_linear_probe[white_to_play_index, ..., 2]
)
linear_probe[..., my_index] = 0.5 * (
full_linear_probe[black_to_play_index, ..., 2]
+ full_linear_probe[white_to_play_index, ..., 1]
)
alternating = np.array(
[-1 if i % 2 == 0 else 1 for i in range(focus_games_int.shape[1])]
)
flipped_focus_states = focus_states * alternating[None, :, None, None]
# We now convert to one-hot encoded vectors
focus_states_flipped_one_hot = state_stack_to_one_hot(
t.tensor(flipped_focus_states), rows=rows, cols=cols
)
# Take the argmax (i.e. the index of option empty/their/mine)
focus_states_flipped_value = focus_states_flipped_one_hot.argmax(dim=-1)
probe_out = einops.einsum(
focus_cache["resid_post", 6],
linear_probe,
"game move d_model, d_model row col options -> game move row col options",
)
probe_out_value = probe_out.argmax(dim=-1)
correct_middle_odd_answers = (
probe_out_value.cpu() == focus_states_flipped_value[:, :-1]
)[:, 5:-5:2]
accuracies_odd = einops.reduce(
correct_middle_odd_answers.float(), "game move row col -> row col", "mean"
)
correct_middle_answers = (probe_out_value.cpu() == focus_states_flipped_value[:, :-1])[
:, 5:-5
]
accuracies = einops.reduce(
correct_middle_answers.float(), "game move row col -> row col", "mean"
)
blank_probe = linear_probe[..., 0] - (linear_probe[..., 1] + linear_probe[..., 2]) / 2
my_probe = linear_probe[..., 2] - linear_probe[..., 1]
pos = 20
game_index = 0
# Plot board state
moves = focus_games_string[game_index, : pos + 1]
# plot_single_board(moves)
# Plot corresponding model predictions
state = t.zeros((8, 8), dtype=t.float32, device=device) - 13.0
state.flatten()[stoi_indices] = focus_logits[game_index, pos].log_softmax(dim=-1)[1:]
cell_r = 5
cell_c = 4
print(f"Flipping the color of cell {'ABCDEFGH'[cell_r]}{cell_c}")
board = OthelloBoardState()
board.update(moves.tolist())
board_state = board.state.copy()
valid_moves = board.get_valid_moves()
flipped_board = copy.deepcopy(board)
flipped_board.state[cell_r, cell_c] *= -1
flipped_valid_moves = flipped_board.get_valid_moves()
newly_legal = [
string_to_label(move) for move in flipped_valid_moves if move not in valid_moves
]
newly_illegal = [
string_to_label(move) for move in valid_moves if move not in flipped_valid_moves
]
print("newly_legal", newly_legal)
print("newly_illegal", newly_illegal)
# %%
# st.write("got to here")
flip_dir = my_probe[:, cell_r, cell_c]
big_flipped_states_list = []
layer = 4
scales = [0, 1, 2, 4, 8, 16]
# Scale the probes down to be unit norm per cell
blank_probe_normalised = blank_probe / blank_probe.norm(dim=0, keepdim=True)
my_probe_normalised = my_probe / my_probe.norm(dim=0, keepdim=True)
# Set the center blank probes to 0, since they're never blank so the probe is meaningless
blank_probe_normalised[:, [3, 3, 4, 4], [3, 4, 3, 4]] = 0.0
# with st.sidebar:
# # cell_label = st.text_input('Target Cell', 'C0')
# cell_label = st.selectbox(
# "Target Cell",
# [f"{alp}{num}" for alp in "ABCDEFGH" for num in range(8)],
# index=16,
# )
# layer = st.slider("Layer", 0, 7, 5)
# ACTIVATION_THRES = st.slider(
# "Activation threshold", min_value=0.0, max_value=1.0, value=0.0, step=0.05
# )
# SCORING_METRIC = st.selectbox(
# "Scoring metric",
# [
# "read_blank",
# "write_unemb",
# "read_my",
# "multiply all",
# "Find invalid moves: -write_unemb",
# "Destroy: -(read_blank * write_blank)",
# "Destroy: -(read_my * write_my)",
# "Destroy: -(write_unemb * write_blank)",
# "Enhance: read_blank * write_blank",
# "Enhance: read_my * write_my",
# "Patching",
# "Random"
# ],
# index=3,
# )
# Defaults standing in for the disabled streamlit controls above (assumed values,
# matching the commented-out widget defaults).
cell_label = "C0"
layer = 5
ACTIVATION_THRES = 0.0
SCORING_METRIC = "multiply all"
# row and column of the cell
cell = (ord(cell_label[0]) - ord("A"), int(cell_label[-1]))
w_in_L5 = model.W_in[layer, :, :] # shape: [d_model, d_mlp]
w_in_L5_norm = w_in_L5 / w_in_L5.norm(dim=0, keepdim=True)
w_out_L5 = model.W_out[layer, :, :] # shape: [d_mlp, d_model]
w_out_L5_norm = w_out_L5 / w_out_L5.norm(dim=-1, keepdim=True)
# shape: [d_model, n_vocab]
W_U_norm = model.W_U / model.W_U.norm(dim=0, keepdim=True)
w_in_L5_my = einops.einsum(
w_in_L5_norm, my_probe_normalised, "d_model d_mlp, d_model row col -> d_mlp row col"
)
w_out_L5_my = einops.einsum(
w_out_L5_norm,
my_probe_normalised,
"d_mlp d_model, d_model row col -> d_mlp row col",
)
w_in_L5_blank = einops.einsum(
w_in_L5_norm,
blank_probe_normalised,
"d_model d_mlp, d_model row col -> d_mlp row col",
)
w_out_L5_blank = einops.einsum(
w_out_L5_norm,
blank_probe_normalised,
"d_mlp d_model, d_model row col -> d_mlp row col",
)
w_out_L5_umemb = einops.einsum(
w_out_L5_norm, W_U_norm, "d_mlp d_model, d_model n_vocab -> d_mlp n_vocab"
)
# calculate scores
score_read_blank = cal_score_weight_probe(w_in_L5_blank, cell)
score_write_blank = cal_score_weight_probe(w_out_L5_blank, cell)
score_read_my = cal_score_weight_probe(w_in_L5_my, cell)
score_write_my = cal_score_weight_probe(w_out_L5_my, cell)
score_write_unemb = cal_score_write_unemb(w_out_L5_umemb, cell)
score_detector_match, top_detector = cal_score_read_my(w_in_L5_my, cell)
# %%
# calculating patching score
if SCORING_METRIC == "Patching":
for datapoint in select_board_states(
["C0", "D1", "E2"],
["valid", "theirs", "mine"],
pos=None,
batch_size=1000,
game_str_gen=board_seqs_string,
):
print("inside datapoint loop")
orig_extensions = yield_extended_boards(
datapoint[:-1], datapoint.shape[0], batch_size=25
)
selected_board_states = select_board_states(
["C0", "C0"],
["blank", "invalid"],
game_str_gen=orig_extensions,
pos=datapoint.shape[0],
batch_size=25,
)
alter_dataset = list(
yield_similar_boards(
datapoint,
selected_board_states,
sim_threshold=0.0,
by_valid_moves=True,
match_valid_moves_number=True,
batch_size=25,
)
)
if alter_dataset != []:
orig_datapoint = datapoint
alter_datapoint = alter_dataset[0]
break
clean_input = t.tensor(to_int(orig_datapoint))
corrupted_input = t.tensor(to_int(alter_datapoint))
clean_logits, clean_cache = model.run_with_cache(clean_input)
corrupted_logits, corrupted_cache = model.run_with_cache(corrupted_input)
clean_log_probs = clean_logits.log_softmax(dim=-1)
corrupted_log_probs = corrupted_logits.log_softmax(dim=-1)
pos = -1
answer_index = to_int("C0")
clean_log_prob = clean_log_probs[0, pos, answer_index]
corrupted_log_prob = corrupted_log_probs[0, pos, answer_index]
print("Everything is fine, prepared to patch")
act_patch = get_act_patch_mlp_post(
model,
corrupted_input,
clean_cache,
patching_metric,
answer_index,
corrupted_log_prob,
clean_log_prob,
layer=layer,
pos=pos,
)
# col1, col2 = st.columns(2)
# col1.write('Clean board')
# col1.plotly_chart(plot_single_board(string_to_label(orig_datapoint), return_fig=True), aspect='equal')
# col2.write('Corrupt board')
# col2.plotly_chart(plot_single_board(string_to_label(alter_datapoint), return_fig=True), aspect='equal')
# act_patch shape: [d_mlp]
# top_neurons = act_patch.argsort(descending=True)[:5]
sub_score = {
"read_blank": score_read_blank,
"write_unemb": score_write_unemb,
"read_my": score_detector_match,
}
# if SCORING_METRIC == "multiply all":
# score = (
# relu(score_read_blank) * relu(score_write_unemb) * relu(score_detector_match)
# )
# elif SCORING_METRIC == "read_blank":
# score = score_read_blank
# elif SCORING_METRIC == "write_unemb":
# score = score_write_unemb
# elif SCORING_METRIC == "read_my":
# score = score_read_my
# elif SCORING_METRIC == "Find invalid moves: -write_unemb":
# score = -score_write_unemb
# elif SCORING_METRIC == "Destroy: -(read_blank * write_blank)":
# score = -(score_read_blank * score_write_blank)
# elif SCORING_METRIC == "Destroy: -(read_my * write_my)":
# score = -(score_read_my * score_write_my)
# elif SCORING_METRIC == "Destroy: -(write_unemb * write_blank)":
# score = -(score_write_unemb * score_write_blank)
# elif SCORING_METRIC == "Enhance: read_blank * write_blank":
# score = score_read_blank * score_write_blank
# elif SCORING_METRIC == "Enhance: read_my * write_my":
# score = score_read_my * score_write_my
# elif SCORING_METRIC == "Patching":
# score = t.abs(act_patch)
# elif SCORING_METRIC == "Random":
# score = t.rand_like(score_read_blank)
# With the streamlit selector disabled, fall back to the "multiply all" score.
score = relu(score_read_blank) * relu(score_write_unemb) * relu(score_detector_match)
n_top_neurons = 10
top_neurons = score.argsort(descending=True)[:n_top_neurons]
# visualize the input and output weights for these neurons
# tabs = st.tabs([f"L{layer}N{neuron}" for neuron in top_neurons])
# for neuron, tab in zip(top_neurons, tabs):
# if SCORING_METRIC == "Patching":
# detector = t.full((8, 8), t.nan).to(device)
# detector[2, 0] = 0
# detector[3, 1] = -1
# detector[4, 2] = 1
# else:
# detector = top_detector[neuron]
# with tab:
# neuron_and_blank_my_emb(
# layer,
# neuron.item(),
# model,
# blank_probe_normalised,
# my_probe_normalised,
# focus_states_flipped_value,
# focus_cache,
# ACTIVATION_THRES,
# score,
# sub_score,
# detector,
# )
# calculate cosine similarity between blank probe, my probe and W_U
SHOW_COSINE_SIM = False
# SHOW_COSINE_SIM = st.checkbox("Show cosine similarity between probes and W_U")  # streamlit-only
if SHOW_COSINE_SIM:
cols = st.columns(3)
pairs = [("blank", "my"), ("blank", "W_U"), ("my", "W_U")]
for col, (a, b) in zip(cols, pairs):
col.plotly_chart(
plot_cosine_sim(a, b, model, blank_probe_normalised, my_probe_normalised),
aspect="equal",
)
# %%
def gen_pattern(
target_label: List[str],
target_state: List[str],
):
pattern = t.full((8, 8), t.nan).to(device)
state2num = {"blank": 0, "theirs": -1, "mine": 1}
for label, state in zip(target_label, target_state):
cell_idx = to_string(label)
r, c = cell_idx // 8, cell_idx % 8
pattern[r, c] = state2num[state]
return pattern
# classic_pattern = gen_pattern(["C0", "D1", "E2"], ["blank", "theirs", "mine"])
classic_pattern = gen_pattern(["C0", "B1", "A2"], ["blank", "theirs", "mine"])
# %%
def match_state_pattern(
flipped_states: Float[Tensor, "games moves 8 8"], pattern: Float[Tensor, "8 8"]
):
match = flipped_states == pattern[None, None, :, :]
nan_place = pattern[None, None, :, :].isnan()
return (match | nan_place).all(dim=-1).all(dim=-1)
# %%
layer = 5
mlp_post = focus_cache["mlp_post", layer] # shape: [50, 59, 2048]
match_seqs = match_state_pattern(t.tensor(flipped_focus_states), classic_pattern)[
:, :-1
] # shape: [50, 59]
def act_distribution_diff_given_pattern(neuron_act, match):
"""
neuron_act: shape: [num_of_games, num_of_moves(59), 2048]
match: shape: [num_of_games, num_of_moves(59)]
"""
act_on_matched_pattern = neuron_act[match] # shape: [num_of_matched_seq, 2048]
act_on_unmatched_pattern = neuron_act[~match] # shape: [num_of_unmatched_seq, 2048]
dist_diff = (
act_on_matched_pattern.mean(dim=0) - act_on_unmatched_pattern.mean(dim=0)
) / act_on_unmatched_pattern.std(dim=0)
return dist_diff
# px.histogram(act_on_matched_pattern.mean(dim=0).cpu().numpy(), template="plotly").show()
# px.histogram(
# act_on_unmatched_pattern.mean(dim=0).cpu().numpy(), template="plotly"
# ).show()
# px.histogram(
# act_on_matched_pattern.mean(dim=0).cpu().numpy()
# - act_on_unmatched_pattern.mean(dim=0).cpu().numpy(),
# template="plotly",
# ).show()
# %%
dist_diff = act_distribution_diff_given_pattern(mlp_post, match_seqs)
top_neurons = dist_diff.argsort(descending=True)[:10]
for neuron in top_neurons:
df = pd.DataFrame(
{
"acts": mlp_post[:, :, neuron].cpu().numpy().flatten(),
"label": match_seqs.flatten(),
}
)
px.histogram(
df,
title=f"L{layer}N{neuron} on matched and unmatched pattern",
x="acts",
color="label",
template="plotly",
barmode="group",
log_y=True,
).show()
# %%
# use logit lens
logit = model(focus_games_int[0][:-1])
plot_board_log_probs(focus_games_string[0][:-1], logit[0])
# %%
def zero_ablate(act, hook):
act.fill_(0.0)
return act
patch_logit = model.run_with_hooks(
focus_games_int[0][:-1],
fwd_hooks=[
# (utils.get_act_name('mlp_post', 5), zero_ablate),
# (utils.get_act_name('mlp_post', 6), zero_ablate),
(utils.get_act_name('mlp_post', 7), zero_ablate),
]
)
plot_board_log_probs(focus_games_string[0][:-1], patch_logit[0])
# %%
|
yeutong/ARENA_Othello
|
explore.py
|
explore.py
|
py
| 16,533 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23673085326
|
# Baekjoon 11722: Longest Decreasing Subsequence (Silver II), https://www.acmicpc.net/problem/11722
import sys
input = sys.stdin.readline
N = int(input())
A = list(map(int, input().split()))
dp = [1] * N
for i in range(N-1):
for j in range(i+1, N):
if A[i] <= A[j]: continue
dp[j] = max(dp[j], dp[i] + 1)
print(max(dp))
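# e.g. N=6, A=[10, 30, 10, 20, 20, 10] -> longest strictly decreasing subsequence is 30 > 20 > 10, so 3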
|
mukhoplus/BOJ
|
2_python/11722.py
|
11722.py
|
py
| 347 |
python
|
ko
|
code
| 0 |
github-code
|
50
|
40261696633
|
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, IntegerField, TextAreaField, SelectField
from wtforms.validators import InputRequired, Optional, NumberRange, URL, Length
class AddPetForm(FlaskForm):
"""Form for adding a pet"""
name = StringField("Pet name",
validators=[InputRequired()])
species = SelectField("Species", choices = [("cat", "Cat"), ("dog", "Dog"), ("porcupine", "Porcupine")])
photo_url = StringField("Photo", validators=[Optional(), URL()])
age = IntegerField("Age", validators=[Optional(), NumberRange(min=0, max=30, message="Age should be between 0 and 30")])
notes = TextAreaField("Comments", validators=[Optional(), Length(min=10)])
available = BooleanField("Available?")
class EditPetForm(FlaskForm):
"""Form for editing existing pet"""
photo_url = StringField("Photo", validators=[Optional(), URL()])
notes = TextAreaField("Comments", validators=[Optional(), Length(min=10)])
available = BooleanField("Available?")
|
ShaheenKhan99/flask-adoption-agency
|
forms.py
|
forms.py
|
py
| 1,069 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40540326751
|
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
serial_output = """icostri: 45.4164: linf_verts = 2.42561, linf_faces = 1.49743
cubedsphere: 82.9186: linf_verts = 0.2566, linf_faces = 0
icostri: 22.7082: linf_verts = 0.175552, linf_faces = 0.446258
cubedsphere: 41.4593: linf_verts = 0.898719, linf_faces = 0.466355
icostri: 11.3541: linf_verts = 0.0327947, linf_faces = 0.18008
cubedsphere: 20.7296: linf_verts = 0.105744, linf_faces = 0.330876
icostri: 5.67705: linf_verts = 0.0078577, linf_faces = 0.057519
cubedsphere: 10.3648: linf_verts = 0.0242884, linf_faces = 0.153158
icostri: 2.83852: linf_verts = 0.00199034, linf_faces = 0.0170644
cubedsphere: 5.18241: linf_verts = 0.00611779, linf_faces = 0.0500934
icostri: 1.41926: linf_verts = 0.000499979, linf_faces = 0.00492556
cubedsphere: 2.59121: linf_verts = 0.00152096, linf_faces = 0.0148438"""
gpu_output = """icostri: 45.4164: linf_verts = 2.42561, linf_faces = 1.49743
cubedsphere: 82.9186: linf_verts = 0.2566, linf_faces = 0
icostri: 22.7082: linf_verts = 0.175552, linf_faces = 0.446258
cubedsphere: 41.4593: linf_verts = 0.898719, linf_faces = 0.466355
icostri: 11.3541: linf_verts = 0.0327947, linf_faces = 0.18008
cubedsphere: 20.7296: linf_verts = 0.105744, linf_faces = 0.330876
icostri: 5.67705: linf_verts = 0.0078577, linf_faces = 0.057519
cubedsphere: 10.3648: linf_verts = 0.0242884, linf_faces = 0.153158
icostri: 2.83852: linf_verts = 0.00199034, linf_faces = 0.0170644
cubedsphere: 5.18241: linf_verts = 0.00611779, linf_faces = 0.0500934
icostri: 1.41926: linf_verts = 0.000499979, linf_faces = 0.00492556
cubedsphere: 2.59121: linf_verts = 0.00152096, linf_faces = 0.0148438"""
#lines = serial_output.split('\n')
lines = gpu_output.split('\n')
ic = []
cs = []
for l in lines:
if 'icostri' in l:
ic.append(l)
elif 'cubedsphere' in l:
cs.append(l)
numtrials = len(ic)
icmeshsize = np.zeros(numtrials)
iclinfverts = np.zeros(numtrials)
iclinffaces = np.zeros(numtrials)
csmeshsize = np.zeros(numtrials)
cslinfverts = np.zeros(numtrials)
cslinffaces = np.zeros(numtrials)
for i, l in enumerate(ic):
ls = l.split()
icmeshsize[i] = float(ls[1][0:-1])
iclinfverts[i] = float(ls[4][0:-1])
iclinffaces[i] = float(ls[7])
for i, l in enumerate(cs):
ls = l.split()
csmeshsize[i] = float(ls[1][0:-1])
cslinfverts[i] = float(ls[4][0:-1])
cslinffaces[i] = float(ls[7])
o2ref = np.power(icmeshsize,2)
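    # Convergence rate between successive refinements is the log-log slope
    # (log e_{k+1} - log e_k) / (log h_{k+1} - log h_k), which should approach 2
    # for a second-order method. The cubed-sphere slices start one sample later
    # because its coarsest run reports linf_faces = 0, and log(0) is undefined.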
icratesverts = (np.log(iclinfverts[1:])-np.log(iclinfverts[0:-1]))/(np.log(icmeshsize[1:])-np.log(icmeshsize[0:-1]))
csratesverts = (np.log(cslinfverts[2:])-np.log(cslinfverts[1:-1]))/(np.log(csmeshsize[2:])-np.log(csmeshsize[1:-1]))
icratesfaces = (np.log(iclinffaces[1:])-np.log(iclinffaces[0:-1]))/(np.log(icmeshsize[1:])-np.log(icmeshsize[0:-1]))
csratesfaces = (np.log(cslinffaces[2:])-np.log(cslinffaces[1:-1]))/(np.log(csmeshsize[2:])-np.log(csmeshsize[1:-1]))
print("icratesverts = ", icratesverts)
print("icratesfaces = ", icratesfaces)
print("csratesverts = ", csratesverts)
print("csratesfaces = ", csratesfaces)
fig, ax = plt.subplots()
ax.loglog(icmeshsize, iclinfverts, '-s', label='icos. tri. verts.')
ax.loglog(csmeshsize[1:], cslinfverts[1:], '-o', label='cubed sph. verts.')
ax.loglog(icmeshsize, iclinffaces, '--d', label='icos. tri. faces')
ax.loglog(csmeshsize[1:], cslinffaces[1:], '--*', label='cubed sph. faces')
    ax.loglog(icmeshsize, 0.0005*o2ref, 'k-.', label=r'O($\Delta x^2$)')
    ax.set_xticks(icmeshsize)
ax.set_xlabel('mesh size (degrees)')
ax.set_ylabel('max err')
ax.set_xticklabels(['{:3.2f}'.format(dl) for dl in icmeshsize])
ax.legend()
ax.set_title("Spherical Poisson solve max error")
fig.savefig("sph_poisson_conv.pdf", bbox_inches='tight')
plt.close(fig)
|
pbosler/lpm
|
scripts/poissonOutput.py
|
poissonOutput.py
|
py
| 4,069 |
python
|
en
|
code
| 5 |
github-code
|
50
|
33798140343
|
"""
Initialise the shop! Users can spend gold to purchase items.
TODO: It is currently a bad bad system that keeps everything in JSON and loops through the inventory to find things.
I will move this to an indexed format at some point but as a poc this works for now.
"""
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
import json
import os
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme()
class Shop(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.SHOP_PATH = "glicko_bot/data/art/"
self.WALLET_PATH = "glicko_bot/data/users.json"
self.KITTY_PATH = "glicko_bot/data/kitty.json"
@commands.command()
async def stock(self, ctx):
"""
Display what is available to buy.
Example usage:
!stock
"""
embed = discord.Embed(title="Shop",
description="Welcome to the shop. Only the finest artwork for sale!\nEverything is one of a kind!\nNO REFUNDS",
color=0x674EA7
)
for collection in os.listdir(self.SHOP_PATH):
items = os.path.join(self.SHOP_PATH, collection, "metadata.jsonl")
with open(items, "r") as f:
inventory = [json.loads(item) for item in f.readlines()]
inv_str = "\n".join([f"_{item['name']} (UID: {item['uid']})_ - Price: {item['base_price']} GLD" for item in inventory if bool(item["for_sale"])])
embed.add_field(name=collection, value=inv_str, inline=False)
await ctx.send(embed=embed)
@commands.command(aliases=["art", "preview", "ap"])
async def art_preview(self, ctx,
uid: int = commands.parameter(description="The unique id of the art.", default=0),
):
"""
Preview an item from !stock.
Example usage:
!preview 2
"""
path = os.path.join(self.SHOP_PATH, "founding_collection/metadata.jsonl")
with open(path, "r") as f:
stock = [json.loads(a) for a in f.readlines()]
for item in stock:
if bool(item["for_sale"]):
if item["uid"] == uid:
image_path = self.SHOP_PATH + f"founding_collection/{item['path']}"
with open(image_path, "rb") as f:
file = discord.File(f, image_path)
await ctx.send(f"This is {item['name']}, available for the low price of only {item['base_price']} GLD.\nType _!buy {item['uid']}_ to buy it.", file=file)
return
@commands.command(aliases=["purchase"])
async def buy(self, ctx,
uid: int = commands.parameter(description="The unique id of the art.", default=0),
):
"""
Purchase an item from !stock using its Unique ID.
Example usage:
!buy 1
"""
path = os.path.join(self.SHOP_PATH, "founding_collection/metadata.jsonl")
with open(path, "r") as f:
stock = [json.loads(a) for a in f.readlines()]
with open(self.WALLET_PATH, "r") as f:
users = json.load(f)
user = str(ctx.author.id)
if user not in users.keys():
await ctx.send("How can you buy stuff without a wallet?!")
return
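        # Linear scan of the JSONL inventory (see the module TODO): find the art
        # by uid, check it is listed for sale, move GLD from buyer to seller,
        # then rewrite the metadata file and the wallets file in full.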
for art in stock:
if art.get("uid", False) == uid:
owner = art.get("owner", "")
for_sale = art.get("for_sale", 0)
name = art.get("name", "")
if bool(for_sale):
price = art.get('base_price', 0)
funds = users[user].get("GLD")
if funds < price:
await ctx.send(f"You don't have enough GLD to buy {name} for {price} GLD!")
else:
users[user]["GLD"] -= price
if owner != "":
owner_id = discord.utils.get(self.bot.users, name=owner).id
users[str(owner_id)]["GLD"] += price
art["owner"] = ctx.message.author.name
art["for_sale"] = 0
art["sale_history"].append({"from":owner, "to": ctx.message.author.name, "price": price})
await ctx.send(f"@everyone {ctx.message.author.name} bought {name} for {price} GLD.")
with open(path, "w") as f:
for a in stock:
json.dump(a, f)
f.write("\n")
with open(self.WALLET_PATH, "w") as f:
json.dump(users, f)
return
else:
await ctx.send(f"This art is already owned by {owner}!")
return
await ctx.send(f"I don't seem to have that registered?")
return
@commands.cooldown(1, 30, BucketType.user)
@commands.command()
async def sell(self, ctx, uid: int = commands.parameter(description="The unique id of the art.", default=0), price: float = commands.parameter(description="The price in GLD.", default=1000)):
"""
Put art up for sale so that !buy can be used to buy it!
Example usage:
!sell 3 1700
"""
path = os.path.join(self.SHOP_PATH, "founding_collection/metadata.jsonl")
with open(path, "r") as f:
stock = [json.loads(a) for a in f.readlines()]
with open(self.KITTY_PATH, "r") as f:
tax_pool = json.load(f)["tax"]
if price >= tax_pool:
await ctx.send(f"I think that's a bit unreasonable... Try something less than {tax_pool:,.4f}")
return
for art in stock:
if art.get("uid", False) == uid:
owner = art.get("owner", "")
if owner == ctx.message.author.name:
previous_price = art["base_price"]
for_sale = art.get("for_sale", 0)
art["base_price"] = price
art["for_sale"] = 1
if not bool(for_sale):
await ctx.send(f"{ctx.message.author.name} listed {art['name']} for {price:,} GLD.\n Buy it while you can!")
else:
await ctx.send(f"{ctx.message.author.name} re-listed {art['name']} for {price:,} GLD.\nPreviously it was {previous_price:,}")
with open(path, "w") as f:
for a in stock:
json.dump(a, f)
f.write("\n")
else:
await ctx.send(f"You don't own {art['name']}")
return
@commands.command()
async def collection(self, ctx, show: int = commands.parameter(description="Whether to display the images or just titles.", default=0)):
"""
Show off your art collection.
Add 1 to show the images themselves.
Example usage:
!collection
!collection 0
!collection 1
"""
path = os.path.join(self.SHOP_PATH, "founding_collection/metadata.jsonl")
with open(path, "r") as f:
stock = [json.loads(a) for a in f.readlines()]
title = f"{ctx.author.name}'s collection:"
await ctx.send(title)
if bool(show):
for item in stock:
if item["owner"] == ctx.author.name:
image_path = self.SHOP_PATH + f"founding_collection/{item['path']}"
with open(image_path, "rb") as f:
file = discord.File(f, image_path)
await ctx.send(f"{item['name']} (UID: {item['uid']})", file=file)
else:
embed = discord.Embed(title=title, color=0xE65AD8)
for item in stock:
if item["owner"] == ctx.author.name:
embed.add_field(name=f"__{item['name']}__", value=f"(UID: {item['uid']})")
await ctx.send(embed=embed)
async def setup(bot: commands.bot):
await bot.add_cog(Shop(bot))
|
jkswin/glicko_goblins
|
glicko_bot/cogs/shop.py
|
shop.py
|
py
| 8,395 |
python
|
en
|
code
| 0 |
github-code
|
50
|
24933452472
|
import inspect
import os
import re
import traceback
import types
import sys
#
from gub.syntax import printf, function_class
from gub import octal
from gub import misc
def subst_method (func):
'''Decorator to match Context.get_substitution_dict ()'''
func.substitute_me = True
return func
NAME = 0
OBJECT = 1
def is_method (member):
return (type (member[OBJECT]) == types.MethodType
or (member[OBJECT].__class__ == misc.MethodOverrider))
def is_subst_method (member):
return (is_method (member)
and (hasattr (member[OBJECT], 'substitute_me')
or base_is_class_subst_method (member[NAME],
function_class (member[OBJECT]))))
def object_get_methods (object):
return list (filter (is_method, inspect.getmembers (object)))
class _C (object):
pass
def is_class_subst_method (name, cls):
try:
if name in cls.__dict__:
classmethod (cls.__dict__[name])
except:
printf ('self:', cls)
printf ('name:', name)
raise
if (name in cls.__dict__
and type (cls.__dict__[name]) != type (_C.__init__)
and classmethod (cls.__dict__[name])
and 'substitute_me' in cls.__dict__[name].__dict__):
return True
return False
def base_is_class_subst_method (name, cls):
if is_class_subst_method (name, cls):
return True
for c in cls.__bases__:
if base_is_class_subst_method (name, c):
return True
return False
def typecheck_substitution_dict (d):
for (k, v) in list (d.items ()):
if type (v) != str:
raise Exception ('type', (k, v))
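# Example of the repeated '%(...)s' expansion performed by
# recurse_substitutions below: for d = {'prefix': '/usr', 'bindir': '%(prefix)s/bin'},
# the loop rewrites d['bindir'] to '/usr/bin', iterating until no '%(' is left
# in the value; non-string values are simply dropped from the dict.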
def recurse_substitutions (d):
for (k, v) in list (d.items ()):
if type (v) != str:
del d[k]
continue
try:
while v.index ('%(') >= 0:
v = v % d
except:
t, vv, b = sys.exc_info ()
if t == ValueError:
pass
elif t == KeyError or t == ValueError:
printf ('variable: >>>' + k + '<<<')
printf ('format string: >>>' + v + '<<<')
raise
else:
raise
d[k] = v
return d
class ConstantCall:
def __init__ (self, const):
self.const = const
def __call__ (self):
return self.const
class SetAttrTooLate (Exception):
pass
class ExpandInInit (Exception):
pass
class NonStringExpansion (Exception):
pass
class Context (object):
def __init__ (self, parent = None):
self._substitution_dict = None
self._parent = parent
self._substitution_assignment_traceback = None
def __setattr__ (self, k, v):
if (type (v) == str
and k != '_substitution_dict' and self._substitution_dict):
msg = ('%s was already set in\n%s'
% (k,
''.join (traceback.format_list (self._substitution_assignment_traceback))))
raise SetAttrTooLate (msg)
self.__dict__[k] = v
def get_constant_substitution_dict (self):
d = {}
if self._parent:
d = self._parent.get_substitution_dict ()
d = d.copy ()
members = inspect.getmembers (self)
member_substs = {}
for (name, method) in filter (is_subst_method, members):
val = method ()
self.__dict__[name] = ConstantCall (val)
member_substs[name] = val
if type (val) != str:
message = '[' + self.name () + '] non string value: ' + str (val) + ' for subst_method: ' + name
printf (message)
raise NonStringExpansion (message)
string_vars = dict ((k, v) for (k, v) in members if type (v) == str)
d.update (string_vars)
d.update (member_substs)
try:
d = recurse_substitutions (d)
except KeyError:
printf ('self:', self)
raise
return d
def get_substitution_dict (self, env={}):
if self._substitution_dict == None:
self._substitution_assignment_traceback = traceback.extract_stack ()
self._substitution_dict = self.get_constant_substitution_dict ()
init_found = False
for (file, line, name, text) in self._substitution_assignment_traceback:
# this is a kludge, but traceback doesn't yield enough info
# to actually check that the __init__ being called is a
# derived from self.__init__
if name == '__init__' and 'buildrunner.py' not in file:
init_found = True
if init_found:
# if this happens derived classes cannot override settings
# from the baseclass.
msg = 'Cannot Context.expand () in __init__ ()'
raise ExpandInInit (msg)
d = self._substitution_dict
if env:
d = d.copy ()
for (k, v) in list (env.items ()):
try:
if type (v) == str:
d.update ({k: v % d})
except:
repr_v = repr (v)
repr_k = repr (k)
type_v = type (v)
type_k = type (k)
msg = 'Error substituting in %(repr_v)s(%(type_v)s) with %(repr_k)s(%(type_k)s)' % locals ()
raise NonStringExpansion (msg)
return d
def expand (self, s, env={}):
d = self.get_substitution_dict (env)
try:
e = s % d
except:
t, v, b = sys.exc_info ()
if t == KeyError or t == ValueError:
printf ('format string: >>>' + s + '<<<')
printf ('self:', self)
raise
return e
class RunnableContext (Context):
def __init__ (self, *args):
Context.__init__ (self, *args)
# TODO: should use _runner ?
self.runner = None
def connect_command_runner (self, runner):
previous = self.runner
self.runner = runner
return previous
def file_sub (self, re_pairs, name, to_name=None, env={}, must_succeed=False, use_re=True):
substs = []
for (frm, to) in re_pairs:
frm = self.expand (frm, env)
if type (to) == str:
to = self.expand (to, env)
substs.append ((frm, to))
if to_name:
to_name = self.expand (to_name, env)
return self.runner.file_sub (substs, self.expand (name, env), to_name=to_name,
must_succeed=must_succeed, use_re=use_re)
def fakeroot (self, s, env={}):
self.runner.fakeroot (self.expand (s, env=env))
def command (self, str):
self.runner.command (str)
def system (self, cmd, env={}, ignore_errors=False):
dict = self.get_substitution_dict (env)
cmd = self.expand (cmd, env)
self.runner.system (cmd, env=dict, ignore_errors=ignore_errors)
def shadow_tree (self, src, dest, env={}, soft=False):
src = self.expand (src, env=env)
dest = self.expand (dest, env=env)
self.runner.shadow_tree (src, dest, soft)
def dump (self, str, name, mode='w', env={},
expand_string=True, expand_name=True, permissions=octal.o644):
if expand_name:
name = self.expand (name, env)
if expand_string:
str = self.expand (str, env)
return self.runner.dump (str, name, mode=mode, permissions=permissions)
def map_find_files (self, func, directory, pattern, must_happen=False, silent=False, find_func=misc.find_files):
return self.runner.map_find_files (func, self.expand (directory), pattern, must_happen, silent, find_func)
def map_locate (self, func, directory, pattern, must_happen=False, silent=False, find_func=misc.locate_files):
return self.runner.map_locate (func, self.expand (directory), pattern, must_happen, silent, find_func)
def copy (self, src, dest, env={}):
return self.runner.copy (self.expand (src, env=env), self.expand (dest, env=env))
def func (self, f, *args):
return self.runner.func (f, *args)
def chmod (self, file, mode, env={}):
return self.runner.chmod (self.expand (file, env=env), mode)
def link (self, src, dest, env={}):
return self.runner.link (self.expand (src, env=env), self.expand (dest, env=env))
def symlink (self, src, dest, env={}):
return self.runner.symlink (self.expand (src, env=env), self.expand (dest, env=env))
def rename (self, src, dest, env={}):
return self.runner.rename (self.expand (src, env=env), self.expand (dest, env=env))
def mkdir (self, dir, env={}):
return self.runner.mkdir (self.expand (dir, env=env))
def remove (self, file, env={}):
return self.runner.remove (self.expand (file, env=env))
def rmtree (self, file, env={}):
return self.runner.rmtree (self.expand (file, env=env))
#
# Tests.
#
if __name__=='__main__':
class TestBase (Context):
@subst_method
def bladir (self):
return 'foo'
class TestClass (TestBase):
@subst_method
def name (self):
return self.__class__.__name__
def bladir (self):
return 'derivedbladir'
class S:
pass
p = TestClass ()
from gub import settings
s = settings.Settings ('debian-arm')
c = RunnableContext (s)
printf (c.locate_files ('/etc/', '*.conf'))
printf (p.expand ('%(name)s %(bladir)s'))
|
EddyPronk/gub
|
gub/context.py
|
context.py
|
py
| 9,845 |
python
|
en
|
code
| 2 |
github-code
|
50
|
32187295828
|
idioma = input("Enter the Spanish and English words separated by a colon (:), with each pair separated by a comma.")
idiomas = {}
duplas = idioma.split(",")
for dupla in duplas:
palabra = dupla.split(":")
español = palabra[0].strip()
ingles = palabra[1].strip()
idiomas[español] = ingles
traducir = input("Enter a sentence in Spanish: ")
palabras = traducir.split()
traduccion = []
for palabra in palabras:
traducida = idiomas.get(palabra, palabra)
traduccion.append(traducida)
print("The translation is: " + " ".join(traduccion))
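# Example run (illustrative): a dictionary input of "perro:dog, gato:cat" and
# the sentence "el perro come" prints "The translation is: el dog come" --
# words missing from the dictionary pass through unchanged via dict.get.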
|
SyZeck/Ejercicios-de-Programacion-con-Python
|
Diccionarios/Ejercicio 8/solucion.py
|
solucion.py
|
py
| 575 |
python
|
es
|
code
| 0 |
github-code
|
50
|
16092127365
|
"""
Map out allocated memory regions in a specified binary.
Not a standalone python file -- needs to run under GDB.
Called by harvest_heap_data.py
"""
import gdb #won't work unless script is being run under a GDB process
import re
import os
import struct
import sys
# GDB's python interpreter needs this to locate the other files
sys.path.append(os.path.dirname(__file__))
import data_structures
import build_graph
"""
Construct a list of contiguous mapped regions: their names, their starting virtual addresses, and their end virtual addresses.
In many cases there are multiple VMAs with the same name. To deal with this, we append indices to the region names.
For example, if there are three regions called libc.so, they will become libc.so_0, libc.so_1, libc.so_2
pid = process to attach to and map
coalesce = whether or not to merge adjacent VMAs that have the same name
"""
def build_maplist(pid, coalesce=False):
gdb.execute("attach " + str(pid))
# parse the VMA table printed by gdb
maps = [re.split(r'\s{1,}', s)[1:] for s in gdb.execute("info proc mappings", False, True).split("\n")[4:-1]]
maplist = data_structures.MapList()
for segment in maps:
segname = segment[-1]
if segname == "(deleted)":
segname = segment[-2] + "_(deleted)"
region = data_structures.Region(int(segment[0],16), int(segment[1],16), segname) # (start, end, name)
maplist.add_region(region)
if coalesce:
maplist.coalesce()
return maplist
"""
Function for dumping memory from regions of interest. This is helpful in general
because dumping data and then analyzing it from files is much faster than querying
GDB every time we want to look up an address.
maplist = data structure of mapped regions obtained from `build_maplist`
sources = list of names of regions to dump. If None, will dump every region in maplist. Note that
          the names in `sources` do not include the index (i.e., they are of the
          form 'libc.so' instead of 'libc.so_4')
dumpname = prefix to append to the string "[region].dump" where "[region]" is the region name
length_lb, length_ub = constraints on the length of regions to dump. Useful in scenarios when
we want to dump and analyze all regions of a certain length. This happened
when we wanted to find all jemalloc chunks in the firefox heap.
"""
def dump_mem(maplist, sources=None, dumpname="", length_lb = -1, length_ub = 2**30):
sourcelist = [reg for reg in maplist.regions_list if reg.end - reg.start >= length_lb and reg.end - reg.start <= length_ub]
if sources:
sourcelist = [s for s in sourcelist if "_".join(s.name.split("_")[:-1]) in sources]
for i,src in enumerate(sourcelist):
        print("Dumping {} ({}/{}), len = {} bytes".format(src.name, i + 1, len(sourcelist), src.end - src.start))
try:
# will save dump in file dumpname + region_name, where region_name is the section of the VMA name
# after the last '/' character. Prevents path issues with the saved file.
gdb.execute("dump memory {}.dump {} {}".format(dumpname + src.name.split("/")[-1], src.start, src.end))
except:
continue
print("finished dump")
"""
Run the full script and save the memory graph
pid = pid of the process to attach to
sources = names of regions to scan for pointers. If None, all regions will be scanned
name = prefix for all saved files, including serialized data structures and memory dumps
llb, lub = lower and upper bounds on the lengths of source regions to scan
coalesce = whether to aggregate adjacent regions with the same name
"""
def gdb_main(pid, sources=None, name="", llb = -1, lub=2**30, graph=True, psize=8, coalesce=False):
maplist = build_maplist(pid, coalesce)
dump_mem(maplist, sources, name, llb, lub)
maplist.serialize(name + "maplist.json")
if graph:
memgraph = build_graph.build_graph_from_dumps(maplist, psize, sources, name, llb, lub)
memgraph.serialize(name + "memgraph.json")
gdb.execute("detach")
gdb.execute("quit")
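# A plausible external invocation (normally harvest_heap_data.py drives this;
# the pid, source list, and name prefix below are made-up illustration values),
# assuming this script's directory is on GDB's Python path:
#   gdb --batch -ex "python import cartography_gdb; cartography_gdb.gdb_main(1234, sources=['[heap]'], name='run1_')"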
|
jproney/MemoryCartography
|
cartography_gdb.py
|
cartography_gdb.py
|
py
| 4,172 |
python
|
en
|
code
| 4 |
github-code
|
50
|
12195818389
|
import RPi.GPIO as GPIO
import time
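
# Drive a passive buzzer on BCM pin 18 with RPi.GPIO's software PWM: 262 Hz is
# roughly middle C (C4), and a 50% duty cycle gives a steady square-wave tone.
# The tone plays for one second before the PWM is stopped and the pin released.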
buz = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(buz,GPIO.OUT)
GPIO.setwarnings(False)
pwm = GPIO.PWM(buz,262)
pwm.start(50.0)
time.sleep(1)
pwm.stop()
GPIO.cleanup()
|
embededdrive/embedded
|
Raspberry Pi/day01/buzzer.py
|
buzzer.py
|
py
| 199 |
python
|
en
|
code
| 0 |
github-code
|
50
|