seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
17669758792
|
"""2020_02_18
Revision ID: 000001
Revises:
Create Date: 2020-02-18 03:57:38.958091
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "000001"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"users",
sa.Column("added_on", sa.DateTime(), nullable=False),
sa.Column("modified_on", sa.DateTime(), nullable=False),
sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column("enabled", sa.Boolean(), nullable=False),
sa.Column("last_auth_time", sa.DateTime(), nullable=True),
sa.Column("username", sa.String(length=32), nullable=False),
sa.Column("password_hash", sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint("id", name=op.f("pk_users")),
)
op.create_index(op.f("ix_users_username"), "users", ["username"], unique=True)
op.create_table(
"user_accesses",
sa.Column("added_on", sa.DateTime(), nullable=False),
sa.Column("modified_on", sa.DateTime(), nullable=False),
sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column("enabled", sa.Boolean(), nullable=False),
sa.Column("ip_address", sa.String(length=15), nullable=False),
sa.Column("external_app_id", sa.String(length=15), nullable=False),
sa.Column("users_id", sa.BigInteger(), nullable=False),
sa.ForeignKeyConstraint(
["users_id"],
["users.id"],
name=op.f("fk_user_accesses_users_id_users"),
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_user_accesses")),
sa.UniqueConstraint(
"users_id",
"ip_address",
"external_app_id",
name=op.f("uq_user_accesses_users_id"),
),
)
op.create_index(
op.f("ix_user_accesses_external_app_id"),
"user_accesses",
["external_app_id"],
unique=True,
)
op.create_index(
op.f("ix_user_accesses_ip_address"),
"user_accesses",
["ip_address"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_user_accesses_ip_address"), table_name="user_accesses")
op.drop_index(op.f("ix_user_accesses_external_app_id"), table_name="user_accesses")
op.drop_table("user_accesses")
op.drop_index(op.f("ix_users_username"), table_name="users")
op.drop_table("users")
# ### end Alembic commands ###
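# Usage sketch (an assumption, not part of the original revision): with an
# Alembic environment configured via alembic.ini and env.py, this migration
# is applied and reverted from the command line with:
#   alembic upgrade 000001
#   alembic downgrade base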
|
ichux/elog
|
migrations/versions/000001_2020_02_18.py
|
000001_2020_02_18.py
|
py
| 2,743 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "alembic.op.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.BigInteger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "alembic.op.f",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_index",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_table",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.BigInteger",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.BigInteger",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "alembic.op.f",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "alembic.op.f",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.UniqueConstraint",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "alembic.op.f",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_index",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_index",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 77,
"usage_type": "name"
}
] |
43266096059
|
import discord
import os
from keep_alive import keep_alive
from discord.ext import commands
from better_profanity import profanity
os.system('python3 -m commands')
profanity.load_censor_words_from_file('./profanity.txt')
client = commands.Bot(command_prefix = '$')
money_registry = []
list1 = ['myself', 'me', 'i']
@client.event
async def on_ready():
print('Bot is ready!')
await client.change_presence(activity=discord.Game('$help'))
@client.command()
async def displayembed(ctx, *, Title):
embed = discord.Embed(title= Title, description= Title, color = 6400 ) #,color=Hex code
await ctx.send(embed=embed)
@client.command()
async def ping(ctx):
await ctx.send(f'Pong! {round(client.latency * 1000)}ms')
@client.command()
async def kill(ctx, *, WhoToKill):
embed = discord.Embed(description=f'{WhoToKill} eats some mushrooms from the wild. Too bad they were poisonous...', color= 6400) #,color=Hex code
await ctx.send(embed=embed)
@client.event
async def on_message(message):
mention = f'<@!{client.user.id}>'
if mention in message.content:
embed = discord.Embed(description=f"_{message.author.mention} :bell: You ping me, I ping you._", color= 6400 )
await message.channel.send(embed=embed)
if str(message.channel) == "pictures" and message.content != '':
if message.author != client.user:
await message.channel.purge(limit=1)
embed = discord.Embed(description= f"Sorry{message.author.mention}! Only Pictures!", color = 6400)
await message.channel.send(embed=embed)
else:
pass
if '' in message.content:  # NOTE: always true; the self-roles embed below is built but never sent (unfinished feature)
embed = discord.Embed(title= "Self Roles", description = "React to this message to get these roles! ")
if not message.author.bot:
if profanity.contains_profanity(message.content):
await message.delete()
embed = discord.Embed(description= f"{message.author.mention} :octagonal_sign: Mind your language!", color = 6400)
await message.channel.send(embed=embed)
await client.process_commands(message)
@client.event
async def on_member_join(member):
print(f'{member} has joined the server! Welcome!')
@client.event
async def on_member_remove(member):
print(f'{member} has left! Goodbai! GLHF')
keep_alive()
client.run(os.getenv('TOKEN'))
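# Usage sketch (assumptions, not part of the original file): the bot reads its
# token from a TOKEN environment variable, e.g.
#   export TOKEN="your-discord-bot-token"
# keep_alive() conventionally starts a tiny web server so free hosts (e.g.
# Replit) keep the process running, and profanity.txt holds the censor words
# loaded at startup.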
|
LittlRayRay/Censorbot
|
main.py
|
main.py
|
py
| 2,297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.system",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "better_profanity.profanity.load_censor_words_from_file",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "better_profanity.profanity",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "discord.Game",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "better_profanity.profanity.contains_profanity",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "better_profanity.profanity",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "keep_alive.keep_alive",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 71,
"usage_type": "call"
}
] |
34879700956
|
import re
from requests import get
import sys  # needed for sys.exit() below, which was called without this import
from sys import argv as cla
from readabilipy import simple_json_from_html_string
from ebooklib import epub
def valid_url(url):
regex = re.compile(
r'^(?:http)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return re.match(regex, str(url)) is not None
def slugify(s):
s = s.lower().strip()
s = ''.join(char for char in s if ord(char) < 128) #remove non-ascii characters
s = re.sub(r'[^\w\s-]', '', s)
s = re.sub(r'[\s_-]+', '-', s)
s = re.sub(r'^-+|-+$', '', s)
return s
def main():
if len(cla) != 2:
raise Exception("This script expects just one parameter. Did you comma-separate the URLs?")
if not cla[1]:
raise Exception("Invalid argument..")
links = str(cla[1]).split(',')
for l in links:
if not valid_url(l):
raise Exception(str("This is not a valid url: "+l))
book = epub.EpubBook()
book.set_language('en')
chapters = ['nav']
epub_title = ""
epub_author = ""
toc = []
if len(links) > 1:
print("You're trying to download {0} links. Please provide title and author.".format(len(links)))
epub_title = input("ePub title: ")
epub_author = input("ePub author: ")
for idx, link in enumerate(links):
try:
request = get(link)
if not request.text:
if input('Do you want to skip this URL and continue? [y/n]') == 'y':
continue
else:
print('Script stopped')
sys.exit(0)
else:
print('Extracting content from page..')
page_content = simple_json_from_html_string(request.text, use_readability=False)
chapter_content = page_content['plain_content']
if not epub_title:
epub_title = page_content['title']
if not epub_author:
epub_author = page_content['byline'] if page_content['byline'] else "Various authors"
print('Adding content to ePub..')
chapter = epub.EpubHtml(title=page_content['title'], file_name=str('chapter{}.xhtml'.format(idx+1)), lang='en')
chapter.content = u'{}'.format(chapter_content)
book.add_item(chapter)
chapters.append(chapter)
pass
except Exception as e:
raise e
print("Finishing epub..")
slug = slugify(epub_title)
book.set_identifier(slug)
book.set_title(epub_title)
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
book.spine = chapters
if epub_author:
book.add_author(epub_author)
else:
book.add_author("Unknown Author")
try:
epub.write_epub('{}.epub'.format(slug), book, {})
print("Done! Saved to {}.epub".format(slug))
except Exception as e:
raise e
if __name__ == "__main__":
main()
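# Usage sketch (assumption): pass one or more comma-separated URLs as a single
# command-line argument, e.g.
#   python web2epub.py "https://example.com/a,https://example.com/b"
# With more than one URL, the script prompts for an ePub title and author.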
|
eklop/web2epub
|
web2epub.py
|
web2epub.py
|
py
| 2,797 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "ebooklib.epub.EpubBook",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "ebooklib.epub",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "readabilipy.simple_json_from_html_string",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "ebooklib.epub.EpubHtml",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "ebooklib.epub",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "ebooklib.epub.EpubNcx",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "ebooklib.epub",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "ebooklib.epub.EpubNav",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "ebooklib.epub",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "ebooklib.epub.write_epub",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "ebooklib.epub",
"line_number": 95,
"usage_type": "name"
}
] |
225940000
|
from sqlalchemy import Column, Integer, String, ForeignKey
from app.routers.db import Base
class Task(Base):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True, index=True)
title = Column(String)
body = Column(String)
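# Usage sketch (assumption): with an engine bound wherever app.routers.db
# defines it, the `tasks` table declared above is created with:
#   Base.metadata.create_all(bind=engine)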
|
gitdarsh/todo
|
todo/app/models/model.py
|
model.py
|
py
| 248 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "app.routers.db.Base",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 10,
"usage_type": "argument"
}
] |
30410142101
|
import CryptoCurrency
import sqlite3 as sql
import requests
from datetime import datetime
import time
def get_crypto():
"""Récupères la liste des cryptomonnaies tradable sur le marché futures de Bybit
(!! 120 requests per second for 5 consecutive seconds maximum)
Returns:
list:liste des cryptomonnaies
"""
url = "https://api-testnet.bybit.com/v5/market/instruments-info?category=linear"
payload = {}
headers = {}
response = requests.request(
"GET", url, headers=headers, data=payload).json()
baseCoins = []
for crypto in response['result']['list']:
if crypto['baseCoin'][:5] == '10000' and crypto['quoteCoin'] == 'USDT' and crypto['baseCoin'] not in baseCoins:
# baseCoins += [crypto['baseCoin'][5:]]
# not handled for the moment
pass
elif crypto['baseCoin'][:4] == '1000' and crypto['quoteCoin'] == 'USDT' and crypto['baseCoin'] not in baseCoins:
# baseCoins += [crypto['baseCoin'][4:]]
# not handled for the moment
pass
elif crypto['quoteCoin'] == 'USDT' and crypto['baseCoin'] not in baseCoins and crypto['baseCoin'] != 'LUNA2' and crypto['baseCoin'] != 'PEOPLE':
# exceptions LUNA2 and PEOPLE still to be handled
baseCoins += [crypto['baseCoin']]
return baseCoins
def get_price_history(interval, crypto):
"""renvoie un dicitonnaire qui permet de connaître le prix de la cryptomonnaie depuis l'apparition de son contrat futures sur l'échange de cryptomonnaie.
Args:
interval (string): interval de temps entre deux données (Kline interval. 1,3,5,15,30,60,120,240,360,720,D,M,W)
crypto (CryptoCurrency): la crypto dont on veut le prix
"""
listeDictionnaires = []
listeDictionnaires.append(crypto.get_price(interval, 1500000000000,
int(datetime.now().timestamp())*1000))
lastTimestamps = list(listeDictionnaires[0].keys())
lastTimestamps.sort()
if len(lastTimestamps) < 200:
return listeDictionnaires
# intervalInTimestamp = int(lastTimestamps[2])-int(lastTimestamps[1])
# up to this point we have fetched the 200 most recent timestamps
compteur = 1
while len(lastTimestamps) == 200:
listeDictionnaires.append(crypto.get_price(
interval, 1500000000000, int(lastTimestamps[0])))
# must not exceed 120 requests per 5 seconds
if compteur % 119 == 0:
time.sleep(5)
lastTimestamps = (list(listeDictionnaires[compteur].keys()))
lastTimestamps.sort()
compteur += 1
print(listeDictionnaires)
return listeDictionnaires
if __name__ == "__main__":
# normal operation
# print(get_crypto())
cryptos = get_crypto()
conn = sql.connect("cryptoDatabase.db")
curs = conn.cursor()
curs.execute("DROP TABLE IF EXISTS Crypto")
curs.execute(
"CREATE TABLE Crypto (nom VARCHAR, symbol VARCHAR PRIMARY KEY, whitepaperlink VARCHAR)")
curs.execute("DROP TABLE IF EXISTS Prix")
curs.execute(
"CREATE TABLE Prix (symbol VARCHAR, date VARCHAR, open FLOAT, high FLOAT, low FLOAT, close FLOAT,PRIMARY KEY (symbol, date),FOREIGN KEY (symbol) REFERENCES Crypto(symbol))")
cryptoCurrencies = []
for crypto in cryptos:
cryptoCurrencies += [CryptoCurrency.Cryptocurrency(crypto)]
for crypto in cryptoCurrencies:
infos = crypto.get_name_and_whitepaperlink()
# the interval chosen here is weekly; for more precision, use a smaller interval
price_history = get_price_history(
"W", crypto)
curs.execute("INSERT INTO Crypto(nom,symbol,whitepaperlink) VALUES (?,?,?)",
(infos["name"], crypto.symbol, infos["whitepaperLink"]))
conn.commit()
for prices in price_history:
timestamps = list(prices.keys())
for date in timestamps:
curs.execute("INSERT INTO Prix(symbol,date,open,high,low,close) VALUES (?,?,?,?,?,?)",
(crypto.symbol, datetime.fromtimestamp(int(date)/1000), prices[date]["open"], prices[date]["high"], prices[date]["low"], prices[date]["close"]))
conn.commit()
conn.commit()
conn.close()
# test
# nft = CryptoCurrency.Cryptocurrency('EOS')
# print(get_price_history("D", nft))
# bitcoin = CryptoCurrency.Cryptocurrency("BTC")
# get_price_history("D", bitcoin)
# infos = bitcoin.get_name_and_whitepaperlink()
# conn = sql.connect("cryptoDatabase.db")
# curs = conn.cursor()
# curs.execute("DROP TABLE IF EXISTS Crypto")
# curs.execute(
# "CREATE TABLE Crypto (nom VARCHAR PRIMARY KEY, symbole VARCHAR, whitepaperlink VARCHAR)")
# curs.execute(
# "INSERT INTO Crypto(nom,symbole,whitepaperlink) VALUES (?,?,?)", (infos["name"], bitcoin.symbol, infos["whitepaperLink"]))
# conn.commit()
# conn.close()
|
ArthurOnWeb/l-historique-du-prix-d-une-cryptomonnaie
|
Main.py
|
Main.py
|
py
| 5,015 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.request",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "CryptoCurrency.Cryptocurrency",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 98,
"usage_type": "name"
}
] |
70396769467
|
""" JAX functions to Calculate moving average.
Author: Toshinori Kitamura
Affiliation: NAIST & OSX
"""
from __future__ import annotations
import jax
from chex import Array
from jax import lax
@jax.jit
def calc_ma(lr: float, idx1: Array, idx2: Array, tb: Array, tb_targ: Array) -> Array:
"""Calculate moving average.
The semantics of calc_ma are given by:
def calc_ma(lr, idx1, idx2, tb, tb_targ):
for s, a, targ in zip(idx1, idx2, tb_targ):
tb[s, a] = (1 - lr) * tb[s, a] + lr * targ
return tb
Args:
lr (float): Learning rate
idx1 (Array): (?, ) or (?, 1) array
idx2 (Array): (?, ) or (?, 1) array
tb (Array): (?, ?) initial array
tb_targ (Array): (?, ) or (?, 1) target array
Returns:
tb (Array): (?, ?) updated array
"""
assert len(tb.shape) == 2 # dSxdA
# squeeze (?, 1) inputs down to (?,); check the rank, not len()
idx1 = idx1.squeeze(axis=1) if idx1.ndim == 2 else idx1
idx2 = idx2.squeeze(axis=1) if idx2.ndim == 2 else idx2
tb_targ = tb_targ.squeeze(axis=1) if tb_targ.ndim == 2 else tb_targ
def body_fn(i, tb):
i1, i2, t = idx1[i], idx2[i], tb_targ[i]
targ = (1 - lr) * tb[i1, i2] + lr * t
return tb.at[i1, i2].set(targ)
tb = lax.fori_loop(0, len(idx1), body_fn, tb)
return tb
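# Minimal usage sketch (an assumption, not part of the original module):
#   import jax.numpy as jnp
#   tb = jnp.zeros((3, 2))               # dS x dA table
#   idx1 = jnp.array([0, 1])             # state indices
#   idx2 = jnp.array([1, 0])             # action indices
#   targ = jnp.array([1.0, 2.0])         # targets
#   calc_ma(0.5, idx1, idx2, tb, targ)   # -> tb[0, 1] == 0.5, tb[1, 0] == 1.0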
|
omron-sinicx/ShinRL
|
shinrl/_calc/moving_average.py
|
moving_average.py
|
py
| 1,289 |
python
|
en
|
code
| 42 |
github-code
|
6
|
[
{
"api_name": "chex.Array",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "jax.lax.fori_loop",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "jax.lax",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "jax.jit",
"line_number": 12,
"usage_type": "attribute"
}
] |
14572312600
|
############## THESE SPLINES ARE USING CATMULL SPLINES ##############
# https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
#
# FOLLOWING javidx9's SPLINE VIDEOS:
# https://www.youtube.com/watch?v=9_aJGUTePYo&t=898s&ab_channel=javidx9
from typing import List
import pygame, math
from code_modules.spline.spline_point_2D import Spline_Point2D
### THE FONT IS USED TO SHOW fOffset AND fMarker ###
###############
class Spline:
def __init__(self):
self.points = []
self.activePoint = 0
self.isLooped = False
self.RIGHT = False
self.LEFT = False
self.UP = False
self.DOWN = False
self.totalLineLength = 0
############# DEBUG FONT #############
### THE FONT IS USED TO SHOW fOffset AND fMarker ###
self.font = pygame.font.SysFont(None, 20)
def update(self):
if self.RIGHT:
self.points[self.activePoint].x += 5
if self.LEFT:
self.points[self.activePoint].x -= 5
if self.UP:
self.points[self.activePoint].y -= 5
if self.DOWN:
self.points[self.activePoint].y += 5
### CALCULATE TOTAL LENGTH ###
self.totalLineLength = self.__getTotalLength()
def draw(self, canvas):
##### DRAW SPLINE POINTS #####
### LOOPED ###
if self.isLooped:
for t in range(0, len(self.points)*100, 1):
pos = self.getSplinePoint(t / 100)
pygame.draw.circle(canvas, (255,255,255), (pos.x, pos.y), 2)
### NOT LOOPED ###
else:
for t in range(0, (len(self.points)*100) - 300 , 1):
pos = self.getSplinePoint(t / 100)
pygame.draw.circle(canvas, (255,255,255), (pos.x, pos.y), 2)
##### DRAW CONTROL POINTS + TEXT #####
for i in range(len(self.points)):
### DRAW DISTANCE ###
tempImg = self.font.render(str(self.points[i].length), True, (200,200,200))
canvas.blit(tempImg, (self.points[i].x + 20, self.points[i].y))
##########################
##### CONTROL POINTS #####
if i == self.activePoint:
pygame.draw.circle(canvas, (255,255,0), (self.points[i].x, self.points[i].y), 5)
else:
pygame.draw.circle(canvas, (255,0,0), (self.points[i].x, self.points[i].y), 5)
tempImg = self.font.render(str(i), True, (255,255,255))
canvas.blit(tempImg, (self.points[i].x, self.points[i].y))
def getSplinePoint(self, t):
if not self.isLooped:
p1 = int(t) + 1
p2 = p1 + 1
p3 = p2 + 1
p0 = p1 - 1
else:
p1 = int(t)
p2 = (p1 + 1) % len(self.points)
p3 = (p2 + 1) % len(self.points)
if p1 >= 1:
p0 = p1 - 1
else:
p0 = len(self.points) - 1
t = t - int(t)
tSquare = t * t
tCube = tSquare * t
q1 = -tCube + 2 * tSquare - t
q2 = 3 * tCube - 5 * tSquare + 2
q3 = -3 * tCube + 4 * tSquare + t
q4 = tCube - tSquare
tx = 0.5 * (self.points[p0].x * q1 +
self.points[p1].x * q2 +
self.points[p2].x * q3 +
self.points[p3].x * q4)
ty = 0.5 * (self.points[p0].y * q1 +
self.points[p1].y * q2 +
self.points[p2].y * q3 +
self.points[p3].y * q4)
return Spline_Point2D(tx, ty)
def getSplineGradient(self, t):
if not self.isLooped:
p1 = int(t) + 1
p2 = p1 + 1
p3 = p2 + 1
p0 = p1 - 1
else:
p1 = int(t)
p2 = (p1 + 1) % len(self.points)
p3 = (p2 + 1) % len(self.points)
if p1 >= 1:
p0 = p1 - 1
else:
p0 = len(self.points) - 1
t = t - int(t)
tSquare = t * t
tCube = tSquare * t
q1 = -3*tSquare + 4*t - 1
q2 = 9*tSquare - 10*t
q3 = -9*tSquare + 8*t + 1
q4 = 3*tSquare - 2*t
tx = 0.5 * (self.points[p0].x * q1 +
self.points[p1].x * q2 +
self.points[p2].x * q3 +
self.points[p3].x * q4)
ty = 0.5 * (self.points[p0].y * q1 +
self.points[p1].y * q2 +
self.points[p2].y * q3 +
self.points[p3].y * q4)
return Spline_Point2D(tx, ty)
def __getTotalLength(self):
### CALCULATE TOTAL LENGTH ###
total = 0
if self.isLooped:
for i in range(len(self.points)):
self.points[i].length = self.__calculateSegmentLength(i)
total += self.points[i].length
else:
for i in range(len(self.points)-3):
self.points[i].length = self.__calculateSegmentLength(i)
total += self.points[i].length
return total
def __calculateSegmentLength(self, node):
fLength = 0
fStepSize = 3
old_point = self.getSplinePoint(node)
for t in range(0, 100, fStepSize):
new_point = self.getSplinePoint(node + t/100)
fLength += math.sqrt((new_point.x - old_point.x) * (new_point.x - old_point.x)
+ (new_point.y - old_point.y)*(new_point.y - old_point.y))
old_point = new_point
### You need to recalculate the segment lengths if the spline changes,
# which means it's very inefficient to use splines dynamically. Preferably
# use them statically.
return fLength
def getNormalizedOffset(self, p):
# Which node is the base?
i = 0
while p > self.points[i].length:
p -= self.points[i].length
i += 1
# The fractional is the offset
return i + (p / self.points[i].length)
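# Minimal usage sketch (an assumption; pygame must be initialised because the
# constructor creates a debug font):
#   pygame.init()
#   spline = Spline()
#   spline.points = [Spline_Point2D(x, y) for x, y in
#                    [(0, 0), (100, 50), (200, 0), (300, 50), (400, 0)]]
#   spline.update()                    # recomputes the per-segment lengths
#   pos = spline.getSplinePoint(1.5)   # a point between control points 2 and 3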
|
EliasFredriksson/Tower_Defence_Reworked
|
code_modules/spline/spline.py
|
spline.py
|
py
| 6,006 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.font.SysFont",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "code_modules.spline.spline_point_2D.Spline_Point2D",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "code_modules.spline.spline_point_2D.Spline_Point2D",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 166,
"usage_type": "call"
}
] |
46525020186
|
import pygame
from robot import Robot
from manual_robot import ManualRobot
from automated_robot import AutomatedRobot
from automated_robots.robots_concursantes import *
from automated_robots.robots_zimatek import *
from robot_hub import RobotHub
from coin import Coin
import numpy as np
import os
class Combat:
"""
A class to represent a robot combat environment.
...
Attributes
----------
dims : list
width and height of the screen, dims = [width, height]
robots : pygame.sprite.Group
a sprite group containing the robot objects
robot_list : list
a list containing the robot objects
left_robot : Robot
the Robot that starts in the left-hand side
right_robot : Robot
the Robot that starts in the right-hand side
robot_hubs : pygame.sprite.Group
a sprite group containing the RobotHub objects
coin_per_second : float
estimated coin per second
Methods
-------
fix_bugs:
fixes bugs in the position of the robot sprites
run:
runs the robot combat
"""
def __init__(self, left_robot: Robot, right_robot: Robot, coin_per_second: float):
self.dims = (1050, 750)
self.robots = pygame.sprite.Group()
self.robots.add(left_robot)
self.robots.add(right_robot)
self.robot_list = [left_robot, right_robot]
self.left_robot = left_robot
self.right_robot = right_robot
self.robot_hubs = pygame.sprite.Group()
self.left_robot_hub = RobotHub(self.left_robot, RobotHub.DownLeft)
self.right_robot_hub = RobotHub(self.right_robot, RobotHub.DownRight)
self.robot_hubs.add(self.left_robot_hub)
self.robot_hubs.add(self.right_robot_hub)
self.coin_per_second = coin_per_second
self.font = None
self.font2 = None
def fix_bugs(self):
"""
fixes bugs in the position of the robot sprites
:return:
"""
if self.right_robot.living and self.left_robot.living:
collide = self.left_robot.rect.colliderect(self.right_robot.rect)
if collide:
if self.left_robot.rect.x <= self.right_robot.rect.x:
self.left_robot.move(dx=-self.left_robot.rect.width, combat=self)
self.right_robot.move(dx=self.right_robot.rect.width, combat=self)
else:
self.left_robot.move(dx=self.left_robot.rect.width, combat=self)
self.right_robot.move(dx=-self.right_robot.rect.width, combat=self)
def run(self):
"""
runs the robot combat
:return:
"""
pygame.init()
pygame.font.init()
self.font = pygame.font.Font("Resources/Pokemon_Classic.ttf", 16)
self.font2 = pygame.font.Font("Resources/Pokemon_Classic.ttf", 28)
background_image = pygame.image.load("Resources/background_mountains.jpg")
background_image = pygame.transform.rotozoom(background_image, 0, 2.5)
os.environ['SDL_VIDEO_CENTERED'] = '0'
screen = pygame.display.set_mode(self.dims)
pygame.display.set_caption("Robot Combat: {:s} vs {:s}".format(str(type(self.left_robot)).split(".")[-1][:-2],
str(type(self.right_robot)).split(".")[-1][:-2]))
for robot in self.robots:
robot.set_up()
for hub in self.robot_hubs:
hub.set_up()
# PRE LOOP
sprites_all = pygame.sprite.Group()
projectiles = pygame.sprite.Group()
coins = pygame.sprite.Group()
stop = False
pause = False
winner = None
sprites_all.add(self.robots)
sprites_all.add(projectiles)
sprites_all.add(coins)
clock = pygame.time.Clock()
time = 1
count_down = 60*3
totalcoins = 0
# -------- Principal Loop of the Program -----------
while not stop:
for event in pygame.event.get():
if event.type == pygame.QUIT:
stop = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
pause = not pause
winner = None
if event.key == pygame.K_1:
pause = True
winner = 1
if event.key == pygame.K_0:
pause = True
winner = 0
if isinstance(self.left_robot, ManualRobot) and self.left_robot.living:
projectile = self.left_robot.decide(event, self.left_robot_hub)
if projectile is not None:
sprites_all.add(projectile)
projectiles.add(projectile)
elif isinstance(self.right_robot, ManualRobot) and self.right_robot.living:
projectile = self.right_robot.decide(event, self.right_robot_hub)
if projectile is not None:
sprites_all.add(projectile)
projectiles.add(projectile)
# --- The Logic
if not pause and (time > count_down):
np.random.shuffle(self.robot_list)
for robot in self.robot_list:
if robot == self.left_robot:
other = self.right_robot
else:
other = self.left_robot
if isinstance(robot, AutomatedRobot) and robot.living:
projectile = robot.decide(other_robot_properties=other.get_properties(),
coins=coins,
projectiles=projectiles)
if projectile is not None:
sprites_all.add(projectile)
projectiles.add(projectile)
for robot in self.robot_list:
if robot.living:
robot_damaged = pygame.sprite.spritecollide(robot, projectiles, True)
coins_captured = pygame.sprite.spritecollide(robot, coins, True)
for projectile_hit in robot_damaged:
robot.suffer(projectile_hit.damage)
for coin in coins_captured:
robot.claim_coin(coin)
robot.update(combat=self)
for projectile in projectiles:
projectile.draw(screen)
projectile.update(combat=self)
coins.update()
self.fix_bugs()
if np.random.random() < self.coin_per_second / 60:
totalcoins += 2
pos1 = 50 + np.random.random(2) * (np.array(self.dims)-100) * np.array([0.5, 1])
pos2 = np.array(self.dims) * np.array([1, 0]) + pos1 * np.array([-1, 1])
coin_left = Coin(pos1)
coin_right = Coin(pos2)
coins.add(coin_left)
coins.add(coin_right)
sprites_all.add(coin_left)
sprites_all.add(coin_right)
# --- The image
screen.fill((255, 255, 255))
screen.blit(background_image, (0, 0))
sprites_all.draw(screen)
for projectile in projectiles:
projectile.draw(screen)
for hub in self.robot_hubs:
hub.draw(screen)
time_text = self.font.render("{:02d}:{:02d}".format(int((time / 60) // 60), int((time / 60) % 60)), False,
(0, 0, 0))
screen.blit(time_text, (self.dims[0] - 5 - time_text.get_width(), 5))
coin_text = self.font.render("# {:d}/{:d}".format(len(coins), int(totalcoins)), False,
(0, 0, 0))
screen.blit(coin_text, (self.dims[0] - 5 - coin_text.get_width(), 5 + coin_text.get_height()))
# Only for zimabot
if (isinstance(self.left_robot, Zimabot) or isinstance(self.right_robot, Zimabot)):
if isinstance(self.left_robot, Zimabot):
zimabot = self.left_robot
other = self.right_robot
else:
zimabot = self.right_robot
other = self.left_robot
if time < count_down or (zimabot.living and not other.living):
belt_image = pygame.image.load("Resources/cinturon.png").convert_alpha()
belt_image = pygame.transform.scale(belt_image, (2*zimabot.width, 2*zimabot.height))
screen.blit(belt_image, zimabot.pos - np.array([zimabot.width//2, int(zimabot.height*1.5)]))
# ----
if self.left_robot.living and not self.right_robot.living:
winner = 1
pause = True
if not self.left_robot.living and self.right_robot.living:
winner = 0
pause = True
if pause:
if winner == 1:
pause_text = self.font2.render("The winner is {:s}".format(str(type(self.left_robot)).split(".")[-1][:-2]), False, (0, 0, 0))
center = (self.dims[0] // 2, self.dims[1] // 2)
text_rect = pause_text.get_rect(center=center)
screen.blit(pause_text, text_rect)
elif winner == 0:
pause_text = self.font2.render("The winner is {:s}".format(str(type(self.right_robot)).split(".")[-1][:-2]), False, (0, 0, 0))
center = (self.dims[0] // 2, self.dims[1] // 2)
text_rect = pause_text.get_rect(center=center)
screen.blit(pause_text, text_rect)
else:
pause_text = self.font.render("Paused", False, (0, 0, 0))
center = (self.dims[0] // 2, self.dims[1] // 2)
text_rect = pause_text.get_rect(center=center)
screen.blit(pause_text, text_rect)
else:
time += 1
pygame.display.flip()
clock.tick(60)
pygame.quit()
if __name__ == '__main__':
attributes = {
"health": 500,
"armor": 90,
"health_regen": 19,
"damage": 65,
"self_speed": 3,
"projectile_initial_speed": 4,
"projectile_per_second": 0.6,
"g_health": 80,
"g_armor": 8,
"g_health_regen": 2,
"g_damage": 12,
"g_projectile_per_second": 0.05,
"max_self_speed": 5,
"max_projectile_initial_speed": 10,
"experience_for_level_up": 7,
"g_experience_for_level_up": 3
}
cps = 2
bots = pygame.sprite.Group()
bot1 = stalin_t_pose(x=150, y=325, **attributes)
bot2 = Zimabot(x=1050-150-4*32, y=325, turn_left=True,
projectile_color=(38, 162, 149), image_path="Resources/simple_robot_green.png",
**attributes)
mg = Combat(bot1, bot2, coin_per_second=cps)
mg.run()
|
zimatek/RobotCombat
|
combat.py
|
combat.py
|
py
| 11,303 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "robot.Robot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "robot_hub.RobotHub",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "robot_hub.RobotHub.DownLeft",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "robot_hub.RobotHub",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "robot_hub.RobotHub.DownRight",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pygame.font.init",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.rotozoom",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "robot.set_up",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_1",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_0",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "manual_robot.ManualRobot",
"line_number": 141,
"usage_type": "argument"
},
{
"api_name": "manual_robot.ManualRobot",
"line_number": 147,
"usage_type": "argument"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "automated_robot.AutomatedRobot",
"line_number": 161,
"usage_type": "argument"
},
{
"api_name": "robot.living",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "robot.decide",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "robot.living",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.spritecollide",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.spritecollide",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "robot.suffer",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "robot.claim_coin",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "robot.update",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "coin.Coin",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "coin.Coin",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 287,
"usage_type": "attribute"
}
] |
17469039054
|
from bs4 import BeautifulSoup
import requests
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
fx=open('WEB.txt','r',encoding="utf-8") ## put your input file's name in place of WEB.txt
line=fx.readline()
l=open('email_mailto.txt','a',encoding='utf-8')
def web_imrove(url):
print(url)
try:
source = requests.get(url)
except Exception :
l.write('\n')
return 0
plain_text = source.text
soup = BeautifulSoup(plain_text, 'html.parser')
emails = [a["href"] for a in soup.select('a[href^=mailto:]')]
popo = str(emails)
toto = popo.replace('mailto:', '')
hoho = toto.replace('[', '')
gogo = hoho.replace(']', '')
mm = gogo.replace("'", "")
if len(mm) != 0:
l.write(mm)
l.write('\n')
print(mm)
return 1
else:
l.write('\n')
return 0
#print(mm)
while line:
if line == '\n':
l.write('\n')
line=fx.readline()
continue
p = line.strip()
if( web_imrove('http://'+p) ):
print('first')
elif( web_imrove('http://' + p+'/contact-us') ):
print('second')
else:
web_imrove('http://' + p+'/contactus')
line = fx.readline()
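# Usage sketch (assumption): WEB.txt lists one bare domain per line, e.g.
#   example.com
# Each domain is tried as http://<domain>, then /contact-us, then /contactus,
# and any mailto: addresses found are appended to email_mailto.txt.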
|
akkiei/Web_Scrapper
|
Mail_to.py
|
Mail_to.py
|
py
| 1,282 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 21,
"usage_type": "call"
}
] |
17086061072
|
from datetime import datetime
#convert date from YYYY-MM-DD-T to Date, Month, Year (in words)
datetime
def date_convert(date):
date=str(date)
data=date.split('-') #year/month/day+time all separated by dash
daydate=data[-1].split() #data[-1] is day+time, separated by a space
day=daydate[0] #discard time, keep day
day=day if day[0]!='0' else day[1] #strip a leading zero from single-digit days (compare to '0', not 0)
year=str(data[0]) #data is list containing the year and the month
month=str(data[1])
#map month numbers to their names
months={'01':'January',
'02':'February',
'03':'March',
'04':'April',
'05':'May',
'06':'June',
'07':'July',
'08':'August',
'09':'September',
'10':'October',
'11':'November',
'12':'December'}
#adds appropriate suffix to day
if day[-1]=='1' and int(day)%100!=11: #checks if date ends with 1 and isn't 11
suffix='st'
elif day[-1]=='2' and int(day)%100!=12: #checks if date ends with 2 and isn't 12
suffix='nd'
elif day[-1]=='3':
suffix='rd'
else:
suffix='th' #including special cases 11 and 12 which were previously excluded
return day+suffix+' '+months[month]+', '+year #returns string with date in appropriate format
#test case
#date=datetime.now()
#print date_convert(date)
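# Worked example (an assumption, mirroring the commented test case above):
#   date_convert("2020-02-18 03:57:38")  ->  "18th February, 2020"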
|
veliakiner/SmogonQDB
|
date_convert.py
|
date_convert.py
|
py
| 1,428 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "name"
}
] |
19980146036
|
from pytube import YouTube
from PySimpleGUI import PySimpleGUI as sg
sg.theme("reddit")
layout = [
[sg.Text("URL"), sg.Input(key="url")],
[sg.Button("Fazer o Download")]
]
janela = sg.Window("Video Downloader", layout)
while True:
eventos, valores = janela.read()
if eventos == sg.WINDOW_CLOSED:
break
if eventos == "Fazer o Download":
link = valores["url"]
yt = YouTube(link)
stream = yt.streams.get_highest_resolution()
stream.download()
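# Usage sketch (assumption): requires `pip install pytube PySimpleGUI`; with no
# arguments, stream.download() saves the video to the current working directory.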
|
jopsfernandes/video_downloader
|
youtube.py
|
youtube.py
|
py
| 525 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PySimpleGUI.PySimpleGUI.theme",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "PySimpleGUI.PySimpleGUI.Text",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PySimpleGUI.PySimpleGUI.Input",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI.Button",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PySimpleGUI.PySimpleGUI.Window",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PySimpleGUI.PySimpleGUI.WINDOW_CLOSED",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "PySimpleGUI.PySimpleGUI",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pytube.YouTube",
"line_number": 18,
"usage_type": "call"
}
] |
11707381858
|
#!/usr/bin/python
# coding: utf-8
from io import open
import os
import time
import re
import db
from sqlalchemy import or_, and_, not_, asc, desc, func
from datetime import datetime, timedelta
from functools import wraps, cmp_to_key # wraps makes Flask understand decorated routes; cmp_to_key is used by sort_tasks
import hashlib
import subprocess
from lxml.html.clean import Cleaner
from lxml.etree import ParserError
from werkzeug.utils import secure_filename
from flask import Flask, Blueprint, render_template, request, flash, redirect, session, abort, url_for, make_response, g
from wtforms import Form, BooleanField, TextField, TextAreaField, PasswordField, RadioField, SelectField, SelectMultipleField, IntegerField, HiddenField, SubmitField, validators, ValidationError, widgets
from wtforms.fields.html5 import DateTimeLocalField
import requests
def now():
if app.config['DB'].startswith('postgresql+psycopg2'):
# https://stackoverflow.com/questions/796008/cant-subtract-offset-naive-and-offset-aware-datetimes/17752647#17752647
import psycopg2
return datetime.utcnow().replace(
tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=0, name=None))
else:
return datetime.utcnow()
dtnow = now
class MultiCheckboxField(SelectMultipleField):
"""
A multiple-select, except displays a list of checkboxes.
Iterating the field will produce subfields, allowing custom rendering of
the enclosed checkbox fields.
Shamelessly stolen from WTForms FAQ.
"""
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
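# Usage sketch (assumption): behaves like any SelectMultipleField, e.g.
#   groups = MultiCheckboxField('Groups', coerce=int, choices=[(1, 'admins')])
# AdminUserForm.group_ids below uses it this way, with choices filled in per request.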
app_dir = os.path.dirname(os.path.abspath(__file__))
app = Flask('rhforum', template_folder=app_dir+"/templates")
app.config.from_pyfile(app_dir+"/config.py") # XXX
BASE_URL = app.config.get("BASE_URL", "")
rhforum = Blueprint('rhforum', __name__,
template_folder='templates',
static_folder='static')
doku = None
if app.config.get("DOKU_URL", ""):
from dokuwiki import DokuWiki
try:
doku = DokuWiki(app.config['DOKU_URL'], app.config['DOKU_USER'], app.config['DOKU_PASS'])
except Exception as ex:
print("Failed to connect to DokuWiki: ", ex)
class PostForm(Form):
text = TextAreaField('Text', [validators.required()])
submit = SubmitField('Odeslat')
class EditPostForm(Form):
text = TextAreaField('Text', [validators.required()])
submit = SubmitField('Upravit')
delete = SubmitField('Smazat')
class EditThreadForm(Form):
name = TextField('Nadpis', [validators.required()])
text = TextAreaField('Text', [validators.required()])
forum_id = SelectField('Fórum', coerce=int)
wiki_article = TextField('Wiki článek')
submit = SubmitField('Upravit')
delete = SubmitField('Smazat')
class ThreadForm(PostForm):
name = TextField('Nadpis', [validators.required()])
class UserForm(Form):
fullname = TextField('Nadpis', [validators.required()])
email = TextField('Email', [validators.required()])
new_password = PasswordField('Nové heslo')
homepage = TextField('Homepage')
avatar_url = TextField('URL avataru')
profile = TextAreaField('Profil')
submit = SubmitField('Upravit')
class AdminUserForm(UserForm):
group_ids = MultiCheckboxField('Skupiny', coerce=int)
@rhforum.app_template_filter('datetime')
def datetime_format(value, format='%d. %m. %Y %H:%M:%S'):
if not value: return "-"
if isinstance(value, str): return value
return value.strftime(format)
cleaner = Cleaner(comments=False, style=False, embedded=False, annoying_tags=False)
@rhforum.app_template_filter('postfilter')
def postfilter(text):
return text
@rhforum.app_template_filter('clean')
def clean(value):
try:
return cleaner.clean_html(value)
except ParserError:
return ""
@rhforum.app_template_filter('bbcode')
def bbcode(text):
text = re.sub("\[quote=([^\]@]*)@(\d)*\]", "<blockquote><div class='quoting' data-id='\\2'>\\1</div><p>", text)
text = re.sub("\[quote=([^\]@]*)\]", "<blockquote><div class='quoting'>\\1</div><p>", text)
text = re.sub("\[quote\]", "<blockquote><p>", text)
text = re.sub("\[\/quote\]", "</blockquote>", text)
return text
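# Worked example (an assumption, derived from the regexes above):
#   bbcode("[quote=alice@3]hi[/quote]")
#   -> "<blockquote><div class='quoting' data-id='3'>alice</div><p>hi</blockquote>"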
@rhforum.before_request
def before_request():
if not hasattr(g, 'telegram_messages'):
g.telegram_messages = []
if not hasattr(g, 'irc_messages'):
g.irc_messages = []
if not hasattr(g, 'discord_messages'):
g.discord_messages = []
if 'user_id' in session:
g.user = db.session.query(db.User).get(session['user_id'])
if not g.user:
# TODO
pass
g.user.laststamp = now()
else:
g.user = db.Guest()
g.now = now()
g.yesterday = g.now - timedelta(days=1)
g.tomorrow = g.now + timedelta(days=1)
g.production = app.config['PRODUCTION']
@rhforum.after_request
def after_request(response):
try:
while g.telegram_messages:
message = g.telegram_messages.pop(0)
subprocess.Popen(["python", app_dir+"/report.py", "telegram", message.encode('utf-8')])
while g.irc_messages:
message = g.irc_messages.pop(0)
subprocess.Popen(["python", app_dir+"/report.py", "irc", message.encode('utf-8')])
while g.discord_messages:
message = g.discord_messages.pop(0)
subprocess.Popen(["python", app_dir+"/report.py", "discord", message.encode('utf-8')])
except Exception as ex:
print(type(ex), ex)
return response
@rhforum.teardown_request
def shutdown_session(exception=None):
db.session.close()
db.session.remove()
def sort_tasks(tasks):
now = g.now
def cmp_tasks(task0, task1):
# sort order:
# 0. unspecified announcements and tasks
# 1. upcoming announcements and all unfinished tasks
# 2. past announcements and tasks ("everything else")
# 3. finished unspecified tasks
def get_task_priority(task):
if not task.due_time and not task.status: return 0
if not task.due_time and task.status == "todo": return 0
if not task.status and task.due_time and task.due_time > now: return 1
if task.status == "todo": return 1
if not task.due_time and task.status == "done": return 3
return 2
task0_pri = get_task_priority(task0)
task1_pri = get_task_priority(task1)
if task0_pri < task1_pri: return -1
if task0_pri > task1_pri: return 1
if not task0.due_time: return 1
if not task1.due_time: return 1
return 1 if abs(now - task0.due_time) > abs(now - task1.due_time) else -1
tasks.sort(key=cmp_to_key(cmp_tasks)) # Python 3: list.sort() no longer accepts a cmp function directly
class ForumForm(Form):
name = TextField('Jméno', [validators.required()])
description = TextField('Popisek', [validators.required()])
category_id = SelectField('Kategorie', coerce=int)
move_up = SubmitField('↑')
move_down = SubmitField('↓')
save = SubmitField('Uložit')
new_forum_id = SelectField('Nové fórum', coerce=int, default=0)
delete = SubmitField('Odstranit')
class CategoryForm(Form):
name = TextField('Jméno', [validators.required()])
group_id = SelectField('Nutná skupina', coerce=int)
move_up = SubmitField('↑')
move_down = SubmitField('↓')
save = SubmitField('Uložit')
delete = SubmitField('Odstranit')
class ForumControlsForm(Form):
mark_read = SubmitField('Označit fórum za přečtené')
class TaskForm(Form):
type = SelectField("Typ", [validators.optional()], choices=(('task', 'úkol'), ('announcement', 'oznámení')))
due_time = DateTimeLocalField('Čas', [validators.optional()], format="%Y-%m-%dT%H:%M")
text = TextField('Text', [validators.required()])
user_id = SelectField('Uživatel', coerce=int)
submit = SubmitField("Zadat")
@rhforum.errorhandler(404)
def page_not_found(e):
if not request.path.startswith("/static"):
return render_template('forum/errorpage.html', error=404), 404
else:
return "404", 404 # we don't have templates
@rhforum.errorhandler(403)
def page_not_found(e):
return render_template('forum/errorpage.html', error=403), 403
@rhforum.errorhandler(500)
def page_not_found(e):
return render_template('forum/errorpage.html', error=500), 500
@rhforum.errorhandler(400)
def page_not_found(e):
return render_template('forum/errorpage.html', error=400), 400
def get_active_threads():
threads = db.session.query(db.Thread).join(db.Forum).outerjoin(db.Category)\
.filter(or_(db.Forum.category_id==None, db.Category.group_id.in_([None, 0]), db.Category.group_id.in_(group.id for group in g.user.groups)))\
.filter(db.Forum.trash == False) \
.order_by(db.Thread.laststamp.desc())
return threads
@rhforum.route("/", methods="GET POST".split())
def index():
form = None
if g.user:
form = ForumControlsForm(request.form)
if request.method == "POST":# and form.validate():
if form.mark_read.data:
g.user.read_all()
categories = db.session.query(db.Category).order_by(db.Category.position).all()
uncategorized_fora = db.session.query(db.Forum).filter(db.Forum.category == None, db.Forum.trash == False).order_by(db.Forum.position).all()
trash = db.session.query(db.Forum).filter(db.Forum.trash == True).scalar()
if uncategorized_fora:
categories.append(None)
latest_threads = get_active_threads()[0:10]
tasks = db.session.query(db.Task).filter(db.Task.user_id.in_([g.user.id, None, 0])).all()
sort_tasks(tasks)
return render_template("forum/index.html", categories=categories, uncategorized_fora=uncategorized_fora, edit_forum = None, latest_threads=latest_threads, trash=trash, form=form, tasks=tasks)
@rhforum.route("/active", methods="GET POST".split())
def active():
form = ForumControlsForm(request.form)
active_threads = get_active_threads()[0:100]
return render_template("forum/active.html", active_threads=active_threads, form=form)
@rhforum.route("/edit-forum/<int:forum_id>", endpoint="edit_forum", methods="GET POST".split())
@rhforum.route("/edit-forum/new", endpoint="edit_forum", methods="GET POST".split())
@rhforum.route("/edit-category/<int:category_id>", endpoint="edit_category", methods="GET POST".split())
@rhforum.route("/edit-category/new", endpoint="edit_category", methods="GET POST".split())
def edit_forum_or_category(forum_id=None, category_id=None):
if not g.user.admin: abort(403) # TODO minrights decorator
categories = db.session.query(db.Category).order_by(db.Category.position).all()
uncategorized_fora = db.session.query(db.Forum).filter(db.Forum.category == None, db.Forum.trash == False).order_by(db.Forum.position)
trash = db.session.query(db.Forum).filter(db.Forum.trash == True).scalar()
if request.endpoint == 'rhforum.edit_forum':
if forum_id:
forum = db.session.query(db.Forum).get(forum_id)
#forum.last = forum.position == len(forum.category.fora) - 1 if forum.category else True
if not forum.category: forum.position = 0
else:
forum = db.Forum()
uncategorized_fora = list(uncategorized_fora) + [forum]
forum.position = 0
forum.last = True
form = ForumForm(request.form, forum)
form.category_id.choices = [(0, "-")] + [(c.id, c.name) for c in categories if c]
fora = db.session.query(db.Forum).outerjoin(db.Category).order_by(db.Category.position, db.Forum.position).all()
form.new_forum_id.choices = [(0, "-")] + [(f.id, f.name) for f in fora]
editable = forum
elif request.endpoint == 'rhforum.edit_category':
if category_id:
category = db.session.query(db.Category).get(category_id)
#category.last = category.position == len(categories) - 1
else:
category = db.Category()
categories = list(categories) + [category]
category.position = 0
category.last = True
form = CategoryForm(request.form, category)
form.group_id.choices = [(0, "-")] + [(group.id, group.name) for group in db.session.query(db.Group)]
editable = category
if request.method == "POST" and form.validate():
if request.endpoint == 'rhforum.edit_forum':
forum.name = form.name.data
forum.identifier = forum.name.lower().replace(' ', '-')
forum.description = form.description.data
forum.category_id = form.category_id.data or None
forum.category = db.session.query(db.Category).get(form.category_id.data)
elif request.endpoint == 'rhforum.edit_category':
category.name = form.name.data
category.group_id = form.group_id.data
if form.save.data:
if request.endpoint == 'rhforum.edit_forum':
if not forum_id:
if forum.category_id:
forum.position = len(forum.category.fora) - 1
db.session.add(forum)
flash("Fórum vytvořeno.")
else:
flash("Fórum upraveno.")
elif request.endpoint == 'rhforum.edit_category':
if not category_id:
category.position = len(categories) - 1
db.session.add(category)
flash("Kategorie vytvořena.")
else:
flash("Kategorie upravena.")
db.session.commit()
return redirect(url_for('.index'))
elif form.delete.data:
if request.endpoint == 'rhforum.edit_forum':
if not form.new_forum_id.data and forum.threads:
flash("Je nutno témata někam přesunout.")
else:
moved = False
if form.new_forum_id.data:
moved = True
new_forum = db.session.query(db.Forum).get(form.new_forum_id.data)
for thread in forum.threads:
thread.forum = new_forum
db.session.delete(forum)
if moved:
flash("Fórum odstraněno a témata přesunuty.")
else:
flash("Fórum odstraněno.")
db.session.commit()
return redirect(url_for('.index'))
elif request.endpoint == 'rhforum.edit_category':
db.session.delete(category)
flash("Kategorie odstraněna.")
db.session.commit()
return redirect(url_for('.index'))
else:
# moving
i = editable.position
if request.endpoint == 'rhforum.edit_forum':
items = list(forum.category.fora)
elif request.endpoint == 'rhforum.edit_category':
items = list(categories)
items.remove(editable)
if form.move_up and form.move_up.data:
items.insert(i-1, editable)
elif form.move_down and form.move_down.data:
items.insert(i+1, editable)
for i, x in enumerate(items):
x.position = i
db.session.add(x)
db.session.commit()
if request.endpoint == 'rhforum.edit_category':
categories = items
if editable.position == 0:
del form.move_up
if request.endpoint == 'rhforum.edit_forum':
if not forum.category or forum.position == len(forum.category.fora) - 1:
del form.move_down
elif request.endpoint == 'rhforum.edit_category':
if not category.id or category.position == len(categories) - 1:
del form.move_down
return render_template("forum/index.html", categories=categories+[None], uncategorized_fora=uncategorized_fora, editable=editable, form=form, new=not bool(forum_id), trash=trash)
class LoginForm(Form):
name = TextField('Jméno', [validators.required()])
password = PasswordField('Heslo', [validators.required()])
submit = SubmitField('Přihlásit se')
@rhforum.route("/login", methods="GET POST".split())
def login():
form = LoginForm(request.form)
failed = False
if request.method == 'POST' and form.validate():
user = db.session.query(db.User).filter(db.User.login == form.name.data.lower()).first()
if not user: failed = True
else:
try:
password_matches = user.verify_password(form.password.data)
except db.OldHashingMethodException:
failed = True
password_matches = False
flash("Prosím, požádejte admina o změnu hesla.")
if password_matches:
g.user = user
session['user_id'] = g.user.id
session.permanent = True
flash("Jste přihlášeni.")
return redirect(url_for('.index'))
else:
failed = True
return render_template("forum/login.html", form=form, failed=failed)
class RegisterForm(Form):
username = TextField('Nevyplňovat')
bbq = TextField('Login', [validators.required()])
fullname = TextField('Jméno', [validators.required()])
password = PasswordField('Heslo', [
validators.Required(),
validators.EqualTo('confirm_password', message='Hesla se musí shodovat')
])
confirm_password = PasswordField('Heslo znovu')
email = TextField('Email', [validators.required()])
submit = SubmitField('Zaregistrovat se')
@rhforum.route("/register", methods="GET POST".split())
def register():
if g.user:
if g.user.admin:
flash("Pro ruční registraci účtů ostatním použijte prosím DokuWiki.")
return redirect(url_for(".index"))
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
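# Honeypot "username" filled in: almost certainly a bot, so pretend success and store nothing.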
if form.username.data:
return "OK"
username = form.bbq.data.lower()
if db.session.query(db.User).filter(db.User.login == username).first():
flash("Tento login je už zabraný, vyberte si prosím jiný.")
else:
user = db.User(login=username, fullname=form.fullname.data, email=form.email.data, timestamp=now(), laststamp=now())
user.set_password(form.password.data)
user_group = db.session.query(db.Group).filter(db.Group.name=="user").scalar()
if user_group:
user.groups.append(user_group)
db.session.add(user)
db.session.commit()
g.telegram_messages.append("Nová registrace: *{}* (login *{}*, email {}): {}".format(
user.fullname, user.login, user.email, BASE_URL+user.url))
#g.irc_messages.append("Nová registrace: \x0302{}\x03 (login \x0208{}\x03, email {}): {}".format(
# user.fullname, user.login, user.email, BASE_URL+user.url))
g.discord_messages.append("Nová registrace: **{}** (login **{}**, email {}): <{}>".format(
user.fullname, user.login, user.email, BASE_URL+user.url))
g.user = user
g.user.read_all()
session['user_id'] = g.user.id
session.permanent = True
flash("Registrace proběhla úspěšně.")
return redirect(url_for(".index"))
return render_template("forum/register.html", form=form)
@rhforum.route("/logout")
def logout():
if 'user_id' in session:
session.pop('user_id')
flash("Odhlášení proběhlo úspěšně.")
return redirect(url_for('.index'))
@rhforum.route("/<int:forum_id>", methods="GET POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>", methods="GET POST".split())
def forum(forum_id, forum_identifier=None):
forum = db.session.query(db.Forum).get(forum_id)
if not forum: abort(404)
if forum.category and forum.category.group and forum.category.group not in g.user.groups: abort(403)
if forum.trash and not g.user.admin: abort(403)
threads = db.session.query(db.Thread).filter(db.Thread.forum == forum).order_by(db.Thread.archived.asc(), db.Thread.pinned.desc(), db.Thread.laststamp.desc())
form = None
if not forum.trash:
form = ThreadForm(request.form)
if g.user and request.method == 'POST' and form.validate():
now = dtnow()
thread = db.Thread(forum=forum, author=g.user, timestamp=now, laststamp=now,
name=form.name.data)
db.session.add(thread)
post = db.Post(thread=thread, author=g.user, timestamp=now,
text=form.text.data)
db.session.add(post)
db.session.commit()
g.telegram_messages.append("Nové téma od *{}*: *{}*: {}".format(
thread.author.name, thread.name, BASE_URL+thread.short_url))
if not thread.forum.category or not thread.forum.category.group or thread.forum.category.group.name == "extern":
g.discord_messages.append("Nové téma od **{}**: **{}**: <{}>".format(
thread.author.name, thread.name, BASE_URL+thread.short_url))
# g.irc_messages.append("Nové téma od \x0302{}\x03: \x0306{}\x03: {}".format(
# thread.author.name, thread.name, BASE_URL+thread.short_url))
return redirect(thread.url)
return render_template("forum/forum.html", forum=forum, threads=threads, form=form)
@rhforum.route("/users/<int:user_id>/threads")
@rhforum.route("/users/<int:user_id>-<name>/threads")
def user_threads(user_id, name=None):
user = db.session.query(db.User).get(user_id)
if not user: abort(404)
forum = db.Forum(name="Témata od {}".format(user.name))
threads = db.session.query(db.Thread).join(db.Forum)\
.filter(db.Forum.trash == False, db.Thread.author == user)\
.outerjoin(db.Category)\
.filter(or_(db.Forum.category_id==None, db.Category.group_id.in_([None, 0]), db.Category.group_id.in_(group.id for group in g.user.groups)))\
.order_by(db.Thread.laststamp.desc()).all()
return render_template("forum/forum.html", forum=forum, threads=threads, user=user)
# TODO <path:thread_identificator>
@rhforum.route("/<int:forum_id>/<int:thread_id>", methods="GET POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>/<int:thread_id>-<thread_identifier>", methods="GET POST".split())
def thread(forum_id, thread_id, forum_identifier=None, thread_identifier=None):
thread = db.session.query(db.Thread).get(thread_id)
if not thread: abort(404)
if thread.forum.category and thread.forum.category.group and thread.forum.category.group not in g.user.groups: abort(403)
if thread.forum.trash and not g.user.admin: abort(403)
reply_post = None
if "reply" in request.args:
try:
reply_post_id = int(request.args["reply"])
except ValueError:
abort(400)
reply_post = db.session.query(db.Post).get(reply_post_id)
if reply_post_id and not reply_post:
abort(404)
if reply_post and reply_post.thread != thread:
abort(400)
if g.user.admin and "show_deleted" in request.args:
posts = thread.posts.filter()
else:
posts = thread.posts.filter(db.Post.deleted==False)
num_deleted = thread.posts.count() - thread.posts.filter(db.Post.deleted==False).count()
form = None
if not thread.forum.trash and not (thread.locked and not g.user.admin):
text = ""
if reply_post:
text = "[quote={}@{}]{}[/quote]\n".format(reply_post.author.login, reply_post.id, reply_post.text)
form = PostForm(request.form, text=text)
if g.user and request.method == 'POST' and form.validate():
now = dtnow()
post = db.Post(thread=thread, author=g.user, timestamp=now,
text=form.text.data)
db.session.add(post)
thread.laststamp = now
db.session.commit()
g.telegram_messages.append("Nový příspěvek od *{}* do *{}*: {}".format(
post.author.name, post.thread.name, BASE_URL+post.short_url))
if not thread.forum.category or not thread.forum.category.group or thread.forum.category.group.name == "extern":
g.discord_messages.append("Nový příspěvek od **{}** do **{}**: <{}>".format(
post.author.name, post.thread.name, BASE_URL+post.short_url))
# g.irc_messages.append("Nový příspěvek od \x0302{}\x03 do \x0306{}\x03: {}".format(
# post.author.name, post.thread.name, BASE_URL+post.short_url))
return redirect(thread.url+"#post-latest") # TODO id
if g.user:
thread_read = db.session.query(db.ThreadRead).filter(db.ThreadRead.user==g.user, db.ThreadRead.thread==thread).first()
if not thread_read:
last_read_timestamp = None
else:
last_read_timestamp = thread_read.last_post.timestamp
g.user.read(thread.last_post)
else:
last_read_timestamp = g.now
article = None
article_revisions = []
article_info = None
doku_error = None
if thread.wiki_article and doku:
try:
article = doku.pages.html(thread.wiki_article)
#article_revisions = doku.send("wiki.getPageVersions", thread.wiki_article)
article_info = doku.send("wiki.getPageInfo", thread.wiki_article)
except Exception as ex:
print(ex)
doku_error = ex
return render_template("forum/thread.html", thread=thread, forum=thread.forum, posts=posts, form=form, now=dtnow(), last_read_timestamp=last_read_timestamp, article=article, article_revisions=article_revisions, article_info=article_info, doku_error=doku_error, reply_post=reply_post, show_deleted="show_deleted" in request.args, num_deleted=num_deleted)
@rhforum.route("/<int:forum_id>/<int:topic_id>/set", methods="POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>/<int:thread_id>-<thread_identifier>/set", methods="POST".split())
def thread_set(forum_id, thread_id, forum_identifier=None, thread_identifier=None):
if not g.user.admin: abort(403)
thread = db.session.query(db.Thread).get(thread_id)
if not thread: abort(404)
if request.form.get("pin"):
thread.pinned = True
elif request.form.get("unpin"):
thread.pinned = False
elif request.form.get("lock"):
thread.locked = True
elif request.form.get("unlock"):
thread.locked = False
elif request.form.get("archive"):
thread.archived = True
elif request.form.get("unarchive"):
thread.archived = False
db.session.commit()
return redirect(thread.url)
@rhforum.route("/<int:forum_id>/<int:thread_id>/edit/<int:post_id>", methods="GET POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>/<int:thread_id>-<thread_identifier>/edit/<int:post_id>", methods="GET POST".split())
def edit_post(forum_id, thread_id, post_id, forum_identifier=None, thread_identifier=None):
post = db.session.query(db.Post).get(post_id)
thread = db.session.query(db.Thread).get(thread_id)
if not post: abort(404)
if thread.forum.category and thread.forum.category.group and thread.forum.category.group not in g.user.groups: abort(403)
if post.thread != thread: abort(400)
if post.deleted:
# The user probably hit edit multiple times. Let's just be helpful.
return redirect(thread.url)
if post.author != g.user and not g.user.admin: abort(403)
if post.thread.forum.trash and not g.user.admin: abort(403)
posts = thread.posts.filter(db.Post.deleted==False)
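# Editing the first post of a thread lets admins edit the thread itself as well.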
if post == posts[0] and g.user.admin:
edit_thread = True
form = EditThreadForm(request.form, text=post.text, name=thread.name, forum_id=thread.forum_id, wiki_article=thread.wiki_article)
forums = db.session.query(db.Forum).outerjoin(db.Category).order_by(db.Category.position, db.Forum.position).all()
form.forum_id.choices = [(f.id, f.name) for f in forums]
else:
edit_thread = False
form = EditPostForm(request.form, text=post.text)
if not g.user.admin: del form.delete
if request.method == 'POST' and form.validate():
if form.submit.data:
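# Keep the old post as a soft-deleted revision and store the edit as a new post linked via original.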
now = dtnow()
new_post = db.Post(thread=thread, author=post.author, timestamp=post.timestamp, editstamp=now,
text=form.text.data, original=post.original if post.original else post, editor=g.user)
db.session.add(new_post)
post.deleted = True
if edit_thread:
thread.name = form.name.data
thread.forum_id = form.forum_id.data
thread.wiki_article = form.wiki_article.data
#forum.fix_laststamp() # TODO
db.session.commit()
if edit_thread:
return redirect(thread.url)
else:
return redirect(new_post.url)
elif getattr(form, "delete", None) and form.delete.data:
post.deleted = True
db.session.commit()
return redirect(thread.url)
return render_template("forum/thread.html", thread=thread, forum=thread.forum, posts=posts, form=form, now=dtnow(), edit_post=post, edit_thread=edit_thread, last_read_timestamp=g.now)
@rhforum.route("/users/")
def users():
if not g.user.admin: abort(403)
users = db.session.query(db.User).order_by(db.User.fullname)
return render_template("forum/users.html", users=users)
@rhforum.route("/users/<int:user_id>")
@rhforum.route("/users/<int:user_id>-<name>")
def user(user_id, name=None):
user = db.session.query(db.User).get(user_id)
if not user: abort(404)
return render_template("forum/user.html", user=user)
@rhforum.route("/users/<int:user_id>/edit", methods="GET POST".split())
@rhforum.route("/users/<int:user_id>-<name>/edit", methods="GET POST".split())
def edit_user(user_id, name=None):
user = db.session.query(db.User).get(user_id)
if not user: abort(404)
if user != g.user and not g.user.admin: abort(403)
if g.user.admin:
form = AdminUserForm(request.form, user)
form.group_ids.choices = []
for group in db.session.query(db.Group):
form.group_ids.choices.append((group.id, group.name))
if form.group_ids.data is None:
form.group_ids.data = [group.id for group in user.groups]
else:
form = UserForm(request.form, user)
if request.method == 'POST' and form.validate():
user.fullname = form.fullname.data
user.email = form.email.data
user.homepage = form.homepage.data
user.avatar_url = form.avatar_url.data
if form.new_password.data:
user.set_password(form.new_password.data)
flash("Heslo změněno.")
if g.user.admin:
user.groups = []
for group_id in form.group_ids.data:
user.groups.append(db.session.query(db.Group).get(group_id))
db.session.commit()
flash("Uživatel upraven.")
return redirect(user.url)
return render_template("forum/user.html", user=user, edit=True, form=form)
class GroupForm(Form):
name = TextField('Jméno', [validators.required()])
symbol = TextField('Symbol')
title = TextField('Titul')
rank = IntegerField('Rank')
display = BooleanField('Zobrazovat')
submit = SubmitField('Uložit')
@rhforum.route("/groups/", methods=["GET"])
@rhforum.route("/groups/<int:edit_group_id>/edit", methods=["GET", "POST"])
def groups(edit_group_id=None):
if not g.user.admin: abort(403)
groups = db.session.query(db.Group).all()
edit_group = None
form = None
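# edit_group_id == 0 is the sentinel for creating a brand-new group.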
if edit_group_id == 0 and request.method == 'POST':
group = db.Group(name="")
db.session.add(group)
db.session.commit()
return redirect(url_for('.groups', edit_group_id=group.id))
if edit_group_id:
edit_group = db.session.query(db.Group).get(edit_group_id)
form = GroupForm(request.form, edit_group)
if request.method == 'POST' and form.validate():
edit_group.name = form.name.data
edit_group.symbol = form.symbol.data
edit_group.title = form.title.data
edit_group.rank = form.rank.data
edit_group.display = form.display.data
db.session.commit()
flash("Skupina {} upravena.".format(edit_group.name))
return redirect(url_for('.groups'))
return render_template("forum/groups.html", groups=groups, edit_group=edit_group, form=form)
@rhforum.route("/tasks", methods="GET POST".split())
@rhforum.route("/tasks/<int:task_id>", methods=["GET", "POST"])
def tasks(task_id=None):
if not g.user.in_group("retroherna"): error(403)
task = None
if task_id:
task = db.session.query(db.Task).get(task_id)
if not task: abort(404)
form = TaskForm(request.form, task)
form.user_id.choices = [(0, '-')]
for user in db.session.query(db.User):
form.user_id.choices.append((user.id, user.name))
if request.method == 'POST' and form.validate():
if not form.due_time.data and (form.type.data == "announcement" or (task and not task.status)):
flash("Nelze vytvořit oznámení bez konečného času.")
else:
if not task_id:
task = db.Task()
task.created_time = now()
task.author = g.user
task.text = form.text.data
task.due_time = form.due_time.data
if form.type.data == "task":
task.status = "todo"
task.user_id = form.user_id.data
if not task_id:
db.session.add(task)
db.session.commit()
if not task_id:
flash("Úkol přidán.")
else:
flash("Úkol upraven.")
return redirect(url_for('.tasks'))
tasks = db.session.query(db.Task).all()#.order_by(func.abs(func.now() - db.Task.due_time))
sort_tasks(tasks)
return render_template("forum/tasks.html", tasks=tasks, form=form, task_id=task_id)
@rhforum.route("/tasks/<int:task_id>/status", methods=["POST"])
def change_task_status(task_id):
if not g.user.in_group("retroherna"): error(403)
task = db.session.query(db.Task).get(task_id)
if not task: abort(404)
if request.form["status"] == "todo":
task.status = "todo"
elif request.form["status"] == "done":
task.status = "done"
db.session.commit()
return redirect(url_for(".tasks"))
class IRCSendForm(Form):
text = TextField('Text', [validators.required()])
submit = SubmitField('Odeslat')
@rhforum.route("/irc-send/", methods=["GET", "POST"])
def irc_send():
if not g.user.admin: abort(403)
text = None
form = IRCSendForm(request.form)
if request.method == 'POST' and form.validate():
text = form.text.data
g.irc_messages.append(text)
form = IRCSendForm()
return render_template("forum/irc_send.html", form=form, text=text)
app.register_blueprint(rhforum, url_prefix='')
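# Outside debug mode, log warnings and errors to a file next to the app.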
if not app.debug:
import logging
from logging import FileHandler
file_handler = FileHandler(app_dir+'/flask.log')
file_handler.setLevel(logging.WARNING)
formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
if __name__ == "__main__":
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(host="", port=8080, debug=True, threaded=True)
|
retroherna/rhweb2
|
rhforum.py
|
rhforum.py
|
py
| 36,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.utcnow",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "psycopg2.tz.FixedOffsetTimezone",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "psycopg2.tz",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectMultipleField",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "wtforms.widgets.ListWidget",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "wtforms.widgets",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "wtforms.widgets.CheckboxInput",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "wtforms.widgets",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "dokuwiki.DokuWiki",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "wtforms.TextField",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "wtforms.TextField",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "wtforms.Form",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "wtforms.PasswordField",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "wtforms.TextField",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "wtforms.TextField",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "lxml.html.clean.Cleaner",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "lxml.etree.ParserError",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 130,
"usage_type": "argument"
},
{
"api_name": "flask.g.telegram_messages",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 132,
"usage_type": "argument"
},
{
"api_name": "flask.g.irc_messages",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "flask.g.discord_messages",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "db.Guest",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "flask.g.now",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "flask.g.yesterday",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "flask.g.now",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "flask.g.tomorrow",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "flask.g.now",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "flask.g.production",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "flask.g.telegram_messages",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "flask.g.telegram_messages.pop",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "flask.g.telegram_messages",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "flask.g.irc_messages",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "flask.g.irc_messages.pop",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "flask.g.irc_messages",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "flask.g.discord_messages",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "flask.g.discord_messages.pop",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "flask.g.discord_messages",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "db.session.close",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "db.session.remove",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "flask.g.now",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "wtforms.Form",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "wtforms.SelectField",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.optional",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "wtforms.fields.html5.DateTimeLocalField",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.optional",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "flask.request.path.startswith",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "flask.request.path",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "db.Thread",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.or_",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "db.Forum",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "db.Category.group_id.in_",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "db.Category",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "db.Forum",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "db.Thread.laststamp.desc",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "db.Thread",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "flask.g.user.read_all",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "db.session.query",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "db.session.query",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "db.session.query",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "db.Task",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "db.Task.user_id.in_",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "db.session.query",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "db.session.query",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "flask.request.endpoint",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "flask.request.endpoint",
"line_number": 309,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "db.Group",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "flask.request.method",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "flask.request.endpoint",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "flask.request.endpoint",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "flask.request.endpoint",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "flask.request.endpoint",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "db.session.commit",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "flask.request.endpoint",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 357,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 357,
"usage_type": "attribute"
},
{
"api_name": "db.session.delete",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "db.session.commit",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "flask.request.endpoint",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "db.session.delete",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "db.session.commit",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "flask.request.endpoint",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "flask.request.endpoint",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "flask.request.endpoint",
"line_number": 390,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "flask.request.endpoint",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "flask.request.endpoint",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "wtforms.PasswordField",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 411,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "db.OldHashingMethodException",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 422,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 423,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "flask.session.permanent",
"line_number": 424,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 424,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 432,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "wtforms.TextField",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "wtforms.PasswordField",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Required",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "wtforms.validators.EqualTo",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "wtforms.PasswordField",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "wtforms.TextField",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 446,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 447,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 450,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "db.User",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "db.Group",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "db.session.add",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "flask.g.telegram_messages.append",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "flask.g.telegram_messages",
"line_number": 466,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 466,
"usage_type": "name"
},
{
"api_name": "flask.g.discord_messages.append",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "flask.g.discord_messages",
"line_number": 470,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 470,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 473,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "flask.g.user.read_all",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 474,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 474,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 475,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "flask.session.permanent",
"line_number": 476,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "flask.session.pop",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 486,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 495,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 495,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 496,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 496,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 497,
"usage_type": "attribute"
},
{
"api_name": "db.Thread",
"line_number": 497,
"usage_type": "attribute"
},
{
"api_name": "db.Thread.archived.asc",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "db.Thread.pinned.desc",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "db.Thread.laststamp.desc",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 500,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 501,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 501,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "db.Thread",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 503,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 503,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 505,
"usage_type": "attribute"
},
{
"api_name": "db.Post",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 506,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 508,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 509,
"usage_type": "attribute"
},
{
"api_name": "flask.g.telegram_messages.append",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "flask.g.telegram_messages",
"line_number": 510,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 510,
"usage_type": "name"
},
{
"api_name": "flask.g.discord_messages.append",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "flask.g.discord_messages",
"line_number": 513,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 513,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 523,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 523,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "db.Forum",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 528,
"usage_type": "attribute"
},
{
"api_name": "db.Thread",
"line_number": 528,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 528,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 529,
"usage_type": "attribute"
},
{
"api_name": "db.Thread",
"line_number": 529,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 530,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.or_",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "db.Forum",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "db.Category.group_id.in_",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "db.Category",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 531,
"usage_type": "name"
},
{
"api_name": "db.Forum",
"line_number": 532,
"usage_type": "attribute"
},
{
"api_name": "db.Thread.laststamp.desc",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "db.Thread",
"line_number": 532,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 534,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 541,
"usage_type": "attribute"
},
{
"api_name": "db.Thread",
"line_number": 541,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 543,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 543,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 544,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 544,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 546,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 546,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 548,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 548,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 551,
"usage_type": "attribute"
},
{
"api_name": "db.Post",
"line_number": 551,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 555,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 557,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 557,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 557,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 557,
"usage_type": "name"
},
{
"api_name": "db.Post",
"line_number": 560,
"usage_type": "attribute"
},
{
"api_name": "db.Post",
"line_number": 562,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user",
"line_number": 565,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 569,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 569,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 570,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 570,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 570,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 570,
"usage_type": "name"
},
{
"api_name": "db.Post",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 572,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 572,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 574,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 576,
"usage_type": "attribute"
},
{
"api_name": "flask.g.telegram_messages.append",
"line_number": 577,
"usage_type": "call"
},
{
"api_name": "flask.g.telegram_messages",
"line_number": 577,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 577,
"usage_type": "name"
},
{
"api_name": "flask.g.discord_messages.append",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "flask.g.discord_messages",
"line_number": 580,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 580,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 586,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 586,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 587,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 587,
"usage_type": "attribute"
},
{
"api_name": "db.ThreadRead",
"line_number": 587,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user",
"line_number": 587,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 587,
"usage_type": "name"
},
{
"api_name": "flask.g.user.read",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 592,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 592,
"usage_type": "name"
},
{
"api_name": "flask.g.now",
"line_number": 594,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 594,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 610,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 610,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 615,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 615,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 615,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 616,
"usage_type": "attribute"
},
{
"api_name": "db.Thread",
"line_number": 616,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 617,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 619,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 619,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 619,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 621,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 621,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 621,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 624,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 624,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 626,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 626,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 629,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 629,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 631,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 631,
"usage_type": "name"
},
{
"api_name": "db.session.commit",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 633,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 635,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 640,
"usage_type": "attribute"
},
{
"api_name": "db.Post",
"line_number": 640,
"usage_type": "attribute"
},
{
"api_name": "db.session.query",
"line_number": 641,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 641,
"usage_type": "attribute"
},
{
"api_name": "db.Thread",
"line_number": 641,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 642,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 643,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 643,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 648,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 648,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 648,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 649,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 649,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "db.Post",
"line_number": 650,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user",
"line_number": 652,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 652,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 654,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 654,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 655,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 655,
"usage_type": "attribute"
},
{
"api_name": "db.Forum",
"line_number": 655,
"usage_type": "attribute"
},
{
"api_name": "db.Category",
"line_number": 655,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 659,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 659,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 661,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 661,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 663,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 663,
"usage_type": "name"
},
{
"api_name": "db.Post",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 667,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 667,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 668,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 668,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 675,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "db.session.commit",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 682,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "flask.g.now",
"line_number": 685,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 685,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 689,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 689,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 689,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 690,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 690,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 690,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 696,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 696,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 696,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 697,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 698,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 703,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 703,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 703,
"usage_type": "attribute"
},
{
"api_name": "flask.abort",
"line_number": 704,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 705,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 705,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 705,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 707,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 707,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 708,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 708,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 710,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 710,
"usage_type": "attribute"
},
{
"api_name": "db.Group",
"line_number": 710,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 715,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 715,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 718,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 718,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 726,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 726,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 729,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 729,
"usage_type": "attribute"
},
{
"api_name": "db.Group",
"line_number": 729,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 730,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 731,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 732,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 734,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 736,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 737,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 737,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 737,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 738,
"usage_type": "call"
},
{
"api_name": "wtforms.TextField",
"line_number": 739,
"usage_type": "call"
},
{
"api_name": "wtforms.IntegerField",
"line_number": 740,
"usage_type": "call"
},
{
"api_name": "wtforms.BooleanField",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 742,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 748,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 748,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 748,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 749,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 749,
"usage_type": "attribute"
},
{
"api_name": "db.Group",
"line_number": 749,
"usage_type": "attribute"
},
{
"api_name": "flask.request.method",
"line_number": 752,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 752,
"usage_type": "name"
},
{
"api_name": "db.Group",
"line_number": 753,
"usage_type": "call"
},
{
"api_name": "db.session.add",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 754,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 755,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 755,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 758,
"usage_type": "attribute"
},
{
"api_name": "db.Group",
"line_number": 758,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 759,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 759,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 760,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 760,
"usage_type": "name"
},
{
"api_name": "db.session.commit",
"line_number": 766,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 766,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 767,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 768,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 768,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 770,
"usage_type": "call"
},
{
"api_name": "flask.g.user.in_group",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 775,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 775,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 778,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 778,
"usage_type": "attribute"
},
{
"api_name": "db.Task",
"line_number": 778,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 781,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 781,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 783,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 783,
"usage_type": "attribute"
},
{
"api_name": "db.User",
"line_number": 783,
"usage_type": "attribute"
},
{
"api_name": "flask.request.method",
"line_number": 786,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 786,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 788,
"usage_type": "call"
},
{
"api_name": "db.Task",
"line_number": 791,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 793,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 793,
"usage_type": "name"
},
{
"api_name": "db.session.add",
"line_number": 801,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 801,
"usage_type": "attribute"
},
{
"api_name": "db.session.commit",
"line_number": 802,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 802,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 804,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 806,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 807,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 807,
"usage_type": "call"
},
{
"api_name": "db.session.query",
"line_number": 809,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 809,
"usage_type": "attribute"
},
{
"api_name": "db.Task",
"line_number": 809,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 812,
"usage_type": "call"
},
{
"api_name": "flask.g.user.in_group",
"line_number": 816,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 816,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 816,
"usage_type": "name"
},
{
"api_name": "db.session.query",
"line_number": 817,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 817,
"usage_type": "attribute"
},
{
"api_name": "db.Task",
"line_number": 817,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 819,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 819,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 821,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 821,
"usage_type": "name"
},
{
"api_name": "db.session.commit",
"line_number": 823,
"usage_type": "call"
},
{
"api_name": "db.session",
"line_number": 823,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 824,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 824,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 827,
"usage_type": "name"
},
{
"api_name": "wtforms.TextField",
"line_number": 828,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 828,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 828,
"usage_type": "name"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 829,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 833,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 833,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 836,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 836,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 837,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 837,
"usage_type": "name"
},
{
"api_name": "flask.g.irc_messages.append",
"line_number": 839,
"usage_type": "call"
},
{
"api_name": "flask.g.irc_messages",
"line_number": 839,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 839,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 843,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 850,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 851,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 852,
"usage_type": "call"
}
] |
35445440233
|
from dexy.common import OrderedDict
import dexy.database
import dexy.doc
import dexy.exceptions
import dexy.parser
import dexy.reporter
import dexy.task
import inspect
import json
import logging
import logging.handlers
import os
import shutil
class Wrapper(object):
"""
Class that assists in interacting with Dexy, including running Dexy.
"""
DEFAULT_ARTIFACTS_DIR = 'artifacts'
DEFAULT_CONFIG_FILE = 'dexy.conf' # Specification of dexy-wide config options.
DEFAULT_DANGER = False
DEFAULT_DB_ALIAS = 'sqlite3'
DEFAULT_DB_FILE = 'dexy.sqlite3'
DEFAULT_DISABLE_TESTS = False
DEFAULT_DONT_USE_CACHE = False
DEFAULT_DRYRUN = False
DEFAULT_EXCLUDE = ''
DEFAULT_GLOBALS = ''
DEFAULT_HASHFUNCTION = 'md5'
DEFAULT_IGNORE_NONZERO_EXIT = False
DEFAULT_LOG_DIR = 'logs'
DEFAULT_LOG_FILE = 'dexy.log'
DEFAULT_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
DEFAULT_LOG_LEVEL = 'DEBUG'
DEFAULT_RECURSE = True
DEFAULT_REPORTS = 'output'
DEFAULT_SILENT = False
LOG_LEVELS = {
'DEBUG' : logging.DEBUG,
'INFO' : logging.INFO,
'WARN' : logging.WARN
}
RENAME_PARAMS = {
'artifactsdir' : 'artifacts_dir',
'conf' : 'config_file',
'dbalias' : 'db_alias',
'dbfile' : 'db_file',
'disabletests' : 'disable_tests',
'dryrun' : 'dry_run',
'ignore' : 'ignore_nonzero_exit',
'logfile' : 'log_file',
'logformat' : 'log_format',
'loglevel' : 'log_level',
'logsdir' : 'log_dir',
'nocache' : 'dont_use_cache'
}
SKIP_KEYS = ['h', 'help', 'version']
def __init__(self, *args, **kwargs):
self.initialize_attribute_defaults()
self.check_config_file_location(kwargs)
self.load_config_file()
self.update_attributes_from_config(kwargs)
self.args = args
self.docs_to_run = []
self.tasks = OrderedDict()
self.pre_attrs = {}
self.state = None
def initialize_attribute_defaults(self):
self.artifacts_dir = self.DEFAULT_ARTIFACTS_DIR
self.config_file = self.DEFAULT_CONFIG_FILE
self.danger = self.DEFAULT_DANGER
self.db_alias = self.DEFAULT_DB_ALIAS
self.db_file = self.DEFAULT_DB_FILE
self.disable_tests = self.DEFAULT_DISABLE_TESTS
self.dont_use_cache = self.DEFAULT_DONT_USE_CACHE
self.dry_run = self.DEFAULT_DRYRUN
self.exclude = self.DEFAULT_EXCLUDE
self.globals = self.DEFAULT_GLOBALS
self.hashfunction = self.DEFAULT_HASHFUNCTION
self.ignore_nonzero_exit = self.DEFAULT_IGNORE_NONZERO_EXIT
self.log_dir = self.DEFAULT_LOG_DIR
self.log_file = self.DEFAULT_LOG_FILE
self.log_format = self.DEFAULT_LOG_FORMAT
self.log_level = self.DEFAULT_LOG_LEVEL
self.recurse = self.DEFAULT_RECURSE
self.reports = self.DEFAULT_REPORTS
self.silent = self.DEFAULT_SILENT
def check_config_file_location(self, kwargs):
self.update_attributes_from_config(kwargs)
def update_attributes_from_config(self, config):
for key, value in config.iteritems():
if not key in self.SKIP_KEYS:
corrected_key = self.RENAME_PARAMS.get(key, key)
if not hasattr(self, corrected_key):
raise Exception("no default for %s" % corrected_key)
setattr(self, corrected_key, value)
def load_config_file(self):
"""
        Look for a config file in the current working dir and load it.
"""
if os.path.exists(self.config_file):
with open(self.config_file) as f:
try:
conf = json.load(f)
except ValueError as e:
msg = inspect.cleandoc("""Was unable to parse the json in your config file '%s'.
Here is information from the json parser:""" % self.config_file)
msg += "\n"
msg += str(e)
raise dexy.exceptions.UserFeedback(msg)
self.update_attributes_from_config(conf)
@classmethod
def default_config(klass):
conf = klass().__dict__.copy()
# Remove any attributes that aren't config options
del conf['args']
del conf['docs_to_run']
del conf['tasks']
for cl_key, internal_key in klass.RENAME_PARAMS.iteritems():
conf[cl_key] = conf[internal_key]
del conf[internal_key]
return conf
def db_path(self):
return os.path.join(self.artifacts_dir, self.db_file)
def log_path(self):
return os.path.join(self.log_dir, self.log_file)
def run(self):
self.setup_run()
self.log.debug("batch id is %s" % self.batch_id)
self.state = 'populating'
for doc in self.docs_to_run:
for task in doc:
task()
self.state = 'settingup'
for doc in self.docs_to_run:
for task in doc:
task()
self.state = 'running'
for doc in self.docs_to_run:
for task in doc:
task()
self.state = 'complete'
self.save_db()
self.setup_graph()
def setup_run(self):
self.check_dexy_dirs()
self.setup_log()
self.setup_db()
self.batch_id = self.db.next_batch_id()
if not self.docs_to_run:
self.setup_docs()
def setup_read(self, batch_id=None):
self.check_dexy_dirs()
self.setup_log()
self.setup_db()
if batch_id:
self.batch_id = batch_id
else:
self.batch_id = self.db.max_batch_id()
def check_dexy_dirs(self):
if not (os.path.exists(self.artifacts_dir) and os.path.exists(self.log_dir)):
raise dexy.exceptions.UserFeedback("You need to run 'dexy setup' in this directory first.")
def setup_dexy_dirs(self):
if not os.path.exists(self.artifacts_dir):
os.mkdir(self.artifacts_dir)
if not os.path.exists(self.log_dir):
os.mkdir(self.log_dir)
def remove_dexy_dirs(self):
shutil.rmtree(self.artifacts_dir)
shutil.rmtree(self.log_dir)
# TODO remove reports dirs
def setup_log(self):
try:
loglevel = self.LOG_LEVELS[self.log_level.upper()]
except KeyError:
msg = "'%s' is not a valid log level, check python logging module docs."
raise dexy.exceptions.UserFeedback(msg % self.log_level)
self.log = logging.getLogger('dexy')
self.log.setLevel(loglevel)
handler = logging.handlers.RotatingFileHandler(
self.log_path(),
encoding="utf-8")
formatter = logging.Formatter(self.log_format)
handler.setFormatter(formatter)
self.log.addHandler(handler)
def setup_db(self):
db_class = dexy.database.Database.aliases[self.db_alias]
self.db = db_class(self)
def setup_docs(self):
for arg in self.args:
self.log.debug("Processing arg %s" % arg)
doc = self.create_doc_from_arg(arg)
if not doc:
raise Exception("no doc created for %s" % arg)
doc.wrapper = self
self.docs_to_run.append(doc)
def create_doc_from_arg(self, arg, *children, **kwargs):
if isinstance(arg, dexy.task.Task):
return arg
elif isinstance(arg, list):
if not isinstance(arg[0], basestring):
msg = "First arg in %s should be a string" % arg
raise dexy.exceptions.UserFeedback(msg)
if not isinstance(arg[1], dict):
msg = "Second arg in %s should be a dict" % arg
raise dexy.exceptions.UserFeedback(msg)
if kwargs:
raise Exception("Shouldn't have kwargs if arg is a list")
if children:
raise Exception("Shouldn't have children if arg is a list")
alias, pattern = dexy.parser.AbstractSyntaxTree.qualify_key(arg[0])
return dexy.task.Task.create(alias, pattern, **arg[1])
elif isinstance(arg, basestring):
            alias, pattern = dexy.parser.AbstractSyntaxTree.qualify_key(arg)  # arg is the pattern string itself here, not a list
return dexy.task.Task.create(alias, pattern, *children, **kwargs)
else:
raise Exception("unknown arg type %s for arg %s" % (arg.__class__.__name__, arg))
def save_db(self):
self.db.save()
## DOCUMENTED above here..
def run_docs(self, *docs):
"""
Convenience method for testing to add docs and then run them.
"""
self.setup_dexy_dirs()
self.docs_to_run = docs
self.run()
def register(self, task):
"""
Register a task with the wrapper
"""
self.tasks[task.key_with_class()] = task
def registered_docs(self):
return [d for d in self.tasks.values() if isinstance(d, dexy.doc.Doc)]
def registered_doc_names(self):
return [d.name for d in self.registered_docs()]
def reports_dirs(self):
return [c.REPORTS_DIR for c in dexy.reporter.Reporter.plugins]
def report(self, *reporters):
"""
Runs reporters. Either runs reporters which have been passed in or, if
none, then runs all available reporters which have ALLREPORTS set to
true.
"""
if not reporters:
reporters = [c() for c in dexy.reporter.Reporter.plugins if c.ALLREPORTS]
for reporter in reporters:
self.log.debug("Running reporter %s" % reporter.ALIASES[0])
reporter.run(self)
def get_child_hashes_in_previous_batch(self, parent_hashstring):
return self.db.get_child_hashes_in_previous_batch(self.batch_id, parent_hashstring)
def load_doc_config(self):
"""
Look for document config files in current working dir and load them.
"""
parser_aliases = dexy.parser.Parser.aliases
for k in parser_aliases.keys():
if os.path.exists(k):
self.log.debug("found doc config file '%s'" % k)
parser = parser_aliases[k](self)
with open(k, "r") as f:
self.doc_config = f.read()
parser.parse(self.doc_config)
break
def setup_config(self):
self.setup_dexy_dirs()
self.setup_log()
self.load_doc_config()
def cleanup_partial_run(self):
if hasattr(self, 'db'):
# TODO remove any entries which don't have
self.db.save()
def setup_graph(self):
"""
Creates a dot representation of the tree.
"""
graph = ["digraph G {"]
for task in self.tasks.values():
if hasattr(task, 'artifacts'):
task_label = task.key_with_class().replace("|", "\|")
label = """ "%s" [shape=record, label="%s\\n\\n""" % (task.key_with_class(), task_label)
for child in task.artifacts:
label += "%s\l" % child.key_with_class().replace("|", "\|")
label += "\"];"
graph.append(label)
for child in task.children:
if not child in task.artifacts:
graph.append(""" "%s" -> "%s";""" % (task.key_with_class(), child.key_with_class()))
elif "Artifact" in task.__class__.__name__:
pass
else:
graph.append(""" "%s" [shape=record];""" % task.key_with_class())
for child in task.children:
graph.append(""" "%s" -> "%s";""" % (task.key_with_class(), child.key_with_class()))
graph.append("}")
self.graph = "\n".join(graph)
|
gotosprey/dexy
|
dexy/wrapper.py
|
wrapper.py
|
py
| 11,970 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "logging.DEBUG",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "logging.WARN",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "dexy.common.OrderedDict",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "inspect.cleandoc",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "dexy.common.exceptions.UserFeedback",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "dexy.common.exceptions",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "dexy.common.exceptions.UserFeedback",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "dexy.common.exceptions",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "dexy.common.exceptions.UserFeedback",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "dexy.common.exceptions",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "logging.handlers.RotatingFileHandler",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "logging.handlers",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "dexy.common.database",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "dexy.common.task",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "dexy.common.exceptions.UserFeedback",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "dexy.common.exceptions",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "dexy.common.exceptions.UserFeedback",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "dexy.common.exceptions",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "dexy.common.parser.AbstractSyntaxTree.qualify_key",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "dexy.common.parser",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "dexy.common.task.Task.create",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "dexy.common.task",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "dexy.common.parser.AbstractSyntaxTree.qualify_key",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "dexy.common.parser",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "dexy.common.task.Task.create",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "dexy.common.task",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "dexy.common.doc",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "dexy.common.reporter",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "dexy.common.reporter",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "dexy.common.parser",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "dexy.common",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 315,
"usage_type": "attribute"
}
] |
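The Wrapper class in the record above resolves configuration in one pass: attribute defaults are set first, then RENAME_PARAMS maps command-line style keys onto them, and any key without a default raises. A minimal, dexy-free sketch of that pattern (class and key names invented for illustration):

class Config(object):
    DEFAULTS = {'artifacts_dir': 'artifacts', 'log_level': 'DEBUG'}
    RENAME = {'artifactsdir': 'artifacts_dir', 'loglevel': 'log_level'}

    def __init__(self, **kwargs):
        # attribute defaults first, so overrides can be validated with hasattr
        for key, value in self.DEFAULTS.items():
            setattr(self, key, value)
        for key, value in kwargs.items():
            corrected = self.RENAME.get(key, key)
            if not hasattr(self, corrected):
                raise Exception("no default for %s" % corrected)
            setattr(self, corrected, value)

c = Config(artifactsdir='out')
assert c.artifacts_dir == 'out' and c.log_level == 'DEBUG'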
38791493575
|
from flask import Flask
from flask_restful import Resource, Api
import __init__ as main  # the route handlers below reference this module as "main"
app=Flask(__name__)
api=Api(app)
class Quote(Resource):
@app.route('/wifi/<int:id>')
def get(id):
x=main.main_(id)
if x==-1:
return 'Not found', 404
else:
return x, 200
@app.route('/trace')
def trace():
x=main.output()
return x, 200
if __name__ == '__main__':
app.run(host="0.0.0.0", port="8080",debug=True)
|
Kaedone/WI-FI_checker
|
api.py
|
api.py
|
py
| 510 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 11,
"usage_type": "name"
}
] |
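Assuming the service in the record above is running locally on port 8080, its two routes could be exercised like this (the id value is made up):

import requests

# look up a scan result by id; the handler returns 404 for unknown ids
resp = requests.get("http://127.0.0.1:8080/wifi/1")
print(resp.status_code, resp.text)

# dump whatever main.output() traces
print(requests.get("http://127.0.0.1:8080/trace").text)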
73027941309
|
from google.cloud import storage
import os
input_folder = "../Crop_Reports/Bengal Gazettes Chunks/"
bucket_name = "calcutta-gazette"
def explicit(bucket_name, source_name, path):
# Explicitly use service account credentials by specifying the private key
# file.
storage_client = storage.Client.from_service_account_json('../API_Keys/Famine Research OCR-cdf9018b001d.json')
destination_name = source_name
source_name2 = os.path.join(path, source_name)
# Make an authenticated API request
# buckets = list(storage_client.list_buckets())
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(destination_name)
if not blob.exists():
blob.upload_from_filename(source_name2)
if __name__ == '__main__':
folder_list = os.listdir(input_folder)
for folder in folder_list:
path = os.path.join(input_folder, folder)
file_list = os.listdir(path)
for file in file_list:
print(file)
explicit(bucket_name, file, path)
|
jgoman99/British-Bengal-Weekly-Crop-Reports
|
Python Code/splits_to_cloud.py
|
splits_to_cloud.py
|
py
| 1,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "google.cloud.storage.Client.from_service_account_json",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "google.cloud.storage.Client",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.storage",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 33,
"usage_type": "call"
}
] |
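The uploader above only writes blobs that do not already exist; one way to check what actually landed is to list the bucket with the same client (the service-account path here is the one the record itself uses):

from google.cloud import storage

client = storage.Client.from_service_account_json(
    '../API_Keys/Famine Research OCR-cdf9018b001d.json')
# list_blobs iterates over every object currently in the bucket
for blob in client.list_blobs("calcutta-gazette"):
    print(blob.name, blob.size)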
40132948104
|
import argparse
from math import sqrt
import Image
import ImageDraw
def color_map(v):
assert 0 <= v <= 255
if v == 0: return (0, 0, 0)
if v == 255: return (255, 255, 255)
if v < 4 * 8:
# 0 .. 31
return (0, 255 - (31 * 4) + v * 4, 0)
if v < 16 * 8:
# 32 .. 127
# 0 .. 95
return (128 + (v - 32) * 127 / 95, 0, 0)
return (0, v, v)
def convert():
if args.test:
data = map(chr, range(256))
else:
data = file(args.in_file).read()
size = len(data)
w = 1
while size / w > w * 8:
w *= 2
h = size / w
if size % w != 0: h += 1
image = Image.new('RGB', (w, h))
d = ImageDraw.Draw(image)
for i, c in enumerate(data):
d.point((i % w, i / w), color_map(ord(c)))
image.save(args.out_file, 'PNG')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Show binary in color pattern')
parser.add_argument('--test', action='store_true')
parser.add_argument('in_file', action='store')
parser.add_argument('--out_file', action='store', default='out.png')
args = parser.parse_args()
convert()
|
nishio/binary_color
|
binary_color.py
|
binary_color.py
|
py
| 1,161 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "Image.new",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "ImageDraw.Draw",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 43,
"usage_type": "call"
}
] |
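The width loop in convert() keeps doubling w until the image is at most eight times taller than it is wide (size/w <= 8*w). A worked check of that invariant for a hypothetical 1000-byte input:

size = 1000
w = 1
while size / w > w * 8:  # same condition as in the record; true division changes nothing for this input
    w *= 2
h = size // w + (1 if size % w else 0)
print(w, h)  # -> 16 63; 16 * 63 = 1008 pixels, enough for all 1000 bytes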
39784068604
|
import filecmp, os, sys
sys.path.append('c:\\dev\\pytWinc\\superpy')
sys.path.append('c:\\dev\\pytWinc\\superpy\\utils_superpy')
from utils.utils import calculate_inventory, get_path_to_directory_of_file
directory_of_testcase = "fn_calculate_inventory"
path_to_directory_of_testcase = get_path_to_directory_of_file(directory_of_testcase)
# input test files:
path_to_input_file_sold_test_01 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_sold_for_testcase_01.csv')
path_to_input_file_cost_test_01 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_cost_for_testcase_01.csv')
path_to_input_file_sold_test_02 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_sold_for_testcase_02.csv')
path_to_input_file_cost_test_02 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_cost_for_testcase_02.csv')
'''
About the data structure of the expected test result:
a list of lists is a common and convenient (but not the only) way to build tables in Python,
and the same applies to Rich. So expected test results take the shape of a list of lists,
which already worked while testing fn calculate_expired_products_on_day.
'''
def test_01_calculate_inventory_happy_flow():
filecmp.clear_cache()
date_on_which_to_calculate_inventory = '2024-05-21'
expected_test_result = [['b_3', 'candle', '3.1', '2024-01-11', 'does not expire'], ['b_6', 'book', '0.5', '2024-01-15', 'does not expire'], ['b_39', 'skeelers', '1.1', '2024-04-20', 'does not expire'], ['b_45', 'shoes', '1.4', '2024-04-30', 'does not expire'], ['b_48', 'fish', '2.5', '2024-05-08', '2024-05-23'], ['b_51', 'kiwi', '0.5', '2024-05-15', '2024-05-30'], ['b_54', 'onion', '1.1', '2024-05-21', '2024-06-05']]
actual_result = calculate_inventory(date_on_which_to_calculate_inventory, path_to_input_file_sold_test_01,
path_to_input_file_cost_test_01)
assert actual_result == expected_test_result
def test_02_calculate_inventory_happy_flow():
filecmp.clear_cache()
date_on_which_to_calculate_inventory = '2023-11-15'
expected_test_result = [['b_6', 'garbage_bag', '5.2', '2023-10-17', 'does not expire'], ['b_26', 'tomato', '2.5', '2023-10-31', '2023-11-15'], ['b_28', 'lettuce', '0.5', '2023-11-01', '2023-11-16'], ['b_30', 'lettuce', '4.0', '2023-11-02', '2023-11-17'], ['b_32', 'tomato', '5.2', '2023-11-03', '2023-11-18'], ['b_34', 'lightbulb', '4.0', '2023-11-06', 'does not expire'], ['b_36', 'tomato', '4.0', '2023-11-07', '2023-11-22'], ['b_38', 'rice', '0.5', '2023-11-08', '2023-11-23'], ['b_40', 'cheese', '1.4', '2023-11-09', '2023-11-24'], ['b_42', 'book', '5.2', '2023-11-11', 'does not expire'],
['b_44', 'oats', '0.5', '2023-11-14', '2023-11-29']]
actual_result = calculate_inventory(date_on_which_to_calculate_inventory, path_to_input_file_sold_test_02,
path_to_input_file_cost_test_02)
assert actual_result == expected_test_result
|
davidjfk/David_Sneek_Superpy
|
test_utils/fn_calculate_inventory/test_calculate_inventory.py
|
test_calculate_inventory.py
|
py
| 2,936 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "utils.utils.get_path_to_directory_of_file",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "filecmp.clear_cache",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "utils.utils.calculate_inventory",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "filecmp.clear_cache",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "utils.utils.calculate_inventory",
"line_number": 42,
"usage_type": "call"
}
] |
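As the docstring in the record notes, expectations are plain lists of lists, so pytest's == compares them row by row and cell by cell; row order matters. A tiny illustration:

expected = [['b_3', 'candle', '3.1'], ['b_6', 'book', '0.5']]
actual = [['b_3', 'candle', '3.1'], ['b_6', 'book', '0.5']]
assert actual == expected                  # identical rows in identical order
assert actual != list(reversed(expected))  # same rows, different order -> not equal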
43626494196
|
# VIIRS packge
from __future__ import division, print_function
import datetime
import numpy as np
from osgeo import gdal
from scipy import ndimage
import core
import env
bumper = env.environment()
class viirs(core.raster):
def __init__(self):
core.raster.__init__(self,'viirs')
return
def read(self,infile):
out = self._copy()
tree = '//HDFEOS/GRIDS/VNP_Grid_{}_2D/Data_Fields/'
field = 'SurfReflect_{0}{1}_1'
base = 'HDF5:"{0}":{1}{2}'
m = [i for i in range(12) if i not in [0,6,9]]
i = [i for i in range(1,4)]
bands = [m,i]
res = ['1km','500m']
mode = ['M','I']
band = gdal.Open(base.format(infile,tree.format('1km'),field.format('QF',1)))
out.metadata = band.GetMetadata()
cloudQA = self._extractBits(band.ReadAsArray(),2,3)
hiresCloudQA = ndimage.zoom(cloudQA,2,order=0)
band = None
band = gdal.Open(base.format(infile,tree.format('1km'),field.format('QF',2)))
shadowQA = self._extractBits(band.ReadAsArray(),3,3)
hiresShadowQA = ndimage.zoom(shadowQA,2,order=0)
# qa = (cloudQA>0)&(shadowQA<1)
mask = ~(hiresCloudQA>0)&(hiresShadowQA<1)
east,west = float(out.metadata['EastBoundingCoord']), float(out.metadata['WestBoundingCoord'])
north,south = float(out.metadata['NorthBoundingCoord']), float(out.metadata['SouthBoundingCoord'])
out.extent = [west,south,east,north]
databands = {'mask':mask}
bandNames = ['mask']
for i in range(2):
for j in range(len(bands[i])):
subdataset = base.format(infile,tree.format(res[i]),field.format(mode[i],bands[i][j]))
band = gdal.Open(subdataset)
if i == 0:
data = ndimage.zoom(band.ReadAsArray(),2,order=0)
else:
data = band.ReadAsArray()
data = np.ma.masked_where(data<0,data)
data = np.ma.masked_where(data>10000,data)
bName = '{0}{1}'.format(mode[i],bands[i][j])
databands[bName] = data.astype(np.int16)
bandNames.append(bName)
band = None
data = None
out.bands = databands
out.bandNames = bandNames
out.updateMask()
coords = {}
out.nativeCRS = {'init':'epsg:6974'}
out.proj = '+proj=sinu +R=6371007.181 +nadgrids=@null +wktext'
coords['lon'],coords['lat'] = self._geoGrid(out.extent,out.bands['I1'].shape,out.proj,wgsBounds=False)
out.coords = coords
out.gt = None
        date = '{0} {1}{2}'.format(out.metadata['RangeBeginningDate'],out.metadata['RangeBeginningTime'],' UTC')  # space between date and time so the strptime format below can match
out.coords['date'] = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S.%f %Z')
return out
|
Servir-Mekong/bump
|
bump/viirs.py
|
viirs.py
|
py
| 2,977 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "env.environment",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "core.raster",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "core.raster.__init__",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "core.raster",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.zoom",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.zoom",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.zoom",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "numpy.ma.masked_where",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "numpy.ma.masked_where",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "numpy.int16",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 102,
"usage_type": "attribute"
}
] |
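_extractBits comes from the package's core.raster base class and is not shown in the record; a plausible stand-in (the inclusive start/end argument order is an assumption) that pulls a bit range out of a QA integer array:

import numpy as np

def extract_bits(qa, start, end):
    # keep bits [start, end] inclusive, shifted down to the low end
    width = end - start + 1
    return (qa >> start) & ((1 << width) - 1)

qa = np.array([0b001100], dtype=np.int16)
print(extract_bits(qa, 2, 3))  # -> [3]: both bits of the 2-bit field are set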
72579615228
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 11:47:02 2019
@author: hwan - Took out relevant code from dolfin's plotting.py _plot_matplotlib code
             - To enter dolfin's own plotting code, use dl.plot(some_dolfin_object) where some_dolfin_object is a 3D object; an error will be thrown
"""
import matplotlib.pyplot as plt
import dolfin.cpp as cpp
import pdb #Equivalent of keyboard in MATLAB, just add "pdb.set_trace()"
def plot_3D(obj, title, angle_1, angle_2):
# Importing this toolkit has side effects enabling 3d support
from mpl_toolkits.mplot3d import Axes3D # noqa
# Enabling the 3d toolbox requires some additional arguments
plt.title(title)
ax = plt.gca(projection='3d')
ax.set_aspect('auto')
ax.view_init(angle_1, angle_2)
# For dolfin.function.Function, extract cpp_object
if hasattr(obj, "cpp_object"):
obj = obj.cpp_object()
if isinstance(obj, cpp.function.Function):
return my_mplot_function(ax, obj,)
elif isinstance(obj, cpp.mesh.Mesh):
return my_mplot_mesh(ax, obj)
def my_mplot_mesh(ax, mesh):
tdim = mesh.topology().dim()
gdim = mesh.geometry().dim()
if gdim == 3 and tdim == 3:
bmesh = cpp.mesh.BoundaryMesh(mesh, "exterior", order=False)
my_mplot_mesh(ax, bmesh)
elif gdim == 3 and tdim == 2:
xy = mesh.coordinates()
return ax.plot_trisurf(*[xy[:, i] for i in range(gdim)],
triangles=mesh.cells())
def my_mplot_function(ax, f):
mesh = f.function_space().mesh()
gdim = mesh.geometry().dim()
C = f.compute_vertex_values(mesh)
X = [mesh.coordinates()[:, i] for i in range(gdim)]
return ax.scatter(*X, c=C)
|
cotran2/Thermal_Fin_Heat_Simulator
|
Utilities/plot_3D.py
|
plot_3D.py
|
py
| 1,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.title",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "dolfin.cpp.function",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "dolfin.cpp",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "dolfin.cpp.mesh",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "dolfin.cpp",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "dolfin.cpp.mesh.BoundaryMesh",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "dolfin.cpp.mesh",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "dolfin.cpp",
"line_number": 36,
"usage_type": "name"
}
] |
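plot_3D above needs dolfin objects to call it; the underlying Axes3D idiom can be smoke-tested with matplotlib alone (the surface data here is invented):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: importing enables 3d support

x, y = np.meshgrid(np.linspace(0, 1, 20), np.linspace(0, 1, 20))
z = np.sin(np.pi * x) * np.cos(np.pi * y)

plt.title("demo surface")
ax = plt.gca(projection='3d')  # same call style as the record; works on older matplotlib
ax.view_init(30, 45)
ax.scatter(x.ravel(), y.ravel(), z.ravel(), c=z.ravel())
plt.savefig("demo.png")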
38365303311
|
from datetime import datetime, timedelta
import logging
import os
import json
import pandas as pd
import requests
try:
from .exceptions import ApexApiException
except:
from exceptions import ApexApiException
class Apex_API:
def __init__(self, api_key: str):
self.api_key = api_key
logging.basicConfig(
level=logging.INFO,
format="[%(levelname)s] %(asctime)s %(message)s",
datefmt="%Y-%m-%d %I:%M:%S %p", # this defines the date format for the (asctime) part above
handlers=[logging.StreamHandler()],
            # a single StreamHandler means logs go to the terminal only; add a FileHandler to also write a file
)
logging.getLogger("requests").setLevel(
logging.WARNING
        )  # get rid of https debug garbage
    def __iter__(self):
        logging.info("what")
        return iter([])  # __iter__ must hand back an iterator
def __str__(self):
return "Apex API Client Object"
def __repr__(self):
return "Apex API"
def get_apex_player_stats(self, player: str) -> pd.DataFrame:
try:
data = requests.get(
f"https://api.mozambiquehe.re/bridge?version=5&platform=PC&player={player}&auth={self.api_key}"
)
logging.info(
f"Grabbing data for Player {player}, Status Code was {data.status_code}"
)
df = data.json()
return df
except BaseException as e:
logging.error(e)
raise ApexApiException
def get_apex_map_rotation(self) -> pd.DataFrame:
try:
data = requests.get(
f"https://api.mozambiquehe.re/maprotation?version=2&auth={self.api_key}"
)
logging.info(
f"Grabbing data for current Map Rotation, Status Code was {data.status_code}"
)
df = data.json()
df_current = pd.DataFrame([df["battle_royale"]["current"]])
df_current["type"] = "current"
df_next = pd.DataFrame([df["battle_royale"]["next"]])
df_next["remainingSecs"] = 0
df_next["remainingMins"] = 0
df_next["remainingTimer"] = "00:00:00"
df_next["type"] = "next"
df_combo = pd.concat([df_current, df_next])
logging.info(f"Grabbing {len(df_combo)} Records for Apex Map Rotation")
return df_combo
except BaseException as e:
logging.error(e)
raise ApexApiException
|
jyablonski/apex_api_scraper
|
src/utils.py
|
utils.py
|
py
| 2,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "exceptions.ApexApiException",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "exceptions.ApexApiException",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 53,
"usage_type": "attribute"
}
] |
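A usage sketch for the client above; the API key env var, player name, and import path are all placeholders, not part of the record:

import os
from src.utils import Apex_API  # import path assumed from the record's sub_path

client = Apex_API(api_key=os.environ["APEX_API_KEY"])
stats = client.get_apex_player_stats("SomePlayer")  # dict decoded from JSON
rotation = client.get_apex_map_rotation()           # DataFrame with 'current' and 'next' rows
print(rotation.head())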
40695061264
|
"""empty message
Revision ID: 41124ac6e47e
Revises: 57296b50c499
Create Date: 2014-11-30 17:08:44.396000
"""
# revision identifiers, used by Alembic.
revision = '41124ac6e47e'
down_revision = '57296b50c499'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('provider', sa.Column('address', sa.String(length=250), nullable=True))
op.add_column('provider', sa.Column('emails', sa.String(length=250), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('provider', 'emails')
op.drop_column('provider', 'address')
### end Alembic commands ###
|
StasEvseev/adminbuy
|
migrations/versions/41124ac6e47e_.py
|
41124ac6e47e_.py
|
py
| 800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "alembic.op.add_column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "alembic.op.add_column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 48,
"usage_type": "name"
}
] |
73785815229
|
"""empty message
Revision ID: 391b24b33343
Revises: e4338c095afb
Create Date: 2021-06-24 16:47:10.434392
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '391b24b33343'
down_revision = 'e4338c095afb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('post_id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(length=1000), nullable=False),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('post_reactions', sa.Column('reaction', sa.Boolean(), nullable=True))
op.drop_constraint('post_reactions_post_id_fkey', 'post_reactions', type_='foreignkey')
op.create_foreign_key(None, 'post_reactions', 'posts', ['post_id'], ['id'], ondelete='CASCADE')
op.drop_column('post_reactions', '_reaction')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('post_reactions', sa.Column('_reaction', sa.BOOLEAN(), autoincrement=False, nullable=True))
    # NOTE: drop_constraint(None, ...) fails at runtime because a constraint
    # name is required; fill in the autogenerated foreign-key name before
    # running this downgrade
    op.drop_constraint(None, 'post_reactions', type_='foreignkey')
op.create_foreign_key('post_reactions_post_id_fkey', 'post_reactions', 'posts', ['post_id'], ['id'])
op.drop_column('post_reactions', 'reaction')
op.drop_table('comments')
# ### end Alembic commands ###
|
composerben/flask-group-project
|
migrations/versions/20210624_164710_fix_migration.py
|
20210624_164710_fix_migration.py
|
py
| 1,640 |
python
|
en
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op.add_column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_constraint",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_foreign_key",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "alembic.op.add_column",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.BOOLEAN",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_constraint",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_foreign_key",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 43,
"usage_type": "name"
}
] |
33983748034
|
from django.shortcuts import render, redirect
from django.views.generic import ListView, \
CreateView, DetailView, UpdateView, DeleteView
from .models import Post, Review
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from accounts.models import UserFollows
from .forms import PostForm, ReviewForm
from django.template.defaulttags import register
from django.contrib import messages
@register.filter
# template filter: lets a template iterate over range(rating) so it can
# render one star per point of the review's integer rating
def get_range(value):
return range(value)
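# Template usage sketch for the filter above (assumes an integer
# `review.rating` in the context; the star markup is illustrative):
#   {% for _ in review.rating|get_range %}<span class="star">*</span>{% endfor %}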
def flow(request):
following = UserFollows.objects.filter(following=request.user)
follower = UserFollows.objects.filter(follower=request.user)
posts = []
reviews = []
for post in Post.objects.all().order_by('-date_posted'):
posts.append(post)
for review in Review.objects.all().order_by('-date_posted'):
reviews.append(review)
posts_reviews = []
for post in posts:
if post.author == request.user:
posts_reviews.append(post)
print(post)
for contact in follower:
if post.author == contact.following:
posts_reviews.append(post)
for review in reviews:
if review.author == request.user:
posts_reviews.append(review)
for contact in follower:
if review.author == contact.following:
posts_reviews.append(review)
if review.ticket.author == request.user:
posts_reviews.append(review)
posts_reviews = list(set(posts_reviews))
posts_reviews.sort(key=lambda x: x.date_posted, reverse=True)
for p in posts_reviews:
print(p.type)
context = {
'follower': follower,
'following': following,
'post_review': posts_reviews
}
return render(request, 'flow.html', context)
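# URLconf sketch (hypothetical module layout; the 'flow' URL name is assumed
# by the redirect('flow') calls further down):
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('flow/', views.flow, name='flow')]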
class ReviewCreateView(LoginRequiredMixin, CreateView):
model = Review
fields = ['ticket', 'headline', 'rating', 'content', ]
def form_valid(self, form):
form.instance.author = self.request.user
try:
return super().form_valid(form)
except ValueError:
            messages.add_message(self.request, messages.INFO, 'Hello world.')
            # without an explicit return the view would fall through and return
            # None; re-render the bound form instead
            return self.form_invalid(form)
class ReviewDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Review
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'content', 'header_image']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['title', 'content', 'header_image']
def form_valid(self, form):
form.instance.author = self.request.user
self.object = form.save()
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
class PostListView(ListView):
model = Post
context_object_name = 'posts'
ordering = ['-date_posted']
class ReviewListView(ListView):
model = Review
context_object_name = 'reviews'
ordering = ['-date_posted']
class PostDetailView(DetailView):
model = Post
class ReviewDetailView(DetailView):
model = Review
class ReviewUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Review
    # the create view exposes 'content' (not 'body'), so mirror that field name here
    fields = ['headline', 'content', 'rating']
def form_valid(self, form):
form.instance.author = self.request.user
self.object = form.save()
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
def review_create_view(request):
    # bind only when data was actually posted, matching the ReviewForm below
    form2 = PostForm(request.POST or None, request.FILES or None)
form = ReviewForm(request.POST or None)
context = {
"form2": form2,
"form": form,
}
if all([form2.is_valid(), form.is_valid()]):
current_user = request.user
parent = form2.save(commit=False)
parent.author_id = current_user.id
parent.reviewed = 'true'
parent.save()
child = form.save(commit=False)
child.author_id = current_user.id
child.ticket = parent
child.save()
print("form", form.cleaned_data)
print("form2", form2.cleaned_data)
context['message'] = 'data saved'
return redirect('flow')
# return render(request, 'reviews/review_create.html', context)
else:
return render(request, 'reviews/review_create.html', context)
def review_of_ticket(request, pk):
instance = Post.objects.get(id=pk)
form = ReviewForm(request.POST or None)
review_form_ticket = instance
context = {
"form": form,
"ticket": review_form_ticket,
}
if form.is_valid():
current_user = request.user
child = form.save(commit=False)
child.author_id = current_user.id
child.ticket = instance
instance.reviewed = 'true'
child.save()
instance.save()
form.save()
return redirect('flow')
else:
return render(request, "website/review_form.html", context,)
def view_tickets_reviews(request):
object1 = Post.objects.filter(author=request.user).order_by('-date_posted')
object2 = Review.objects.filter(
author=request.user).order_by('-date_posted')
context = {
'object1': object1,
'object2': object2,
}
return render(request, "website/review_post_detail.html", context)
|
maximesoydas/maxweb
|
website/views.py
|
views.py
|
py
| 6,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.template.defaulttags.register.filter",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.template.defaulttags.register",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "accounts.models.UserFollows.objects.filter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "accounts.models.UserFollows.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.UserFollows",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "accounts.models.UserFollows.objects.filter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "accounts.models.UserFollows.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.UserFollows",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "models.Post.objects.all",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "models.Review.objects.all",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Review.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "models.Review",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "django.views.generic.CreateView",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "models.Review",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.add_message",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.INFO",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.UserPassesTestMixin",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DeleteView",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "models.Review",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "django.views.generic.CreateView",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.UserPassesTestMixin",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.UserPassesTestMixin",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DeleteView",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "models.Review",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "models.Review",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.UserPassesTestMixin",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "models.Review",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "forms.PostForm",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "forms.ReviewForm",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "models.Post.objects.get",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "forms.ReviewForm",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "models.Post.objects.filter",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "models.Review.objects.filter",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "models.Review.objects",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "models.Review",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 224,
"usage_type": "call"
}
] |
25497427443
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class TinyImageNet:
def __init__(self, root, train=True, transform=None, target_transform=None, test_transform=None, target_test_transform=None):
self.transform = transform
self.target_transform = target_transform
self.target_test_transform = target_test_transform
self.test_transform = test_transform
self.TrainData = []
self.TrainLabels = []
self.TestData = []
self.TestLabels = []
if train:
path = root + '/TinyImageNet/train/'
else:
path = root + '/TinyImageNet/val/'
self.data = np.load(path + 'data.npy')
self.targets = np.load(path + 'targets.npy')
def concatenate(self, datas, labels):
con_data = datas[0]
con_label = labels[0]
for i in range(1, len(datas)):
con_data = np.concatenate((con_data, datas[i]), axis=0)
con_label = np.concatenate((con_label, labels[i]), axis=0)
con_label = np.array(con_label, dtype=np.int64)
return con_data, con_label
def getTestData(self, classes):
datas, labels = [], []
for label in range(classes[0], classes[1]):
data = self.data[np.array(self.targets) == label]
datas.append(data)
labels.append(np.full((data.shape[0]), label))
datas, labels = self.concatenate(datas, labels)
        # explicit emptiness checks: TestData starts as a list but becomes an
        # ndarray after the first call, and comparing an ndarray to [] is ambiguous
        self.TestData = datas if len(self.TestData) == 0 else np.concatenate((self.TestData, datas), axis=0)
        self.TestLabels = labels if len(self.TestLabels) == 0 else np.concatenate((self.TestLabels, labels), axis=0)
print("the size of test set is %s" % (str(self.TestData.shape)))
print("the size of test label is %s" % str(self.TestLabels.shape))
def getTestData_up2now(self, classes):
datas, labels = [], []
for label in range(classes[0], classes[1]):
data = self.data[np.array(self.targets) == label]
datas.append(data)
labels.append(np.full((data.shape[0]), label))
datas, labels = self.concatenate(datas, labels)
self.TestData = datas
self.TestLabels = labels
print("the size of test set is %s" % (str(datas.shape)))
print("the size of test label is %s" % str(labels.shape))
def getTrainData(self, classes):
datas, labels = [], []
for label in range(classes[0], classes[1]):
data = self.data[np.array(self.targets) == label]
datas.append(data)
labels.append(np.full((data.shape[0]), label))
self.TrainData, self.TrainLabels = self.concatenate(datas, labels)
print("the size of train set is %s" % (str(self.TrainData.shape)))
print("the size of train label is %s" % str(self.TrainLabels.shape))
def getTrainItem(self, index):
img, target = Image.fromarray(self.TrainData[index]), self.TrainLabels[index]
if self.transform:
img = self.transform(img)
if self.target_transform:
target = self.target_transform(target)
return index, img, target
def getTestItem(self, index):
img, target = Image.fromarray(self.TestData[index]), self.TestLabels[index]
if self.test_transform:
img = self.test_transform(img)
if self.target_test_transform:
target = self.target_test_transform(target)
return index, img, target
def __getitem__(self, index):
        # length checks instead of comparisons with []: after getTrainData /
        # getTestData these fields are NumPy arrays, and `array != []` is ambiguous
        if len(self.TrainData) != 0:
            return self.getTrainItem(index)
        elif len(self.TestData) != 0:
            return self.getTestItem(index)
    def __len__(self):
        if len(self.TrainData) != 0:
            return len(self.TrainData)
        elif len(self.TestData) != 0:
            return len(self.TestData)
def get_image_class(self, label):
return self.data[np.array(self.targets) == label]
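# Minimal usage sketch (assumes root contains TinyImageNet/train/data.npy and
# targets.npy; class range and indexing follow the methods above):
#   train_set = TinyImageNet('./data', train=True)
#   train_set.getTrainData([0, 10])        # load classes 0..9
#   index, img, target = train_set[0]
#   print(len(train_set), target)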
|
ruixiang-wang/Incremental-Learning-Research
|
PRE-master/TinyImageNet.py
|
TinyImageNet.py
|
py
| 4,064 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "numpy.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 96,
"usage_type": "call"
}
] |
1478963521
|
import numpy
import math
import itertools
from hashlib import sha1
from mbfit.exceptions import XYZFormatError, InvalidValueError, InconsistentValueError
from .fragment import Fragment
class Molecule(object):
"""
Stores the fragments of a Molecule
"""
def __init__(self, fragments):
"""
Creates a new Molecule
        Args:
            fragments - the list of Fragments that make up this Molecule
Returns:
A new Molecule
"""
# list of fragments in this molecule
self.fragments = []
for fragment in fragments:
self.add_fragment(fragment)
# list of energies for this molecule, filled in by get_nmer_energies
self.energies = {}
# list of nmer_energies for this molecule, filled by get_nmer_energies
self.nmer_energies = []
self.mb_energies = []
def get_name(self):
"""
Gets the name of this molecule, consists of the names of the fragments in standard order connected by a dash '-'
Args:
None
Returns:
The name of this molecule
"""
return "-".join([fragment.get_name() for fragment in self.get_fragments()])
def get_symmetry(self):
"""
Gets the symmetry of this molecule
Args:
None
Returns:
The symmetry of this molecule in A1B2_C1D1E1 form
"""
# used to assemble the symmetry string
try:
symmetry = self.get_fragments()[0].get_symmetry()
except IndexError:
# if there are no fragments, symmetry is empty string
return ""
# add each fragment's symmetry to the string
for fragment in self.get_fragments()[1:]:
symmetry += "_" + fragment.get_symmetry()
return symmetry
def add_fragment(self, fragment):
"""
Adds a fragment to this molecule
Args:
fragment - the fragment to add
Returns:
None
"""
# make sure the symmetry class of the atoms in this fragment doesn't violate the 1 symmetry class -> 1 atom type rule
for existing_fragment in self.get_fragments():
if fragment.get_name() == existing_fragment.get_name():
for atom_new, atom_old in zip(fragment.get_atoms(), existing_fragment.get_atoms()):
if atom_new.get_name() != atom_old.get_name():
raise InconsistentValueError("name of atom {} from one {} fragment".format(atom_old.get_name(), existing_fragment.get_name()),
"name of atom {} from another {} fragment".format(atom_new.get_name(), fragment.get_name()),
atom_old.get_name(),
atom_new.get_name(),
"atoms in fragments with the same name must have the same names in the same order.")
if atom_new.get_symmetry_class() != atom_old.get_symmetry_class():
raise InconsistentValueError("symmetry class of atom {} from one {} fragment".format(atom_old.get_name(), existing_fragment.get_name()),
"symmetry class of atom {} from another {} fragment".format(atom_new.get_name(), fragment.get_name()),
atom_old.get_symmetry_class(),
atom_new.get_symmetry_class(),
"atoms in fragments with the same name must have the same symmetry classes in the same order.")
else:
for atom_new in fragment.get_atoms():
for atom_old in existing_fragment.get_atoms():
if atom_new.get_symmetry_class() == atom_old.get_symmetry_class():
raise InconsistentValueError("symmetry class of atom {} from {} fragment".format(atom_old.get_name(), existing_fragment.get_name()),
"symmetry class of atom {} from {} fragment".format(atom_new.get_name(), fragment.get_name()),
atom_old.get_symmetry_class(),
atom_new.get_symmetry_class(),
"atoms in fragments with different names cannot be equivelent and should not have the same symmetry class.")
self.fragments.append(fragment)
return
def get_fragments(self):
"""
Gets a list of the fragments in this molecule in standard order
Args:
None
Returns:
List of fragments in this molecule in standard order
"""
return self.fragments
def get_atoms(self):
"""
Gets a list of the atoms in this molecule in standard order
fragments are first sorted into standard order, and then atoms within those fragments are put in their standard order.
Args:
None
Returns:
List of atoms in this molecule in standard order
"""
atoms = []
for fragment in self.get_fragments():
atoms += fragment.get_atoms()
return atoms
def get_charge(self, fragments = None):
"""
Gets the charge of this molecule by summing the charges of its fragments
Args:
            fragments - list of fragment indices; if specified, only get the charge of these fragments, default is to include all fragments
Returns:
Sum charge of all or some of the fragments of this molecule
"""
        if fragments is None:
fragments = range(len(self.get_fragments()))
charge = 0
for index in fragments:
charge += self.get_fragments()[index].get_charge()
return charge
def get_spin_multiplicity(self, fragments = None):
"""
Gets the spin multiplicity of this molecule by summing the spin multiplicities of its fragments
Args:
            fragments - list of fragment indices; if specified, only get the spin multiplicity of these fragments, default is to include all fragments
Returns:
Sum spin multiplicity of all or some of the fragments of this molecule
"""
        if fragments is None:
fragments = range(len(self.get_fragments()))
spin_multiplicity = 1
for index in fragments:
spin_multiplicity += self.get_fragments()[index].get_spin_multiplicity() - 1
return spin_multiplicity
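    # Worked example of the accumulator above: two fragments of multiplicity 2
    # combine to 1 + (2 - 1) + (2 - 1) = 3.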
def get_num_fragments(self):
"""
Gets the number of fragments in this molecule
Args:
None
Returns:
Number of fragments in this molecule
"""
return len(self.get_fragments())
def get_num_atoms(self):
"""
Gets the number of atoms in this molecule
Args:
None
Returns:
Number of atoms in this molecule
"""
atoms = 0
for fragment in self.get_fragments():
atoms += fragment.get_num_atoms()
return atoms
def translate(self, x, y, z):
"""
Translates all the atoms in this molecule by the given coordinates
Args:
x - amount to translate along x axis
y - amount to translate along y axis
z - amount to translate along z axis
Returns:
None
"""
for fragment in self.get_fragments():
fragment.translate(x, y, z)
def rotate(self, quaternion, origin_x = 0, origin_y = 0, origin_z = 0):
"""
Rotates this Molecule using the rotation defined by the given Quaternion
Args:
quaternion - the Quaternion to rotate by
origin_x - x position of the point to rotate around, default is 0
origin_y - y position of the point to rotate around, default is 0
origin_z - z position of the point to rotate around, default is 0
Returns:
None
"""
for fragment in self.get_fragments():
fragment.rotate(quaternion, origin_x, origin_y, origin_z)
def move_to_center_of_mass(self):
"""
        Moves the molecule to its center of mass
Args:
None
Returns:
None
"""
# keep track of the total weighted mass along each axis
total_x = 0
total_y = 0
total_z = 0
# keeps track of the total mass
total_mass = 0
# loop thru every atom in the molecule, adding its contribution to each coordinate mass
for atom in self.get_atoms():
total_x += atom.get_x() * atom.get_mass()
total_y += atom.get_y() * atom.get_mass()
total_z += atom.get_z() * atom.get_mass()
total_mass += atom.get_mass()
        # calculate the center of mass by dividing the total weighted mass by the total mass
center_x = total_x / total_mass
center_y = total_y / total_mass
center_z = total_z / total_mass
# translate this molecule to the center of mass
self.translate(-center_x, -center_y, -center_z)
def rotate_on_principal_axes(self):
"""
Rotates a molecule on to its principal axis
Args:
None
Returns:
None
"""
# first we calculate the moment of inertia tensor
# [ Ixx Ixy Ixz ]
# [ Iyx Iyy Iyz ]
# [ Izx Izy Izz ]
I = [[0, 0, 0] for i in range(3)]
# loop over every atom and add their contributions to the moment of inertia tensor
for atom in self.get_atoms():
# Ixx
I[0][0] += (atom.get_y() ** 2 + atom.get_z() ** 2) * atom.get_mass()
# Ixy
I[1][0] += - (atom.get_x() * atom.get_y()) * atom.get_mass()
# Ixz
I[2][0] += - (atom.get_x() * atom.get_z()) * atom.get_mass()
# Iyx
I[0][1] += - (atom.get_y() * atom.get_x()) * atom.get_mass()
# Iyy
I[1][1] += (atom.get_x() ** 2 + atom.get_z() ** 2) * atom.get_mass()
# Iyz
I[2][1] += - (atom.get_y() * atom.get_z()) * atom.get_mass()
# Izx
I[0][2] += - (atom.get_z() * atom.get_x()) * atom.get_mass()
# Izy
I[1][2] += - (atom.get_z() * atom.get_y()) * atom.get_mass()
# Izz
I[2][2] += (atom.get_x() ** 2 + atom.get_y() ** 2) * atom.get_mass()
inertia_tensor = numpy.matrix(I)
# print("Inertia Tensor:", inertia_tensor)
# get numpy matrix from the matrix of principal moments
# get the moments and principal axis as eigen values and eigen vectors
(moments, principal_axes) = numpy.linalg.eigh(inertia_tensor)
idx = numpy.argsort(moments)[::-1]
moments = moments[idx]
principal_axes = principal_axes[:,idx]
fifthmoment = numpy.zeros(3)
# only works for molecules with no symmetry
for atom in self.get_atoms():
fifthmoment += (numpy.matrix([atom.get_x(), atom.get_y(), atom.get_z()]) * principal_axes).getA1() ** 5 * atom.get_mass()
if fifthmoment[0] < 1e-6:
principal_axes[:, 0] *= -1
if fifthmoment[1] < 1e-6:
principal_axes[:, 1] *= -1
if numpy.linalg.det(principal_axes) < 0:
principal_axes[:, 2] *= -1
# update the position of each atom
for atom in self.get_atoms():
x, y, z = (numpy.matrix([atom.get_x(), atom.get_y(), atom.get_z()]) * principal_axes).getA1()
atom.set_xyz(float(x), float(y), float(z))
def rmsd(self, other):
"""
Computes the RMSD between the positions of the atoms in two molecules
molecules must have the same fragments and atoms or an InconsistentValueError will be raised.
generally, you should make sure that both molecules have been moved to their center of mass and rotated on their principal axes.
Args:
other - the molecule to compare this one to
Returns:
The square-root of the mean squared distance between the atoms in this molecule and the other
"""
        # first make sure these molecules have the same number of atoms
if self.get_num_atoms() != other.get_num_atoms():
raise InconsistentValueError("number of atoms in self", "number of atoms in other", self.get_num_atoms(), other.get_num_atoms(), "number of atoms in each molecule must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
squared_distance = 0
# loop thru every pair of atoms in the two molecules
for this_atom, other_atom in zip(self.get_atoms(), other.get_atoms()):
# check to make sure that these atoms are the same type
if this_atom.get_name() != other_atom.get_name():
raise InconsistentValueError("self atom symbol", "other atom symbol", this_atom.get_name(), other_atom.get_name(), "symbols must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
# add this atom pair's contribution to the squared distance
squared_distance += this_atom.distance(other_atom) ** 2
# compute rmsd as sqrt of mean squared distance
return math.sqrt(squared_distance / self.get_num_atoms())
def rmsd2(self, other):
self_atoms = self.get_atoms()
other_atoms = other.get_atoms()
rmsds = []
for order in itertools.permutations(other_atoms):
squared_distance = 0
# loop thru every pair of atoms in the two molecules
for this_atom, other_atom in zip(self.get_atoms(), order):
# add this atom pair's contribution to the squared distance
squared_distance += this_atom.distance(other_atom) ** 2
rmsds.append(math.sqrt(squared_distance / self.get_num_atoms()))
return min(rmsds)
def distancermsd(self, other_molecule):
"""
Computes the RMSD of intramolecular interatomic distances in the two molecules
molecules must have the same fragments and atoms or an InconsistentValueError will be raised.
generally, you should make sure that both molecules have been moved to their center of mass and rotated on their principal axes.
Note:
            this function is distinct from rmsd() because this function takes the rmsd of the differences between the distances between pairs of atoms within each molecule
            while rmsd() takes the rmsd of the distance between the positions of the same atoms in each molecule.
        Args:
            other_molecule - the molecule to compare this one to
Returns:
the square-root of the mean squared difference in the distance between each pair of atoms in this molecule and the other
"""
        # first make sure these molecules have the same number of atoms
if self.get_num_atoms() != other_molecule.get_num_atoms():
raise InconsistentValueError("number of atoms in self", "number of atoms in other", self.get_num_atoms(), other_molecule.get_num_atoms(), "number of atoms in each molecule must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
squared_distance_difference = 0
# loop over each pair of atoms
for atom_index, this_atom1, other_atom1 in zip(range(self.get_num_atoms()), self.get_atoms(), other_molecule.get_atoms()):
for this_atom2, other_atom2 in zip(self.get_atoms()[atom_index + 1:], other_molecule.get_atoms()[atom_index + 1:]):
# check to make sure that the atom1s have the same type
if this_atom1.get_name() != other_atom1.get_name():
raise InconsistentValueError("self atom symbol", "other atom symbol", this_atom.get_name(), other_atom.get_name(), "symbols must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
# check to make sure that the atom2s have the same type
if this_atom2.get_name() != other_atom2.get_name():
raise InconsistentValueError("self atom symbol", "other atom symbol", this_atom.get_name(), other_atom.get_name(), "symbols must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
# add these atom pairs' contribution to the squared distance difference
squared_distance_difference += (this_atom1.distance(this_atom2) - other_atom1.distance(other_atom2)) ** 2
# compute the rmsd of the sqrt of mean squared distance difference
return math.sqrt(squared_distance_difference / self.get_num_atoms())
def compare(self, other, cutoff_rmsd = 0.1):
"""
        Compares two molecules to see if they are similar to each other below a cutoff rmsd
Args:
other - the molecule to compare this one to
            cutoff_rmsd - the rmsd level at which False will be returned, default is 0.1
Returns:
True if the rmsd between this molecule and the other is less than cutoff_rmsd, otherwise False
Always returns False if the two molecules do not have the same fragments and atoms
"""
try:
return self.rmsd(other) < cutoff_rmsd
except InconsistentValueError:
return False
def get_excluded_pairs(self, max_exclusion = 3):
"""
Gets the excluded pairs of this molecule
        Args:
            max_exclusion - the maximum exclusion level to compute, default is 3
        Returns:
            a list in the format [excluded_12, excluded_13, excluded_14, ..., excluded_1x] where each excluded_1x is a list of lists of each fragment's excluded 1x pairs
"""
excluded_pairs = [[] for i in range(max_exclusion)]
for index, fragment in enumerate(self.get_fragments()):
frag_excluded_pairs = fragment.get_excluded_pairs(max_exclusion)
for exclusion_index in range(max_exclusion):
excluded_pairs[exclusion_index].append(frag_excluded_pairs[exclusion_index])
return excluded_pairs
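    # Shape sketch: for a 2-fragment molecule with max_exclusion=3 the result is
    # [[frag0_12, frag1_12], [frag0_13, frag1_13], [frag0_14, frag1_14]],
    # i.e. excluded_pairs[exclusion_index][fragment_index].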
def to_xyz(self, fragments=None, cp=False, num_digits=14):
"""
Gets a string representation of the fragments in this molecule in the xyz file format
Args:
            fragments - list of fragment indices to include in the string; optional, default is to include all fragments.
cp - if True then fragments not specified in the fragments list will be included as ghost fragments.
num_digits - The number of digits after the decimal point to include when writing atom coordinates.
Default: 14 Maximum: 14
Returns:
String representation of the fragments in this molecule in the xyz format
"""
# by default, use all fragments
        if fragments is None:
fragments = range(self.get_num_fragments())
string = ""
for index in range(len(self.get_fragments())):
if index in fragments:
string += self.get_fragments()[index].to_xyz(num_digits=num_digits)
elif cp:
string += self.get_fragments()[index].to_ghost_xyz(num_digits=num_digits)
return string[:-1] # removes last character of string (extra newline)
def to_standard_xyz(self, fragments=None, cp=False, num_digits=14):
"""
Gets a string representation of the fragments in this molecule in the xyz file format.
The order of the fragments and atoms is in standard order.
Args:
            fragments - list of fragment indices to include in the string; optional, default is to include all fragments.
cp - if True then fragments not specified in the fragments list will be included as ghost fragments.
num_digits - The number of digits after the decimal point to include when writing atom coordinates.
Default: 14 Maximum: 14
Returns:
String representation of the fragments in this molecule in the xyz format in standard order.
"""
# by default, use all fragments
        if fragments is None:
fragments = range(self.get_num_fragments())
string = ""
for index in range(len(self.get_standard_order())):
if index in fragments:
string += self.get_standard_order()[index].to_standard_xyz(num_digits=num_digits)
elif cp:
string += self.get_standard_order()[index].to_standard_ghost_xyz(num_digits=num_digits)
return string[:-1] # removes last character of string (extra newline)
    def log_frag_energy(self):
        """
        Returns a string containing indices and energies of n-body fragment
        combinations in the format of the log file
        """
        string = ""
# for each item in energies, add its combination indicies and energy
# to the output string
for combination in self.energies.keys():
string += "E{}: {}\n".format(combination, "%.8f"%self.energies[combination])
return string
    def log_mb_energy(self, limit):
        """
        Returns a string containing the many body interaction energies, in the
        format of the log file
        """
        string = ""
for index in range(limit):
string += "V_{}B: {}\n".format(index + 1, "%.8f"%self.mb_energies[index])
return string
    def clear(self):
        """
        Clears the energies, nmer_energies, and mb_energies fields to make way for
        new calculations
        """
self.energies = {}
self.nmer_energies = []
self.mb_energies = []
def get_SHA1(self):
"""
Generates the SHA1 hash of this molecule. Uses atoms, spin multiplicity and charge. Can be used to uniquely identify this molecule.
Sorts fragments and atoms into standard order first, so the same molecule specified differently will have the same hash
Args:
None
Returns:
SHA1 hash of this molecule
"""
hash_string = self.get_name() + "\n" + self.to_xyz(num_digits=5) + "\n" + str(self.get_charge()) + "\n" + str(self.get_spin_multiplicity())
return sha1(hash_string.encode()).hexdigest()
def get_symbols(self):
"""
Gets the atomic symbols of the atoms in this molecule as a list
Args:
None
Returns:
list of the atomic symbols of the atoms in this molecule
"""
return [atom.get_name() for atom in self.get_atoms()]
def get_coordinates(self):
"""
Gets the positions of the atoms in this molecule as a list of 3-tuples
Args:
None
Returns:
            list of the positions of the atoms in this molecule
"""
return [(atom.get_x(), atom.get_y(), atom.get_z()) for atom in self.get_atoms()]
@staticmethod
def read_xyz(string, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
"""
Reads fragments from an xyz string and creates a new Molecule.
Args:
string - The xyz format string. Including the atom count line and comment line.
atoms_per_fragment - List containing the number of atoms in each fragment.
name_per_fragment - List containing the names of each fragment.
charge_per_fragment - List containing the charges of each fragment.
spin_multiplicity_per_fragment - List containing the spin multiplicities of each fragment.
symmetry_per_fragment - List containing the symmetries of each fragment, in format A1B2.
SMILE_per_fragment - List containing the SMILE strings of each fragment.
Returns:
The new Molecule.
"""
# Error checking to make sure all lists passed in are the same length
if not len(atoms_per_fragment) == len(symmetry_per_fragment):
raise InconsistentValueError("atoms per fragment", "symmetry per fragment", atoms_per_fragment, symmetry_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(charge_per_fragment):
raise InconsistentValueError("atoms per fragment", "charges per fragment", atoms_per_fragment, charge_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(spin_multiplicity_per_fragment):
raise InconsistentValueError("atoms per fragment", "spin multiplicities per fragment", atoms_per_fragment, spin_multiplicity_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(name_per_fragment):
raise InconsistentValueError("atoms per fragment", "fragment names", atoms_per_fragment, name_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(SMILE_per_fragment):
raise InconsistentValueError("atoms per fragment", "fragment SMILES", atoms_per_fragment, SMILE_per_fragment, "lists must be same length")
# break the input string apart along \n characters
lines = string.splitlines()
# read the total number of atoms from the first line of the xyz
try:
atom_total = int(lines[0])
except ValueError:
raise XYZFormatError("{}".format(lines[0]), "line should contain a single integer")
# make sure that the total number of atoms indicated by the xyz file matches the number of atoms indicated per fragment
if atom_total != sum(atoms_per_fragment):
raise InconsistentValueError("total atoms in xyz string", "fragments", atom_total, atoms_per_fragment, "fragments list must sum to total atoms from input xyz string")
# remove the atom total and comment lines from the lines list
lines = lines[2:]
# make sure that there are a number of lines equal to the total number of atoms
if len(lines) != atom_total:
raise InconsistentValueError("total atoms in xyz string", "atom lines in xyz string", atom_total, len(lines), "number of total atoms indicated in xyz string should match number of atom lines")
fragments = []
# loop over each item in the lists, each iteration containing the information to assemble one fragment
for num_atoms, name, charge, spin, symmetry, SMILE in zip(atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
#
fragments.append(Fragment.read_xyz("\n".join(lines[:num_atoms]), name, charge, spin, SMILE, symmetry))
# remove a number of lines from the lines list equal to the number used in the Fragment that was just read
lines = lines[num_atoms:]
return Molecule(fragments)
@staticmethod
def read_xyz_file(file, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
"""
Reads fragments from an xyz file and creates a new Molecule.
Will attempt to read lines from the given file handle, raising a StopIteration exception if called on an empty file.
Args:
file - The file to read from.
atoms_per_fragment - List containing the number of atoms in each fragment.
name_per_fragment - List containing the names of each fragment.
charge_per_fragment - List containing the charges of each fragment.
spin_multiplicity_per_fragment - List containing the spin multiplicities of each fragment.
symmetry_per_fragment - List containing the symmetries of each fragment, in format A1B2.
SMILE_per_fragment - List containing the SMILE strings of each fragment.
Returns:
The new Molecule.
"""
# build the xyz string
string = ""
# read blank lines until a non-blank line is found.
        while True:
line = file.readline()
# If line is EOF, then raise StopIteration to say that there are no more molecules in this file.
if line == "":
raise StopIteration
# If line is not a blank line, stop reading blank lines.
            # identity comparison on strings is unreliable; compare by value
            if line != "\n":
break
# add the atom count line to the string.
string += line
# read the comment line.
string += file.readline()
for i in range(sum(atoms_per_fragment)):
line = file.readline()
# if the line is EOF, we have reached EOF mid-parse!
if line == "":
raise XYZFormatError("ran out of lines to read from xyz file {} in the middle of a molecule".format(file.name), "make sure atoms_per_fragment, the atom count line in your xyz file, and the number of atom lines in your xyz file all agree.")
string += line
return Molecule.read_xyz(string, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment)
@staticmethod
def read_xyz_path(path, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
"""
Reads fragments from an xyz file indicated by a filepath and constructs a new Molecule.
Will attempt to read lines from the file at the given file path, raising an exception if it runs out of lines mid-parse.
Args:
path - The path to the file to read from.
atoms_per_fragment - List containing the number of atoms in each fragment.
name_per_fragment - List containing the names of each fragment.
charge_per_fragment - List containing the charges of each fragment.
spin_multiplicity_per_fragment - List containing the spin multiplicities of each fragment.
symmetry_per_fragment - List containing the symmetries of each fragment, in format A1B2.
SMILE_per_fragment - List containing the SMILE strings of each fragment.
Returns:
The new Molecule.
"""
with open(path, "r") as file:
try:
return Molecule.read_xyz_file(file, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment)
# if the call to read_xyz_file() raises a StopIteration, it means the file was empty
except StopIteration:
raise XYZFormatError("xyz file {} file is empty".format(file.name), "make sure the xyz file has at least 1 molecule in it")
@staticmethod
def read_xyz_direct(string, settings = None):
"""
Reads fragments from a string and constructs a new Molecule.
Will infer a single fragment with charge 0, spin 1, no symmetry, and a SMILE with atoms
connected in the order they appear in the string if settings is None.
Args:
string - The string to read from.
settings - Settings object containing information about the molecule.
Returns:
The new Molecule.
"""
# if settings is None, then infer default values for molecule attributes
if settings is None:
name_per_fragment = ["noname"]
charge_per_fragment = [0]
spin_multiplicity_per_fragment = [1]
total_atoms = int(string.splitlines()[0])
atoms_per_fragment = [total_atoms]
symmetry = ""
symmetry_class = 65
# loop over each atom assigning it a unique symmetry class
for atom_index in range(total_atoms):
symmetry += "{}1".format(chr(symmetry_class))
symmetry_class += 1
symmetry_per_fragment = [symmetry]
SMILE = ""
for line in string.splitlines()[2:]:
SMILE += "[" + line.split()[0] + "]"
SMILE_per_fragment = [SMILE]
# if settings is defined, read values from xyz file
else:
atoms_per_fragment = [int(count) for count in settings.get("molecule", "fragments").split(",")]
name_per_fragment = settings.get("molecule", "names").split(",")
charge_per_fragment = [int(charge) for charge in settings.get("molecule", "charges").split(",")]
spin_multiplicity_per_fragment = [int(spin) for spin in settings.get("molecule", "spins").split(",")]
symmetry_per_fragment = settings.get("molecule", "symmetry").split(",")
SMILE_per_fragment = settings.get("molecule", "SMILES").split(",")
return Molecule.read_xyz(string, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment)
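    # Inference sketch: with settings=None, a 3-atom water xyz is read as a
    # single fragment with charge 0, spin 1, symmetry "A1B1C1", and SMILE
    # "[O][H][H]" (symbols taken from the atom lines in order).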
@staticmethod
def read_xyz_file_direct(file, settings = None):
"""
Reads fragments from a file into a new Molecule.
Will infer a single fragment with charge 0, spin 1, no symmetry, and a SMILE with atoms
connected in the order they appear in the string if settings is None.
Args:
file - The file to read from.
settings - Settings object containing information about the molecule.
Returns:
The new Molecule.
"""
if settings is None:
position = file.tell()
atoms_per_fragment = [int(file.readline())]
file.seek(position)
else:
atoms_per_fragment = [int(count) for count in settings.get("molecule", "fragments").split(",")]
# build the xyz string
string = ""
# read lines from the file equal to the number needed for one molecule
for line_count in range(2 + sum(atoms_per_fragment)):
line = file.readline()
# if the line is an empty string, then we have reached end of file mid parse
if line == "":
if line_count == 0:
raise StopIteration # if the first line is empty, raise StopIteration to indicate that this file is out of molecules to parse
raise XYZFormatError("ran out of lines to read from xyz file {} in the middle of a molecule".format(file.name), "make sure the last molecule in the file has a comment line and a number of atoms equal to the amount indicated in the atom count line.")
string += line
return Molecule.read_xyz_direct(string, settings)
@staticmethod
def read_xyz_path_direct(path, settings = None):
"""
        Reads fragments from an xyz file indicated by a path into a new Molecule.
Will infer a single fragment with charge 0, spin 1, no symmetry, and a SMILE with atoms
connected in the order they appear in the string if settings is None.
Args:
path - The path to read from.
settings - Settings object containing information about the molecule.
Returns:
The new Molecule.
"""
with open(path, "r") as file:
try:
return Molecule.read_xyz_file_direct(file, settings)
# if the call to read_xyz_file() raises a StopIteration, it means the file was empty
except StopIteration:
raise XYZFormatError("xyz file {} file is empty".format(file.name), "make sure the xyz file has at least 1 molecule in it")
@staticmethod
def read_psi4_string(string):
"""
Reads the string outputted by a call to psi4.molecule.save_string_xyz() into a new Molecule.
Molecules created this way will not have name or symmetry saved correctly, because this information is not available
from the output of psi4.molecule.save_string_xyz(). As a result certain operations will not work on this molecule, for example
do not add this molecule to a database or attempt to generate its polynomial input format in style A1B2.
Args:
string - String output of psi4.molecule.save_string_xyz().
Returns:
The new Molecule.
"""
# divide the string along \n characters
lines = string.splitlines()
# read charge and spin from first line of input string, casting each to an int
try:
charge, spin_multiplicity = [int(value) for value in lines[0].split()]
except ValueError:
raise XYZFormatError(lines[0], "line format should be 'charge spin_multiplicity', make sure you are passing in the output of psi4.molecule.save_string_xyz()")
# calculate total atoms in this molecule
total_atoms = len(lines) - 1
# these fields do not matter
name = "unnamed"
# used to build the symmetry string for the fragment
symmetry = ""
# keeps track of which symmetry_class to use for the next atom
symmetry_class = 65
# loop over each atom assigning it a unique symmetry class
for atom_index in range(total_atoms):
symmetry += "{}1".format(chr(symmetry_class))
symmetry_class += 1
SMILE = ""
for line in string.splitlines()[1:]:
SMILE += line.split()[0]
return Molecule([Fragment.read_xyz("\n".join(lines[1:]), name, charge, spin_multiplicity, SMILE, symmetry)])
def get_standard_order(self):
return sorted(self.fragments, key = lambda x: x.get_name())
def get_config_molecule_section(self):
# TODO: update SMILE
fragments_list = self.get_standard_order()
names = "{}\n".format(",".join(fragment.get_name() for fragment in fragments_list))
fragments = "{}\n".format(",".join(str(fragment.get_num_atoms()) for fragment in fragments_list))
charges = "{}\n".format(",".join(str(fragment.get_charge()) for fragment in fragments_list))
spins = "{}\n".format(",".join(str(fragment.get_spin_multiplicity()) for fragment in fragments_list))
symmetry = "{}\n".format(",".join(fragment.get_standard_symmetry() for fragment in fragments_list))
SMILES = "{}\n".format(",".join(fragment.get_standard_SMILE() for fragment in fragments_list))
next_letter = "A"
for i in range(len(symmetry)):
if symmetry[i].isupper():
symmetry = symmetry[:i] + next_letter + symmetry[i + 1:]
next_letter = chr(ord(next_letter) + 1)
return names, fragments, charges, spins, symmetry, SMILES
def confirm_standard_order(self):
"""
        Checks if this molecule is in standard order.
        Args:
            None.
        Returns:
            True if this molecule's fragments and atoms are in standard order.
False otherwise.
"""
if not self.get_standard_order() == self.get_fragments():
return False
for fragment in self.get_fragments():
if not fragment.confirm_standard_order():
return False
return True
def get_standard_copy(self):
"""
Gets a copy of this molecule, with fragments and atoms in standard order.
Args:
None.
Returns:
A copy of this molecule in standard order.
"""
order, frag_orders = self.get_standard_order_order()
return self.get_reordered_copy(order, frag_orders, [frag.get_standard_SMILE() for frag in self.get_standard_order()])
def get_reorder_copy(self, names, SMILES):
"""
Gets a copy of this molecule, with fragments in the order specified by the names list and
atoms in the order specified in the SMILE strings.
Args:
names - names of the fragments in the new order.
            SMILES - list of SMILE strings corresponding to the new order of fragments.
Order the atoms of each fragment to match the order in these SMILE strings.
Returns:
A copy of this molecule in the order specified by names and SMILES.
"""
order, frag_orders = self.get_reorder_order(names, SMILES)
return self.get_reordered_copy(order, frag_orders, SMILES)
def get_copy(self):
"""
Gets a copy of this molecule.
Args:
None.
Returns:
An exact copy of this molecule.
"""
return self.get_reorder_copy([fragment.get_name() for fragment in self.get_fragments()],
[fragment.get_SMILE() for fragment in self.get_fragments()])
def get_standard_order_order(self):
"""
Gets the order the fragments and atoms in this molecule must be in to be in standard order.
Args:
None.
Returns:
(order, frag_orders)
            order - A list of indices, where order[i] = index of fragment that should be in index i to put the molecule in standard order.
frag_orders - A list of lists, where each list corresponds to one fragment.
where frag_orders[j][i] = index of atom that should be in index i to put the fragment j of the new order in standard order.
"""
order = [self.get_fragments().index(frag) for frag in self.get_standard_order()]
frag_orders = [frag.get_standard_order_order() for frag in [self.get_fragments()[index] for index in order]]
return order, frag_orders
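    # Index-permutation sketch: if the stored fragments are [B, A] and the
    # standard (sorted-by-name) order is [A, B], then order == [1, 0];
    # frag_orders holds the analogous per-fragment atom permutations.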
def get_reorder_order(self, names, SMILES):
"""
Gets the order the fragments and atoms in this molecule must be in to match the SMILE string.
Args:
names - order the fragments to match the order in this list.
            SMILES - order the atoms of each fragment to match the orders in these SMILE strings.
Returns:
(order, frag_orders)
            order - A list of indices, where order[i] = index of fragment that should be in index i to put the fragments in the order specified.
frag_orders - A list of lists, where each list corresponds to one fragment.
where frag_orders[j][i] = index of atom that should be in index i to put the fragment j of the new order in the order specified.
"""
order = []
for name in names:
for index, fragment in enumerate(self.get_fragments()):
if fragment.get_name() == name and index not in order:
order.append(index)
frag_orders = [frag.get_reorder_order(SMILE) for frag, SMILE in zip([self.get_fragments()[index] for index in order], SMILES)]
return order, frag_orders
def get_reordered_copy(self, order, frag_orders, SMILES):
"""
        Gets a copy of this molecule with the fragments and atoms reordered according to the input.
Args:
order - New order of the fragments.
frag_orders - New order of the atoms within each fragment.
SMILES - new SMILE strings for each of the fragments.
Returns:
A copy of this molecule, reordered to match the input.
"""
fragments = []
prev_frag_name = None
next_symmetry = 'A'
symmetry_dict = {}
for fragment, frag_order, SMILE in zip([self.get_fragments()[index] for index in order], frag_orders, SMILES):
prev_frag_name = fragment.get_name()
fragments.append(fragment.get_reordered_copy(frag_order, SMILE))
for atom in fragments[-1].get_atoms():
try:
symmetry = symmetry_dict[atom.get_symmetry_class()]
                except KeyError:
symmetry = next_symmetry
symmetry_dict[atom.get_symmetry_class()] = symmetry
next_symmetry = chr(ord(next_symmetry) + 1)
atom.set_symmetry_class(symmetry)
return Molecule(fragments)
def __eq__(self, other):
if not self.get_name() == other.get_name():
return False
for self_frag, other_frag in zip(self.get_fragments(), other.get_fragments()):
if self_frag != other_frag:
return False
return True
def __ne__(self, other):
return not self == other
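# Minimal usage sketch (illustrative; `frags` is a hypothetical list of
# Fragment objects built elsewhere in this module):
# mol = Molecule(frags)
# copy = mol.get_copy()                               # same fragment/atom order
# order, frag_orders = mol.get_standard_order_order() # permutations to standard order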
|
paesanilab/MB-Fit
|
mbfit/molecule/molecule.py
|
molecule.py
|
py
| 44,981 |
python
|
en
|
code
| 14 |
github-code
|
6
|
[
{
"api_name": "fragment.get_name",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "fragment.get_symmetry",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "fragment.get_name",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "fragment.get_atoms",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "fragment.get_name",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "fragment.get_name",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "fragment.get_atoms",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "fragment.get_name",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "fragment.get_atoms",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "fragment.get_num_atoms",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "fragment.translate",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "fragment.rotate",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.eigh",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "numpy.argsort",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.det",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 348,
"usage_type": "attribute"
},
{
"api_name": "numpy.matrix",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 466,
"usage_type": "name"
},
{
"api_name": "fragment.get_excluded_pairs",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 642,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 648,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 650,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.XYZFormatError",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.InconsistentValueError",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "fragment.Fragment.read_xyz",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "fragment.Fragment",
"line_number": 678,
"usage_type": "name"
},
{
"api_name": "mbfit.exceptions.XYZFormatError",
"line_number": 732,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.XYZFormatError",
"line_number": 764,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.XYZFormatError",
"line_number": 856,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.XYZFormatError",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "mbfit.exceptions.XYZFormatError",
"line_number": 910,
"usage_type": "call"
},
{
"api_name": "fragment.Fragment.read_xyz",
"line_number": 936,
"usage_type": "call"
},
{
"api_name": "fragment.Fragment",
"line_number": 936,
"usage_type": "name"
},
{
"api_name": "fragment.get_name",
"line_number": 947,
"usage_type": "call"
},
{
"api_name": "fragment.get_num_atoms",
"line_number": 948,
"usage_type": "call"
},
{
"api_name": "fragment.get_charge",
"line_number": 949,
"usage_type": "call"
},
{
"api_name": "fragment.get_spin_multiplicity",
"line_number": 950,
"usage_type": "call"
},
{
"api_name": "fragment.get_standard_symmetry",
"line_number": 951,
"usage_type": "call"
},
{
"api_name": "fragment.get_standard_SMILE",
"line_number": 952,
"usage_type": "call"
},
{
"api_name": "fragment.confirm_standard_order",
"line_number": 977,
"usage_type": "call"
},
{
"api_name": "fragment.get_name",
"line_number": 1023,
"usage_type": "call"
},
{
"api_name": "fragment.get_SMILE",
"line_number": 1024,
"usage_type": "call"
},
{
"api_name": "fragment.get_name",
"line_number": 1061,
"usage_type": "call"
},
{
"api_name": "fragment.get_name",
"line_number": 1089,
"usage_type": "call"
},
{
"api_name": "fragment.get_reordered_copy",
"line_number": 1091,
"usage_type": "call"
}
] |
73750770109
|
from operator import index
from meal import Meal
import json
import sqlite3
class Meal_Data:
"""Data layer to be used in conjunction with the Meal class"""
def __init__(self, filename = "foodinfo.json"):
"""Initializes Meal_Data"""
self.filename = filename
def meal_add(self, meal:Meal):
"""Stores an instance of the Meal class inside foodinfo.json"""
dupe_check = self.meal_find(meal.name)
if dupe_check is None:
meals = self.meal_get()
meals.append(meal)
self.meal_save(meals)
else:
error_message = f"A meal by the name '{meal.name.title()}' already exists."
print(error_message)
return
def meal_save(self, meals:list) -> None:
"""Saves a list of meals to the JSON"""
jsonmeals = []
# -- Following for loop converts objects in meal list (jsonmeals) into dictionaries --
for mealobj in meals:
jsonmeal = mealobj.as_dict()
jsonmeals.append(jsonmeal)
# -- Following two lines converts the list of dictionaries made above into JSON format and saves to foodinfo.json --
# TODO: Handle Missing File
with open(self.filename, 'w') as f:
json.dump(jsonmeals, f, indent=2)
# -- Next two lines print out to string the list of Meals in JSON format --
# jsondump = json.dumps(jsonmeals, indent=2)
# print(jsondump)
return
# -- TODO : make a function to delete a Meal object that is stored inside foodinfo.json --
def meal_del(self, name:str):
"""Removes an instance of the Meal class inside foodinfo.json"""
meals = self.meal_get()
# Rebuild the list without the matching meal(s); deleting from the list
# while iterating over it would skip elements.
meals = [meal for meal in meals if meal.name != name]
self.meal_save(meals)
def meal_get(self) -> list[Meal]:
"""Returns a list of meals"""
try:
f = open(self.filename)
# TODO : If the foodinfo.json is not found it should make a .json file by that name --
except FileNotFoundError:
error_message = f"\nFile {self.filename} was not found.\n"
print(error_message)
return []
try:
jsondata = json.load(f)
# -- When the following error occurs, the list of meals is simply left as an empty list --
except json.JSONDecodeError:
# create an empty jsondata for the following loop
jsondata = []
# Close file handle
f.close()
# -- The following for loop takes the JSON objects found in foodinfo.json and turns them into Python objects --
# -- and then appends those objects to the meals list --
meals = []
for item in jsondata:
meal = Meal(item['name'],item['protein'],item['cost'],item['difficulty'])
meals.append(meal)
return meals
def meal_find(self, name:str) -> Meal:
"""Returns a specific meal object when searching for a meal by name"""
meals = self.meal_get()
# -- The following for loop cycles through the meals list looking for a matching meal name
# -- If the meal name inquired is not found - the loop will return None
for obj in meals:
if obj.name == name:
return obj
return None
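# Minimal usage sketch (illustrative; assumes Meal(name, protein, cost, difficulty)
# as used in meal_get above):
# md = Meal_Data()
# md.meal_add(Meal("pasta", 20, 5.0, "easy"))
# md.meal_find("pasta")     # -> Meal instance, or None if absent
# md.meal_del("pasta")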
|
zaepho/DinnerDecider
|
mealdata.py
|
mealdata.py
|
py
| 3,719 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "meal.Meal",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "meal.name",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "meal.name.title",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "meal.name",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "meal.name",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "operator.index",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "operator.index",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "meal.Meal",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "meal.Meal",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "meal.Meal",
"line_number": 99,
"usage_type": "name"
}
] |
29827630738
|
#This file will only be needed to run
import pandas as pd
import numpy as np
from datetime import date
import datetime
import os
class box:
def __init__(self):
self.task_done = ""
self.no_of_day = (datetime.date.today() - date(1997, 8, 21)).days
self.dest = ""
self.wake_up = "" #should change in future
self.sleep = ""
self.social_media_time = 0
self.self_time = ""
self.breakfast = False
self.food_type = False
self.GRE_quant = False
self.GRE_quant_count = 0
self.GRE_verbal = False
self.GRE_verbal_count = 0
self.ML = False
self.articles_read = 0
self.words_learned = 0
self.anger = False
self.exercise = False
self.sad_day = False
self.happy_day = False
self.got_love = False
self.pain = False
def log(self):
print("Enter your daily achievement: ")
self.task_done = str(input())
print("Did you go anywhere? (Leave blank if nowhere) :")
self.dest = str(input())
print("What time did you wake up? : ")
self.wake_up = str(input())
print("What time did you go to sleep? : ")
self.sleep = str(input())
print("How many hours on social media did you spend?")
self.social_media_time = float(input())
print("How many hours for self time did you take out?")
self.self_time = float(input())
#Health
print("Did you have breakfast? :")
self.breakfast = self._conv_bool(input())
print("Did I eat sufficiently? :")
self.food_type = self._conv_bool(input())
#Studies
print("Did you study Machine Learning? :")
self.ML = self._conv_bool(input())
#GREStudies
print("Did you study GRE_quant today? :")
self.GRE_quant = self._conv_bool(input())
self.GRE_quant_count = self._get_GRE(self.GRE_quant)
print("Did you study GRE verbal today? :")
self.GRE_verbal = self._conv_bool(input())
self.GRE_verbal_count = self._get_GRE(self.GRE_verbal)
print("How many articles did you read today? :")
self.articles_read = int(input())
print("How many words did you learn today? :")
self.words_learned = int(input())
#Day Review
print("Did you feel anger today? :")
self.anger = self._conv_bool(input())
print("Did you feel sad today? :")
self.sad_day = self._conv_bool(input())
print("Were you happy today? :")
self.happy_day = self._conv_bool(input())
print("Did someone love you today? :")
self.got_love = self._conv_bool(input())
print("Did you exercise today? :")
self.exercise = self._conv_bool(input())
print("Was your body in pain? :")
self.pain = self._conv_bool(input())
def _get_GRE(self, ip):
if self._conv_bool(ip):
print("How many questions did you solve?")
return int(input())
else:
return 0
def _conv_bool(self, x):
if x == 'Y' or x == 'y':
return True
else :
return False
if __name__ == '__main__':
if not os.path.exists('./logs.csv'):
df = pd.DataFrame(data = None, columns =
['no_of_day', 'task_done', 'destination',
'wake_up_time', 'sleep_time', 'social_media_time',
'self_time', 'breakfast', 'food_type',
'GRE_quant', 'GRE_quant_count',
'GRE_verbal', 'GRE_verbal_count',
'Machine_Learning', 'articles_read', 'words_learned',
'anger', 'exercise', 'sad_day',
'happy_day', 'got_love', 'pain'])
print("File doesn't exist")
print(df.head())
else:
df = pd.read_csv('./logs.csv')
print('File exists')
print(df.head())
b = box()
b.log()
df_2 = pd.DataFrame(data = [[b.no_of_day, b.task_done, b.dest,
b.wake_up, b.sleep, b.social_media_time,
b.self_time, b.breakfast, b.food_type,
b.GRE_quant , b.GRE_quant_count,
b.GRE_verbal, b.GRE_verbal_count,
b.ML, b.articles_read, b.words_learned,
b.anger, b.exercise, b.sad_day,
b.happy_day, b.got_love, b.pain]],
columns = [
'no_of_day', 'task_done', 'destination',
'wake_up_time', 'sleep_time', 'social_media_time',
'self_time', 'breakfast', 'food_type',
'GRE_quant', 'GRE_quant_count',
'GRE_verbal', 'GRE_verbal_count',
'Machine_Learning', 'articles_read', 'words_learned',
'anger', 'exercise', 'sad_day', 'happy_day',
'got_love', 'pain'])
result = pd.concat([df, df_2], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
result.to_csv('./logs.csv', index = False)
result.head()
print(os.getcwd())
|
Geeks-Sid/habit_organizer
|
main.py
|
main.py
|
py
| 4,237 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.date.today",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 138,
"usage_type": "call"
}
] |
40129830394
|
"""
Get Distances of Shortest Path (Dijkstra)
edges: dict<from:int, dict<to:int, cost:number>>
"""
from heapq import heappush, heappop
def one_to_one(
start, goal, num_vertexes, edges,
INF=9223372036854775807, UNREACHABLE=-1):
distances = [INF] * num_vertexes
distances[start] = 0
queue = [(0, start)]
while queue:
d, frm = heappop(queue)
if distances[frm] < d:
# already know shorter path
continue
if frm == goal:
return d
for to in edges[frm]:
new_cost = distances[frm] + edges[frm][to]
if distances[to] > new_cost:
# found shorter path
distances[to] = new_cost
heappush(queue, (distances[to], to))
return UNREACHABLE
def one_to_all(
start, num_vertexes, edges,
INF=9223372036854775807):
distances = [INF] * num_vertexes
distances[start] = 0
queue = [(0, start)]
while queue:
d, frm = heappop(queue)
if distances[frm] < d:
# already know shorter path
continue
for to in edges[frm]:
new_cost = distances[frm] + edges[frm][to]
if distances[to] > new_cost:
# found shorter path
distances[to] = new_cost
heappush(queue, (distances[to], to))
return distances
def one_to_all_bfs(start, num_vertexes, edges, INF=9223372036854775807):
"""
when all cost is 1, BFS is faster (ABC170E)
"""
distances = [INF] * num_vertexes
distances[start] = 0
to_visit = [start]
while to_visit:
next_visit = []
for frm in to_visit:
for to in edges[frm]:
new_cost = distances[frm] + 1
if new_cost < distances[to]:
distances[to] = new_cost
next_visit.append(to)
to_visit = next_visit
return distances
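# Minimal usage example (illustrative): a path graph 0 - 1 - 2 with unit costs.
# edges = {0: {1: 1}, 1: {0: 1, 2: 1}, 2: {1: 1}}
# one_to_one(0, 2, 3, edges)      # -> 2
# one_to_all(0, 3, edges)         # -> [0, 1, 2]
# one_to_all_bfs(0, 3, edges)     # -> [0, 1, 2] (all costs are 1)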
# --- end of library ---
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(N, M, edges):
INF = 9223372036854775807
ret = INF
for start in range(N):
distances = one_to_all(start, N, edges)
debug(distances, msg=":distances")
ret = min(ret, max(distances))
return ret
def main():
# verified https://atcoder.jp/contests/abc012/tasks/abc012_4
N, M = map(int, input().split())
from collections import defaultdict
edges = defaultdict(dict)
for _i in range(M):
A, B, T = map(int, input().split())
edges[A - 1][B - 1] = T
edges[B - 1][A - 1] = T
print(solve(N, M, edges))
# tests
T1 = """
3 2
1 2 10
2 3 10
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
10
"""
T2 = """
5 5
1 2 12
2 3 14
3 4 7
4 5 9
5 1 18
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
26
"""
T3 = """
4 6
1 2 1
2 3 1
3 4 1
4 1 1
1 3 1
4 2 1
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
1
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
# end of snippets/main.py
|
nishio/atcoder
|
libs/dijkstra.py
|
dijkstra.py
|
py
| 3,668 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "heapq.heappop",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "doctest.testmod",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "doctest.run_docstring_examples",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "sys.setrecursionlimit",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 174,
"usage_type": "call"
}
] |
27330667755
|
import requests
import time
from bs4 import BeautifulSoup
import urllib.request
import re
import json
start_time = time.time()
link_3 = []
link_4 = []
link_5 = []
link_6 = []
links = []
g = ""
b = ""
d = ""
y = ""
ya = ""
ask = ""
domain = ""
emails = []
new_emails = []
mails = []
def crawl(request_url):
try:
response = requests.get(request_url)
new_emails = re.findall(r"[a-z0-9\.\-+_]+@" + re.escape(domain), response.text)  # re.escape keeps the domain's dots literal
if new_emails:
emails.append(new_emails)
except requests.RequestException:
pass
return emails
def get_links(url):
link_result = []
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
html_page = response.read()
soup = BeautifulSoup(html_page, "lxml")
for link in soup.findAll('a'):
d = link.get('href')
link_result.append(d)
return link_result
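# Example (illustrative): get_links("https://example.com") returns every href
# value of the <a> tags on the page, including None for anchors without one.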
if __name__ == '__main__':
domain = input("enter the domain:")
url_d = 'https://duckduckgo.com/?q=email+"%40"+++'+domain+'+++""&ia=web&count=50&first=51'
link_3 = get_links(url_d)
url_y = 'https://in.search.yahoo.com/search?p=%5B%40"%20+%20'+domain+'%20+%20"%5D&pz=100'
link_4 = get_links(url_y)
url_ya = 'https://yandex.com/search/?text="%40"%20%20%20'+domain+'%20%20%20""&lr=20983'
link_5 = get_links(url_ya)
url_ask = "https://www.ask.com/web?q=email+"+domain+"&o=0&qo=homepageSearchBox"
link_6 = get_links(url_ask)
links = link_3 + link_4 + link_5 + link_6
nodup_link = list(set(links))
filtered_links = [i for i in nodup_link if re.search("http", i)]
final_links = list(set(filtered_links))
mails = [crawl(f) for f in final_links]
final_emails = []
for flat_lists in mails:
for flat_list in flat_lists:
item_list = list(set(flat_list))
for item in item_list:
if item not in final_emails:
final_emails.append(item)
print(final_emails)
data = {}
data.update({
'domain': domain,
'mails': final_emails
})
print(data)
with open('data.json', 'w') as outfile:
json.dump(data, outfile)
# print("--- %s seconds ---" % (time.time() - start_time))
|
realchief/EmailScraping-BeautifulSoup
|
filter_crwl_dft_srchegn_updated.py
|
filter_crwl_dft_srchegn_updated.py
|
py
| 2,298 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.Request",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 86,
"usage_type": "call"
}
] |
36282438996
|
import datetime
import requests
from bs4 import BeautifulSoup as bs4
from flask import Flask
from flask_restful import Resource, Api
OYK_URL = "https://oulunkylanyhteiskoulu.fi/"
def get_food() -> list:
with requests.Session() as s:
g = s.get(OYK_URL)
bs = bs4(g.text, 'html.parser')
today = datetime.date.today().weekday()
day = bs.select(".food__list")[today]
foods = day.find_all("p")[1].text.split("\n")
clean_food = list(filter(None, foods))
return clean_food
app = Flask(__name__)
api = Api(app)
class Food(Resource):
def get(self):
try:
foods = get_food()
alfred = {"items": [{"title": food}
for food in foods]}
return alfred, 200
except Exception:
return {}, 500
api.add_resource(Food, '/food')
if __name__ == "__main__":
app.run(debug=True, port=5000)
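# Example (illustrative): with the server running locally,
#   curl http://127.0.0.1:5000/food
# returns {"items": [{"title": "..."}, ...]} on success, or {} with HTTP 500.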
|
drstuggels/oyk-food
|
main.py
|
main.py
|
py
| 908 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.Session",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 31,
"usage_type": "name"
}
] |
12056898935
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code for this script is originally at:
https://github.com/dfm/george/blob/master/docs/_code/model.py
"""
from __future__ import division, print_function
import emcee
import triangle
import numpy as np
import cPickle
import matplotlib.pyplot as pl
import george
from george import kernels
def model(params, t):
amp, loc, sig2 = params
return amp * np.exp(-0.5 * (t - loc) ** 2 / sig2)
def lnprior_base(p):
""" notice how the p are inferred in the original scale """
amp, loc, sig2 = p
if not -10 < amp < 10:
return -np.inf
if not -5 < loc < 5:
return -np.inf
if not 0 < sig2 < 3.0:
return -np.inf
return 0.0
def fit_ind(initial, data, nwalkers=32):
ndim = len(initial)
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim)
for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_ind, args=data)
print("Running burn-in")
p0, _, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print("Running production")
p0, _, _ = sampler.run_mcmc(p0, 1000)
return sampler
def lnlike_gp(p, t, y, yerr):
""" notice how a and tau needs to be exponentiated
meaning that a and tau are supplied in the log scale
"""
a, tau = np.exp(p[:2])
gp = george.GP(a * kernels.Matern32Kernel(tau))
gp.compute(t, yerr)
return gp.lnlikelihood(y - model(p[2:], t))
def lnprior_gp(p):
"""more obvious that p is initiated in the log scale """
lna, lntau = p[:2]
if not -5 < lna < 5:
return -np.inf
if not -5 < lntau < 5:
return -np.inf
return lnprior_base(p[2:])
def lnprob_gp(p, t, y, yerr):
lp = lnprior_gp(p)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike_gp(p, t, y, yerr)
def fit_gp(initial, data, nwalkers=32):
ndim = len(initial)
# start chains at slightly different places in parameter space
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim)
for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_gp, args=data)
print("Running burn-in")
p0, lnp, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print("Running second burn-in")
p = p0[np.argmax(lnp)]
p0 = [p + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print("Running production")
p0, _, _ = sampler.run_mcmc(p0, 1000)
return sampler
def generate_data(params, N, rng=(-5, 5)):
gp = george.GP(params[0] * kernels.ExpSquaredKernel(params[1]))
# initialize t for drawing the data points
t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))
## modify the following
y = gp.sample(t)
y += model(params[2:], t)
yerr = 0.05 + 0.05 * np.random.rand(N)
y += yerr * np.random.randn(N)
# y = model(params[2:], t)
# yerr = gp.sample(t)
# 0.05 + 0.05 * np.random.rand(N)
#y += yerr * np.random.randn(N)
return t, y, yerr
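# Note on the parameterization (illustrative): lnlike_gp/lnprior_gp expect the
# first two entries of p in log scale, so for a = 0.1 and tau = 3.3 one would
# pass p = [np.log(0.1), np.log(3.3), amp, loc, sig2].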
if __name__ == "__main__":
np.random.seed(1234)
#truth = [0.1, 1.0, 0, 0.1, 0.4]
truth = [0.1, 3.3, -1.0, 0.1, 0.4]
t, y, yerr = generate_data(truth, 50)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.ylabel(r"$y$")
pl.xlabel(r"$t$")
pl.xlim(-5, 5)
pl.title("simulated data")
pl.savefig("data.png", dpi=150)
## Fit assuming independent.
# print("Fitting independent")
# data = (t, y, 1.0 / yerr ** 2)
# truth_ind = [0.0, 0.0] + truth
# sampler = fit_ind(truth_ind, data)
## Plot the samples in data space.
# print("Making plots")
# samples = sampler.flatchain
# x = np.linspace(-5, 5, 500)
# for s in samples[np.random.randint(len(samples), size=24)]:
# pl.plot(x, model(s[2:], x)+s[0]*x+s[1], color="#4682b4", alpha=0.3)
# pl.title("results assuming uncorrelated noise")
# pl.savefig("ind-results.png", dpi=150)
## Make the corner plot.
# fig = triangle.corner(samples[:, 2:], truths=truth, labels=labels)
# fig = triangle.corner(samples[:, :], truths=truth, labels=labels)
# fig.savefig("ind-corner.png", dpi=150)
# Fit assuming GP.
print("Fitting GP")
data = (t, y, yerr)
# truth is originally set to be [0.0, 0.0] by dfm, in log scale
truth_gp = truth + 1e-8 * np.random.randn(len(truth)) # [0.0, 0.0] + truth[2:]
sampler = fit_gp(truth_gp, data)
# Plot the samples in data space.
print("Making plots")
samples = sampler.flatchain
x = np.linspace(-5, 5, 500)
pl.figure()
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
for s in samples[np.random.randint(len(samples), size=24)]:
# sampled parameters have to be exponentiated
gp = george.GP(np.exp(s[0]) * kernels.Matern32Kernel(np.exp(s[1])))
gp.compute(t, yerr)
m = gp.sample_conditional(y - model(s[2:], t), x) + model(s[2:], x)
pl.plot(x, m, color="#4682b4", alpha=0.3)
pl.ylabel(r"$y$")
pl.xlabel(r"$t$")
pl.xlim(-5, 5)
pl.title("results with Gaussian process noise model")
pl.savefig("gp-results.png", dpi=150)
# Make the corner plot.
labels = [r"$\ln a^2$", r"$\ln \tau$", r"$\alpha$", r"$\ell$", r"$\sigma^2$"]
#fig = triangle.corner(samples[:, 2:], truths=truth, labels=labels)
# follow the original script to plot the hp in log space
truth[0] = np.log(truth[0])
truth[1] = np.log(truth[1])
cPickle.dump(truth, open("truth.pkl", "w"))
cPickle.dump(samples, open("samples.pkl", "w"))
# only plot the hyperparameters
fig = triangle.corner(samples, truths=truth, labels=labels, size=30)
fig.savefig("gp-corner.png", dpi=150)
|
karenyyng/shear_gp
|
george_examples/model.py
|
model.py
|
py
| 5,729 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.exp",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "emcee.EnsembleSampler",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "george.GP",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "george.kernels.Matern32Kernel",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "george.kernels",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "numpy.isfinite",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "emcee.EnsembleSampler",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "george.GP",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "george.kernels.ExpSquaredKernel",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "george.kernels",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "numpy.diff",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "numpy.random.randn",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "numpy.random.randint",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "george.GP",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "george.kernels.Matern32Kernel",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "george.kernels",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "cPickle.dump",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "cPickle.dump",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "triangle.corner",
"line_number": 195,
"usage_type": "call"
}
] |
39269323605
|
from sqlalchemy import create_engine
from tests.util import RPCTest
class PDNSTest(RPCTest):
def cleanup_pdns_db(self, db_uri):
with create_engine(db_uri).begin() as conn:
conn.execute('delete from domains')
conn.execute('delete from domainmetadata')
conn.execute('delete from records')
def create_output_for_zone(self, zone, output, zone_group, db_uri):
self.r.output_create(output, plugin='pdns-db', db_uri=db_uri)
self.r.zone_group_create(zone_group)
self.r.zone_group_add_zone(zone_group, zone)
self.r.output_add_group(output, zone_group)
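# Usage sketch (illustrative; the argument values below are hypothetical):
# self.create_output_for_zone('example.com', 'out1', 'zg1', db_uri)
# creates a pdns-db output and a zone group, adds the zone to the group,
# and attaches the group to the output.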
|
1and1/dim
|
dim-testsuite/tests/pdns_test.py
|
pdns_test.py
|
py
| 631 |
python
|
en
|
code
| 39 |
github-code
|
6
|
[
{
"api_name": "tests.util.RPCTest",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 8,
"usage_type": "call"
}
] |
74472078906
|
import os
import pickle
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from openpyxl import Workbook
def save_pickle(data, filename):
with open(filename, 'wb') as file:
pickle.dump(data, file)
def load_pickle(filename):
with open(filename, 'rb') as file:
data = pickle.load(file)
return data
def save_parquet(data, filename):
df = pd.DataFrame(data)
table = pa.Table.from_pandas(df)
pq.write_table(table, filename)
def load_parquet(filename):
table = pq.read_table(filename)
df = table.to_pandas()
data = df.to_dict(orient='records')
return data
def save_xlsx(data, filename):
wb = Workbook()
ws = wb.active
# Write a header row first so load_xlsx (pd.read_excel) can recover the keys;
# without it, the first record would be consumed as the column header.
ws.append(list(data[0].keys()))
for item in data:
ws.append(list(item.values()))
wb.save(filename)
def load_xlsx(filename):
wb = pd.read_excel(filename)
data = wb.to_dict(orient='records')
return data
# Example data collection
collection = [{'id': i, 'value': i*2} for i in range(1, 101)]
# Save and load the collection using the pickle module
save_pickle(collection, 'collection.pickle')
loaded_pickle = load_pickle('collection.pickle')
# Save and load the collection using Parquet
save_parquet(collection, 'collection.parquet')
loaded_parquet = load_parquet('collection.parquet')
# Save and load the collection using XLSX
save_xlsx(collection, 'collection.xlsx')
loaded_xlsx = load_xlsx('collection.xlsx')
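# Sanity check (illustrative): all three formats should round-trip the collection.
# assert loaded_pickle == collection
# assert loaded_parquet == collection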
print(f"Liczba elementów w kolekcji: {len(collection)}")
print("Moduł pickle:")
print(f" Zapis: {len(pickle.dumps(collection))} bajtów")
print(f" Odczyt: {len(pickle.dumps(loaded_pickle))} bajtów")
print("Parquet:")
print(f" Zapis: {os.path.getsize('collection.parquet')} bajtów")
print(f" Odczyt: {os.path.getsize('collection.parquet')} bajtów")
print("XLSX:")
print(f" Zapis: {os.path.getsize('collection.xlsx')} bajtów")
print(f" Odczyt: {os.path.getsize('collection.xlsx')} bajtów")
|
Lisiozmur/Njpo
|
Ćwiczenie5/Zadanie1.py
|
Zadanie1.py
|
py
| 2,117 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pickle.dump",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyarrow.Table.from_pandas",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pyarrow.Table",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pyarrow.parquet.write_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyarrow.parquet",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pyarrow.parquet.read_table",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyarrow.parquet",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pickle.dumps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pickle.dumps",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.getsize",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
}
] |
6727912141
|
"""
Module for parsing arguments.
"""
import sys
import argparse
import os
from pathlib import Path
from typing import Any
__author__ = "Stijn Arends"
__version__ = "v0.1"
__data__ = "21-8-2022"
class ArgumentParser:
"""
Class to parse the input arguments.
"""
def __init__(self) -> None:
self.parser = self._create_argument_parser()
# Print help if no arguments are supplied and stop the program
if len(sys.argv) == 1:
self.parser.print_help(sys.stderr)
sys.exit(1)
self.arguments = self.parser.parse_args()
@staticmethod
def _create_argument_parser():
"""
Create an argument parser.
:returns
--------
parser - ArgumentParser
"""
parser = argparse.ArgumentParser(prog=f"python {os.path.basename(__file__)}",
description="Python script to parse NetWas results.",
epilog="Contact: [email protected]")
# Set version
parser.version = __version__
parser.add_argument('-f',
'--file', dest="file",
help='Input NetWas file - tab separated txt or csv file',
required=True)
parser.add_argument('-t',
'--threshold', dest="threshold",
help="NetWas score threshold to select 'good' reprioritized genes. Default = None",
default=None, type=float)
parser.add_argument('-o',
'--output', dest="output",
help='Location and name of the output file.',
required=True)
parser.add_argument('--gene_list', dest="gene_list",
help='Specify if only gene symbols are written out. '
'Default is the NetWas file with filtered genes',
action="store_true")
parser.add_argument('-v',
'--version',
help='Displays the version number of the script and exits',
action='version')
return parser
def get_argument(self, argument_key: str) -> Any:
"""
Method to get an input argument.
:parameters
-----------
argument_key - str
Full command line argument (so --config for the configuration file argument).
:returns
--------
value - List or boolean
"""
if self.arguments is not None and argument_key in self.arguments:
value = getattr(self.arguments, argument_key)
else:
value = None
return value
def get_parser(self) -> argparse.ArgumentParser:
"""
Get the argument parser
:returns
--------
parser - argparse.ArgumentParser
Argument parser
"""
return self.parser
class CLIArgValidator:
"""
Class to check if arguments are valid.
"""
def validate_input_file(self, input_path: str) -> None:
"""
Validate the input file by checking whether it actually exists
and which extension it has.
:parameters
-----------
input_path - str
Path to a file
"""
input_path = Path(input_path)
self._validate_input_exists(input_path)
self._validate_input_extension(input_path)
@staticmethod
def _validate_input_exists(input_path: Path) -> None:
"""
Check if a file exists.
:parameters
-----------
input_path - str
Path to a file
"""
if not input_path.is_file():
raise FileNotFoundError('Input file does not exist!')
@staticmethod
def _validate_input_extension(input_path: Path) -> None:
"""
Check if a file has the right extension.
:parameters
-----------
input_path - str
Path to a file
"""
if input_path.suffix not in [".txt", ".csv"]:
raise FileNotFoundError('Input file should be either a .txt or .csv')
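# Minimal usage sketch (illustrative):
# parser = ArgumentParser()
# netwas_file = parser.get_argument("file")
# CLIArgValidator().validate_input_file(netwas_file)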
|
molgenis/benchmark-gwas-prio
|
prioritization_methods/NetWAS/arg_parser.py
|
arg_parser.py
|
py
| 3,976 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 133,
"usage_type": "name"
}
] |
11458247441
|
# First, import the Select class
from selenium.webdriver.support.select import Select
from selenium import webdriver
import time
# Open the browser and go to the Ctrip travel homepage
driver = webdriver.Chrome()
driver.get('https://www.ctrip.com/?sid=155952&allianceid=4897&ouid=index')
driver.maximize_window()  # maximize the window
# Sleep for 5 seconds
time.sleep(5)
# The Select class can only pick options of controls whose tag_name is select
# The options belong to the dropdown, so locate the dropdown first and then choose an option
# If the dropdown control is a dt (a table-like widget), first locate and click the dropdown, then locate and click the option
# Ways to choose an option of a select-tag dropdown:
# 1. By visible text: Select(dropdown_element).select_by_visible_text(text of the option tag)
s = driver.find_element_by_id('J_roomCountList')
Select(s).select_by_visible_text('6间')  # choose 6 rooms
time.sleep(5)
# 2. By the value attribute of the option tag: Select(dropdown_element).select_by_value(value of the option tag)
Select(s).select_by_value("5")
time.sleep(5)
# 3. By option index (all options form a list starting at 0): Select(dropdown_element).select_by_index(index)
Select(s).select_by_index(7)
time.sleep(5)
driver.quit()
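# For dropdowns that are not a <select> tag (e.g. styled div/dt widgets),
# Select cannot be used; click the dropdown first, then click the option
# (illustrative sketch, the selectors below are hypothetical):
# driver.find_element_by_css_selector('.dropdown-toggle').click()
# driver.find_element_by_xpath('//li[text()="6间"]').click()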
|
Ailian482/WebSelenium
|
Auto_Test/20_下拉框选择处理.py
|
20_下拉框选择处理.py
|
py
| 1,363 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.select.Select",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.select.Select",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.select.Select",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
}
] |
37985935295
|
#! /usr/bin/env python3
import audioInterface
import os
import yaml
import sys
from datetime import datetime
from gpiozero import Button
from signal import pause
from pydub import AudioSegment
from pydub.playback import play
try:
with open("config.yaml") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
except FileNotFoundError as e:
print(
f"Could not find the config.yaml file. FileNotFoundError: {e}. Check config location and retry."
)
sys.exit(1)
hook = Button(config["hook_gpio"])
def off_hook() -> None:
print("Phone off hook, ready to begin!")
audio_interface = audioInterface.AudioInterface(config, hook)
# playback voice message through speaker
print("Playing voicemail message...")
play(
AudioSegment.from_wav(
os.path.dirname(os.path.abspath(config["source_file"]))
+ "/sounds/voicemail.wav"
)
- config["playback_reduction"]
)
# start recording beep
print("Playing beep...")
play(
AudioSegment.from_wav(
os.path.dirname(os.path.abspath(config["source_file"])) + "/sounds/beep.wav"
)
- config["beep_reduction"]
)
# now, while phone is off the hook, record audio from the microphone
print("recording")
audio_interface.record()
audio_interface.stop()
output_file = (
os.path.dirname(os.path.abspath(config["source_file"]))
+ "/recordings/"
+ f"{datetime.now().isoformat()}"
)
audio_interface.close(output_file + ".wav")
print("Finished recording!")
def on_hook() -> None:
print("Phone on hook.\nSleeping...")
def main():
hook.when_pressed = off_hook
hook.when_released = on_hook
pause()
if __name__ == "__main__":
main()
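# Example config.yaml (illustrative; the keys match the reads above, the
# values are hypothetical):
# hook_gpio: 17
# source_file: /home/pi/audioGuestBook.py
# playback_reduction: 16
# beep_reduction: 24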
|
nickpourazima/rotary-phone-audio-guestbook
|
audioGuestBook.py
|
audioGuestBook.py
|
py
| 1,781 |
python
|
en
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "yaml.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gpiozero.Button",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "audioInterface.AudioInterface",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pydub.playback.play",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment.from_wav",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pydub.playback.play",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment.from_wav",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "signal.pause",
"line_number": 69,
"usage_type": "call"
}
] |
39792208434
|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import utils
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives import hashes, cmac
from cryptography.exceptions import InvalidSignature
from cryptography.exceptions import InvalidTag
import os
class Security:
def __init__(self,path,BackupPath):
"""
Initialize the security module, loading the private and public key from the path
passed as argument if present, otherwise generating and saving them
:type path: String
:param path: The path of the pem file in which the private key must be written
:type BackupPath: String
:param BackupPath: The path of the pem file used as a backup copy of the private key
"""
try:
with open(path,"rb") as pem:
try:
self.privateKey = serialization.load_pem_private_key(pem.read(),password=b'ServerMPSprivatekey',backend=default_backend())
self.publicKey = self.privateKey.public_key()
except ValueError:
try:
with open(BackupPath,"rb") as backup:
backup_key = serialization.load_pem_private_key(backup.read(),password=b'ServerMPSprivatekey',backend=default_backend())
with open(path,"wb") as pem_write:
self.privateKey = backup_key
self.publicKey = self.privateKey.public_key()
serializedPrivateKey = backup_key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.PKCS8,encryption_algorithm=serialization.BestAvailableEncryption(b'ServerMPSprivatekey'))
pem_write.write(serializedPrivateKey)
except FileNotFoundError:
self.generate_key(path,BackupPath)
except FileNotFoundError:
try:
with open(BackupPath,"rb") as backup,open (path,"wb") as pem:
try:
backup_key = serialization.load_pem_private_key(backup.read(),password=b'ServerMPSprivatekey',backend=default_backend())
SerializedPrivateKey = backup_key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.PKCS8,encryption_algorithm=serialization.BestAvailableEncryption(b'ServerMPSprivatekey'))
self.privateKey = backup_key
self.publicKey = self.privateKey.public_key()
pem.write(SerializedPrivateKey)
except ValueError:
self.generate_key(path,BackupPath)
except FileNotFoundError:
with open(path,"wb") as pem, open(BackupPath,"wb") as backup:
self.generate_key(path,BackupPath)
def generate_key(self,path,backupPath):
"""
Generate and write the private key
:type path: String
:param path: The path of the pem file in which the private key must be written
:type backupPath: String
:param backupPath: The path of the backup pem file in which the private key is also written
"""
with open(path,"wb") as pem, open(backupPath,"wb") as backup:
self.privateKey = rsa.generate_private_key(public_exponent=65537,\
key_size=8196,\
backend=default_backend())
self.publicKey = self.privateKey.public_key()
serializedPrivateKey = self.privateKey.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(b'ServerMPSprivatekey'))
pem.write(serializedPrivateKey)
backup.write(serializedPrivateKey)
def RSAEncryptText(self,text):
"""
Encrypt the text using RSA with the public key of the handled client
:type text: Bytes
:param text: The plain text that must be encrypted
:rtype: Bytes
:return: The cipher text relative to the plain text passed as argument
"""
cipherText = self.ClientPublicKey.encrypt(text,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return cipherText
def RSADecryptText(self,cipherText):
"""
Decrypt the message using your own private key
:type cipherText: Bytes
:param cipherText: The cipher text that must be decrypted
:rtype: Bytes
:return plaintext: the plain text obtained by decriptying the plain text passed as argument
"""
plaintext = self.privateKey.decrypt(cipherText,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return plaintext
def splitMessage(self,data,len):
"""
Split the message in two parts, useful when you need to compare a message with a digest or a signature
:type data: Bytes
:param data: The Data that must be divided in two parts
:type len: Int
:param len: The point in which the list must be divided
:rtype: <Bytes,Bytes>
:return: The tuple of lists obtained by dividing the original data in two parts
"""
return [data[0:len*(-1)],data[len*(-1):]]
def generateDigest(self,data):
"""
Generate the digest of the message (in bytes) using SHA-256
:type data: Bytes
:param data: The data of which we want generate the digest
:rtype: Bytes
:return: The digest of the data passed as argument
"""
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(data)
return digest.finalize()
def getSignature(self,data):
"""
Generate a signature by the private key
:type data: Bytes
:param data: The data we want to sign
:rtype: Bytes
:return:The signature of the data passed as argument
"""
signature = self.privateKey.sign(data,
padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return signature
def VerifySignature(self,data,signature):
"""
Verify if the signature,generated by the private key of the client,is associated to the data
:type data: Bytes
:param data: The data we want to verify
:type signature: Bytes
:param signature: The signature used to check
:rtype: Boolean
:return: If the signature is correct or not
"""
try:
self.ClientPublicKey.verify(signature,data,padding.PSS(mgf=padding.MGF1(hashes.SHA256()),salt_length=padding.PSS.MAX_LENGTH),hashes.SHA256())
return True
except InvalidSignature:
return False
def AddClientKey(self,key):
"""
Add the public key of the client, in order to use them when it is necessary to encrypt using RSA, pass the key encoded by 'utf-8'
:type key: Bytes
:param key: The public key of the client we want to add
"""
self.ClientPublicKey = serialization.load_pem_public_key(key,backend=default_backend())
def getSerializedPublicKey(self):
"""
Get the server public key in a serializable form (it must be decoded) so it can be printed and sent
:rtype: Bytes
:return: The serialized public key of the server
"""
return self.publicKey.public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
def getSerializedClientPublicKey(self):
"""
Get the client public key in a serializable form (it must be decoded) so it can be printed and sent
:rtype: Bytes
:return: The serialized public key of the client
"""
return self.ClientPublicKey.public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
def generateSymmetricKey(self,len,nonce):
"""
Generate a symmetric key used in AESGCM with a chosen length (192/256 bit suggested) and pass a nonce used with the key
to cipher a text (each operation has its own couple of <key,nonce> in order to guarantee security)
:type len: Int
:param len: The length of the symmetric key (in bit)
:type nonce: Int
:param nonce: The nonce used to encrypt/decrypt
:rtype: Int
:return: 0 if the operations are done correctly
"""
self.nonce = nonce
self.len = len
self.SymmetricKey = AESGCM.generate_key(bit_length=self.len)
return 0
def getSymmetricKey(self):
"""
Get the symmetric key as bytes; to serialize it, convert it (e.g. into integers, one per byte,
so bit_length of the key / 8 integers are needed, because each integer represents a byte)
:rtype: Bytes
:return: The symmetric key used to encrypt/decrypt
"""
return self.SymmetricKey
def AddPacketNonce(self,nonce):
"""
Add the nonce used in AES when it is necessary to encapsulate some information about the starter of the conversation
between two users
:type nonce: Int
:param nonce: The nonce used to encrypt the packets necessary to exchange key from two clients
"""
self.packetNonce = nonce
def AESDecryptText(self,ct):
"""
Decrypt text with AES-GCM in order to guarantee authenticity and integrity of the message; the handling of the nonce
is provided by the function itself (each encryption/decryption must increment the nonce in order to keep it
synchronized on the two sides)
:type ct: Bytes
:param ct: The cipher text to decrypt
:rtype: Bytes or None
:return: The plain text obtained by decrypting the cipher text passed as parameter
"""
try:
aescgm = AESGCM(self.SymmetricKey)
self.nonce = self.nonce+1
pt = aescgm.decrypt(self.nonce.to_bytes(16,byteorder='big'),ct,None)
return pt
except InvalidTag:
return None
def AESEncryptText(self,pt):
"""
        Encrypt text with AES-GCM in order to guarantee authenticity and integrity of the message; the nonce handling
        is provided by the function itself (each encryption/decryption increments the nonce in order to keep it
        synchronized on the two sides)
        :type pt: Bytes
        :param pt: The plain text to encrypt
        :rtype: Bytes or None
        :return: The cipher text obtained by encrypting the plain text passed as argument
"""
try:
aesgcm = AESGCM(self.SymmetricKey)
self.nonce = self.nonce + 1
return aesgcm.encrypt(self.nonce.to_bytes(16,byteorder='big'), pt, None)
        except Exception:
return None
def PacketAESEncryptText(self,pt):
"""
        Encrypt text with AES and a special nonce (sent by the client during the login procedure) in order
        to encapsulate information useful for the key exchange between two online users
:type pt: Bytes
:param pt: The plain text to encrypt
:rtype: Bytes or None
:return: The cipher text obtained by encrypting the plain text passed as argument
"""
try:
aesgcm = AESGCM(self.SymmetricKey)
self.packetNonce = self.packetNonce + 1
return aesgcm.encrypt(self.packetNonce.to_bytes(16,byteorder='big'), pt, None)
        except Exception:
return None
def addDHparameters(self,p,g):
"""
        Add the DH parameters, in order to retrieve them efficiently when necessary
        :type p: Int
        :param p: The Diffie Hellman P parameter
:type g: Int
:param g: The Diffie Hellman G parameter
"""
self.p = p
self.g = g
def getDHparameters(self):
"""
Get the DH parameters as a list [p,g]
:rtype: [Int,Int]
        :return: The list composed of the two DH parameters
"""
return [self.p,self.g]
def generateNonce(self,size):
"""
        Generate a nonce of a chosen size (in bytes) and return it as an integer decoded as big-endian
:type size: Int
:param size: The size (in Bytes) of the nonce
:rtype: Int
        :return: A nonce of the size passed as argument, generated using the OS cryptographically secure random source
"""
return int.from_bytes(os.urandom(size),byteorder='big')
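# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original class): a
# self-contained round trip of the two primitives the methods above wrap --
# RSA-PSS sign/verify and AES-GCM with an incrementing nonce -- using the same
# `cryptography` APIs. The key sizes, nonce and messages are illustrative
# assumptions, not values taken from this project.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import rsa, padding
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    # RSA-PSS: sign with the private key, verify with the matching public key
    demo_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                        backend=default_backend())
    message = b'hello'
    demo_sig = demo_key.sign(message,
                             padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                                         salt_length=padding.PSS.MAX_LENGTH),
                             hashes.SHA256())
    # verify() raises InvalidSignature on failure, mirroring VerifySignature above
    demo_key.public_key().verify(demo_sig, message,
                                 padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                                             salt_length=padding.PSS.MAX_LENGTH),
                                 hashes.SHA256())

    # AES-GCM: both sides must use the same (key, nonce) pair for each message
    demo_aes_key = AESGCM.generate_key(bit_length=256)
    nonce = 1
    ct = AESGCM(demo_aes_key).encrypt(nonce.to_bytes(16, byteorder='big'), b'secret', None)
    assert AESGCM(demo_aes_key).decrypt(nonce.to_bytes(16, byteorder='big'), ct, None) == b'secret'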
|
SieniAlessandro/E2E-Secure-Chat
|
Server/Security/Security.py
|
Security.py
|
py
| 14,411 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.Encoding",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.BestAvailableEncryption",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.Encoding",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.BestAvailableEncryption",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.rsa",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.Encoding",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.BestAvailableEncryption",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.OAEP",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.MGF1",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.OAEP",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.MGF1",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.Hash",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.PSS",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.MGF1",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.PSS",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.PSS",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.padding.MGF1",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes.SHA256",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.hashes",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "cryptography.exceptions.InvalidSignature",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.load_pem_public_key",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.Encoding",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.PublicFormat",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.Encoding",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.PublicFormat",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.aead.AESGCM.generate_key",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.aead.AESGCM",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.aead.AESGCM",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.aead.AESGCM",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.aead.AESGCM",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 319,
"usage_type": "call"
}
] |
26135102637
|
import cv2 as cv
import numpy as np
img = cv.imread('/home/ai3/Desktop/common/ML/Day13/girl.jpg',0)
kernel = np.ones((2,2),np.uint8)
open1 = cv.morphologyEx(img,cv.MORPH_OPEN,kernel)
open2 = cv.morphologyEx(img,cv.MORPH_CLOSE,kernel)
open3 = cv.morphologyEx(open1,cv.MORPH_CLOSE,kernel)
img=np.hstack((open1,open2,open3))
cv.imshow('dst',img)  # imshow returns None, so don't reassign img
cv.waitKey(0)
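# --- Hedged sketch (editor addition): the same opening/closing pipeline on a
# synthetic binary image, so the snippet runs without the hard-coded path. ---
demo = (np.random.rand(64, 64) > 0.5).astype(np.uint8) * 255
demo_open = cv.morphologyEx(demo, cv.MORPH_OPEN, kernel)    # erode then dilate
demo_close = cv.morphologyEx(demo, cv.MORPH_CLOSE, kernel)  # dilate then erode
print(demo_open.shape, demo_close.shape)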
|
94akshayraj/AI-program
|
ML ans/day13/3.py
|
3.py
|
py
| 365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.hstack",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 13,
"usage_type": "call"
}
] |
74637154747
|
import time
import redis
cache = redis.StrictRedis(host='redis', decode_responses=True, db=0, port=6379)
def update_and_get_hit_count():
    """Increment and return the Redis 'hits' counter, retrying on connection errors."""
print('In utils/update_and_get_hit_count')
retries = 5
while True:
try:
return cache.incr('hits')
except redis.exceptions.ConnectionError as err:
if retries == 0:
raise err
retries -= 1
time.sleep(0.5)
def clear_hit_count():
    """Reset the Redis 'hits' counter to 0, retrying on connection errors."""
print('in utils/clear_hit_count')
retries = 5
while True:
try:
return cache.set('hits', 0)
except redis.exceptions.ConnectionError as err:
if retries == 0:
raise err
retries -= 1
time.sleep(0.5)
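if __name__ == '__main__':
    # Hedged usage sketch (editor addition): exercises the helpers above;
    # assumes a Redis server is reachable at host 'redis' as configured in `cache`.
    clear_hit_count()
    print('hits:', update_and_get_hit_count())  # expected: hits: 1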
|
ShukujiNeel13/composetest
|
utils.py
|
utils.py
|
py
| 770 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "redis.StrictRedis",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "redis.exceptions",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "redis.exceptions",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
}
] |
12688443618
|
# https://leetcode.com/problems/reverse-linked-list/
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def reverseList_(self, head: Optional[ListNode]) -> ListNode: # input as list
        # empty or single-element head
if len(head) <= 1:
return head
# first element in head
first_node = ListNode(val = head[0], next=None)
prev_node = first_node
list_val = [first_node.val]
# len(head) > 1
if len(head) > 1:
for i in range(1, len(head)):
curr_node = ListNode(val = head[i], next=None)
list_val.append(curr_node.val)
prev_node.next = curr_node
prev_node = curr_node
# # traverse forward
# next_node = first_node.next
# while next_node != None:
# # print("Next: ", next_node.val)
# next_node = next_node.next
# traverse reverse
return list_val[::-1]
def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]: # input as Listnode; only works on leetcode
prev = None
curr = head
while curr:
nxt = curr.next
curr.next = prev
prev = curr
curr = nxt
return prev
solved = Solution()
# print(solved.reverseList_(head = [1,2,3,4,5]))
# print(solved.reverseList_(head = [1,2]))
# print(solved.reverseList_(head = [-1]))
# print(solved.reverseList_(head = []))
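# Hedged usage sketch (editor addition): build a small linked list, reverse it
# with the pointer-based method, and print the values (expected [3, 2, 1]).
demo_head = ListNode(1, ListNode(2, ListNode(3)))
node = solved.reverseList(demo_head)
demo_vals = []
while node:
    demo_vals.append(node.val)
    node = node.next
print(demo_vals)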
|
zvovov/competitive_coding
|
leetcode/neetcode_150/reverse_linked_list.py
|
reverse_linked_list.py
|
py
| 1,637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 40,
"usage_type": "name"
}
] |
21368489956
|
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
import ruamel.yaml
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
def _get_measurement_range_for_output(output_key, output, method):
# method = output['method']
# config = output[method]
# return np.arange(config['start'], config['stop'], config['step'])
method_keys = method.split('.') # e.g. ['freq_mod', 'span']
config = output
# find the method configuration inside the output-config
for key in method_keys:
config = config[key]
return np.arange(config['start'], config['stop'], config['step'])
def color_generator(N, colormap='gnuplot'):
""" Color generator for a given matplotlib colormap.
Usage:
------------------------------------------
import matplotlib.pylab as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
N = 20
color_gen = color_generator(N)
        for i in range(N):
color = next(color_gen)
# do something with the color ...
"""
cm_map = plt.get_cmap(colormap)
c_norm = colors.Normalize(vmin=0, vmax=N)
scalar_map = cmx.ScalarMappable(norm=c_norm, cmap=cm_map)
    for i in range(N):
yield scalar_map.to_rgba(i)
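# Hedged mini-example (editor addition): draw three RGBA colors from the
# generator; purely illustrative and independent of the measurement files below.
_demo_gen = color_generator(3)
_demo_colors = [next(_demo_gen) for _ in range(3)]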
cell='4'
measno='3'
filename='N:/data/emily/magnetometer_test/cell{1:s}/remote/meas{0:s}'.format(str(measno), str(cell))
files=glob.glob(filename+"/*.csv")
files=sorted(files)
start=100
steps=100
a=np.loadtxt(files[0], delimiter=',')
a_fft=np.abs(np.fft.rfft(a, axis=0))
b=np.sum(a_fft[start::steps,:], axis=1)
color_gen = color_generator(len(b))
config_name = glob.glob(filename+'/config*.yaml')
with open(config_name[0], 'r') as ymlfile:
cfg = ruamel.yaml.load(ymlfile)
stack = cfg['stack']
meas_ranges = [None] * len(stack)
keys = [None] * len(stack)
outputs = [None] * len(stack)
methods = [None] * len(stack)
for i, stack_entry in enumerate(stack):
keys[i], method_index = stack_entry.split('.') # e.g. key='B1', method_index = '0'
method_index = int(method_index) # index gives the position of the method in the methods array
outputs[i] = cfg['outputs'][keys[i]]
methods[i] = outputs[i]['methods'][method_index]
meas_ranges[i] = _get_measurement_range_for_output(keys[i], outputs[i], methods[i])
b0_amp = cfg['outputs']['B0']['amp']['start']
b1_freq_center = cfg['outputs']['B1']['freq_mod']['center']
b1_freq_span = cfg['outputs']['B1']['freq_mod']['span']['start']
downsampling_factor = cfg['devices']['nidaq']['downsampling_factor']
measurement_time = cfg['devices']['nidaq']['measurement_time_s']
sample_rate = cfg['devices']['nidaq']['sample_rate']
x_axis_label = cfg['outputs'][keys[0]][methods[0]]['label']
data_points = sample_rate*measurement_time/downsampling_factor
datanew=np.zeros([len(b), len(files)])
plt.clf()
# for j in range(len(b)-1):
# #if j!=9: continue
# color = next(color_gen)
# plt.plot(a_fft[:,j], label=str(meas_ranges[1][j]), color=color)
# plt.title("$B_1$ frequency (Hz)", fontsize=16)
# plt.ylabel("FFT signal (a.u).", fontsize=16)
# plt.xlabel("Frequency (Hz)", fontsize=16)
# plt.ylim((0,8))
# plt.legend(ncol=3, prop={'size':10})
# plt.show()
# plt.savefig(filename+"/fft_0mV_{}.png".format(measno), dpi=300)
# plt.savefig(filename+"/fft_0mV_{}.pdf".format(measno))
# plt.clf()
# plt.plot(b)
# plt.ylabel("FFT signal a.u.", fontsize=16)
# plt.xlabel("Frequency (Hz)", fontsize=16)
# plt.ylim((0,9))
# plt.savefig(filename+"/fft_sum_0mV_{}.png".format(measno), dpi=300)
# plt.savefig(filename+"/fft_sum_0mV_{}.pdf".format(measno))
# plt.clf()
# raise
for i in range(len(files)):
data=np.loadtxt(files[i], delimiter=',')
data_fft=np.abs(np.fft.rfft(data, axis=0))
datanew[:,i]=np.sum(data_fft[start::steps,:], axis=1)
plt.imshow(datanew[-1::-1], aspect='auto', interpolation='nearest',
extent=[meas_ranges[0][0]*1000, meas_ranges[0][-1]*1000, start/1000, data_fft.shape[0]/1000], cmap='gnuplot')
plt.xlabel('R$_4$ offset (mV)', fontsize=20)
plt.ylabel('Frequency (kHz)', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=20)
plt.colorbar()
plt.show()
plt.savefig(filename+"/all_together{0:s}_steps{1:s}.png".format(measno, str(steps)), dpi=300)
plt.savefig(filename+"/all_together{0:s}_steps{1:s}.pdf".format(measno, str(steps)))
|
physikier/magnetometer
|
src/analysis.py
|
analysis.py
|
py
| 4,786 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.rc",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.rc",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.Normalize",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.ScalarMappable",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.fft.rfft",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "ruamel.yaml.yaml.load",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "ruamel.yaml.yaml",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "ruamel.yaml",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.fft.rfft",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
}
] |
70435413308
|
import re
import emoji
def preprocess_string(text):
"""
    Preprocess the given text.
    :param text: str
    :return: str
"""
    # first, remove emoji
no_emoticon = ''
for char in text:
if char not in emoji.UNICODE_EMOJI:
no_emoticon += char
    # split on special characters
no_punctuation = re.split(r'([!,?]+)|([.]+)|([,]+)|(["])|([\'])|([&]+)|([(]+)|([)]+)|([~]+)|([♡]+)|([☆,★]+)',
no_emoticon.strip())
no_punctuation_text = []
for string in no_punctuation:
if (string == '') or (string is None): continue
no_punctuation_text.append(string)
no_punctuation_text = ' '.join(no_punctuation_text)
    # separate standalone Korean jamo (consonants/vowels) and digits
split_char = re.split(r'([ㄱ-ㅣ0-9]+)', no_punctuation_text.strip())
split_char = ' '.join(split_char)
    # separate jamo clusters that often appear on their own in Korean (e.g. ㅋㅋ, ㅠㅠ)
split_char = re.split(r'([ㅎ]{2,})|([ㅜ,ㅠ]{2,})|([ㅗ]+)|([ㅋ,ㄱ,ㄲ]{2,})|\s+', split_char.strip())
final_text = []
for string in split_char:
if (string == '') or (string is None): continue
final_text.append(string)
return ' '.join(final_text)
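if __name__ == '__main__':
    # Hedged usage sketch (editor addition): illustrative input only.
    print(preprocess_string('안녕하세요!! ㅋㅋㅋ 좋아요ㅠㅠ'))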
|
teammatmul/project-purifier
|
purifier/preprocess.py
|
preprocess.py
|
py
| 1,254 |
python
|
ko
|
code
| 78 |
github-code
|
6
|
[
{
"api_name": "emoji.UNICODE_EMOJI",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "re.split",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 35,
"usage_type": "call"
}
] |
2501424452
|
from os import listdir
from PIL import Image
# source picture directory
DIR = 'pic'
# get the listing of images
img_list = listdir(DIR)
#enter and calculate ratio
sh_ent = int(input("Shakal ratio (compress ratio):"))
sh = 100 - sh_ent
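# e.g. an entered ratio of 80 maps to JPEG quality 20 (higher ratio -> smaller file)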
#work with image
for filename in img_list:
outname = "out/" + filename
filename = "pic/" + filename
print(filename)
img = Image.open(filename)
#save with compress
img.save(outname, "JPEG", quality=sh)
|
vakarianplay/Pic_tools
|
shakal (compress)/shak.py
|
shak.py
|
py
| 505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 19,
"usage_type": "name"
}
] |
29457010542
|
#!/usr/bin/env python
import sys
import commands
import string
import datetime
import logging
import logging.handlers
from optparse import OptionParser
from random import choice
def print_error(ret, do_exit=False, msg=""):
"""
ret is the tuple returned by commands.getstatusoutput. If ret[0] is not 0,
then msg (if passed) or ret[1] is printed as an error. If do_exit is True,
the program also exits
"""
if ret[0] != 0:
if not msg:
msg = ret[1]
logging.error("Check the following information:")
logging.error(msg)
if do_exit:
sys.exit(ret[0])
def check_lcg_ce(ce):
"""Do the tests for a lcg-CE"""
# I will not waste much effort on this, since lcg-CE are condemned
# to disappear.
rets = []
ce, queue = ce.split("/", 1)
logging.info("\t\tchecking globus-job-run to ce")
cmd = "globus-job-run %s /bin/hostname" % ce
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
logging.info("\t\tchecking globus-job-run to fork")
cmd = "globus-job-run %s/jobmanager-fork /bin/pwd" % ce
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
logging.info("\t\tchecking globus-job-run to queue")
queue = queue.split("-")
cmd = "globus-job-run %s/%s-%s -queue %s /bin/pwd" % tuple([ce] + queue)
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
return rets
def check_cream_ce(ce):
"""Do the tests for a CREAM CE"""
rets = []
ce_hostport, dummy = ce.split("/", 1)
logging.info("\t\tchecking glite-ce-allowed-submission")
cmd = "glite-ce-allowed-submission -n %s" % ce_hostport
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
logging.info("\t\tchecking glite-ce-job-submit")
cmd = "glite-ce-job-submit -n -a -r %s test_submission.jdl" % ce # XXX
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
if ret[0] == 0:
url = ret[1].splitlines()[-1]
else:
return # XXX
logging.info("\t\t\tJob ID: %s", url)
while True:
cmd = "glite-ce-job-status -n %s" % url
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
if "[DONE-OK]" in ret[1]:
logging.info("\t\tsubmission ok, check the following job " + \
"id for further details %s", url)
break
elif "[DONE-FAILED]" in ret[1]:
ret = (1, ret[1])
print_error(ret)
break
print_error(ret)
rets.append(ret)
return rets
def check_gridftp(host):
"""Check gridftp on host"""
cmd = "uberftp %s ls" % host
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
if ret[0] != 0:
print_error(ret)
else:
logging.info("\t\tGridFTP OK")
def check_ces(bdii, vo):
"""Query the bdii for the available CE for VO vo, then check them"""
logging.info("Checking Computing Elements")
logging.info("\tQuerying the BDII for the CEs")
cmd = "lcg-info --list-ce --bdii %(bdii)s --sed --vo %(vo)s" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret, do_exit=True)
ces = ret[-1].splitlines()
logging.info("\t\tFound: " + ",\n\t\t\t".join(ces))
checked = []
for ce in ces:
if ce in checked:
continue
rets = []
checked.append(ce)
ce_host = ce.split(":")[0]
logging.info("\tChecking %s", ce_host)
# Check the GridFTP
check_gridftp(ce_host)
if "8443" in ce:
rets.extend(check_cream_ce(ce))
else:
            # lcg-CE
rets.extend(check_lcg_ce(ce))
if not any([i[0] for i in rets]):
logging.info("\t\tJob submission seems OK")
else:
logging.critical("\t\tJob submission has problems, check errors")
def filter_and_join_ldap(data, query):
"""Filter results to only those of query and join
line breaks from ldapsearch."""
got = False
aux = []
for i in data.splitlines():
if i.startswith(query):
got = True
aux.append([i.split(":", 1)[-1].strip()])
elif i.startswith(" ") and got:
aux[-1].append(i.strip())
elif got:
got = False
return ["".join(i) for i in aux]
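# Hedged mini-example (editor addition), kept as a comment so the script's
# runtime behavior is unchanged:
#   filter_and_join_ldap("GlueVOInfoPath: /dpm/exam\n ple/home", "GlueVOInfoPath")
#   -> ['/dpm/example/home']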
def check_ses(bdii, vo):
"""Query the bdii for the available SE for VO, then check them"""
logging.info("Checking Storage Elements")
logging.info("\tQuerying the BDII for the SEs")
cmd = "lcg-info --list-se --bdii %(bdii)s --sed --vo VO:%(vo)s" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret, do_exit=True)
ses = ret[-1].splitlines()
logging.info("\t\tFound: " + ",\n\t\t\t".join(ses))
checked = ["gridce05.ifca.es"]
for se in ses:
if se in checked:
continue
rets = []
checked.append(se)
logging.info("\tChecking %s", se)
cmd = "uberftp %s ls" % se
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
if ret[0] != 0:
print_error(ret)
else:
logging.info("\t\tGridFTP is up")
rets.append(ret)
cmd = "ldapsearch -x -LLL -H ldap://%(bdii)s -b o=grid \
'(&(objectClass=GlueSATop) \
(GlueVOInfoAccessControlBaseRule=VO:%(vo)s) \
(GlueChunkKey=GlueSEUniqueID=%(se)s))' \
GlueVOInfoPath" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
se_paths = filter_and_join_ldap(ret[1], "GlueVOInfoPath")
cmd = "ldapsearch -x -LLL -H ldap://%(bdii)s -b o=grid \
'(&(objectClass=GlueSEControlProtocol) \
(GlueChunkKey=GlueSEUniqueID=%(se)s) \
(GlueSEControlProtocolType=SRM) \
(GlueSEControlProtocolVersion=2.2.0))' \
GlueSEControlProtocolEndpoint" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
endpt = [i.replace("httpg", "srm") for i in filter_and_join_ldap(
ret[1], "GlueSEControlProtocolEndpoint")]
for endpoint in endpt:
for se_path in se_paths:
logging.info("\t\tUploading to %(endpoint)s/%(se_path)s",
locals())
randfile = ''.join([choice(string.letters + string.digits) \
for i in range(15)])
cmd = "lcg-cp -v -b --vo %(vo)s -D srmv2 file:/etc/issue \
%(endpoint)s/\?SFN=%(se_path)s/%(randfile)s" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
if ret[0] == 0:
logging.info("\t\tRemoving uploaded file")
cmd = "lcg-del -l -v -b --vo %(vo)s -D srmv2 \
%(endpoint)s/\?SFN=%(se_path)s/%(randfile)s" % \
locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
if not any([i[0] for i in rets]):
logging.info("\t\tData management seems OK")
else:
logging.critical("\t\tData management has problems, check errors")
def check_bdii(bdii):
"""Check bdii for correctness"""
logging.info("Checking BDII '%s' information (TBD)", bdii)
def get_proxy():
"""Check for proxy validity and return VO"""
ret = commands.getstatusoutput("voms-proxy-info -exists")
print_error(ret, do_exit=True, msg="VOMS: No valid proxy found!")
ret = commands.getstatusoutput("voms-proxy-info -vo")
print_error(ret, do_exit=True)
vo = ret[1]
return vo
def set_logging(level=logging.INFO):
"""Set up logging"""
outfile = "%s.log" % datetime.datetime.now().strftime("%Y%m%d_%H%M%S.%f")
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%m-%d %H:%M",
filename=outfile,
filemode="w")
console = logging.StreamHandler()
console.setLevel(level)
formatter = logging.Formatter('%(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("Detailed output for this run will be on '%s'", outfile)
def main():
"""Main program"""
usage = """%prog [options] <siteBDII host>:<port>"""
parser = OptionParser(usage=usage)
# parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
# default="False", help="Print verbose results")
parser.add_option("-c", "--ces", dest="onlyce", action="store_true",
default=False, help="Check only Computing Elements")
parser.add_option("-s", "--ses", dest="onlyse", action="store_true",
default=False, help="Check only Storage Elements")
(opts, args) = parser.parse_args()
if opts.onlyse and opts.onlyce:
parser.error("-s and -c options are mutually exclusive")
    elif opts.onlyse or opts.onlyce:
all_ = False
else:
all_ = True
if len(args) != 1:
parser.error("Error, you have to specify one (and only one) siteBDII")
set_logging()
vo = get_proxy()
logging.info("Checking with VO '%s'", vo)
bdii = args[-1]
check_bdii(bdii)
if all_ or opts.onlyce:
check_ces(bdii, vo)
if all_ or opts.onlyse:
check_ses(bdii, vo)
if __name__ == "__main__":
main()
sys.exit(0)
|
alvarolopez/egi-certool
|
run_tests.py
|
run_tests.py
|
py
| 10,232 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.error",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "logging.critical",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "string.letters",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "logging.critical",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "commands.getstatusoutput",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "optparse.OptionParser",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 336,
"usage_type": "call"
}
] |
37601085068
|
from sqlwrapper import gensql, dbget, dbput
import json
import datetime
def HOTEL_FD_POST_UPDATE_CheckinGuestArrivals(request):
d = request.json
res_id = d.get("Res_id")
unique_id = d.get("Res_unique_id")
pf_id = d.get("pf_id")
a = {}
RES_Log_Time = datetime.datetime.utcnow()+datetime.timedelta(hours=5, minutes=30)
RES_Log_Time = RES_Log_Time.time().strftime("%H:%M:%S")
print(RES_Log_Time)
RES_Log_Date = datetime.datetime.utcnow().date()
print(RES_Log_Date)
RES_Log_Date = str(RES_Log_Date)
arrival = dbget("select res_arrival, res_adults,res_room from reservation.res_reservation where res_id = '"+res_id+"' and pf_id = '"+pf_id+"' and res_unique_id = '"+unique_id+"'")
arrival = json.loads(arrival)
print(arrival)
print(arrival[0]['res_arrival'],type(arrival[0]['res_arrival']))
today_arrival = (arrival[0]['res_arrival'])
adult = arrival[0]['res_adults']
room = arrival[0]['res_room']
print(room,type(room))
print(today_arrival)
if RES_Log_Date == today_arrival:
p = {}
p['res_id'] = res_id
p['res_unique_id'] = unique_id
sql_value = gensql('select','room_management.rm_queue_room','rm_queue',p)
sql_value = json.loads(sql_value)
if len(sql_value) != 0:
psql = dbput("delete from room_management.rm_queue_room where res_id = '"+res_id+"' and res_unique_id = '"+unique_id+"'")
print(psql)
else:
pass
e = {}
e['Res_id'] = res_id
e['pf_id'] = pf_id
e['res_unique_id'] = unique_id
a['Res_guest_status'] = "checkin"
sql_value = gensql('update','reservation.res_reservation',a,e)
print(sql_value)
res_id = e.get("Res_id")
Emp_Id = '121'
Emp_Firstname = "daisy"
s = {}
s['Emp_Id'] = Emp_Id
s['Emp_Firstname'] = Emp_Firstname
s['RES_Log_Date'] = RES_Log_Date
s['RES_Log_Time'] = RES_Log_Time
s['RES_Action_Type'] = "Checkin a guest"
s['RES_Description'] = "Checked in a guest"
s['Res_id'] = res_id
sql_value = gensql('insert','reservation.res_activity_log',s)
fo_status = "occupied"
res_status = "checkin"
sql_value = dbput("update room_management.rm_room_list set rm_fo_status = '"+fo_status+"',rm_reservation_status = '"+res_status+"',rm_fo_person = "+str(adult)+" where rm_room in ("+str(room)+")")
print(sql_value)
alertcount = json.loads(dbget("select count(*) from reservation.res_alert where res_id = '"+str(res_id)+"' \
and res_unique_id = '"+str(unique_id)+"'"))
print(alertcount)
if alertcount[0]['count'] !=0:
alertvalue = json.loads(dbget("select * from reservation.res_alert where res_id = '"+str(res_id)+"' \
and res_unique_id = '"+str(unique_id)+"'"))
return(json.dumps({'Status': 'Success', 'StatusCode': '200', 'alertvalue':alertvalue,'Return': 'Alert Got Successfully','ReturnCode':'AGS'}, sort_keys=True, indent=4))
else:
return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Record Updated Successfully','ReturnCode':'RUS'}, sort_keys=True, indent=4))
else:
return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Checkin a Today Guest arrivals only','ReturnCode':'CTG'}, sort_keys=True, indent=4))
|
infocuittesting/hotel360-second-version
|
HOTEL_FD_POST_UPDATE_CheckinGuestArrivals.py
|
HOTEL_FD_POST_UPDATE_CheckinGuestArrivals.py
|
py
| 3,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.utcnow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sqlwrapper.dbget",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlwrapper.gensql",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlwrapper.dbput",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlwrapper.gensql",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sqlwrapper.gensql",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sqlwrapper.dbput",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sqlwrapper.dbget",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sqlwrapper.dbget",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 80,
"usage_type": "call"
}
] |
5285437188
|
from ...robot import Robot
from stt_watson.SttWatsonLogListener import SttWatsonLogListener
from recording.Record import Record
from watson_client.Client import Client
from utils.SignalHandler import SignalHandler
import threading
import signal
import os
class WatsonRobot(Robot):
def __init__(self, config, speaker, actions):
super(WatsonRobot, self).__init__(config, speaker, actions)
config['audio-chunk'] = 8000
config['audio-rate'] = 44100
config['channels'] = 1
self.listeners = []
sttWatsonLogListener = SttWatsonLogListener()
self.listeners.append(sttWatsonLogListener)
self.stopper = threading.Event()
self.record = Record(config, self.stopper)
self.workers = [self.record]
self.watsonClient = Client(config)
self.handler = SignalHandler(self.stopper, self.workers)
signal.signal(signal.SIGINT, self.handler)
def name(self):
return 'Watson'
def listen(self):
audioFd, writer = os.pipe()
self.record.setWriter(writer)
self.record.start()
self.watsonClient.setListeners(self.listeners)
self.watsonClient.startStt(audioFd)
|
lowdev/alfred
|
robot/stt/watson/watson.py
|
watson.py
|
py
| 1,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "robot.Robot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "stt_watson.SttWatsonLogListener.SttWatsonLogListener",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "recording.Record.Record",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "watson_client.Client.Client",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.SignalHandler.SignalHandler",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.pipe",
"line_number": 33,
"usage_type": "call"
}
] |
10623814818
|
from asyncio import sleep
from discord import Forbidden
from discord.ext import commands
from Utils.domain_tester import get_domain_embed
from Utils.file_tester import get_file_embed
class DmCommands(commands.Cog, name="Dm Commands"):
"""
Cog including all Commands that are dm only
"""
def __init__(self, b):
self.b = b
        print("Dm Commands successfully added to the bot!")
@commands.command(name="check",
help="Takes given Input and runs a test over it. Only Dm Channels. Accepts URLs",
brief="Checks Input", aliases=["test"])
async def check(self, ctx, *arg):
if ctx.guild is not None:
try:
await ctx.message.delete()
await ctx.author.send("Only DM Available")
except Forbidden:
                await ctx.reply("Only DM Available! Warning! The above message might be malicious. "
                                "Don't click the file/url until you trust it! (for some reason I can't delete it)")
return
if arg is None and not ctx.message.attachments:
            await ctx.send("Missing a URL")
return
if ctx.message.attachments:
await ctx.reply("Starting testing of files. This takes some time")
for i in ctx.message.attachments:
msgn = await ctx.reply("Stand by...")
await msgn.edit(content=None, embed=await get_file_embed(i, ctx))
await sleep(30)
if len(arg) > 0:
domain = arg[0]
await ctx.reply(embed=get_domain_embed(domain, ctx))
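def setup(b):
    # Hedged wiring sketch (editor addition): the classic discord.py 1.x entry
    # point for loading this cog as an extension; discord.py 2.x would use an
    # async setup coroutine instead.
    b.add_cog(DmCommands(b))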
|
veni-vidi-code/VirusTotalDiscordBot
|
Cogs/DmCommands.py
|
DmCommands.py
|
py
| 1,634 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discord.Forbidden",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "Utils.file_tester.get_file_embed",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Utils.domain_tester.get_domain_embed",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 19,
"usage_type": "name"
}
] |
40071040492
|
from collections import Counter
class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
        # anagram: a string with the same character histogram
res = []
lp = len(p) -1
ls = len(s)
pCount = Counter(p)
        mCount = Counter(s[:lp]) # counts of s[0 .. lp-1], the first len(p)-1 chars
for i in range(lp, ls):
mCount[s[i]]+=1
if mCount == pCount:
res.append(i-lp)
mCount[s[i-lp]]-=1
if mCount[s[i-lp]] == 0:
del mCount[s[i-lp]]
return res
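if __name__ == '__main__':
    # Hedged usage sketch (editor addition): the canonical example from the
    # problem statement; expected output [0, 6].
    print(Solution().findAnagrams('cbaebabacd', 'abc'))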
|
lucy9215/leetcode-python
|
438_findAllAnagramsInAString.py
|
438_findAllAnagramsInAString.py
|
py
| 619 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.Counter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 17,
"usage_type": "call"
}
] |
23932735079
|
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
from util import get_data
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torch.optim import Adam
from variables import *
from matplotlib import pyplot as plt
class MnistRegression(object):
def __init__(self, train_data, test_data):
self.train_data = train_data
self.test_data = test_data
self.model = self.MnistModel()
class MnistModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(
in_features=input_shape,
out_features=output_shape
)
def forward(self, x):
x = x.reshape(-1, input_shape)
x = self.linear(x)
x = F.log_softmax(x, dim=1)
return x
def loss_fnc(self, Ypred, Y):
return F.cross_entropy(Ypred, Y)
def optimizer(self, learning_rate=0.1):
return Adam(self.model.parameters(), lr=learning_rate)
def evaluate(self, Y, Ypred):
P = torch.argmax(Ypred, dim=1).numpy()
Y = Y.numpy()
return np.sum(Y == P)
def train(self, num_epochs=100):
opt = self.optimizer()
total_train_loss = []
total_test_loss = []
for i in range(1,num_epochs+1):
n_correct = 0
n_total = 0
for X, Y in self.train_data:
Y = Y.to(dtype=torch.int64)
Ypred = self.model(X)
loss = self.loss_fnc(Ypred, Y)
loss.backward() # calculate gradients
total_train_loss.append(loss.item())
n_correct += self.evaluate(Y, Ypred)
n_total += batch_size
                opt.step() # update parameters using calculated gradients
                opt.zero_grad() # zero gradients to avoid accumulating them across batches
train_acc = round(n_correct/n_total, 3)
with torch.no_grad():
n_correct = 0
n_total = 0
for X, Y in self.test_data:
Y = Y.to(dtype=torch.int64)
Ypred = self.model(X)
loss = self.loss_fnc(Ypred, Y)
total_test_loss.append(loss.item())
n_correct += self.evaluate(Y, Ypred)
n_total += batch_size
test_acc = round(n_correct/n_total, 3)
print("Train Acc : {} Test Acc : {}".format(train_acc, test_acc))
plt.plot(total_train_loss, label='Train loss')
plt.plot(total_test_loss , label='Test loss')
plt.legend()
plt.show()
if __name__ == "__main__":
train_data, test_data = get_data()
model = MnistRegression(train_data, test_data)
model.train()
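# Hedged shape-check sketch (editor addition), kept as a comment because
# `input_shape`/`output_shape` come from the project-specific `variables` module:
#   m = MnistRegression.MnistModel()
#   print(m(torch.randn(4, 1, 28, 28)).shape)  # -> (4, output_shape)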
|
1zuu/Pytroch-Examples
|
Mnist/mnist_regression.py
|
mnist_regression.py
|
py
| 2,842 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.int64",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.int64",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "util.get_data",
"line_number": 87,
"usage_type": "call"
}
] |
650532287
|
#! /bin/python
import os
import sys
import json
import luigi
import numpy as np
import nifty.tools as nt
import nifty
import nifty.graph.rag as nrag
from vigra.analysis import relabelConsecutive
from elf.segmentation.clustering import mala_clustering, agglomerative_clustering
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Agglomerate Tasks
#
# TODO it would be nice to be able to change the block shape compared to ws task
# so that we can agglomerate block boundaries.
# However, I am not sure how this interacts with the id-offsets, so haven't
# implemented this yet.
class AgglomerateBase(luigi.Task):
""" Agglomerate base class
"""
task_name = 'agglomerate'
src_file = os.path.abspath(__file__)
# input and output volumes
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
have_ignore_label = luigi.BoolParameter()
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
@staticmethod
def default_task_config():
# parameter:
# use_mala_agglomeration: whether to use thresholding based mala agglomeration
# or element number based agglomerative clustering
# threshold: threshold up to which to agglomerate (mala) or fraction of nodes
# after agglomeration (agglomerative clustering)
# size_regularizer: size regularizer in agglomerative clustering (wardness)
# invert_inputs: do we need to invert the inputs?
# offsets: offsets for affinities, set to None for boundaries
config = LocalTask.default_task_config()
config.update({'use_mala_agglomeration': True, 'threshold': .9,
'size_regularizer': .5, 'invert_inputs': False,
'offsets': None})
return config
def clean_up_for_retry(self, block_list):
super().clean_up_for_retry(block_list)
# TODO remove any output of failed blocks because it might be corrupted
def run_impl(self):
# get the global config and init configs
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
self.init(shebang)
# get shape and make block config
shape = vu.get_shape(self.input_path, self.input_key)
if len(shape) == 4:
shape = shape[1:]
# load the agglomerate config
config = self.get_task_config()
# update the config with input and output paths and keys
# as well as block shape
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'output_path': self.output_path, 'output_key': self.output_key,
'block_shape': block_shape, 'have_ignore_label': self.have_ignore_label})
if self.n_retries == 0:
block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)
else:
block_list = self.block_list
self.clean_up_for_retry(block_list)
self._write_log('scheduling %i blocks to be processed' % len(block_list))
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class AgglomerateLocal(AgglomerateBase, LocalTask):
"""
Agglomerate on local machine
"""
pass
class AgglomerateSlurm(AgglomerateBase, SlurmTask):
"""
Agglomerate on slurm cluster
"""
pass
class AgglomerateLSF(AgglomerateBase, LSFTask):
"""
Agglomerate on lsf cluster
"""
pass
#
# Implementation
#
def _agglomerate_block(blocking, block_id, ds_in, ds_out, config):
fu.log("start processing block %i" % block_id)
have_ignore_label = config['have_ignore_label']
use_mala_agglomeration = config.get('use_mala_agglomeration', True)
threshold = config.get('threshold', 0.9)
size_regularizer = config.get('size_regularizer', .5)
invert_inputs = config.get('invert_inputs', False)
offsets = config.get('offsets', None)
bb = vu.block_to_bb(blocking.getBlock(block_id))
# load the segmentation / output
seg = ds_out[bb]
# check if this block is empty
if np.sum(seg) == 0:
fu.log_block_success(block_id)
return
# load the input data
ndim_in = ds_in.ndim
if ndim_in == 4:
assert offsets is not None
assert len(offsets) <= ds_in.shape[0]
bb_in = (slice(0, len(offsets)),) + bb
input_ = vu.normalize(ds_in[bb_in])
else:
assert offsets is None
input_ = vu.normalize(ds_in[bb])
if invert_inputs:
input_ = 1. - input_
id_offset = int(seg[seg != 0].min())
# relabel the segmentation
_, max_id, _ = relabelConsecutive(seg, out=seg, keep_zeros=True, start_label=1)
seg = seg.astype('uint32')
# construct rag
rag = nrag.gridRag(seg, numberOfLabels=max_id + 1,
numberOfThreads=1)
# extract edge features
if offsets is None:
edge_features = nrag.accumulateEdgeMeanAndLength(rag, input_, numberOfThreads=1)
else:
edge_features = nrag.accumulateAffinityStandartFeatures(rag, input_, offsets,
numberOfThreads=1)
edge_features, edge_sizes = edge_features[:, 0], edge_features[:, -1]
uv_ids = rag.uvIds()
# set edges to ignore label to be maximally repulsive
if have_ignore_label:
ignore_mask = (uv_ids == 0).any(axis=1)
edge_features[ignore_mask] = 1
# build undirected graph
n_nodes = rag.numberOfNodes
graph = nifty.graph.undirectedGraph(n_nodes)
graph.insertEdges(uv_ids)
if use_mala_agglomeration:
node_labels = mala_clustering(graph, edge_features,
edge_sizes, threshold)
else:
node_ids, node_sizes = np.unique(seg, return_counts=True)
if node_ids[0] != 0:
node_sizes = np.concatenate([np.array([0]), node_sizes])
n_stop = int(threshold * n_nodes)
node_labels = agglomerative_clustering(graph, edge_features,
node_sizes, edge_sizes,
n_stop, size_regularizer)
# relabel the clustering result so the node labels are consecutive
node_labels, max_id, _ = relabelConsecutive(node_labels, start_label=1, keep_zeros=True)
fu.log("reduced number of labels from %i to %i" % (n_nodes, max_id + 1))
# project node labels back to segmentation
seg = nrag.projectScalarNodeDataToPixels(rag, node_labels, numberOfThreads=1)
seg = seg.astype('uint64')
# add offset back to segmentation
seg[seg != 0] += id_offset
ds_out[bb] = seg
# log block success
fu.log_block_success(block_id)
def agglomerate(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, 'r') as f:
config = json.load(f)
# read the input config
input_path = config['input_path']
input_key = config['input_key']
shape = list(vu.get_shape(input_path, input_key))
if len(shape) == 4:
shape = shape[1:]
block_shape = list(config['block_shape'])
block_list = config['block_list']
# read the output config
output_path = config['output_path']
output_key = config['output_key']
# get the blocking
blocking = nt.blocking([0, 0, 0], shape, block_shape)
# process the blocks assigned to this job
with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
ds_in = f_in[input_key]
assert ds_in.ndim in (3, 4)
ds_out = f_out[output_key]
assert ds_out.ndim == 3
for block_id in block_list:
_agglomerate_block(blocking, block_id, ds_in, ds_out, config)
# log success
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
agglomerate(job_id, path)
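# Hedged sketch of the per-job config JSON that agglomerate() reads; every
# path, key and value below is hypothetical and only illustrates the fields
# the function accesses above.
# example_config = {
#     'input_path': '/data/affs.n5', 'input_key': 'affinities',
#     'output_path': '/data/seg.n5', 'output_key': 'watershed',
#     'block_shape': [50, 512, 512], 'block_list': [0, 1, 2],
#     'have_ignore_label': False, 'use_mala_agglomeration': True,
#     'threshold': 0.9, 'size_regularizer': 0.5,
#     'invert_inputs': False, 'offsets': None
# }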
|
constantinpape/cluster_tools
|
cluster_tools/watershed/agglomerate.py
|
agglomerate.py
|
py
| 8,389 |
python
|
en
|
code
| 32 |
github-code
|
6
|
[
{
"api_name": "luigi.Task",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "luigi.TaskParameter",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask.default_task_config",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.get_shape",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.blocks_in_volume",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.SlurmTask",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.LSFTask",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.block_to_bb",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log_block_success",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.normalize",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.normalize",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "vigra.analysis.relabelConsecutive",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "nifty.graph.rag.gridRag",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "nifty.graph.rag",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "nifty.graph.rag.accumulateEdgeMeanAndLength",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "nifty.graph.rag",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "nifty.graph.rag.accumulateAffinityStandartFeatures",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "nifty.graph.rag",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "nifty.graph.undirectedGraph",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "nifty.graph",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "elf.segmentation.clustering.mala_clustering",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "elf.segmentation.clustering.agglomerative_clustering",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "vigra.analysis.relabelConsecutive",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "nifty.graph.rag.projectScalarNodeDataToPixels",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "nifty.graph.rag",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_block_success",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils.get_shape",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "nifty.tools.blocking",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "nifty.tools",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.file_reader",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_job_success",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 256,
"usage_type": "attribute"
}
] |
17424247870
|
from setuptools import setup
import dorm
with open("README.md", "r") as readme:
long_description = readme.read()
setup(
name="dorm",
version=dorm.version,
description="A tiny SQLite ORM for Python.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Dan Watson",
author_email="[email protected]",
url="https://github.com/dcwatson/dorm",
license="MIT",
py_modules=["dorm"],
entry_points={"console_scripts": ["dorm=dorm:main"]},
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Database",
],
)
|
dcwatson/dorm
|
setup.py
|
setup.py
|
py
| 804 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dorm.version",
"line_number": 10,
"usage_type": "attribute"
}
] |
12814211947
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0002_userinfo_grade'),
]
operations = [
migrations.CreateModel(
name='Events',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=80)),
('date', models.DateTimeField()),
('cost', models.DecimalField(max_digits=6, decimal_places=2)),
],
),
migrations.AddField(
model_name='userinfo',
name='balance',
field=models.DecimalField(default=0.0, max_digits=6, decimal_places=2),
preserve_default=False,
),
]
|
asp3/StudentAccounts
|
student/migrations/0003_auto_20151025_1630.py
|
0003_auto_20151025_1630.py
|
py
| 906 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
}
] |
42483900439
|
import pandas as pd
import networkx as nx
import json
hierarchy_df = pd.read_csv('hierarchy_table.csv', index_col=0, dtype=str)
graph_network = nx.from_pandas_edgelist(
hierarchy_df,
source='Parent',
target='Child',
)
json_graph = json.dumps(graph_network, default=nx.node_link_data)
# write the JSON string to disk
with open('json_graph.json', 'w') as outfile:
outfile.write(json_graph)
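# Round-trip sketch added for illustration: node_link_data has a matching
# decoder, node_link_graph, which rebuilds the graph from the same JSON.
with open('json_graph.json') as infile:
    restored_graph = nx.node_link_graph(json.load(infile))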
|
diegopintossi/graph_network
|
graph_network.py
|
graph_network.py
|
py
| 398 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "networkx.from_pandas_edgelist",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "networkx.node_link_data",
"line_number": 13,
"usage_type": "attribute"
}
] |
35426908825
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
a=0.7
b=0.6
X = np.arange(0,2.4,0.2)
Y = np.arange(0,2.4,0.2)
m,p = np.meshgrid(X,Y)
mdot = np.divide(1,1+np.square(p))- np.multiply(b,m)
pdot = np.subtract(m,np.multiply(a,p))
fig, ax = plt.subplots()
q=ax.quiver(p,m,pdot,mdot)
ax.quiverkey(q,X=0.3,Y=2.4, U=5,
label='Quiver key, length = 5', labelpos='E')
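# the two curves below are the nullclines: pdot = 0 along m = a*p and
# mdot = 0 along m = 1/(b*(1+p**2)); their intersection is the fixed point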
ax.plot(p,np.multiply(a,p))
ax.plot(p, np.divide( 1, np.multiply(b,(1+np.square(p)))))
ax.set_xlabel('p')
ax.set_ylabel('m')
def dydt_autoinhib(t,y,a,b):
y1,y2=y
dy1 = 1/(1+y2**2)-b*y1
dy2 = y1-a*y2
return (dy1,dy2)
# lambda trick so we can pass the right function into the solver
dydt_params = lambda t,y: dydt_autoinhib(t,y,a,b)
solution1 = scipy.integrate.solve_ivp(dydt_params, t_span=(0,20),y0=(2,2), method='RK45')
t1_ode45 = solution1.t
m1_ode45 = solution1.y[0]
p1_ode45 = solution1.y[1]
ax.plot(p1_ode45,m1_ode45)
plt.show()
|
martinaoliver/GTA
|
ssb/m1a/numeric/Practical_full_solutions_jupyter/python_script_solutions/phase_portrait_autorinhib_20190926.py
|
phase_portrait_autorinhib_20190926.py
|
py
| 991 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.arange",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.subtract",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.multiply",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate.solve_ivp",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
}
] |
34862433797
|
from django import template
from django.urls import NoReverseMatch, reverse
from utilities.utils import get_viewname, prepare_cloned_fields
register = template.Library()
#
# Instance buttons
#
@register.inclusion_tag('buttons/clone.html')
def clone_button(instance):
url = reverse(get_viewname(instance, 'add'))
# Populate cloned field values
param_string = prepare_cloned_fields(instance).urlencode()
if param_string:
url = f'{url}?{param_string}'
return {
'url': url,
}
@register.inclusion_tag('buttons/edit.html')
def edit_button(instance):
viewname = get_viewname(instance, 'edit')
url = reverse(viewname, kwargs={'pk': instance.pk})
return {
'url': url,
}
@register.inclusion_tag('buttons/delete.html')
def delete_button(instance):
viewname = get_viewname(instance, 'delete')
url = reverse(viewname, kwargs={'pk': instance.pk})
return {
'url': url,
}
#
# List buttons
#
@register.inclusion_tag('buttons/add.html')
def add_button(model, action='add'):
try:
url = reverse(get_viewname(model, action))
except NoReverseMatch:
url = None
return {
'url': url,
}
@register.inclusion_tag('buttons/import.html')
def import_button(model, action='import'):
try:
url = reverse(get_viewname(model, action))
except NoReverseMatch:
url = None
return {
'url': url,
}
@register.inclusion_tag('buttons/bulk_edit.html')
def bulk_edit_button(model, action='bulk_edit', query_params=None):
try:
url = reverse(get_viewname(model, action))
if query_params:
url = f'{url}?{query_params.urlencode()}'
except NoReverseMatch:
url = None
return {
'url': url,
}
@register.inclusion_tag('buttons/bulk_delete.html')
def bulk_delete_button(model, action='bulk_delete', query_params=None):
try:
url = reverse(get_viewname(model, action))
if query_params:
url = f'{url}?{query_params.urlencode()}'
except NoReverseMatch:
url = None
return {
'url': url,
}
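# Illustrative usage sketch (not part of the original module): in a template,
# load this tag library and render the buttons; the template below is
# hypothetical.
#
#   {% load buttons %}
#   {% edit_button object %}
#   {% delete_button object %}
#   {% add_button model %}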
|
Status-Page/Status-Page
|
statuspage/utilities/templatetags/buttons.py
|
buttons.py
|
py
| 2,140 |
python
|
en
|
code
| 45 |
github-code
|
6
|
[
{
"api_name": "django.template.Library",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utilities.utils.prepare_cloned_fields",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.urls.NoReverseMatch",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.urls.NoReverseMatch",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.urls.NoReverseMatch",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "django.urls.NoReverseMatch",
"line_number": 95,
"usage_type": "name"
}
] |
34958652342
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def normalize_l2(x):
"""
Expects x.shape == [N, C, H, W]
"""
norm = torch.norm(x.view(x.size(0), -1), p=2, dim=1)
norm = norm.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
return x / norm
def pair_cos_dist(x, y):
cos = nn.CosineSimilarity(dim=-1, eps=1e-6)
c = torch.clamp(1 - cos(x, y), min=0)
return c
class Feature_Targets(nn.Module):
def __init__(self, epsilon, num_steps, step_size, data_min = -1.0, data_max = 1.0, grad_sign=True, random_start = True):
super().__init__()
self.epsilon = epsilon
self.num_steps = num_steps
self.step_size = step_size
self.grad_sign = grad_sign
self.data_min = data_min
self.data_max = data_max
self.random_start = random_start
def forward(self, model, bx, by, target_bx):
"""
:param model: the classifier's forward method
:param bx: batch of images
:param by: true labels
:param target_bx: batch of target images whose features guide the attack
:return: perturbed batch of images
"""
adv_bx = bx.detach().clone()
target = target_bx.detach().clone()
if self.random_start:
adv_bx += torch.zeros_like(adv_bx).uniform_(-self.epsilon, self.epsilon)
adv_bx = adv_bx.clamp(self.data_min, self.data_max)
target_feature, target_logits = model(target)
for i in range(self.num_steps):
adv_bx.requires_grad_()
with torch.enable_grad():
feature, logits = model(adv_bx)
loss = pair_cos_dist(feature, target_feature).mean()
grad = torch.autograd.grad(loss, adv_bx, only_inputs=True)[0]
if self.grad_sign:
adv_bx = adv_bx.detach() + self.step_size * torch.sign(grad.detach())
else:
grad = normalize_l2(grad.detach())
adv_bx = adv_bx.detach() + self.step_size * grad
adv_bx = torch.min(torch.max(adv_bx, bx - self.epsilon), bx + self.epsilon).clamp(self.data_min, self.data_max)
return adv_bx
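# Minimal usage sketch (illustrative): `model` must return a (feature, logits)
# tuple as the forward method above assumes; all tensors are hypothetical.
#
#   attack = Feature_Targets(epsilon=8/255, num_steps=10, step_size=2/255,
#                            data_min=0.0, data_max=1.0)
#   adv_bx = attack(model, images, labels, target_images)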
|
arthur-qiu/adv_vis
|
attack_methods/feature_targets.py
|
feature_targets.py
|
py
| 2,092 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.norm",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn.CosineSimilarity",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.clamp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.enable_grad",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.autograd.grad",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "torch.sign",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 57,
"usage_type": "call"
}
] |
7002507231
|
import json
from .db_utils import conn as db_conn
from enum import Enum
class NotificationType(Enum):
questionEndorse = 'question_endorsed'
answerEndorse = 'answer_endorsed'
answerUser = 'answer_user'
answerSaved = 'answer_saved'
NOTIFICATION_TEXT_BY_TYPE = {
NotificationType.questionEndorse: "endorsed your question",
NotificationType.answerEndorse: "endorsed your answer",
NotificationType.answerUser: "answered your question",
NotificationType.answerSaved: "answered a question you saved"
}
DATA_FIELDS_BY_TYPE = {
NotificationType.questionEndorse: set(['question_id']),
NotificationType.answerEndorse: set(['question_id', 'answer_id']),
NotificationType.answerUser: set(['question_id', 'answer_id']),
NotificationType.answerSaved: set(['question_id', 'answer_id'])
}
def push_notification(user_id, notif_type, data):
cur = db_conn.cursor()
if set(data.keys()) != DATA_FIELDS_BY_TYPE[notif_type]:
raise ValueError("Invalid data fields {}; expected {}".format(set(data.keys()), DATA_FIELDS_BY_TYPE[notif_type]))
cur.execute("INSERT INTO notifications (user_id, type, data) VALUES (%s, %s, %s)", (user_id, notif_type.value, json.dumps(data)))
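# Illustrative usage sketch (the ids are hypothetical):
#
#   push_notification(
#       user_id=42,
#       notif_type=NotificationType.answerUser,
#       data={'question_id': 7, 'answer_id': 13},
#   )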
|
minupalaniappan/gradfire
|
daviscoursesearch/flaskapp/utils/notif_utils.py
|
notif_utils.py
|
py
| 1,239 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "enum.Enum",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "db_utils.conn.cursor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "db_utils.conn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 29,
"usage_type": "call"
}
] |
24706158570
|
#!/usr/bin/python2.4
import base64
import hmac
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import hashlib
class PlacesHandler(webapp.RequestHandler):
"""Handles requests to /places."""
def post(self):
"""Handles posts."""
self.response.headers['Content-Type'] = 'application/json'
action = self.request.get('action')
CLIENT_ID = None
PRIVATE_KEY = None
# These are required to work
if not CLIENT_ID or not PRIVATE_KEY:
self.response.out.write('{}')
return
places_url = None
if action == 'search':
location = self.request.get('location')
radius = self.request.get('radius')
url_to_sign = ('/maps/api/place/search/json?location=%s&radius=%s&client='
'%s&sensor=true') % (location, radius, CLIENT_ID)
decoded_key = base64.urlsafe_b64decode(PRIVATE_KEY)
signature = hmac.new(decoded_key, url_to_sign, hashlib.sha1)
encoded_signature = base64.urlsafe_b64encode(signature.digest())
places_url = ('http://maps.google.com/maps/api/place/search/json?'
'location=%s&radius=%s&client=%s&sensor=true&'
'signature=%s') % (location, radius, CLIENT_ID,
encoded_signature)
if places_url:
self.response.out.write(urlfetch.fetch(places_url).content)
if __name__ == '__main__':
application = webapp.WSGIApplication([('/places[/]?', PlacesHandler)],
debug=True)
run_wsgi_app(application)
|
bilal-karim/gmaps-samples-v3
|
devfest-2010/whereiscoffee/places.py
|
places.py
|
py
| 1,627 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "google.appengine.ext.webapp.RequestHandler",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.webapp",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "base64.urlsafe_b64decode",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "hmac.new",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "base64.urlsafe_b64encode",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch.fetch",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.webapp.WSGIApplication",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.webapp",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.webapp.util.run_wsgi_app",
"line_number": 53,
"usage_type": "call"
}
] |
11552601944
|
import csv
import getopt, sys
from moviepy.editor import VideoFileClip, concatenate_videoclips
folder = '/Videos/'
# file name of the video and config file
event = '20221002 PREECNLBVA'
output_file = None # Create a file for each segment
#output_file = 'check' # Compile the clips with a check flag
output_file = 'highlight' # Compile the clips with a highlight flag
#output_file = '20221002 EYF Segments.mp4' # compile all segments in the config file
# --input
mp4_file = folder + '/' + event + '.mp4'
# --config
config_file = folder + '/' + event + '.csv'
# --output
def return_filename(desc, prefix, suffix):
return str(prefix or '') + str(desc or '') + str(suffix or '') + '.mp4'
def main():
global folder
global event
global output_file
global mp4_file
global config_file
argumentList = sys.argv[1:]
options = "i:c:o:"
long_options = ["input=","config=","output="]
try:
arguments, values = getopt.getopt(argumentList, options, long_options)
for currentArgument, currentValue in arguments:
if currentArgument in ("-i", "--input"):
mp4_file = currentValue
# print ("File: ", currentValue)
if currentArgument in ("-o", "--output"):
output_file = currentValue
if currentArgument in ("-c", "--config"):
config_file = currentValue
# print ("Config: ", currentValue)
except getopt.error as err:
print (str(err))
if mp4_file is None:
# If mp4 file is not provided, use config file name
mp4_file = config_file.replace(".csv", ".mp4")
# Read the config file
rows = csv.DictReader(open(config_file))
first = True
for row in rows:
if row['source'] == 'video':
minutes = int(row['min'])  # renamed to avoid shadowing the builtin min
sec = int(row['sec'])
start_seconds = minutes * 60 + sec
length_in_sec = int(row['length_in_sec'])
end_seconds = start_seconds + length_in_sec
if start_seconds and end_seconds:
if output_file is None:
# MODE = Split the segments into separate files
clip = VideoFileClip(mp4_file).subclip(start_seconds, end_seconds)
file_name = return_filename(row['desc'], row['filename_prefix'], row['filename_suffix'])
clip.write_videofile(file_name)
else:
# MODE = Concatenate the segments into a single file
if (output_file == 'check' and row['filename_suffix'] == 'check') or \
(output_file == 'highlight' and row['filename_suffix'] == 'highlight') or \
(output_file != 'check' and output_file != 'highlight'):
# Save only if check or highlight or if all clips
if first:
final_clip = VideoFileClip(mp4_file).subclip(start_seconds, end_seconds)
first = False
else:
clip = VideoFileClip(mp4_file).subclip(start_seconds, end_seconds)
final_clip = concatenate_videoclips([final_clip,clip])
else:
print(f'Error with config settings for: {row}')
if output_file:
# Save the final clip
if output_file == 'check':
output_file = event + ' check.mp4'
elif output_file == 'highlight':
output_file = event + ' highlight.mp4'
final_clip.write_videofile(output_file)
if __name__ == "__main__":
main()
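# Example invocation (illustrative; the file names are hypothetical):
#
#   python create_highlight_videos.py --input "game.mp4" \
#       --config "game.csv" --output highlight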
|
jordiyeh/video-cut
|
create_highlight_videos.py
|
create_highlight_videos.py
|
py
| 3,744 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "getopt.getopt",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "getopt.error",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "csv.DictReader",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "moviepy.editor.VideoFileClip",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "moviepy.editor.VideoFileClip",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "moviepy.editor.VideoFileClip",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "moviepy.editor.concatenate_videoclips",
"line_number": 88,
"usage_type": "call"
}
] |
71270407548
|
"""
This question is asked by Apple. Given two binary strings
(strings containing only 1s and 0s) return their sum (also as a binary string).
Note: neither binary string will contain leading 0s unless the string itself is 0
Ex: Given the following binary strings...
"100" + "1", return "101"
"11" + "1", return "100"
"1" + "0", return "1"
"""
from collections import deque
def addBinary(number1:str, number2: str) -> str:
# Time: O(n) -> where "n" is the number of bits of the final sum
# Space: O(n) or O(1) if we don't consider the output
n1Pointer = len(number1)-1
n2Pointer = len(number2)-1
output = deque()
carry = 0
while n1Pointer >= 0 or n2Pointer >= 0:
n1Digit = 0 if n1Pointer < 0 else int(number1[n1Pointer])
n2Digit = 0 if n2Pointer < 0 else int(number2[n2Pointer])
currDigitSum = n1Digit + n2Digit + carry
carry = 1 if currDigitSum >= 2 else 0
if currDigitSum == 2:
currDigitSum = 0
elif currDigitSum == 3:
currDigitSum = 1
output.appendleft(str(currDigitSum)) # O(1)
n1Pointer -= 1
n2Pointer -= 1
if carry:
output.appendleft(str(carry)) # O(1)
return "".join(output) # O(n)
assert addBinary("100", "1") == "101"
assert addBinary("11", "1") == "100"
assert addBinary("1", "0") == "1"
print("Passed all testes!")
|
lucasbivar/coding-interviews
|
the-daily-byte/week_01/day_05_add_binary.py
|
day_05_add_binary.py
|
py
| 1,314 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 21,
"usage_type": "call"
}
] |
4234376251
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import minimize
from sklearn.metrics import mean_squared_error as mse
def SIR():
def prediction(beta, gamma, population, i0, r0, d0, time_predict):
def SIR_model(y, t, beta, gamma, population):
s, i, r = y
dSdt = -beta * s * i / population
dIdt = beta * s * i / population - gamma * i
dRdt = gamma * i
return [dSdt, dIdt, dRdt]
s0 = population - i0 - r0 - d0
y_0 = [s0, i0, r0]
sol = odeint(SIR_model, y_0, time_predict, args=(beta, gamma, population))
sol = np.transpose(sol)
return sol
def error_model(point, cases, population, infected_0, recovered_0, dead_0):
beta, gamma = point
def SIR_model(y, t, beta, gamma, population):
s, i, r = y
dSdt = -beta * s * i / population
dIdt = beta * s * i / population - gamma * i
dRdt = gamma * i
return [dSdt, dIdt, dRdt]
suscepted_0 = population - infected_0 - recovered_0 - dead_0
y0 = [suscepted_0, infected_0, recovered_0]
sol = odeint(SIR_model, y0, np.arange(1, len(cases) + 1), args=(beta, gamma, population))
sol = np.transpose(sol)
error = mse(cases, sol[1])
return error
def trainer(cases, population, infected_0, recovered_0, dead_0):
optimal = minimize(error_model, np.array([0.001, 0.001]), args=(cases, population, infected_0, recovered_0, dead_0),
method='L-BFGS-B', bounds=[(0.000001, 1.0), (0.000001, 1.0)])
beta, gamma = optimal.x
return beta, gamma
def plot(s, i, r, initials_state, city_name, period_predict, time_predict, population):
plt.figure()
plt.title('Projeção do total de habitantes suscetíveis, infectados e recuperados em ' + city_name + '/' +
initials_state[0], fontsize=20)
plt.xlabel('Meses', fontsize=15)
plt.xticks(np.linspace(15, period_predict + 15, 7)[:-1], ('Abril', 'Maio', 'Junho', 'Julho', 'Agosto', 'Setembro'))
plt.ylabel('Número de habitantes', fontsize=15)
plt.yticks(np.arange(0, population, step=population * 0.03))
plt.plot(time_predict, s, label='Suscetíveis')
plt.plot(time_predict, i, label='Infectados')
plt.plot(time_predict, r, label='Recuperados')
plt.legend(loc='center left', bbox_to_anchor=(1.002, 0.7), fontsize=14)
plt.rcParams["figure.figsize"] = (20, 10)
plt.show()
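# Hedged usage sketch: the helpers above are nested inside SIR() and would
# have to be exposed at module level first; the case counts are made up.
#
#   cases = np.array([10, 14, 20, 29, 41, 58, 80, 110])
#   beta, gamma = trainer(cases, population=100000,
#                         infected_0=10, recovered_0=0, dead_0=0)
#   s, i, r = prediction(beta, gamma, 100000, 10, 0, 0, np.arange(1, 181))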
|
FBWeimer/Plague-Doctor
|
Plague Doctor/plaguedoctor/__init__.py
|
__init__.py
|
py
| 2,604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "scipy.integrate.odeint",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.odeint",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.minimize",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
}
] |
32111228276
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import fftpack, signal
# fft that removes only the high-frequency components
# def get_filtered_data(in_data, filter_value=0.004):
def del_high_freq(in_data, filter_value=0.004):
"""
:param in_data: target time-series signal
:param filter_value: frequency components above filter_value are removed
:return: fft result
"""
sig_fft = fftpack.fft(in_data)
sample_freq = fftpack.fftfreq(in_data.size)
high_freq_fft = sig_fft.copy()
high_freq_fft[np.abs(sample_freq) > filter_value] = 0
filtered_data = fftpack.ifft(high_freq_fft)
return filtered_data
# fft that removes both high- and low-frequency components
def del_high_and_low_freq(in_data, high_filter_value, low_filter_value):
"""
:param in_data: target time-series signal
:param high_filter_value: upper frequency bound; only components between low_filter_value and high_filter_value are kept
:param low_filter_value: lower frequency bound
:return: fft result
"""
sig_fft = fftpack.fft(in_data)
sample_freq = fftpack.fftfreq(in_data.size)
high_freq_fft = sig_fft.copy()
low_value1 = np.max(high_freq_fft)
high_freq_fft[np.abs(sample_freq) > high_filter_value] = 0
high_freq_fft[np.abs(sample_freq) < low_filter_value] = 0
low_value2 = np.max(high_freq_fft)
filtered_data = fftpack.ifft(high_freq_fft)
return filtered_data, low_value1, low_value2
def fft(pupil_list, minu=None, quar=None):
global section_frames, time
# remove 0 and -1 entries from the data
while 0 in pupil_list:
pupil_list.remove(0)
while -1 in pupil_list:
pupil_list.remove(-1)
if minu is not None:
time = minu * 1800
section_frames = len(pupil_list) // time
if quar is not None:
time = len(pupil_list) // quar
section_frames = quar
y = np.array(pupil_list)
# fft
# filtered_sig = del_high_freq(y, filter_value=0.005) # high-frequency filtering
filtered_sig, _, _ = del_high_and_low_freq(y, 0.0048, 0.0035) # low- and high-frequency filtering
filtered_sig = filtered_sig.astype(float) # take the real part; np.float is removed in modern NumPy
# zero-crossing point
zero_crossings = np.where(np.diff(np.sign(np.diff(filtered_sig))))[0]
zero_crossings = np.insert(zero_crossings, 0, 0)
zero_crossings = np.append(zero_crossings, len(filtered_sig) - 1)
# compute change rates
change_rates_list = [[] for _ in range(section_frames)]
for section in range(section_frames):
# take the desired section based on the zero-crossing points
section_zero_crossing = zero_crossings[np.where(zero_crossings <= (section + 1) * time)]
section_zero_crossing = section_zero_crossing[np.where(section * time < section_zero_crossing)]
# compute the change rate
for j in range(len(section_zero_crossing) - 1):
change_rate = abs((filtered_sig[section_zero_crossing[j + 1]] - filtered_sig[section_zero_crossing[j]]) / (
section_zero_crossing[j + 1] - section_zero_crossing[j]))
change_rates_list[section].append(change_rate)
return filtered_sig, zero_crossings, section_frames, change_rates_list
# plot the fft-filtered result
def draw_fft_graph(y, filtered_sig, zero_crossings, section_frames, savepath, minu=None, quar=None):
global time
x = np.arange(0, len(y))
if minu is not None:
time = minu * 1800
section_frames = len(y) // time
if quar is not None:
time = len(y) // quar
section_frames = quar
fig = plt.figure(dpi=150)
# plt.figure(figsize=(6, 5))
plt.rcParams["font.family"] = 'Malgun Gothic'
plt.figure(figsize=(14, 6))
plt.plot(x, y, label='Original signal')
plt.plot(x, filtered_sig, linewidth=2, label='Filtered signal')
# plt.plot(zero_crossings, filtered_sig[zero_crossings], marker='o', color='red', linestyle='--')
plt.legend(loc='upper right')
# mark the section boundaries
for section in range(section_frames):
plt.axvline(x=section * time, ymin=0, ymax=1.0, color='r')
plt.axvline(x=(section + 1) * time, ymin=0, ymax=1.0, color='r')
# plt.xlim(0, 1800)
plt.title('동공크기 변화율')
plt.xlabel('Frame')
plt.ylabel('Pupil size')
plt.savefig(f'{savepath}')
plt.show()
# draw a quadratic trend line and save the histogram plot
def draw_trendline_fft(data, title, y_lim, y_label, savepath, quar = None, avg = False):
results = {}
# trend line
x = np.arange(0, len(data))
y = []
for idx, value in enumerate(data):
y.append(value)
y = np.array(y) # features (blink counts) for the 10 sections
fit = np.polyfit(x, y, 2)
a = fit[0]
b = fit[1]
c = fit[2]
fit_equation = a * np.square(x) + b * x + c
results['coeffs'] = fit.tolist()
# r-squared
p = np.poly1d(fit)
# fit values, and mean
yhat = p(x)
ybar = np.sum(y) / len(y)
ssreg = np.sum((yhat - ybar) ** 2)
sstot = np.sum((y - ybar) ** 2)
results['r-squared'] = ssreg / sstot
r_squared = str(round(results['r-squared'], 3)) # 출력하기 위해 문자열로 변환
a = str(round(results['coeffs'][0], 3))
b = str(round(results['coeffs'][1], 3))
c = str(round(results['coeffs'][2], 3))
# print("R 제곱값: ", round(results['r-squared'], 3))
# print("추세선: "+"Y="+a+"xX^2 + "+b+"xX + "+c)
period = ['0~3분', '3~6분', '6~9분', '9~12분', '12~15분', '15~18분', '18~21분', '21~24분', '24~27분', '27~30분', '30~33분']
plt.rcParams["font.family"] = 'Malgun Gothic'
fig = plt.figure(dpi=150)
ax = fig.add_subplot(1, 1, 1)
for idx2, value2 in enumerate(data):
ax.bar(period[idx2], value2, color='b', alpha=0.5)
ax.plot(x, fit_equation, color='r', alpha=0.5, label='Polynomial fit', linewidth=3.0)
# ax.scatter(x, y, s = 5, color = 'b', label = 'Data points') # plot the points used for the trend-line fit
# Plotting
plt.xticks(rotation=20)
plt.title(f'{title}')
plt.ylim(0, y_lim)
plt.xlabel('구간')
plt.ylabel(f'{y_label}')
# text position when plotting the pupil size change rate
if not avg:
plt.text(3.2, 0.055, "추세선: " + r'$y = $' + a + r'$x^2 + ($' + b + r'$)x + $' + c, fontdict={'size': 12})
plt.text(7.5, 0.05, r'$R^2 =$' + r_squared, fontdict={'size': 12})
# text position when plotting the average pupil size change rate
else:
plt.text(3.2, 0.027, "추세선: " + r'$y = $' + a + r'$x^2 + ($' + b + r'$)x + $' + c, fontdict={'size': 12})
plt.text(7.5, 0.025, r'$R^2 =$' + r_squared, fontdict={'size': 12})
plt.tight_layout()
fig.canvas.draw()
img = np.array(fig.canvas.renderer._renderer)
spl = title.split('.')[0]
plt.savefig(f'{savepath}')
plt.imshow(img)
plt.show() # display the plot to check it renders correctly
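# Hedged usage sketch (the pupil trace below is synthetic):
#
#   t = np.arange(9000)
#   pupil = list(3.0 + 0.2 * np.sin(2 * np.pi * 0.004 * t)
#                + 0.05 * np.random.randn(t.size))
#   filtered, zc, n_sections, rates = fft(pupil, minu=1)
#   draw_fft_graph(np.array(pupil), filtered, zc, n_sections,
#                  'fft_demo.png', minu=1)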
|
HanNayeoniee/visual-fatigue-analysis
|
analysis/fft.py
|
fft.py
|
py
| 6,952 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "scipy.fftpack.fft",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "scipy.fftpack.fftfreq",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack.ifft",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "scipy.fftpack.fft",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "scipy.fftpack.fftfreq",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.max",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack.ifft",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.insert",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.polyfit",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.poly1d",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 193,
"usage_type": "name"
}
] |
5259664413
|
import re
#import CMUTweetTagger
#import cPickle
from collections import defaultdict
import pickle
from nltk.corpus import wordnet as wn
from itertools import product
import spacy
from spacy.symbols import *
from nltk import Tree
import nltk
nlp=spacy.load('en')
np_labels=set(['nsubj','dobj','pobj','iobj','conj','nsubjpass','appos','nmod','poss','parataxis','advmod','advcl'])
subj_labels=set(['nsubj','nsubjpass'])
need_verb_list=['need','require','want','lack']
send_verb_list=['send','give','donate','transfer','distribute','aid','help','procure']
common_resource=['food','water','medicine','tent','clothes','communication','transport','infrastructure','shelter','internet','sanitation','hospital','donations']
modifiers=['nummod','compound','amod','punct']
after_clause_modifier=['relcl','acl','ccomp','xcomp','acomp','punct']#,'nn','quantmod','nmod','hmod','infmod']
verb_count={}
resource_array=[]
modified_array=[]
# nepal_stop_list=['nepal','earthquake','quake','nepalese']
nepal_stop_list=[]
tel_no="([+]?[0]?[1-9][0-9\s]*[-]?[0-9\s]+)"
email="([a-zA-Z0-9]?[a-zA-Z0-9_.]+[@][a-zA-Z]*[.](com|net|edu|in|org|en))"
web_url="http:[a-zA-Z._0-9/]+[a-zA-Z0-9]"
entity_type_list=['NORP','ORG','GPE','PERSON']
quant_no="([0-9]*[,.]?[0-9]+[k]?)"
need_send_verb_list=['need','require','want','lack','send','give','donate','transfer','distribute','aid','help','support','procure']
# def quant_no(resource):
# return [i for re.findall(quant_no,resource)]
def modifier_word(word):
modified_word=word.orth_
while word.n_lefts+word.n_rights==1 and word.dep_.lower() in modifiers:
word=[child for child in word.children][0]
modified_word=word.orth_+" "+modified_word
return modified_word
def tok_format(tok):
return "_".join([tok.orth_, tok.dep_,tok.ent_type_])
def to_nltk_tree(node):
if node.n_lefts + node.n_rights > 0:
return Tree(tok_format(node), [to_nltk_tree(child) for child in node.children])
else:
return tok_format(node)
def get_children(word,resource_array,modified_array):
#print(word,word.dep_)
for child in word.children:
if child.dep_.lower() in modifiers:
get_word=modifier_word(child)+" "+word.orth_+"<_>"+word.dep_
modified_array.append(get_word)
if child.dep_.lower()=='prep' or child.dep_.lower()=='punct':
get_children(child,resource_array,modified_array)
if child.dep_.lower() in after_clause_modifier:
#print(child, child.dep_)
get_children(child,resource_array,modified_array)
if child.dep_.lower() in np_labels:
get_children(child,resource_array,modified_array)
resource_array.append(child.orth_+"<_>"+child.dep_)
else:
if get_verb_similarity_score(child.orth_,common_resource)>0.7 :
get_children(child,resource_array,modified_array)
def get_verb_similarity_score(word,given_list):
max_verb_similarity=0
if word.lower() in given_list:
max_verb_similarity=1
else:
current_verb_list=wn.synsets(word.lower())
for verb in given_list:
related_verbs=wn.synsets(verb)
for a,b in product(related_verbs,current_verb_list):
d=wn.wup_similarity(a,b)
try:
if d> max_verb_similarity:
max_verb_similarity=d
except:
continue
return max_verb_similarity
def resource_in_list(resource):
related_resources=wn.synsets(resource)
max_similarity=0
chosen_word=""
if resource.lower() in common_resource:
return 1,resource
for word in common_resource:
related_words=wn.synsets(word)
#print(word,related_words)
for a,b in product(related_words,related_resources):
d=wn.wup_similarity(a,b)
try:
if d> max_similarity:
max_similarity=d
chosen_word=word
except:
continue
return max_similarity,chosen_word
def get_resource(text):
doc=nlp(text)
# try:
# [to_nltk_tree(sent.root).pretty_print() for sent in doc.sents]
# except:
# print("Exception here")
org_list=[]
prev_word=""
prev_word_type=""
for word in doc:
if word.ent_type_ in entity_type_list:
org_list.append(word.orth_+"<_>"+word.ent_type_)
else:
org_list.append("<_>")
resource_array=[]
modified_array=[]
for word in doc:
if get_verb_similarity_score(word.orth_,need_send_verb_list)>0.8 or word.dep_=='ROOT':
get_children(word,resource_array,modified_array)
if word.dep_=='cc' and word.n_lefts+word.n_rights==0:
ancestor=word.head.orth_
#print(ancestor)
if get_verb_similarity_score(ancestor,common_resource)>0.6:
get_children(word.head,resource_array,modified_array)
#print(resource_array)
#print(modified_array)
last_word=[]
# for resource in modified_array:
# print(resource)
# print(resource, resource_in_list(resource.lower()))
# for word in modified_array:
# last_word.append(word.split(' ')[-1])
final_resource={}
modified_array_2=[]
resource_array_2=[]
n_subj_list=[]
for i in modified_array:
modified_array_2.append(i[:(i.index("<_>"))])
for i in resource_array:
resource_array_2.append(i[:(i.index("<_>"))])
for resources in modified_array_2:
max_val_resource=0
val_type=""
resource_list=resources.rstrip().split(" ")
for resource in resource_list:
pres_res_val,pres_res_type=resource_in_list(resource)
if pres_res_val> max_val_resource:
val_type=pres_res_type
max_val_resource=pres_res_val
if max_val_resource > 0.6:
final_resource[resources]=val_type
for resource in resource_array_2:
#print(resource)
pres_res_val,pres_res_type=resource_in_list(resource)
if pres_res_val> 0.6:
if resource not in final_resource:
final_resource[resource]=pres_res_type
final_resource_keys=list(final_resource.keys())
prev_word_type=""
prev_word=""
org_list_2=[]
poss_places=[]
for i in org_list:
index=i.index("<_>")
if i[index+3:]=='GPE' and i[:index] in final_resource_keys:
#final_resource_keys.remove(i[:index])
poss_places.append(i[:index])
if i[index+3:]=="ORG" and prev_word_type=="ORG":
prev_word=prev_word+" "+i[:index]
elif i[index+3:]=="PERSON" and prev_word_type=="PERSON":
prev_word=prev_word+" "+i[:index]
else:
if prev_word !='':
org_list_2.append(prev_word+"<_>"+prev_word_type)
prev_word_type=i[index+3:]
prev_word=i[:index]
quantity_dict={}
for i in final_resource:
for j in re.findall(quant_no,i):
quantity_dict[i]=j
source_list=[]
org_person_list=[]
for i in org_list_2:
tag=i[i.index("<_>")+3:]
j=i[:i.index("<_>")]
if tag=="ORG" or tag=="PERSON":
if j.lower() not in nepal_stop_list:
org_person_list.append(j)
elif j.lower() not in nepal_stop_list and j not in quantity_dict.keys():
source_list.append(j)
else:
continue
for i in modified_array:
pos_res=i[:i.index("<_>")]
pos_tag=i[i.index("<_>")+3:]
if pos_tag in subj_labels:
if pos_res not in source_list and pos_res not in final_resource_keys and pos_res.lower() not in nepal_stop_list:
#print(pos_tag,pos_res)
source_list.append(pos_res)
for i in resource_array:
pos_res=i[:i.index("<_>")]
pos_tag=i[i.index("<_>")+3:]
if pos_tag in subj_labels:
if pos_res not in source_list and pos_res not in final_resource_keys and pos_res.lower() not in nepal_stop_list:
#print(pos_tag,pos_res)
source_list.append(pos_res)
return quantity_dict,final_resource_keys,source_list,poss_places,org_person_list
def get_contact(text):
numbers=re.findall(tel_no,text)
print("Contact Information")
for i in numbers:
if len(i)>=7:
print(i)
#test_file.write(str(i)+",")
#test_file.write('\nMail:')
mails= re.findall(email,text)
for i in mails:
print("Mail: "+i)
#test_file.write(str(i)+",")
|
varun-manjunath/disaster-mitigation
|
matching/common_nouns.py
|
common_nouns.py
|
py
| 7,549 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "spacy.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.Tree",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet.synsets",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.synsets",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet.wup_similarity",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.synsets",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.synsets",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet.wup_similarity",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 257,
"usage_type": "call"
}
] |
7545685477
|
import psycopg2
DBNAME = "news"
def fetch_all(query, params):
"""
execute a query and fetch all result from it
:param query: the query to execute
:param params: parameters of the query
:return: result of this query
"""
    # it's somewhat time-consuming to open and close a connection for every query
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query, params)
ret = c.fetchall()
db.close()
return ret
def article_views(cnt=None):
"""
    statistics about article views; an article counts as viewed if there is
    an HTTP GET request for the article path with a 200 status code
:param cnt: int, optional
max number of articles
:return:
list of (article name, view_cnt) pair ordered by views in desc order
"""
query = """
select title, view_cnt
from articles, view_stat
where concat('/article/', slug) = path
order by view_cnt desc
"""
if cnt is not None:
query += "limit (%s)"
params = (cnt,)
else:
params = ()
return fetch_all(query, params)
def author_views(cnt=None):
"""
    statistics about the total views of each author's articles
:param cnt: int, optional
max number of authors
:return:
list of (author name, view_cnt) pair ordered by views in desc order
"""
query = """
select name, sum(view_cnt) as view_cnt
from articles, view_stat, authors
where concat('/article/', slug) = path
and articles.author = authors.id
group by authors.id
order by view_cnt desc
"""
if cnt is not None:
query += "limit (%s)"
params = (cnt,)
else:
params = ()
return fetch_all(query, params)
def error_stat(threshold):
"""
    error rate stats by day; the error rate is the number of failed
    requests (status is not 200) divided by the total number of requests that day.
    days without any requests are ignored
:param threshold: double
error rate bigger or equal threshold will be returned
:return: list of (date, error rate)
"""
query = """
select date(time) as stat_date,
sum(cast(status != '200 OK' as integer))
/ cast(count(*) as real) as error_rate
from log
group by stat_date
having
sum(cast(status != '200 OK' as integer))
/ cast(count(*) as real) >= (%s);
"""
return fetch_all(query, (threshold,))
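# --- Editor's usage sketch (not part of the original module) ---
# A hedged example of driving the reporting helpers from a CLI entry point;
# it assumes the `news` database with the `articles`, `authors`, `log` and
# `view_stat` relations referenced by the queries above.
if __name__ == '__main__':
    for title, views in article_views(3):
        print("%s -- %d views" % (title, views))
    for day, rate in error_stat(0.01):
        print("%s -- %.2f%% errors" % (day, rate * 100))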
|
akudet/fsnd-proj3
|
reporter_db.py
|
reporter_db.py
|
py
| 2,454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "psycopg2.connect",
"line_number": 14,
"usage_type": "call"
}
] |
29832128346
|
import cv2
#Reading Image
img = cv2.imread('img46_gray_noise.png')
#Applying median filter
median = cv2.medianBlur(img, 3)
#Showing image
cv2.imshow("Noised Image", img)
cv2.imshow("median", median)
cv2.waitKey()
cv2.destroyAllWindows()
#Save result
cv2.imwrite("denoised_image.png", median)
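# Editor's sketch (assumes the same noisy input image): medianBlur requires
# an odd aperture size; sweeping a few sizes helps pick the mildest kernel
# that still removes the salt-and-pepper noise.
for k in (3, 5, 7):
    cv2.imwrite("denoised_k%d.png" % k, cv2.medianBlur(img, k))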
|
Digu62/computer_vision_challenges
|
Questao1/main.py
|
main.py
|
py
| 286 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.medianBlur",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 16,
"usage_type": "call"
}
] |
75118275066
|
import sqlite3
with open("C:\\Users\Asmaa Samir\Desktop\Project\data.txt", "w") as myFile:
my_tuple1 = ('google.com ', '198.188.3.2 ', '255.255.255.0', '11:01 ')
my_tuple2 = ('youtube.com', '199.588.35.22', '255.255.255.0', '1:01')
my_tuple3 = ('google.com', '198.155.66.1', '255.255.255.0', '7:55')
myFile.writelines(my_tuple1)
myFile.writelines(my_tuple2)
myFile.writelines(my_tuple3)
db = sqlite3.connect("data.db") # create database and connect
cr = db.cursor() # تفعيل
# noinspection SqlNoDataSourceInspection
# Note: the original CREATE TABLE declared five unquoted, space-containing
# columns, and the inserts targeted a non-existent "skills" table with five
# placeholders for four-value tuples; the statements below keep the four
# fields the data actually carries so the script runs.
cr.execute('CREATE TABLE IF NOT EXISTS Analysis ("URL" text, "IP" text, "MAC" text, "TIME" text)')
cr.execute("insert into Analysis values(?, ?, ?, ?)", my_tuple1)  # insert data
cr.execute("insert into Analysis values(?, ?, ?, ?)", my_tuple2)
cr.execute("insert into Analysis values(?, ?, ?, ?)", my_tuple3)
db.commit() # save
db.close() # close
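# Editor's verification sketch (assumes the schema created above): reopen
# the database and confirm the rows landed in the Analysis table.
db = sqlite3.connect("data.db")
cr = db.cursor()
for row in cr.execute("SELECT * FROM Analysis"):
    print(row)
db.close()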
|
AsmaaGHSamir/GProject
|
DB.py
|
DB.py
|
py
| 926 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sqlite3.connect",
"line_number": 12,
"usage_type": "call"
}
] |
6814879797
|
import pika, json
def upload(f, fs, channel, access):
# put file into mongodb database
try:
# get file if success
fid = fs.put(f)
except Exception as err:
return "internal server error", 500
# create message
message = {
"video_fid": str(fid),
"mp3_fid": None,
# who owns the file
"username": access["username"],
}
# put message in queue
try:
channel.basic_publish(
exchange="",
routing_key="video",
# convert python object to json string
body=json.dumps(message),
properties=pika.BasicProperties(
# make messages persistent
delivery_mode=pika.PERSISTENT_DELIVERY_MODE
),
)
    # if the message could not be added to the queue
    except Exception:
# delete file, because it's not connected to any message
fs.delete(fid)
return "internal server error", 500
|
dawmro/testing_microservice_architectures
|
python/src/gateway/storage/util.py
|
util.py
|
py
| 807 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pika.BasicProperties",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pika.PERSISTENT_DELIVERY_MODE",
"line_number": 28,
"usage_type": "attribute"
}
] |
10418352733
|
from __future__ import annotations
import dataclasses
import typing
from randovania.game_description.db.resource_node import ResourceNode
from randovania.game_description.requirements.requirement_and import RequirementAnd
from randovania.game_description.requirements.resource_requirement import ResourceRequirement
from randovania.game_description.resources.node_resource_info import NodeResourceInfo
if typing.TYPE_CHECKING:
from randovania.game_description.db.node import Node, NodeContext
from randovania.game_description.requirements.base import Requirement
from randovania.game_description.resources.resource_info import ResourceGain
def _all_nodes_in_network(context: NodeContext, network_name: str) -> typing.Iterator[TeleporterNetworkNode]:
for node in context.node_provider.iterate_nodes():
if isinstance(node, TeleporterNetworkNode) and node.network == network_name:
yield node
@dataclasses.dataclass(frozen=True, slots=True)
class TeleporterNetworkNode(ResourceNode):
"""
Represents a node that belongs to a set, where you can freely move between if some conditions are satisfied.
- can only teleport *to* if `is_unlocked` is satisfied
- can only teleport *from* if the node has been activated
A TeleporterNetworkNode being activated is implemented as being collected, with this class being a ResourceNode.
There are three methods of activating a TeleporterNetworkNode:
Method 1:
- Be the starting node
Method 2:
- Collecting a TeleporterNetworkNode also collects all other nodes in the same network with satisfied `is_unlocked`
Method 3:
- Collect the node normally by reaching it, with `is_unlocked` satisfied and one of:
- `requirement_to_activate` is satisfied
- this node was already collected
"""
is_unlocked: Requirement
network: str
requirement_to_activate: Requirement
def requirement_to_leave(self, context: NodeContext) -> Requirement:
return RequirementAnd([self.is_unlocked, ResourceRequirement.simple(self.resource(context))])
def resource(self, context: NodeContext) -> NodeResourceInfo:
return NodeResourceInfo.from_node(self, context)
def can_collect(self, context: NodeContext) -> bool:
resources = context.current_resources
req = self.requirement_to_activate
if resources.has_resource(self.resource(context)) or req.satisfied(resources, 0, context.database):
return not self.is_collected(context)
else:
return False
def is_collected(self, context: NodeContext) -> bool:
current_resources = context.current_resources
return all(
context.has_resource(node.resource(context))
for node in _all_nodes_in_network(context, self.network)
if node.is_unlocked.satisfied(current_resources, 0, context.database)
)
def resource_gain_on_collect(self, context: NodeContext) -> ResourceGain:
for node in _all_nodes_in_network(context, self.network):
if node.is_unlocked.satisfied(context.current_resources, 0, context.database):
yield node.resource(context), 1
def connections_from(self, context: NodeContext) -> typing.Iterator[tuple[Node, Requirement]]:
for node in _all_nodes_in_network(context, self.network):
if node != self:
yield node, node.is_unlocked
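# Editor's note (illustration, not code from the original project): given
# nodes A, B and C sharing network "portals", collecting A also collects B
# and C *only if* their `is_unlocked` requirements already hold (see
# resource_gain_on_collect above); a still-locked node must later be
# reached directly with `requirement_to_activate` satisfied (method 3).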
|
randovania/randovania
|
randovania/game_description/db/teleporter_network_node.py
|
teleporter_network_node.py
|
py
| 3,434 |
python
|
en
|
code
| 165 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.db.node.NodeContext",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.db.resource_node.ResourceNode",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.requirements.base.Requirement",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.requirements.base.Requirement",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.node.NodeContext",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.requirements.requirement_and.RequirementAnd",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.requirements.resource_requirement.ResourceRequirement.simple",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.requirements.resource_requirement.ResourceRequirement",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.requirements.base.Requirement",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.node.NodeContext",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.resources.node_resource_info.NodeResourceInfo.from_node",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.node_resource_info.NodeResourceInfo",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.resources.node_resource_info.NodeResourceInfo",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.node.NodeContext",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.node.NodeContext",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.node.NodeContext",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.resources.resource_info.ResourceGain",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.node.NodeContext",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.db.node.Node",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.requirements.base.Requirement",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 23,
"usage_type": "call"
}
] |
1112499487
|
""" VirtualMachineHandler provides remote access to VirtualMachineDB
The following methods are available in the Service interface:
- insertInstance
- declareInstanceSubmitted
- declareInstanceRunning
- instanceIDHeartBeat
- declareInstanceHalting
- getInstancesByStatus
- declareInstancesStopping
    - getUniqueID( instanceID ) return cloud manager uniqueID from VMDIRAC instanceID
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
from subprocess import Popen, PIPE
import six
# DIRAC
from DIRAC import gLogger, S_ERROR, S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# VMDIRAC
from VMDIRAC.WorkloadManagementSystem.DB.VirtualMachineDB import VirtualMachineDB
from VMDIRAC.Security import VmProperties
from VMDIRAC.Resources.Cloud.Utilities import STATE_MAP
from VMDIRAC.Resources.Cloud.ConfigHelper import getVMTypeConfig, getVMTypes
from VMDIRAC.Resources.Cloud.EndpointFactory import EndpointFactory
from VMDIRAC.WorkloadManagementSystem.Utilities.Utils import getProxyFileForCE
__RCSID__ = '$Id$'
# This is a global instance of the VirtualMachineDB class
gVirtualMachineDB = False
def initializeVirtualMachineManagerHandler(_serviceInfo):
global gVirtualMachineDB
gVirtualMachineDB = VirtualMachineDB()
haltStalledInstances()
checkStalledInstances()
if gVirtualMachineDB._connected:
gThreadScheduler.addPeriodicTask(60 * 15, checkStalledInstances)
return S_OK()
return S_ERROR()
def haltStalledInstances():
result = gVirtualMachineDB.getInstancesByStatus('Stalled')
if not result['OK']:
return result
uList = []
for image in result['Value']:
uList += result['Value'][image]
stallingList = []
for uID in uList:
result = gVirtualMachineDB.getInstanceID(uID)
if not result['OK']:
continue
stallingList.append(result['Value'])
return haltInstances(stallingList)
def getCEInstances(siteList=None, ceList=None, vo=None):
result = getVMTypes(siteList=siteList, ceList=ceList, vo=vo)
if not result['OK']:
return S_ERROR('Failed to get images from the CS')
imageDict = result['Value']
ceList = []
for site in imageDict:
for ce in imageDict[site]:
result = EndpointFactory().getCE(site, ce)
if not result['OK']:
continue
ceList.append((site, ce, result['Value']))
nodeDict = {}
for site, ceName, ce in ceList:
result = ce.getVMNodes()
if not result['OK']:
continue
for node in result['Value']:
if not node.name.startswith('DIRAC'):
continue
ip = (node.public_ips[0] if node.public_ips else 'None')
nodeState = node.state.upper() if not isinstance(node.state, six.integer_types) else STATE_MAP[node.state]
nodeDict[node.id] = {"Site": site,
"CEName": ceName,
"NodeName": node.name,
"PublicIP": ip,
"State": nodeState}
return S_OK(nodeDict)
def checkStalledInstances():
"""
To avoid stalling instances consuming resources at cloud endpoint,
attempts to halt the stalled list in the cloud endpoint
"""
result = gVirtualMachineDB.declareStalledInstances()
if not result['OK']:
return result
stallingList = result['Value']
return haltInstances(stallingList)
def stopInstance(site, endpoint, nodeID):
result = getVMTypeConfig(site, endpoint)
if not result['OK']:
return result
ceParams = result['Value']
ceFactory = EndpointFactory()
result = ceFactory.getCEObject(parameters=ceParams)
if not result['OK']:
return result
ce = result['Value']
result = ce.stopVM(nodeID)
return result
def createEndpoint(uniqueID):
result = gVirtualMachineDB.getEndpointFromInstance(uniqueID)
if not result['OK']:
return result
site, endpoint = result['Value'].split('::')
result = getVMTypeConfig(site, endpoint)
if not result['OK']:
return result
ceParams = result['Value']
ceFactory = EndpointFactory()
result = ceFactory.getCEObject(parameters=ceParams)
return result
def haltInstances(vmList):
"""
Common haltInstances for Running(from class VirtualMachineManagerHandler) and
Stalled(from checkStalledInstances periodic task) to Halt
"""
failed = {}
successful = {}
for instanceID in vmList:
instanceID = int(instanceID)
result = gVirtualMachineDB.getUniqueID(instanceID)
if not result['OK']:
gLogger.error('haltInstances: on getUniqueID call: %s' % result['Message'])
continue
uniqueID = result['Value']
result = createEndpoint(uniqueID)
if not result['OK']:
gLogger.error('haltInstances: on createEndpoint call: %s' % result['Message'])
continue
endpoint = result['Value']
# Get proxy to be used to connect to the cloud endpoint
authType = endpoint.parameters.get('Auth')
if authType and authType.lower() in ['x509', 'voms']:
siteName = endpoint.parameters['Site']
ceName = endpoint.parameters['CEName']
gLogger.verbose("Getting cloud proxy for %s/%s" % (siteName, ceName))
result = getProxyFileForCE(endpoint)
if not result['OK']:
continue
endpoint.setProxy(result['Value'])
result = endpoint.stopVM(uniqueID)
if result['OK']:
gVirtualMachineDB.recordDBHalt(instanceID, 0)
successful[instanceID] = True
else:
failed[instanceID] = result['Message']
return S_OK({"Successful": successful, "Failed": failed})
def getPilotOutput(pilotRef):
if not pilotRef.startswith('vm://'):
return S_ERROR('Invalid pilot reference %s' % pilotRef)
# Get the VM public IP
diracID, nPilot = os.path.basename(pilotRef).split(':')
result = gVirtualMachineDB.getUniqueIDByName(diracID)
if not result['OK']:
return result
uniqueID = result['Value']
result = gVirtualMachineDB.getInstanceID(uniqueID)
if not result['OK']:
return result
instanceID = result['Value']
result = gVirtualMachineDB.getInstanceParameter("PublicIP", instanceID)
if not result['OK']:
return result
publicIP = result['Value']
op = Operations()
privateKeyFile = op.getValue('/Cloud/PrivateKey', '')
diracUser = op.getValue('/Cloud/VMUser', '')
ssh_str = '%s@%s' % (diracUser, publicIP)
cmd = ['ssh', '-i', privateKeyFile, ssh_str,
"cat /etc/joboutputs/vm-pilot.%s.log" % nPilot]
inst = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output, stderr = inst.communicate()
if inst.returncode:
return S_ERROR('Failed to get pilot output: %s' % stderr)
else:
return S_OK(output)
class VirtualMachineManagerHandler(RequestHandler):
def initialize(self):
credDict = self.getRemoteCredentials()
self.rpcProperties = credDict['properties']
@staticmethod
def __logResult(methodName, result):
'''
Method that writes to log error messages
'''
if not result['OK']:
gLogger.error('%s: %s' % (methodName, result['Message']))
types_getCEInstances = [(list, type(None)), (list, type(None)), six.string_types]
def export_getCEInstances(self, siteList, ceList, vo):
if not siteList:
siteList = None
return getCEInstances(siteList=siteList, ceList=ceList, vo=vo)
types_stopInstance = [six.string_types, six.string_types, six.string_types]
def export_stopInstance(self, site, endpoint, nodeID):
return stopInstance(site, endpoint, nodeID)
types_getPilotOutput = [six.string_types]
def export_getPilotOutput(self, pilotReference):
return getPilotOutput(pilotReference)
types_checkVmWebOperation = [six.string_types]
def export_checkVmWebOperation(self, operation):
"""
return true if rpc has VM_WEB_OPERATION
"""
if VmProperties.VM_WEB_OPERATION in self.rpcProperties:
return S_OK('Auth')
return S_OK('Unauth')
types_insertInstance = [six.string_types, six.string_types, six.string_types, six.string_types, six.string_types]
def export_insertInstance(self, uniqueID, imageName, instanceName, endpoint, runningPodName):
"""
    Insert a new Instance in the DB
"""
res = gVirtualMachineDB.insertInstance(uniqueID, imageName, instanceName, endpoint, runningPodName)
self.__logResult('insertInstance', res)
return res
types_getUniqueID = [six.string_types]
def export_getUniqueID(self, instanceID):
"""
return cloud manager uniqueID from VMDIRAC instanceID
"""
res = gVirtualMachineDB.getUniqueID(instanceID)
self.__logResult('getUniqueID', res)
return res
types_getUniqueIDByName = [six.string_types]
def export_getUniqueIDByName(self, instanceName):
"""
return cloud manager uniqueID from VMDIRAC name
"""
result = gVirtualMachineDB.getUniqueIDByName(instanceName)
self.__logResult('getUniqueIDByName', result)
return result
types_setInstanceUniqueID = [six.integer_types, six.string_types]
def export_setInstanceUniqueID(self, instanceID, uniqueID):
"""
Check Status of a given image
Will insert a new Instance in the DB
"""
res = gVirtualMachineDB.setInstanceUniqueID(instanceID, uniqueID)
self.__logResult('setInstanceUniqueID', res)
return res
types_declareInstanceSubmitted = [six.string_types]
def export_declareInstanceSubmitted(self, uniqueID):
"""
After submission of the instance the Director should declare the new Status
"""
res = gVirtualMachineDB.declareInstanceSubmitted(uniqueID)
self.__logResult('declareInstanceSubmitted', res)
return res
types_declareInstanceRunning = [six.string_types, six.string_types]
def export_declareInstanceRunning(self, uniqueID, privateIP):
"""
Declares an instance Running and sets its associated info (uniqueID, publicIP, privateIP)
Returns S_ERROR if:
- instanceName does not have a "Submitted" entry
- uniqueID is not unique
"""
gLogger.info('Declare instance Running uniqueID: %s' % (uniqueID))
if VmProperties.VM_RPC_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized declareInstanceRunning RPC")
publicIP = self.getRemoteAddress()[0]
gLogger.info('Declare instance Running publicIP: %s' % (publicIP))
res = gVirtualMachineDB.declareInstanceRunning(uniqueID, publicIP, privateIP)
self.__logResult('declareInstanceRunning', res)
return res
types_instanceIDHeartBeat = [six.string_types, float, six.integer_types,
six.integer_types, six.integer_types]
def export_instanceIDHeartBeat(self, uniqueID, load, jobs,
transferredFiles, transferredBytes, uptime=0):
"""
Insert the heart beat info from a running instance
It checks the status of the instance and the corresponding image
Declares "Running" the instance and the image
It returns S_ERROR if the status is not OK
"""
if VmProperties.VM_RPC_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized declareInstanceIDHeartBeat RPC")
try:
uptime = int(uptime)
except ValueError:
uptime = 0
res = gVirtualMachineDB.instanceIDHeartBeat(uniqueID, load, jobs,
transferredFiles, transferredBytes, uptime)
self.__logResult('instanceIDHeartBeat', res)
return res
types_declareInstancesStopping = [list]
def export_declareInstancesStopping(self, instanceIdList):
"""
Declares "Stopping" the instance because the Delete button of Browse Instances
The instanceID is the VMDIRAC VM id
When next instanceID heat beat with stopping status on the DB the VM will stop the job agent and terminates properly
It returns S_ERROR if the status is not OK
"""
if VmProperties.VM_WEB_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized VM Stopping")
for instanceID in instanceIdList:
gLogger.info('Stopping DIRAC instanceID: %s' % (instanceID))
result = gVirtualMachineDB.getInstanceStatus(instanceID)
if not result['OK']:
self.__logResult('declareInstancesStopping on getInstanceStatus call: ', result)
return result
state = result['Value']
gLogger.info('Stopping DIRAC instanceID: %s, current state %s' % (instanceID, state))
if state == 'Stalled':
result = gVirtualMachineDB.getUniqueID(instanceID)
if not result['OK']:
self.__logResult('declareInstancesStopping on getUniqueID call: ', result)
return result
uniqueID = result['Value']
result = gVirtualMachineDB.getEndpointFromInstance(uniqueID)
if not result['OK']:
self.__logResult('declareInstancesStopping on getEndpointFromInstance call: ', result)
return result
endpoint = result['Value']
result = self.export_declareInstanceHalting(uniqueID, 0)
elif state == 'New':
result = gVirtualMachineDB.recordDBHalt(instanceID, 0)
self.__logResult('declareInstanceHalted', result)
else:
        # this is only applied to allowed transitions
result = gVirtualMachineDB.declareInstanceStopping(instanceID)
self.__logResult('declareInstancesStopping: on declareInstanceStopping call: ', result)
return result
types_declareInstanceHalting = [six.string_types, float]
def export_declareInstanceHalting(self, uniqueID, load):
"""
Insert the heart beat info from a halting instance
The VM has the uniqueID, which is the Cloud manager VM id
Declares "Halted" the instance and the image
It returns S_ERROR if the status is not OK
"""
if VmProperties.VM_RPC_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized declareInstanceHalting RPC")
endpoint = gVirtualMachineDB.getEndpointFromInstance(uniqueID)
if not endpoint['OK']:
self.__logResult('declareInstanceHalting', endpoint)
return endpoint
endpoint = endpoint['Value']
result = gVirtualMachineDB.declareInstanceHalting(uniqueID, load)
if not result['OK']:
if "Halted ->" not in result["Message"]:
self.__logResult('declareInstanceHalting on change status: ', result)
return result
else:
gLogger.info("Bad transition from Halted to something, will assume Halted")
haltingList = []
instanceID = gVirtualMachineDB.getInstanceID(uniqueID)
if not instanceID['OK']:
self.__logResult('declareInstanceHalting', instanceID)
return instanceID
instanceID = instanceID['Value']
haltingList.append(instanceID)
return haltInstances(haltingList)
types_getInstancesByStatus = [six.string_types]
def export_getInstancesByStatus(self, status):
"""
Get dictionary of Image Names with InstanceIDs in given status
"""
res = gVirtualMachineDB.getInstancesByStatus(status)
self.__logResult('getInstancesByStatus', res)
return res
types_getAllInfoForUniqueID = [six.string_types]
def export_getAllInfoForUniqueID(self, uniqueID):
"""
Get all the info for a UniqueID
"""
res = gVirtualMachineDB.getAllInfoForUniqueID(uniqueID)
self.__logResult('getAllInfoForUniqueID', res)
return res
types_getInstancesContent = [dict, (list, tuple),
six.integer_types, six.integer_types]
def export_getInstancesContent(self, selDict, sortDict, start, limit):
"""
Retrieve the contents of the DB
"""
res = gVirtualMachineDB.getInstancesContent(selDict, sortDict, start, limit)
self.__logResult('getInstancesContent', res)
return res
types_getHistoryForInstanceID = [six.integer_types]
def export_getHistoryForInstanceID(self, instanceId):
"""
Retrieve the contents of the DB
"""
res = gVirtualMachineDB.getHistoryForInstanceID(instanceId)
self.__logResult('getHistoryForInstanceID', res)
return res
types_getInstanceCounters = [six.string_types, dict]
def export_getInstanceCounters(self, groupField, selDict):
"""
Retrieve the contents of the DB
"""
res = gVirtualMachineDB.getInstanceCounters(groupField, selDict)
self.__logResult('getInstanceCounters', res)
return res
types_getHistoryValues = [int, dict]
def export_getHistoryValues(self, averageBucket, selDict, fields2Get=None, timespan=0):
"""
Retrieve the contents of the DB
"""
if not fields2Get:
fields2Get = []
res = gVirtualMachineDB.getHistoryValues(averageBucket, selDict, fields2Get, timespan)
self.__logResult('getHistoryValues', res)
return res
types_getRunningInstancesHistory = [int, int]
def export_getRunningInstancesHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket
"""
res = gVirtualMachineDB.getRunningInstancesHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesHistory', res)
return res
types_getRunningInstancesBEPHistory = [int, int]
def export_getRunningInstancesBEPHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket by End-Point History
"""
res = gVirtualMachineDB.getRunningInstancesBEPHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesBEPHistory', res)
return res
types_getRunningInstancesByRunningPodHistory = [int, int]
def export_getRunningInstancesByRunningPodHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket by Running Pod History
"""
res = gVirtualMachineDB.getRunningInstancesByRunningPodHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesByRunningPodHistory', res)
return res
types_getRunningInstancesByImageHistory = [int, int]
def export_getRunningInstancesByImageHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket by Running Pod History
"""
res = gVirtualMachineDB.getRunningInstancesByImageHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesByImageHistory', res)
return res
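# --- Editor's client-side sketch (hedged; not taken from this file) ---
# How a DIRAC client might call this service. The service path follows
# DIRAC naming conventions and is an assumption:
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   rpc = RPCClient('WorkloadManagement/VirtualMachineManager')
#   result = rpc.getInstancesByStatus('Running')
#   if result['OK']:
#       print(result['Value'])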
|
DIRACGrid/VMDIRAC
|
VMDIRAC/WorkloadManagementSystem/Service/VirtualMachineManagerHandler.py
|
VirtualMachineManagerHandler.py
|
py
| 18,285 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "VMDIRAC.WorkloadManagementSystem.DB.VirtualMachineDB.VirtualMachineDB",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "DIRAC.Core.Utilities.ThreadScheduler.gThreadScheduler.addPeriodicTask",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "DIRAC.Core.Utilities.ThreadScheduler.gThreadScheduler",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "VMDIRAC.Resources.Cloud.ConfigHelper.getVMTypes",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "VMDIRAC.Resources.Cloud.EndpointFactory.EndpointFactory",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "six.integer_types",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Resources.Cloud.Utilities.STATE_MAP",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "VMDIRAC.Resources.Cloud.ConfigHelper.getVMTypeConfig",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "VMDIRAC.Resources.Cloud.EndpointFactory.EndpointFactory",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "VMDIRAC.Resources.Cloud.ConfigHelper.getVMTypeConfig",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "VMDIRAC.Resources.Cloud.EndpointFactory.EndpointFactory",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger.error",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "DIRAC.gLogger.error",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "DIRAC.gLogger.verbose",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "VMDIRAC.WorkloadManagementSystem.Utilities.Utils.getProxyFileForCE",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "DIRAC.ConfigurationSystem.Client.Helpers.Operations.Operations",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "DIRAC.Core.DISET.RequestHandler.RequestHandler",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "DIRAC.gLogger.error",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "six.string_types",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties.VM_WEB_OPERATION",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "six.string_types",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "six.integer_types",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "DIRAC.gLogger.info",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "VMDIRAC.Security.VmProperties.VM_RPC_OPERATION",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger.info",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "six.string_types",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "six.integer_types",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "six.integer_types",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties.VM_RPC_OPERATION",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "VMDIRAC.Security.VmProperties.VM_WEB_OPERATION",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger.info",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "DIRAC.gLogger.info",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "six.string_types",
"line_number": 429,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties.VM_RPC_OPERATION",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "VMDIRAC.Security.VmProperties",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger.info",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "DIRAC.gLogger",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "six.string_types",
"line_number": 465,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 476,
"usage_type": "attribute"
},
{
"api_name": "six.integer_types",
"line_number": 488,
"usage_type": "attribute"
},
{
"api_name": "six.integer_types",
"line_number": 499,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 510,
"usage_type": "attribute"
}
] |
39261296650
|
import os
import shutil
import zipfile
from base64 import b64decode
from utils.config import config
import requests
root_path = os.getcwd()
gat = (
"Z2l0aHViX3BhdF8xMUJBQkhHNkEwa1JRZEM1dFByczhVXzU0cERCS21URXRGYm"
"FYRElUWE5KVUk4VkUxVTdjb0dHbElMSWdhVnI2Qkc3QzVCN0lCWlhWdDJMOUo2"
)
def download_and_extract_zip(url, root_path):
zip_file_path = os.path.join(root_path, "repository.zip")
response = requests.get(url, stream=True)
response.raise_for_status()
total_size = int(response.headers.get("Content-Length", 0))
    if total_size == 0:
        print("Download failed!")
        return 0
    block_size = 1024  # chunk size for each read
    progress = 0
    with open(zip_file_path, "wb") as file:
        for data in response.iter_content(block_size):
            progress += len(data)
            file.write(data)
            # compute the download progress and render a progress bar
            percent = (progress / total_size) * 100
            progress_bar = "=" * int(percent // 5) + ">"
            print(f"Download progress: {percent:.2f}% [{progress_bar:<20}] ", end="\r")
    print("\nDownload complete!")
    # extract the ZIP archive
    with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
        zip_ref.extractall(root_path)
    os.remove(zip_file_path)  # remove the ZIP file
return 1
def sync_github_repo(repo_url, root_path):
    # build the API URL
    api_url = f"https://api.github.com/repos/{repo_url}/zipball/main"
    # create the destination directory if it does not exist
    os.makedirs(root_path, exist_ok=True)
    # download and extract the ZIP archive
return download_and_extract_zip(api_url, root_path)
def get_latest_branch_sha(repo_url):
url = f"https://api.github.com/repos/{repo_url}/branches"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": b64decode(gat).decode("utf-8"),
}
try:
response = requests.get(url, headers=headers, timeout=3)
    except Exception:
return None
if response.status_code == 200:
branches = response.json()
if branches:
latest_branch = branches[0]
return latest_branch["commit"]["sha"]
else:
return None
def copy_folder_contents(source_folder, destination_folder):
    # create the destination folder if it does not exist
    if not os.path.exists(destination_folder):
        os.makedirs(destination_folder)
    # walk every file and sub-folder in the source folder
    for item in os.listdir(source_folder):
        source = os.path.join(source_folder, item)
        destination = os.path.join(destination_folder, item)
        if os.path.isfile(source):
            # if the source item is a file, copy it and overwrite any file of the same name
            shutil.copy2(source, destination)
        elif os.path.isdir(source):
            # if the source item is a folder, recurse into it
            copy_folder_contents(source, destination)
def update_map(force=False):
repo_url = "CHNZYX/maps"
    # fetch the remote sha
    remote_sha = get_latest_branch_sha(repo_url)
    if remote_sha is None:
        print("Failed to fetch the remote map sha, please check the network connection")
        return "Failed to fetch the remote map sha, please check the network connection", "red"
    print("Remote map sha: " + remote_sha)
    # read the local sha
    local_sha = config.map_sha
    print("Local map sha: " + local_sha)
    # decide whether an update is needed
    if remote_sha == local_sha:
        print("map is already up to date")
        return "The map is already the latest version", "green"
    map_path = os.path.join(root_path, "imgs\\maps")
    print("Map path: " + map_path)
    # download the map repository and extract it
    status = sync_github_repo(repo_url, root_path)
    if status == 0:
        return "Download failed", "red"
    print("Download complete")
    # locate the downloaded map folder
    t = os.listdir(root_path)
    chn_folders = [item for item in t if item.startswith("CHNZYX")]
    downloaded_map_path = os.path.join(os.path.join(root_path, chn_folders[0]), "maps")
    print("download_map_path: " + downloaded_map_path)
    print("Extracting...")
    # delete the old map folder and copy in the new one
    if force:
        shutil.rmtree(map_path)
        shutil.copytree(downloaded_map_path, map_path)
    else:
        copy_folder_contents(downloaded_map_path, map_path)
    shutil.rmtree(os.path.dirname(downloaded_map_path))
    # update the stored sha
    config.map_sha = remote_sha
    config.save()
    print("Update complete")
    return "Update complete", "green"
|
CHNZYX/Auto_Simulated_Universe
|
utils/update_map.py
|
update_map.py
|
py
| 4,483 |
python
|
en
|
code
| 2,771 |
github-code
|
6
|
[
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy2",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "utils.config.config.map_sha",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "utils.config.config",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "shutil.copytree",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "utils.config.config.map_sha",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "utils.config.config",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "utils.config.config.save",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "utils.config.config",
"line_number": 129,
"usage_type": "name"
}
] |
27070905288
|
import datetime as dt
import random
import pytest
from scheduler import Scheduler, SchedulerError
from scheduler.base.definition import JobType
from scheduler.threading.job import Job
from ...helpers import DELETE_NOT_SCHEDULED_ERROR, foo
@pytest.mark.parametrize(
"n_jobs",
[
1,
2,
3,
10,
],
)
def test_delete_job(n_jobs):
sch = Scheduler()
assert len(sch.jobs) == 0
jobs = []
for _ in range(n_jobs):
jobs.append(sch.once(dt.datetime.now(), foo))
assert len(sch.jobs) == n_jobs
job = random.choice(jobs)
sch.delete_job(job)
assert job not in sch.jobs
assert len(sch.jobs) == n_jobs - 1
# test error if the job is not scheduled
with pytest.raises(SchedulerError, match=DELETE_NOT_SCHEDULED_ERROR):
sch.delete_job(job)
@pytest.mark.parametrize(
"empty_set",
[
False,
True,
],
)
@pytest.mark.parametrize(
"any_tag",
[
None,
False,
True,
],
)
@pytest.mark.parametrize(
"n_jobs",
[
0,
1,
2,
3,
10,
],
)
def test_delete_jobs(n_jobs, any_tag, empty_set):
sch = Scheduler()
assert len(sch.jobs) == 0
for _ in range(n_jobs):
sch.once(dt.datetime.now(), foo)
assert len(sch.jobs) == n_jobs
if empty_set:
if any_tag is None:
num_del = sch.delete_jobs()
else:
num_del = sch.delete_jobs(any_tag=any_tag)
else:
if any_tag is None:
num_del = sch.delete_jobs(tags={})
else:
num_del = sch.delete_jobs(tags={}, any_tag=any_tag)
assert len(sch.jobs) == 0
assert num_del == n_jobs
@pytest.mark.parametrize(
"job_tags, delete_tags, any_tag, n_deleted",
[
[[{"a", "b"}, {"1", "2", "3"}, {"a", "1"}], {"a", "1"}, True, 3],
[[{"a", "b"}, {"1", "2", "3"}, {"a", "2"}], {"b", "1"}, True, 2],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "1"}], {"3"}, True, 1],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "2"}], {"2", "3"}, True, 2],
[[{"a", "b"}, {"1", "2", "3"}, {"a", "1"}], {"a", "1"}, False, 1],
[[{"a", "b"}, {"1", "2", "3"}, {"a", "2"}], {"b", "1"}, False, 0],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "1"}], {"1", "3"}, False, 1],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "2"}], {"2", "3"}, False, 1],
],
)
def test_delete_tagged_jobs(job_tags, delete_tags, any_tag, n_deleted):
sch = Scheduler()
for tags in job_tags:
sch.once(dt.timedelta(), lambda: None, tags=tags)
assert sch.delete_jobs(tags=delete_tags, any_tag=any_tag) == n_deleted
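# Editor's sketch (not an original test): the tag semantics exercised above
# as plain library usage -- with any_tag=True a job is deleted if it carries
# *any* of the given tags, with any_tag=False only if it carries *all* of them.
def _tag_semantics_sketch():
    sch = Scheduler()
    sch.once(dt.timedelta(seconds=1), foo, tags={"a", "b"})
    assert sch.delete_jobs(tags={"a", "x"}, any_tag=True) == 1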
|
DigonIO/scheduler
|
tests/threading/scheduler/test_sch_delete_jobs.py
|
test_sch_delete_jobs.py
|
py
| 2,653 |
python
|
en
|
code
| 51 |
github-code
|
6
|
[
{
"api_name": "scheduler.Scheduler",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "helpers.foo",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "scheduler.SchedulerError",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "helpers.DELETE_NOT_SCHEDULED_ERROR",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "scheduler.Scheduler",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "helpers.foo",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime.now",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "scheduler.Scheduler",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 89,
"usage_type": "attribute"
}
] |
24510539081
|
import json
import frappe
from frappe.model.document import Document
from frappe.utils.safe_exec import get_safe_globals, safe_exec
from frappe.integrations.utils import make_post_request
from frappe.desk.form.utils import get_pdf_link
from frappe.utils.background_jobs import enqueue
def validate(self, method):
if self.channel == "WhatsApp":
fields = frappe.get_doc("DocType", self.document_type).fields
fields += frappe.get_all(
"Custom Field",
filters={"dt": self.document_type},
fields=["fieldname"]
)
# if not any(field.fieldname == self.custom_receiver_mobile for field in fields): # noqa
# frappe.throw(f"Field name {self.custom_receiver_mobile} does not exists")
def on_trash(self, method):
pass
# if self.channel == "WhatsApp":
# if self.notification_type == "Scheduler Event":
# frappe.delete_doc("Scheduled Job Type", self.name)
# frappe.cache().delete_value("whatsapp_notification_map")
def after_insert(self, method):
pass
# if self.channel == "WhatsApp":
# if self.notification_type == "Scheduler Event":
# method = f"whatsapp_erpnext.utils.trigger_whatsapp_notifications_{self.event_frequency.lower().replace(' ', '_')}" # noqa
# job = frappe.get_doc(
# {
# "doctype": "Scheduled Job Type",
# "method": method,
# "frequency": self.event_frequency
# }
# )
# job.insert()
def format_number(self, number):
if (number.startswith("+")):
number = number[1:len(number)]
return number
def send_scheduled_message(self) -> dict:
safe_exec(
self.condition, get_safe_globals(), dict(doc=self)
)
language_code = frappe.db.get_value(
"WhatsApp Templates", self.template,
fieldname='language_code'
)
if language_code:
for contact in self._contact_list:
data = {
"messaging_product": "whatsapp",
"to": self.format_number(contact),
"type": "template",
"template": {
"name": self.template,
"language": {
"code": language_code
},
"components": []
}
}
self.notify(data)
# return _globals.frappe.flags
def send_template_message(self, doc: Document, contact_no = None):
"""Specific to Document Event triggered Server Scripts."""
if not self.enabled:
return
doc_data = doc.as_dict()
if self.condition:
# check if condition satisfies
if not frappe.safe_eval(
self.condition, get_safe_globals(), dict(doc=doc_data)
):
return
template = frappe.db.get_value(
"WhatsApp Templates", self.custom_whatsapp_template,
fieldname='*'
)
if template:
for row in self.recipients:
if row.receiver_by_document_field != "owner":
if not contact_no:
contact_no = doc.get(row.receiver_by_document_field)
if contact_no:
data = {
"messaging_product": "whatsapp",
"to": contact_no,
"type": "template",
"template": {
"name": self.custom_whatsapp_template,
"language": {
"code": template.language_code
},
"components": []
}
}
# Pass parameter values
if self.fields:
parameters = []
for field in self.fields:
parameters.append({
"type": "text",
"text": doc.get_formatted(field.field_name)
})
data['template']["components"] = [{
"type": "body",
"parameters": parameters
}]
if self.attach_print:
key = doc.get_document_share_key()
frappe.db.commit()
link = get_pdf_link(
doc_data['doctype'],
doc_data['name'],
print_format=self.print_format or "Standard"
)
filename = f'{doc_data["name"]}.pdf'
url = f'{frappe.utils.get_url()}{link}&key={key}'
data['template']['components'].append({
"type": "header",
"parameters": [{
"type": "document",
"document": {
"link": url,
"filename": filename
}
}]
})
label = f"{doc_data['doctype']} - {doc_data['name']}"
notify(self, data, label)
def notify(self, data, label = None):
"""Notify."""
settings = frappe.get_doc(
"WhatsApp Settings", "WhatsApp Settings",
)
token = settings.get_password("token")
headers = {
"authorization": f"Bearer {token}",
"content-type": "application/json"
}
try:
response = make_post_request(
f"{settings.url}/{settings.version}/{settings.phone_id}/messages",
headers=headers, data=json.dumps(data)
)
message_id = response['messages'][0]['id']
enqueue(save_whatsapp_log, data = data, message_id = message_id, label = label)
frappe.msgprint("WhatsApp Message Triggered", indicator="green", alert=True)
except Exception as e:
response = frappe.flags.integration_request.json()['error']
error_message = response.get('Error', response.get("message"))
frappe.msgprint(
f"Failed to trigger whatsapp message: {error_message}",
indicator="red",
alert=True
)
finally:
status_response = frappe.flags.integration_request.json().get('error')
frappe.get_doc({
"doctype": "Integration Request",
"integration_request_service": self.custom_whatsapp_template,
"output": str(frappe.flags.integration_request.json()),
"status": "Failed" if status_response else "Completed"
}).insert(ignore_permissions=True)
@frappe.whitelist()
def send_notification(notification, ref_doctype, ref_docname, mobile_no = None):
noti_doc = frappe.get_doc("Notification", notification)
ref_doc = frappe.get_doc(ref_doctype, ref_docname)
send_template_message(noti_doc, ref_doc, mobile_no)
def save_whatsapp_log(data, message_id, label = None):
frappe.get_doc({
"doctype": "WhatsApp Message",
"type": "Outgoing",
"message": str(data['template']),
"to": data['to'],
"message_type": "Template",
"message_id": message_id,
"content_type": "document",
"label": label
}).save(ignore_permissions=True)
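# --- Editor's invocation sketch (hedged; not part of the original module) ---
# Calling the whitelisted endpoint above. The dotted path is derived from
# this module's location in the repo; the document names are placeholders:
#
#   frappe.call(
#       "whatsapp_erpnext.whatsapp_erpnext.doc_events.notification.send_notification",
#       notification="Invoice Alert",
#       ref_doctype="Sales Invoice",
#       ref_docname="SINV-0001",
#       mobile_no="9198xxxxxxxx",
#   )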
|
finbyz/whatsapp_erpnext
|
whatsapp_erpnext/whatsapp_erpnext/doc_events/notification.py
|
notification.py
|
py
| 5,911 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "frappe.get_doc",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "frappe.get_all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "frappe.utils.safe_exec.safe_exec",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "frappe.utils.safe_exec.get_safe_globals",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "frappe.model.document.Document",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "frappe.safe_eval",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "frappe.utils.safe_exec.get_safe_globals",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "frappe.db.commit",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "frappe.desk.form.utils.get_pdf_link",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "frappe.utils.get_url",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "frappe.utils",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "frappe.get_doc",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "frappe.integrations.utils.make_post_request",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "frappe.utils.background_jobs.enqueue",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "frappe.msgprint",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "frappe.flags.integration_request.json",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "frappe.flags",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "frappe.msgprint",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "frappe.flags.integration_request.json",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "frappe.flags",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "frappe.get_doc",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "frappe.flags.integration_request.json",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "frappe.flags",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "frappe.get_doc",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "frappe.get_doc",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "frappe.whitelist",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "frappe.get_doc",
"line_number": 209,
"usage_type": "call"
}
] |
8097011811
|
import pathlib
import PIL.Image
import PIL.ImageChops
import pyscreenshot
from sigsolve import imageutil, geometry
import numpy
def rehydrate(array):
# return PIL.Image.frombytes('RGB', array.shape[:2], array.astype(numpy.uint8).tobytes())
return PIL.Image.fromarray(array, 'RGB')
class Vision:
# How many light levels can a tile differ (in either direction) from the baseline before the tile is no longer
    # considered empty. This relies on integer rollover to avoid needing an int16 over a uint8.
MAX_EMPTY_TOLERANCE = 2
@staticmethod
def _getimage(what):
if isinstance(what, (str, bytes, pathlib.Path)):
what = PIL.Image.open(what)
if what.mode != 'RGB':
what = what.convert('RGB')
return what
def __init__(self, baseline=None, composites=None, extents=None):
"""
Handles image processing state functionality.
:param baseline: Baseline image. If this is a string or Path object, it is assumed to be a filename and is
loaded.
:param composites: Optional dictionary of composite images (or image filenames), with IDs as keys.
:param extents: Rectangle of the area we're interested in. Default is the whole image.
"""
self.baseline = self._getimage(baseline)
if extents:
self.baseline = self.baseline.crop(extents.coords)
else:
extents = geometry.Rect(geometry.Point.ORIGIN, self.baseline.size)
self.baseline = imageutil.numpify(self.baseline)
self.baseline.flags.writeable = True
# Some processing.
self.baseline += self.MAX_EMPTY_TOLERANCE
self.baseline[self.baseline < self.MAX_EMPTY_TOLERANCE] = 255 # Cap off what just rolled over
self.extents = extents
self.offset = -self.extents.xy1
self.composites = {}
if composites is not None:
for key, image in composites.items():
self.add_composite(key, image)
self.image = None
def add_composite(self, key, image):
self.composites[key] = imageutil.numpify(self._getimage(image)).astype(numpy.int16)
def match(self, tile):
"""Finds the composite that most closely matches the source tile's image."""
coords = (tile.sample_rect + self.offset).coords
base = self.baseline[coords[1]:coords[3], coords[0]:coords[2], 0:3]
cropped = self.image.crop(coords)
if numpy.all(base - imageutil.numpify(cropped) < 2*self.MAX_EMPTY_TOLERANCE):
return None
data = imageutil.numpify(imageutil.equalize(cropped)).astype(numpy.int16)
buf = numpy.ndarray(data.shape, data.dtype)
unsigned = buf.view(numpy.uint16)
best = None
bestscore = None
for key, composite in self.composites.items():
numpy.subtract(data, composite, out=buf) # Initialize buf with a difference between the two arrays
# We casually convert between signed and unsigned here, and the math just happens to work out due to
# sign extension and truncation.
unsigned **= 2 # Raise all values to power of 2.
score = numpy.sum(unsigned)
if bestscore is None or score < bestscore:
bestscore = score
best = key
return best
def screenshot(self):
"""Sets the image to a screenshot"""
self.set_image(
pyscreenshot.grab(self.extents.coords), cropped=True
)
def set_image(self, image, cropped=False):
"""Sets the image"""
image = self._getimage(image)
if not cropped and (self.extents.xy1 != geometry.Point.ORIGIN or self.extents.xy2 != image.size):
image = image.crop(self.extents.coords)
self.image = image
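# Illustrative self-check (not part of the original module) of the signed/unsigned
# squaring trick used in match(): viewing the int16 difference buffer as uint16
# before squaring is safe because squares agree modulo 2**16, e.g. -3 maps to 65533
# and 65533**2 % 65536 == 9 == (-3)**2.
if __name__ == '__main__':
demo = numpy.array([-3, 5], dtype=numpy.int16)
unsigned_demo = demo.view(numpy.uint16)
unsigned_demo **= 2  # wraps modulo 2**16, matching the scoring loop above
assert unsigned_demo.tolist() == [9, 25]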
|
dewiniaid/sigsolve
|
sigsolve/vision.py
|
vision.py
|
py
| 3,821 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.Image.fromarray",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.Image.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "sigsolve.geometry.Rect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sigsolve.geometry",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "sigsolve.geometry.Point",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sigsolve.imageutil.numpify",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sigsolve.imageutil",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "sigsolve.imageutil.numpify",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sigsolve.imageutil",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "numpy.int16",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.all",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sigsolve.imageutil.numpify",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sigsolve.imageutil",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "sigsolve.imageutil.numpify",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sigsolve.imageutil",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "sigsolve.imageutil.equalize",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.uint16",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "numpy.subtract",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pyscreenshot.grab",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sigsolve.geometry.Point",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "sigsolve.geometry",
"line_number": 96,
"usage_type": "name"
}
] |
26234013938
|
#!/usr/bin/python3
"""Starts a basic flask web application"""
from flask import Flask, render_template
from markupsafe import escape
from models import storage
from models.state import State
from models.city import City
app = Flask(__name__)
@app.teardown_appcontext
def teardown(exception):
    """Procedure to run after each request; closes the storage session."""
storage.close()
@app.route("/states_list", strict_slashes=False)
def states_list():
"""Function to run when '/states_list' is accessed"""
states = [state for state in storage.all(State).values()]
states.sort(reverse=False, key=lambda state: state.name)
return (render_template('7-states_list.html', states=states))
@app.route("/cities_by_states", strict_slashes=False)
def cities_by_states():
"""Function to run when '/cities_by_states' is accessed"""
states = storage.all(State).values()
return (render_template('8-cities_by_states.html', states=states))
if (__name__ == '__main__'):
app.run(host='0.0.0.0', port=5000, debug=False)
|
AndyMSP/holbertonschool-AirBnB_clone_v2
|
web_flask/8-cities_by_states.py
|
8-cities_by_states.py
|
py
| 1,008 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.storage.close",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.storage.all",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.state.State",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "models.storage",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.storage.all",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "models.state.State",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "models.storage",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 31,
"usage_type": "call"
}
] |
35515894022
|
import sys
sys.path.append('..')
from common.wrapped_input import wrapped_input
from common.clean_screen import clean_screen
__TERMINATE_MARKS__ = ['***', '****']
class Reader:
def __init__(self, args):
self.loop = True
def run(self, parser):
print("""
_ __ __,
( / ) o (
/--< _ __, , _ _ `. ,_ __ ,
/___// (_(_/(_(_/ / /_(___)_/|_)_/ (_/_
/| /
Interactive shell (/ '
==========================================
""")
last_input = ''
while last_input not in __TERMINATE_MARKS__:
last_input = wrapped_input()
if last_input in __TERMINATE_MARKS__:
print('[INFO] Querying, please wait...')
return last_input
parser.add(last_input)
|
ezPsycho/brainSpy-cli
|
src/readers/interactive.py
|
interactive.py
|
py
| 881 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "common.wrapped_input.wrapped_input",
"line_number": 28,
"usage_type": "call"
}
] |
39269318225
|
import logging
import os
import random
import sys
from functools import wraps
from pprint import pformat
from subprocess import Popen, PIPE
from threading import Thread
from dim import db
from dim.models.dns import OutputUpdate
from dim.rpc import TRPC
from tests.pdns_test import PDNSTest
from tests.pdns_util import compare_dim_pdns_zones, this_dir, test_pdns_output_process
def delete_record(rpc, r):
rpc.rr_delete(zone=r['zone'], name=r['record'], type=r['type'], **r['value'])
def add_record(rpc, r):
rpc.rr_create(zone=r['zone'], name=r['record'], type=r['type'], ttl=r['ttl'], **r['value'])
def extract(l, selected_idx):
'''split l into two lists: elements with indices in selected and the rest'''
selected = []
rejected = []
selected_idx = set(selected_idx)
for i, e in enumerate(l):
if i in selected_idx:
selected.append(e)
else:
rejected.append(e)
return selected, rejected
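# Illustrative example (not from the original test suite):
# extract(['a', 'b', 'c', 'd'], [0, 2]) returns (['a', 'c'], ['b', 'd'])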
class TestRequestProxy(object):
''''
Simulate the flask lifecycle of a request by creating a new TRPC instance and request context
(which in turns creates a new db session)
'''
def __init__(self, username, app):
self.app = app
self.username = username
def __getattr__(self, name):
if not name.startswith('_'):
obj = TRPC(username=self.username)
func = getattr(obj, name)
if callable(func):
@wraps(func)
def wrapper(*args, **kwargs):
with self.app.test_request_context():
return func(*args, **kwargs)
return wrapper
raise AttributeError
done = False
def run_test(app, zone, pdns_output, db_uri, pdns_ip):
global done
try:
rpc = TestRequestProxy('test_user', app)
def check_zone():
global done
pdns_output.wait_updates(zone)
if not compare_dim_pdns_zones(rpc, pdns_ip, {zone: None}):
done = True
if done:
sys.exit()
check_zone()
rpc.zone_dnssec_enable(zone, nsec3_algorithm=1, nsec3_iterations=1, nsec3_salt='deadcafe')
check_zone()
records = rpc.rr_list(zone=zone, value_as_object=True)
created = [r for r in records if r['type'] not in ('SOA', 'DNSKEY')]
deleted = []
total = len(created)
for _ in range(30):
selected = random.sample(range(total), random.randint(1, 5))
midpoint = len(created)
to_del, created = extract(created, [i for i in selected if i < midpoint])
to_add, deleted = extract(deleted, [i - midpoint for i in selected if i >= midpoint])
created.extend(to_add)
deleted.extend(to_del)
print('Adding', pformat(to_add))
print('Deleting', pformat(to_del))
for r in to_del:
delete_record(rpc, r)
for r in to_add:
add_record(rpc, r)
check_zone()
rpc.zone_dnssec_disable(zone)
check_zone()
except:
logging.exception('Exception in run_test')
done = True
def import_zone(zone):
proc = Popen(['ndcli', 'import', 'zone', zone], stdin=PIPE, stdout=PIPE, universal_newlines=True)  # text mode, so communicate() accepts the str zone contents
zone_contents = open(this_dir(zone)).read()
stdout, stderr = proc.communicate(zone_contents)
if proc.returncode != 0:
raise Exception('zone import failed')
class PDNSOutputProcess(object):
def __enter__(self):
self.proc = test_pdns_output_process(True)
return self
def __exit__(self, *args):
self.proc.kill()
self.proc = None
def wait_updates(self, zone):
'''Wait for all updates to be processed'''
with test.app.test_request_context():
while True:
db.session.rollback()
if OutputUpdate.query.filter(OutputUpdate.zone_name == zone).count() == 0:
break
else:
os.read(self.proc.stdout.fileno(), 1024)
if __name__ == '__main__':
zones = {'web.de': {'db_uri': 'mysql://pdns:[email protected]:3307/pdns1',
'pdns_ip': '127.1.1.1'},
'web2.de': {'db_uri': 'mysql://pdns:[email protected]:3307/pdns2',
'pdns_ip': '127.2.2.2'}}
global test
test = PDNSTest('__init__')
test.setUp()
for zone in list(zones.keys()):
test.cleanup_pdns_db(zones[zone]['db_uri'])
import_zone(zone)
test.create_output_for_zone(zone, zone, zone, db_uri=zones[zone]['db_uri'])
with PDNSOutputProcess() as pdns_output:
threads = []
for zone, attr in zones.items():
t = Thread(target=run_test, args=(test.app, zone, pdns_output), kwargs=attr)
t.start()
threads.append(t)
for t in threads:
while t.is_alive():
t.join(0.1)
|
1and1/dim
|
dim-testsuite/tests/pdns_changes.py
|
pdns_changes.py
|
py
| 4,950 |
python
|
en
|
code
| 39 |
github-code
|
6
|
[
{
"api_name": "dim.rpc.TRPC",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tests.pdns_util.compare_dim_pdns_zones",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "tests.pdns_util.this_dir",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tests.pdns_util.test_pdns_output_process",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "dim.db.session.rollback",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "dim.db.session",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "dim.db",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "dim.models.dns.OutputUpdate.query.filter",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "dim.models.dns.OutputUpdate.query",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "dim.models.dns.OutputUpdate",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "dim.models.dns.OutputUpdate.zone_name",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "os.read",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tests.pdns_test.PDNSTest",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 149,
"usage_type": "call"
}
] |
5119440044
|
from netaddr import IPNetwork, IPAddress
import logging
from pymongo import MongoClient
logger = logging.getLogger( "ucn_logger" )
class VPNResolve(object):
def __init__( self, cidr, dbcfg):
self.logscollection = dbcfg['logscollection']
self.devicecollection = dbcfg['devicecollection']
self.db = dbcfg['db']
self.cidr = cidr
self.mc = MongoClient(dbcfg['host'], dbcfg['port'])
def clientip(self, request):
if len(request.access_route) > 1:
host = request.access_route[-1]
else:
host = request.access_route[0]
logger.debug("seen a client ip %s" % host)
if IPAddress(host) not in IPNetwork(self.cidr):
logger.debug("is not local, looking up in openvpn status")
return self.findlocal(host)
else:
return host
def findlocal(self, host):
db = self.mc[self.db]
devices = db[self.logscollection].find({"untrusted_client_ip": host}).sort("ts", -1).limit(1)
devicename = None
protocol = None
for device in devices:
devicename = device['common_name']
protocol = device['proto']
#now lookup device name in the devices collection
device = db[self.devicecollection].find_one({"login":devicename})
if device is not None:
if protocol is not None:
if protocol == "udp":
if 'vpn_udp_ip' in device:
logger.debug("retreived udp ip %s" % device['vpn_udp_ip'])
return device['vpn_udp_ip']
elif protocol == "tcp":
if 'vpn_tcp_ip' in device:
logger.debug("retreived tcp ip %s" % device['vpn_tcp_ip'])
return device['vpn_tcp_ip']
logger.debug("no corresponding ip for %s in db" % host)
return None
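# Minimal usage sketch (configuration values below are hypothetical):
# resolver = VPNResolve('10.8.0.0/24', {'host': 'localhost', 'port': 27017, 'db': 'ucn',
#                       'logscollection': 'openvpn_logs', 'devicecollection': 'devices'})
# resolver.findlocal('203.0.113.7')  # maps a public client IP to its VPN-assigned address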
|
ucn-eu/ucnviz
|
vpnresolve.py
|
vpnresolve.py
|
py
| 1,620 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "netaddr.IPAddress",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "netaddr.IPNetwork",
"line_number": 25,
"usage_type": "call"
}
] |
11545903852
|
import modules.processing_turn as m_turn
import modules.data_base as m_data
def click_cell(x,y):
# Condition for the first row of the grid
if y < 100 and y > 0:
# Condition for the first cell along x
if x > -100 and x < 0 and m_data.list_cells[0] == 0:
m_turn.who_turn(-100, 100, 0)
# Condition for the second cell along x
elif x < 100 and x > 0 and m_data.list_cells[1] == 0:
m_turn.who_turn(0, 100, 1)
# Condition for the third cell along x
elif x > 100 and x < 200 and m_data.list_cells[2] == 0:
m_turn.who_turn(100, 100, 2)
# Condition for the second row of the grid
elif y < 0 and y > -100:
# Condition for the fourth cell along x
if x > -100 and x < 0 and m_data.list_cells[3] == 0:
m_turn.who_turn(-100, 0, 3)
# Condition for the fifth cell along x
elif x < 100 and x > 0 and m_data.list_cells[4] == 0:
m_turn.who_turn(0, 0, 4)
# Condition for the sixth cell along x
elif x > 100 and x < 200 and m_data.list_cells[5] == 0:
m_turn.who_turn(100, 0, 5)
# Condition for the third row of the grid
elif y < -100 and y > -200:
# Condition for the seventh cell along x
if x > -100 and x < 0 and m_data.list_cells[6] == 0:
m_turn.who_turn(-100, -100, 6)
# Condition for the eighth cell along x
elif x < 100 and x > 0 and m_data.list_cells[7] == 0:
m_turn.who_turn(0, -100, 7)
# Condition for the ninth cell along x
elif x > 100 and x < 200 and m_data.list_cells[8] == 0:
m_turn.who_turn(100, -100, 8)
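# Worked example (illustrative): a click at x=50, y=50 satisfies 0 < y < 100 and
# 0 < x < 100, so it falls in cell index 1 and calls m_turn.who_turn(0, 100, 1).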
|
BoiarkinaOryna/cross_zero_game
|
modules/checking_square_coordinates.py
|
checking_square_coordinates.py
|
py
| 1,655 |
python
|
uk
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "modules.data_base.list_cells",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "modules.data_base.list_cells",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "modules.data_base",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "modules.processing_turn.who_turn",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "modules.processing_turn",
"line_number": 36,
"usage_type": "name"
}
] |
26807586503
|
from src.utils.all_utils import read_yaml, create_directory
import argparse
import os
import shutil
from tqdm import tqdm
import logging
log_string = "[%(asctime)s: %(levelname)s: %(module)s]: %(message)s"
logs_dir = "Logs"
os.makedirs(logs_dir,exist_ok=True)
logging.basicConfig(filename=os.path.join(logs_dir,"Running_Logs.log"),level=logging.INFO,format=log_string,filemode='a')
def copy_file(source_download_dir,local_data_dir):
source_files = os.listdir(source_download_dir)
N = len(source_files)
for file in tqdm(source_files,total=N,desc= f"Copying File from {source_download_dir} to {local_data_dir}", colour="green"):
src = os.path.join(source_download_dir,file)
dst = os.path.join(local_data_dir,file)
shutil.copy(src, dst)
def get_data(config_path):
config = read_yaml(config_path)
source_download_dirs = config["source_download_dirs"]
local_data_dirs = config["local_data_dirs"]
for source_download_dir,local_data_dir in tqdm(zip(source_download_dirs,local_data_dirs),total=2,desc= "List of Folders", colour="cyan"):
create_directory([local_data_dir])
copy_file(source_download_dir,local_data_dir)
if __name__ == '__main__':
args = argparse.ArgumentParser()
args.add_argument("--config", "-c", default="config/config.yaml")
parsed_args = args.parse_args()
try:
logging.info(">>>>>Stage-01 Started...")
get_data(config_path=parsed_args.config)
logging.info("Stage-01 Completed , Data saved into local Directory <<<<<<\n")
except Exception as e:
raise e
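# Expected shape of config/config.yaml (paths below are illustrative, not from the repo):
# source_download_dirs:
#   - data/source/train
#   - data/source/test
# local_data_dirs:
#   - data/local/train
#   - data/local/test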
|
vicharapubhargav/dvc_tensorflow_demo
|
src/stage_01_load_save.py
|
stage_01_load_save.py
|
py
| 1,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.makedirs",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "src.utils.all_utils",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "src.utils.all_utils",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "src.utils.all_utils.read_yaml",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "src.utils.all_utils.create_directory",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
}
] |
6729300182
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
@file: oracle_cls.py
@author: ImKe at 2022/2/23
@email: [email protected]
@feature: #Enter features here
"""
import torch.nn as nn
import torch
import datetime, os, copy, math, time, collections, argparse, nltk, json, sys
sys.path.append('../')
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from tensorboardX import SummaryWriter
from src.logger import Logger
from src.data import ConditionalGenerationDataset
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config, AdamW, get_linear_schedule_with_warmup
parser = argparse.ArgumentParser()
# Default parameters are set based on single GPU training
parser.add_argument('--lr', type=float, default=5e-5)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument('--class_num', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--max_length', type=int, default=30)
parser.add_argument('--iterations', type=int, default=15000 * 3)
parser.add_argument('--dataset', type=str, default='yelp_polarity', choices=['yelp_polarity', 'imdb_polarity'],
help="Dataset to use for training")
parser.add_argument('--out_dir', type=str, default='cls_train_out')
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--no_gpu', action="store_true")
parser.add_argument('--workers', default=2, type=int, metavar='N',
help='number of data loading workers')
def tokenize(texts, tokenizer, device, args):
# tokenizer.pad_token = tokenizer.eos_token
x_tokenized = tokenizer(texts, padding=True,
truncation=True,
return_tensors='pt', max_length=args.max_length)
input_ids = x_tokenized['input_ids'][:, :-1].to(device)
attention_mask = x_tokenized['attention_mask'][:, 1:].to(device)
x_ids = x_tokenized['input_ids'][:, 1:].contiguous().to(device)
## target, input tokens, mask
return x_ids, input_ids, attention_mask
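# Shape sketch (illustrative): for a batch tokenized to length L,
# input_ids -> tokens[:, :L-1] (what the model conditions on),
# x_ids -> tokens[:, 1:L] (next-token targets, shifted by one),
# attention_mask -> mask[:, 1:L] (aligned with the targets).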
class Oracle_Classifier(nn.Module):
def __init__(self, config, class_num, wte):
super(Oracle_Classifier, self).__init__()
self.class_num = class_num
self.gpt_embeddings = nn.Embedding(config.vocab_size, config.n_embd)
self.gpt_embeddings.weight.data = wte.weight.data
self.conv1 = nn.Conv1d(config.hidden_size, config.hidden_size, 3)
self.classifier = nn.Linear(config.hidden_size, 1 if self.class_num <= 2 else self.class_num)
self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss()
def step(self, optimizer, loss):
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
def forward(self, sentences, cond_labels):
ft = self.gpt_embeddings(sentences)
ft = self.conv1(ft.transpose(1, 2))
ft = torch.mean(ft, dim=-1)
ft = self.classifier(ft)
prob_cls = ft.squeeze(1)
loss_cls = self.BCEWithLogitsLoss(prob_cls, cond_labels.float())
pred_cls = (prob_cls >= 0).to(dtype=torch.long)
acc_cls = (pred_cls == cond_labels).float()
return loss_cls, acc_cls
def train(args):
# GPU
if not torch.cuda.is_available(): args.no_gpu = True
gpu = not args.no_gpu
if gpu:
print("There are ", torch.cuda.device_count(), " available GPUs!")
# print('Setting GPUs {}'.format(args.device))
# print('Using GPU devices {}'.format(devices))
torch.cuda.set_device(args.gpu)
print('Current single GPU: {}'.format(torch.cuda.current_device()))
device = torch.device(args.gpu if gpu else "cpu")
# randomness
np.random.seed(args.seed)
prng = np.random.RandomState()
torch.random.manual_seed(args.seed)
if gpu: torch.cuda.manual_seed(args.seed); torch.cuda.manual_seed_all(args.seed)
save_folder = os.path.join(args.out_dir, "oracle_cls")
os.makedirs(save_folder, exist_ok=True)
t_writer = SummaryWriter(os.path.join(save_folder, 'train'), flush_secs=5)
v_writer = SummaryWriter(os.path.join(save_folder, 'val'), flush_secs=5)
logging_file = "oracle_cls.log"
logging = Logger(os.path.join(args.out_dir, logging_file))
# t_writer = SummaryWriter(os.path.join(save_folder, 'train'), flush_secs=5)
logging.info('\n*******************************************************************************\n')
logging.info("the configuration:")
logging.info(str(args).replace(',', '\n'))
logging.info('Loading models...')
config = GPT2Config()
gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2', cache_dir='/home/tuhq/.cache/torch/transformers')
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir='/home/tuhq/.cache/torch/transformers')
tokenizer.pad_token = tokenizer.eos_token
model = Oracle_Classifier(config, args.class_num, wte=gpt2_model.transformer.wte)
optimizer = AdamW(model.parameters(), lr=args.lr, correct_bias=True)
model = model.to(device)
model.train()
logging.info('Setup data...')
train_loader = DataLoader(
ConditionalGenerationDataset.from_file(f"../data/{args.dataset}/train.txt"),
batch_size=args.batch_size,
pin_memory=True,
drop_last=False,
shuffle=True,
num_workers=args.workers)
test_loader = DataLoader(
ConditionalGenerationDataset.from_file(f"../data/{args.dataset}/test.txt"),
batch_size=args.batch_size,
pin_memory=True,
drop_last=False,
shuffle=True,
num_workers=args.workers)
val_loader = DataLoader(
ConditionalGenerationDataset.from_file(f"../data/{args.dataset}/valid.txt"),
batch_size=args.batch_size,
pin_memory=True,
drop_last=False,
shuffle=True,
num_workers=args.workers)
logging.info('Done.')
def val_step(val_loader):
model.eval()
val_loss_list, val_acc_list = [], []
with tqdm(total=min(len(val_loader), max_val_batches), desc="Evaluating Model") as pbar:
for i, val_data_dict in enumerate(val_loader):
with torch.no_grad():
val_x_ids, val_input_ids, val_attention_mask = tokenize(val_data_dict['x'], tokenizer, device, args)
val_labels = torch.tensor(val_data_dict['y']).to(device)
val_loss_cls, val_acc_cls = model(val_input_ids, val_labels)
val_loss_list.append(val_loss_cls.item())
val_acc_list.append(val_acc_cls.mean().item())
val_loss = np.mean(val_loss_list)
val_acc = np.mean(val_acc_list)
val_loss_std = np.std(val_loss_list)
val_acc_std = np.std(val_acc_list)
logging.info("val loss: %.4f + %.4f" % (val_loss, val_loss_std))
logging.info("val acc : %.4f + %.4f" % (val_acc, val_acc_std))
model.train()
return val_acc
best_acc = 0.0
logging.info("Begin training iterations")
max_val_batches = 200 # max num. of val batches
logging.info("Total iteration: %d" % args.iterations)
e = 0 # number of epoch
num_iters = 0
et = 0
while num_iters < args.iterations:
# Run epoch
# Training
print('Training loop. Batches:', len(train_loader))
logging.info('\n----------------------------------------------------------------------')
logging.info("Training loop. Batches: %d" % len(train_loader))
with tqdm(total=len(train_loader)) as pbar:
for i, data_dict in enumerate(train_loader):
x_ids, input_ids, attention_mask = tokenize(data_dict['x'], tokenizer, device, args)
cond_labels = torch.tensor(data_dict['y']).to(device)
loss_cls, acc_cls = model(input_ids, cond_labels)
loss = model.step(optimizer, loss_cls)
acc_cls = acc_cls.mean()
t_writer.add_scalar('loss', loss, num_iters)
t_writer.add_scalar('acc', acc_cls, num_iters)
end = num_iters >= args.iterations
if end:
break
num_iters += 1
pbar.update(1)
if (num_iters + 1) % 2000 == 0:
logging.info("Test dataset")
_ = val_step(test_loader)
logging.info("Valid dataset")
val_acc = val_step(val_loader)
if val_acc > best_acc:
best_acc = val_acc
save_orderdict = model.state_dict()
torch.save(save_orderdict, os.path.join(save_folder, 'oracle_cls_best.pt'))
else:
et += 1
if et >= 5:
logging.info("Early Stopping..")
break
if not end:
e += 1
logging.info("Training loop. The ith epoch completed: %d" % e)
save_orderdict = model.state_dict()
torch.save(save_orderdict, os.path.join(save_folder, 'oracle_cls_latest.pt'))
logging.info("Test dataset")
val_step(test_loader)
logging.info("Valid dataset")
val_step(val_loader)
logging.info("-" * 50)
logging.info("best acc: {:.4f}".format(best_acc))
if __name__ == '__main__':
args = parser.parse_args()
train(args)
|
ImKeTT/AdaVAE
|
controlgen/oracle_cls.py
|
oracle_cls.py
|
py
| 9,368 |
python
|
en
|
code
| 32 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "torch.mean",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.device_count",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.set_device",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.current_device",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "torch.random.manual_seed",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.random",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.manual_seed",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "src.logger.Logger",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "transformers.GPT2Config",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2LMHeadModel.from_pretrained",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2LMHeadModel",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "transformers.GPT2Tokenizer.from_pretrained",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2Tokenizer",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "transformers.AdamW",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "src.data.ConditionalGenerationDataset.from_file",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "src.data.ConditionalGenerationDataset",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "src.data.ConditionalGenerationDataset.from_file",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "src.data.ConditionalGenerationDataset",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "src.data.ConditionalGenerationDataset.from_file",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "src.data.ConditionalGenerationDataset",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 227,
"usage_type": "attribute"
}
] |
39629119175
|
import numpy as np
import glob
import os
import pandas as pd
from tqdm import tqdm
import nltk
import string
from nltk.tokenize import word_tokenize
import random
import pickle
from nltk.corpus import stopwords
from autocorrect import Speller
import re
from nltk.corpus import wordnet
from nltk.stem.wordnet import WordNetLemmatizer
from hyperopt import fmin, tpe, hp
# load a document
def load(filename):
file = open(filename, encoding='utf-8')
text = file.read()
file.close()
return text
# split a document into news story and highlights
def split(doc):
# find first highlight
index = doc.find('@highlight')
# split into story and highlights
story, highlights = doc[:index], doc[index:].split('@highlight')
# strip extra white space around each highlight
highlights = [h.strip() for h in highlights if len(h) > 0]
return story, highlights
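# Illustrative example: for doc = 'Story text.\n@highlight\nFirst point\n@highlight\nSecond point',
# split(doc) returns ('Story text.\n', ['First point', 'Second point']).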
# load all stories from a directory
def load_stories(directory):
stories = []
for name in os.listdir(directory):
filename = directory + '/' + name
# load document
doc = load(filename)
# split into story and highlights
story, highlights = split(doc)
# store
stories.append({'story':story, 'highlights':highlights})
return stories
directory = r'C:\Users\ymaha\Desktop\cnn\stories'
stories = load_stories(directory)
print('Loaded Stories %d' % len(stories))
def preprocessing(lines):
# function to convert nltk tag to wordnet tag
def nltk_tag_to_wordnet_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return None
def lemmatize_sentence(sentence):
#tokenize the sentence and find the POS tag for each token
nltk_tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
#tuple of (token, wordnet_tag)
wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)
# print(wordnet_tagged)
lemmatized_sentence = []
for word, tag in wordnet_tagged:
if tag is None:
#if there is no available tag, append the token as is
lemmatized_sentence.append(word)
else:
#else use the tag to lemmatize the token
lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))
# if tag is not None:
# lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))
return " ".join(lemmatized_sentence)
temp = []
for line in lines:
# strip source cnn
index = line.find('(CNN)')
if index > -1:
line = line[index+len('(CNN)'):]
# tokenize on white space
line = line.split()
# convert to lower case
line = [word.lower() for word in line]
# remove punctuation and special characters from each token (str.replace treats the
# pattern literally, so a regex substitution is needed here)
line = [re.sub(r'[<>!#@$:.,%?\-_]+', ' ', w) for w in line]
# remove non-ascii characters
line = [re.sub(r'[^\x00-\x7f]', ' ', w) for w in line]
# remove tokens with numbers in them
line = [word for word in line if word.isalpha()]
# # removing stop words
# line = [word for word in line if word not in stop_list]
# removing words of length 1
line = [word for word in line if len(word) > 1]
# # Lemmatizing the words and combing them into a line
# temp.append(lemmatize_sentence(' '.join(line)))
# Combining the words into a line
temp.append(' '.join(line))
# remove empty strings
temp = [c for c in temp if len(c) > 0]
return temp
stop_list = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
stemmer = nltk.stem.PorterStemmer()
for i in tqdm(range(len(stories))):
# for example in stories:
stories[i]['story'] = preprocessing(stories[i]['story'].split('\n'))
stories[i]['highlights'] = preprocessing(stories[i]['highlights'])
# save to file
from pickle import dump
dump(stories, open('processed_cnn_data.pkl', 'wb'))
|
kalyankumarp/Abstractive-Text-Summarization-using-Transformers
|
Models/preprocess.py
|
preprocess.py
|
py
| 4,310 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet.ADJ",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.VERB",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.NOUN",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.ADV",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "nltk.pos_tag",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "nltk.stem.wordnet.WordNetLemmatizer",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "nltk.stem.PorterStemmer",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "nltk.stem",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 130,
"usage_type": "call"
}
] |
6460552932
|
import sys
import click
import logging
from pprint import pprint
from ftmstore import get_dataset
from servicelayer.cache import get_redis, get_fakeredis
from servicelayer.logs import configure_logging
from servicelayer.jobs import Job, Dataset
from servicelayer import settings as sl_settings
from servicelayer.archive.util import ensure_path
from ingestors import settings
from ingestors.manager import Manager
from ingestors.directory import DirectoryIngestor
from ingestors.analysis import Analyzer
from ingestors.worker import IngestWorker, OP_ANALYZE, OP_INGEST
log = logging.getLogger(__name__)
STAGES = [OP_ANALYZE, OP_INGEST]
@click.group()
def cli():
configure_logging(level=logging.DEBUG)
@cli.command()
@click.option("-s", "--sync", is_flag=True, default=False, help="Run without threads")
def process(sync):
"""Start the queue and process tasks as they come. Blocks while waiting"""
num_threads = None if sync else sl_settings.WORKER_THREADS
worker = IngestWorker(stages=STAGES, num_threads=num_threads)
code = worker.run()
sys.exit(code)
@cli.command()
@click.argument("dataset")
def cancel(dataset):
"""Delete scheduled tasks for given dataset"""
conn = get_redis()
Dataset(conn, dataset).cancel()
@cli.command()
def killthekitten():
"""Completely kill redis contents."""
conn = get_redis()
conn.flushall()
def _ingest_path(db, conn, dataset, path, languages=None):
    context = {"languages": languages or []}
job = Job.create(conn, dataset)
stage = job.get_stage(OP_INGEST)
manager = Manager(db, stage, context)
path = ensure_path(path)
if path is not None:
if path.is_file():
entity = manager.make_entity("Document")
checksum = manager.store(path)
entity.set("contentHash", checksum)
entity.make_id(checksum)
entity.set("fileName", path.name)
log.info("Queue: %r", entity.to_dict())
manager.queue_entity(entity)
if path.is_dir():
DirectoryIngestor.crawl(manager, path)
manager.close()
@cli.command()
@click.option("--languages", multiple=True, help="3-letter language code (ISO 639)")
@click.option("--dataset", required=True, help="Name of the dataset")
@click.argument("path", type=click.Path(exists=True))
def ingest(path, dataset, languages=None):
"""Queue a set of files for ingest."""
conn = get_redis()
db = get_dataset(dataset, OP_INGEST)
_ingest_path(db, conn, dataset, path, languages=languages)
@cli.command()
@click.option("--dataset", required=True, help="Name of the dataset")
def analyze(dataset):
db = get_dataset(dataset, OP_ANALYZE)
analyzer = None
for entity in db.partials():
if analyzer is None or analyzer.entity.id != entity.id:
if analyzer is not None:
analyzer.flush()
# log.debug("Analyze: %r", entity)
analyzer = Analyzer(db, entity, {})
analyzer.feed(entity)
if analyzer is not None:
analyzer.flush()
@cli.command()
@click.option("--languages", multiple=True, help="3-letter language code (ISO 639)")
@click.argument("path", type=click.Path(exists=True))
def debug(path, languages=None):
"""Debug the ingest for the given path."""
conn = get_fakeredis()
settings.fts.DATABASE_URI = "sqlite:////tmp/debug.sqlite3"
db = get_dataset("debug", origin=OP_INGEST, database_uri=settings.fts.DATABASE_URI)
db.delete()
_ingest_path(db, conn, "debug", path, languages=languages)
worker = IngestWorker(conn=conn, stages=STAGES)
worker.sync()
for entity in db.iterate():
pprint(entity.to_dict())
if __name__ == "__main__":
cli()
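# Typical invocations (sketch; dataset name and path are placeholders):
#   python ingestors/cli.py ingest --dataset my_dataset /path/to/files
#   python ingestors/cli.py analyze --dataset my_dataset
#   python ingestors/cli.py process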
|
alephdata/ingest-file
|
ingestors/cli.py
|
cli.py
|
py
| 3,714 |
python
|
en
|
code
| 45 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ingestors.worker.OP_ANALYZE",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "ingestors.worker.OP_INGEST",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "servicelayer.logs.configure_logging",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "click.group",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "servicelayer.settings.WORKER_THREADS",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "servicelayer.settings",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "ingestors.worker.IngestWorker",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "servicelayer.cache.get_redis",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "servicelayer.jobs.Dataset",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "servicelayer.cache.get_redis",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "servicelayer.jobs.Job.create",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "servicelayer.jobs.Job",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "ingestors.worker.OP_INGEST",
"line_number": 55,
"usage_type": "argument"
},
{
"api_name": "ingestors.manager.Manager",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "servicelayer.archive.util.ensure_path",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "ingestors.directory.DirectoryIngestor.crawl",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "ingestors.directory.DirectoryIngestor",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "servicelayer.cache.get_redis",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "ftmstore.get_dataset",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "ingestors.worker.OP_INGEST",
"line_number": 79,
"usage_type": "argument"
},
{
"api_name": "click.option",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "click.Path",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "ftmstore.get_dataset",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "ingestors.worker.OP_ANALYZE",
"line_number": 86,
"usage_type": "argument"
},
{
"api_name": "ingestors.analysis.Analyzer",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "servicelayer.cache.get_fakeredis",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "ingestors.settings.fts",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "ingestors.settings",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "ftmstore.get_dataset",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "ingestors.worker.OP_INGEST",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "ingestors.settings.fts",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "ingestors.settings",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "ingestors.worker.IngestWorker",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "click.Path",
"line_number": 101,
"usage_type": "call"
}
] |
36151078302
|
import sqlite3 as lite
import sys
from bs4 import BeautifulSoup
import requests
import re
def site_parsing():
max_page = 10
pages = []
id_n = 0
id_n_price = 0
for x in range(1, max_page + 1):
pages.append(requests.get('https://moto.drom.ru/sale/+/Harley-Davidson+Softail/'))
for n in pages:
soup = BeautifulSoup(n.text, 'html.parser')
moto_name = soup.find_all('a', class_="bulletinLink bull-item__self-link auto-shy")
for rev in moto_name:
id_n += 1
a = str(rev.text)
moto = re.split(r',', a)
moto_name_s = str(moto[0])
moto_year = re.sub(r'[ ]', '', moto[1])
moto_year_s = int(moto_year)
cur.execute("INSERT INTO moto VALUES(?,?,?)", (id_n, moto_name_s, moto_year_s))
price = soup.find_all('span', class_='price-block__price')
pattern = r'(\d{1}\s\d{3}\s\d{3})|(\d{3}\s\d{3})'
for rev in price:
id_n_price += 1
price_str = re.findall(pattern, rev.text)
price_str = str(price_str)
price_str = price_str.replace('\\xa0', '')
price_str = re.sub(r"[\]['(),\s]", '', price_str)
price_int = int(price_str)
cur.execute("INSERT INTO moto_price VALUES(?,?)", (id_n_price, price_int))
connect = None
try:
connect = lite.connect('motos.db')
cur = connect.cursor()
cur.execute("CREATE TABLE moto(id INT, moto TEXT, year INT)")
cur.execute("CREATE TABLE moto_price(id INT, price INT)")
site_parsing()
except lite.Error as e:
print(f"Error {e.args[0]}:")
sys.exit()
with connect:
cur = connect.cursor()
rows_join = 'SELECT * FROM moto JOIN moto_price ON moto.id = moto_price.id'
cur.execute(rows_join)
rows = cur.fetchall()
for row in rows:
print(row)
connect.close()
|
TatyanaKuleshova/lesson19-project-
|
db.py
|
db.py
|
py
| 1,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 56,
"usage_type": "call"
}
] |
23948038488
|
import torch.nn as nn
import torch_geometric.nn as pyg_nn
class iVGAE_Encoder(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super().__init__()
self.conv0 = pyg_nn.GCNConv(in_channels, hidden_channels)
self.conv1 = pyg_nn.GCNConv(hidden_channels, hidden_channels)
self.lin_mean = nn.Linear(hidden_channels, out_channels)
self.lin_logstd = nn.Linear(hidden_channels, out_channels)
def forward(self, x, edge_index):
h = self.conv0(x, edge_index)
h = nn.ReLU()(h)
h = self.conv1(h, edge_index)
h = nn.ReLU()(h)
mean = self.lin_mean(h)
logstd = self.lin_logstd(h)
return mean, logstd
class iVGAE_Decoder(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super().__init__()
self.conv0 = pyg_nn.GCNConv(in_channels, hidden_channels)
self.conv1 = pyg_nn.GCNConv(hidden_channels, hidden_channels)
self.linear = nn.Linear(hidden_channels, out_channels)
def forward(self, z, edge_index, sigmoid=True):
h = self.conv0(z, edge_index)
h = nn.ReLU()(h)
h = self.conv1(h, edge_index)
h = nn.ReLU()(h)
out = self.linear(h)
if sigmoid:
out = nn.Sigmoid()(out)
return out
class iVGAE(pyg_nn.VGAE):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def decode(self, z, pos_edge_index):
x_gen = self.decoder(z, pos_edge_index)
return x_gen
def forward(self, x, pos_edge_index):
z = self.encode(x, pos_edge_index)
x_gen = self.decode(z, pos_edge_index)
return x_gen, z
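# Minimal usage sketch (dimensions are illustrative):
# encoder = iVGAE_Encoder(in_channels=16, hidden_channels=32, out_channels=8)
# decoder = iVGAE_Decoder(in_channels=8, hidden_channels=32, out_channels=16)
# model = iVGAE(encoder, decoder)
# x_gen, z = model(x, edge_index)  # x: [num_nodes, 16]; edge_index: [2, num_edges]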
|
DavidCarlyn/iVGAE
|
models.py
|
models.py
|
py
| 1,705 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch_geometric.nn.GCNConv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch_geometric.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch_geometric.nn.GCNConv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch_geometric.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch_geometric.nn.GCNConv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch_geometric.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch_geometric.nn.GCNConv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch_geometric.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torch_geometric.nn.VGAE",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch_geometric.nn",
"line_number": 38,
"usage_type": "name"
}
] |
17283528585
|
from solver import Solver
from config import Config
if __name__ == '__main__':
cfg = Config()
cfg.data_dir = "/data/face/parsing/dataset/ibugmask_release"
cfg.model_args.backbone = "STDCNet1446"
cfg.model_args.pretrain_model = "snapshot/STDCNet1446_76.47.tar"
solver = Solver(cfg)
solver.sample(sample_dir="/data/face/parsing/dataset/testset_210720_aligned", result_folder="result")
|
killf/U2Net4FaceParsing
|
test.py
|
test.py
|
py
| 409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "config.Config",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "solver.Solver",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "solver.sample",
"line_number": 11,
"usage_type": "call"
}
] |
14335019516
|
import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
from process_coordination import width_height, bool_boundaries, number_of_blocks
from streaming_functions import streaming, recalculate_functions
from plotting_functions import plot_velocity, plot_velocity_slice
# Initialize parallelization
comm = MPI.COMM_WORLD
size = comm.Get_size() # num of processes
rank = comm.Get_rank() # rank id of this process
n_timesteps = 20
n_plots = 3
# Initialize Grid:
nx_total = 20 # num of rows
ny_total = 16 # num of columns
# Arrange <size> blocks (num processes) as a optimized grid of
# <n_blocks[0]> rows times <n_blocks[1]> columns.
n_blocks = number_of_blocks((nx_total, ny_total), size)
# Initialize local grid parameters (local grid is the one of the block of this process):
# local size
nx, ny = width_height(rank, nx_total, ny_total, n_blocks)
nx_opt = nx_total//n_blocks[0]
ny_opt = ny_total//n_blocks[1]
# Initialize weights and discrete direction vectors
weights = np.array([4/9, 1/9, 1/9, 1/9, 1/9, 1/36, 1/36, 1/36, 1/36])
c = np.array([[0, 0], [0, 1], [-1, 0], [0, -1], [1, 0], [-1, 1], [-1, -1], [1, -1], [1, 1]])
# Initialize grid (add ghost points, i.e. dry nodes, to each edge)
rho = np.ones((nx+2, ny+2)) # density values
v = np.zeros((2, nx+2, ny+2)) # average velocity values
f = np.einsum("i,jk -> ijk", weights, np.ones((nx+2, ny+2))) # probability density function
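# This initializes f at the zero-velocity equilibrium (w_i * rho with rho = 1) on every node, ghost layers included.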
# Check on which side this block borders another block or the boundary
borders = bool_boundaries(rank, n_blocks)
# Ranks of the processes of the neighboring blocks (only correct and used when there's no boundary on this side)
rank_right = rank + 1
rank_left = rank - 1
rank_up = rank - n_blocks[1]
rank_down = rank + n_blocks[1]
# Loop over timesteps
for idx_time in range(n_timesteps):
# Calculate the streaming step wrt (global) boundary conditions
f, rho, v = streaming(f, rho, v, c, weights, borders)
    # The order of these communications matters so that every corner ghost point receives the diagonally adjacent value via two-step communication.
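    # Halo exchange: each block sends its outermost interior layer to the neighbour and receives that neighbour's layer into its own ghost cells.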
if not borders[0]:
comm.send(f[:, :, -2].copy(), rank_right)
data = comm.recv(source=rank_right)
f[:, :, -1] = data
if not borders[2]:
comm.send(f[:, :, 1].copy(), rank_left)
data = comm.recv(source=rank_left)
f[:, :, 0] = data
if not borders[1]:
comm.send(f[:, 1, :].copy(), rank_up)
data = comm.recv(source=rank_up)
f[:, 0, :] = data
if not borders[3]:
comm.send(f[:, -2, :].copy(), rank_down)
data = comm.recv(source=rank_down)
f[:, -1, :] = data
rho, v = recalculate_functions(f, rho, v, c) # Update values
# Plot average velocity vectors
if idx_time % (n_timesteps // n_plots) == 0:
# stack everything in rank 0
f_full = np.zeros((9, nx_total, ny_total))
rho_full = np.ones((nx_total, ny_total))
v_full = np.zeros((2, nx_total, ny_total))
f_list = comm.gather(f[:,1:-1,1:-1].copy(), root=0)
if rank == 0:
for rank_idx, f_block in enumerate(f_list):
block_pos = (rank_idx // n_blocks[1], rank_idx % n_blocks[1])
f_full[:, (nx_opt * block_pos[0]):(nx_opt * block_pos[0] + f_block.shape[1]), (ny_opt * block_pos[1]):(ny_opt * block_pos[1] + f_block.shape[2])] = f_block
rho_full, v_full = recalculate_functions(f_full, rho_full, v_full, c)
plot_velocity(f_full, v_full, return_plot=True)
plt.show()
|
Dunitrie/HPC
|
main.py
|
main.py
|
py
| 3,550 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "process_coordination.number_of_blocks",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "process_coordination.width_height",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.einsum",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "process_coordination.bool_boundaries",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "streaming_functions.streaming",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "streaming_functions.recalculate_functions",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "streaming_functions.recalculate_functions",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "plotting_functions.plot_velocity",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
}
] |
33155203825
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from telegram.ext import Updater, CommandHandler
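# NOTE: 'TOKEN' below is a placeholder; a real bot token (issued by Telegram's @BotFather) must be supplied before running.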
updater = Updater('TOKEN')
def start_method(bot, update):
    bot.sendMessage(update.message.chat_id, "سلام")  # "salam" -- Persian for "hello"
start_command = CommandHandler('start', start_method)
updater.dispatcher.add_handler(start_command)
updater.start_polling()
# for exit
updater.idle()
|
rasoolhp/free-telegram-bot
|
bot.py
|
bot.py
|
py
| 412 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "telegram.ext.Updater",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "telegram.ext.CommandHandler",
"line_number": 9,
"usage_type": "call"
}
] |
17913448581
|
"""Made email unique
Revision ID: ff6f0a832e3a
Revises: 876813ef988d
Create Date: 2022-08-09 16:32:43.590993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ff6f0a832e3a'
down_revision = '876813ef988d'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'users', ['email'])
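    # name=None relies on the metadata naming convention (if one is configured) to generate the constraint name.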
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'users', type_='unique')
# ### end Alembic commands ###
|
djangbahevans/wallet-clone
|
backend/alembic/versions/ff6f0a832e3a_made_email_unique.py
|
ff6f0a832e3a_made_email_unique.py
|
py
| 667 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "alembic.op.create_unique_constraint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_constraint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 27,
"usage_type": "name"
}
] |
715415024
|
import pandas as pd
import pickle
def buildDataSet():
#Import Ingredients DF
print('Loaded Products...')
ewg_ing_df = pd.read_json('ingredients_products_keys_fixed/ewg_ingredients.json', orient = 'index')
#Build mapping between Ingredient ID and ingredient Name
ing_map = {}
for i in range(len(ewg_ing_df)):
ID = ewg_ing_df.iloc[i]['ingredient_id']
name = ewg_ing_df.iloc[i]['ingredient_name']
ing_map[ID] = name
#Read in Product Data and Initialize Acne Score
ewg_prd_df = pd.read_json('ingredients_products_keys_fixed/ewg_products.json', orient = 'index')
ewg_prd_df['Acne_Score'] = 0
print('Loaded ingredients')
#Build Lists of ingredients to modify original DataFrame and Initialize Dataset for Model
from collections import Counter
n = len(ewg_prd_df)
ing_lists = []
ing_cnts = Counter()
string_lists = []
for i in range(n):
try:
new_list = []
strings = ''
ing_list = ewg_prd_df.iloc[i]['ingredient_list']
for ID in ing_list:
new_list.append(ing_map[ID])
ing_cnts[ing_map[ID]] += 1
#strings = strings + ' ' + ing_map[ID]
#print(new_list)
ing_lists.append(new_list)
string_lists.append(str(new_list))
except:
ing_lists.append([''])
string_lists.append('')
print('Failed on',i, 'no ingredient list.')
print('Finished matching ingredients to keys.')
ewg_prd_df['New_List'] = ing_lists
#Build Synonym Dictionary
synonym_dict = {}
for i in range(ewg_ing_df.shape[0]):
row = ewg_ing_df.iloc[i]
syns = row['synonym_list']
if type(syns) == list:
for syn in syns:
synonym_dict[syn.strip()] = row['ingredient_name']
synonym_dict[row['ingredient_name']] = row['ingredient_name']
else:
synonym_dict[row['ingredient_name']] = row['ingredient_name']
print('Build Synonyms')
#Initialize Ingredient Score
ewg_ing_df['Acne_Score'] = 0.0
#Extract Comodegenic Scores
comodegenic = []
with open('comodegenic.csv','r') as f:
for line in f:
if line[0] != ',':
words = line.strip().split(',')
if words[1] != '':
comodegenic.append(( words[0], words[1], words[2]))
cd_df = pd.DataFrame(comodegenic)
#Match Comodegenic Ingredients to EWG
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
matches = []
print('Matching Comodegenic to EWG...')
for i in range(cd_df.shape[0]):
cur_ingredient = cd_df.iloc[i][0].upper()
matches.append(process.extract(cur_ingredient, synonym_dict.keys(),limit=1, scorer=fuzz.token_sort_ratio))
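        # token_sort_ratio sorts the tokens before comparing, so word-order differences don't hurt the match score.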
#Match Comodegenic Ingredients to EWG
cd_ranks = []
for i in range(cd_df.shape[0]):
match_score = int(matches[i][0][1])
match_name = matches[i][0][0]
cd_name = cd_df.iloc[i][0].upper()
cd_ranks.append(match_score)
if match_score >= 90:
ewg_name = synonym_dict[match_name]
#print(temp_score, '\t', match_name, '\t', cd_name, '\t', synonym_dict[match_name])
#print(cd_df.iloc[i][1],cd_df.iloc[i][0])
row= ewg_ing_df[ewg_ing_df['ingredient_name']==ewg_name].index
ewg_ing_df.loc[row,'Acne_Score'] = cd_df.iloc[i][1]
#print(ewg_ing_df.loc[row]['ingredient_name'], ewg_ing_df.loc[row]['Acne_Score'])
#print(ewg_ing_df[ewg_ing_df['ingredient_name']==ewg_name])
print('Updated EWG with Acne Scores')
#Update Product Acne Score
acne_score_list = []
for i in range(ewg_prd_df.shape[0]):
row = ewg_prd_df.iloc[i]
total_acne = 0
for ing in row['New_List']:
try:
acne_score = float(ewg_ing_df[ewg_ing_df['ingredient_name']==ing]['Acne_Score'])
#print(ing, acne_score)
total_acne += acne_score
except:
None
acne_score_list.append(total_acne)
#print(acne_score_list)
ewg_prd_df['Acne_Score'] = acne_score_list
#Save Final Acne Matrix
pickle_out = open("ewg_prd_df.pickle","wb")
pickle.dump(ewg_prd_df, pickle_out)
pickle_out.close()
print('Saved dataset to "ewg_prd_df.pickle"')
try:
pickle.load(open("ewg_prd_df.pickle","rb"))
print('Loaded from Pickle')
ewg_prd_df = pickle.load(open("ewg_prd_df.pickle","rb"))
except:
print("Building Dataset from Files...")
buildDataSet()
ewg_prd_df = pickle.load(open("ewg_prd_df.pickle","rb"))
#try:
# X = pickle.load(open("X.pickle","rb"))
#except:
#Need to change to a real function...code block simple
print('Building Dataset...')
#print(ewg_prd_df)
from collections import Counter
n = ewg_prd_df.shape[0]
print(n)
ing_lists = []
ing_cnts = Counter()
string_lists = []
for i in range(n):
ings = ewg_prd_df.iloc[i]['New_List']
str_list = ''
if type(ings) == list:
#print(type(ings), i)
for ing in ings:
if type(ing) == str:
str_list = str_list + '|' + ing
string_lists.append(str_list)
else:
print('Failed',i)
string_lists.append('')
#Build TD-IDF Matrix
from sklearn.feature_extraction.text import TfidfVectorizer
def ing_tokenizer(word):
return word.split('|')
#print(ewg_prd_df['New_List'].tolist())
vectorizer = TfidfVectorizer(tokenizer = ing_tokenizer, lowercase = False, stop_words = ['WATER','GLYCERIN','',
'TITANIUM DIOXIDE', 'IRON OXIDES','BEESWAX','METHYLPARABEN', 'PROPYLPARABEN', 'PROPYLENE GLYCOL', 'PANTHENOL', 'MICA'] )
X = vectorizer.fit_transform(string_lists)
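# X is a sparse TF-IDF document-term matrix: one row per product, one column per '|'-delimited ingredient token.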
#print(vectorizer.vocabulary_)
pickle_out = open("X.pickle","wb")
pickle.dump(X, pickle_out)
pickle_out.close()
#print(X)
print('Running Optimization...')
from sklearn.metrics import confusion_matrix
for thresh in [0]:
for test_size in [.001,.05,.01,.1]:
for alph in [.001]:
best_alpha = 0
best_test_size = 0
best_thresh_hold = 0
best_test_score = 0
best_train_score = 0
best_model = None
#Initialize Acne Score by Product
Y = []
for i in ewg_prd_df['Acne_Score']:
if i > 0 and i < 3:
Y.append(1)
elif i > 2:
Y.append(2)
else:
Y.append(0)
#Split Training and Test Data by 1/3 to 2/3
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42)
#Build NB Model
from sklearn.naive_bayes import MultinomialNB
gnb = MultinomialNB(alpha = alph)
gnb_fit = gnb.fit(X_train,y_train)
y_pred = gnb_fit.predict(X_test)
#y_pred_tr = gnb_fit.predict(X_train)
test_score = confusion_matrix(y_test, y_pred)
#train_score = confusion_matrix(y_train, y_pred_tr)
#if test_score:
best_test_score = test_score
best_alpha = alph
best_test_size = test_size
best_thresh_hold = thresh
best_model = gnb_fit
print('Best Test Score:',gnb_fit.score(X_test,y_test), '\n', test_score) #,'\t', train_score)
print('Alpha:\t', best_alpha)
print('Test_size:\t',test_size)
print('Thresh:\t', thresh,'\n')
#print('Thresh:',thresh, 'TestSize\t',test_size,'\n' ,'\tTraining Error:', )
#print('\tTesting Error', )
pickle_out = open("nb.pickle","wb")
pickle.dump(gnb_fit, pickle_out)
pickle_out.close()
ingredient_weights = {}
i = 0
print(len(gnb.coef_), best_model.coef_, type(best_model.coef_[0]))
for i in range(gnb_fit.coef_[0].shape[0]):
#print( gnb.coef_[0][i], vectorizer.get_feature_names()[i])
ingredient_weights[vectorizer.get_feature_names()[i]] =(gnb.coef_[0][i])
#print(, gnb.coef_[i])
import operator
sorted_weights = sorted(ingredient_weights.items(), key=operator.itemgetter(1))
for i in range(1,20):
print(sorted_weights[-i])
score = best_model.predict_proba(X_train)
pred = best_model.predict(X_train)
for i in range(100):
print(ewg_prd_df.iloc[i]['Acne_Score'], score[i], pred[i])
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#%matplotlib inline
ewg_prd_df['Acne_Score'].hist(bins=40)
plt.show()
#for i in range(gnb_fit.coef_
#print(gnb_fit.coef_)
#out = gnb_fit.predict_proba(X_test)
#for i in range(len(out)):
# print(out[i])
#print(gnb_fit.class_log_prior_)
#print(gnb_fit.feature_count_)
#print(gnb_fit.class_count_)
#print(gnb_fit.get_params())
|
SombiriX/w210_capstone
|
buildModel.py
|
buildModel.py
|
py
| 8,443 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.read_json",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_json",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "fuzzywuzzy.process.extract",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "fuzzywuzzy.process",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "fuzzywuzzy.fuzz.token_sort_ratio",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "fuzzywuzzy.fuzz",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.MultinomialNB",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 279,
"usage_type": "name"
}
] |
6112251845
|
import sys
#sys.path.append('/usr/local/Cellar/opencv3/3.2.0/lib/python2.7/site-packages')
sys.path.append("/usr/local/Cellar/opencv3/3.2.0/lib/python3.5/site-packages")
import cv2
import numpy as np
import os
import random
def show_image(im):
height, width = im.shape[:2]
res = cv2.resize(im,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
cv2.imshow("Image", res)
def show_imageOrig(im):
height, width = im.shape[:2]
res = cv2.resize(im,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
cv2.imshow("ImageOrig", res)
#kernel = np.ones((3,3),np.uint8)
#cap = cv2.VideoCapture("dogs.mp4")
#ret, frame1 = cap.read()
#prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
#hsv = np.zeros_like(frame1)
#hsv[...,1] = 255
#fgbg = cv2.createBackgroundSubtractorMOG2(50, 16, False)
#fgbg.setBackgroundRatio(0.8) # frames before object becomes foreground
#fgbg.setVarInit(500) # speed of adaption of new components
#i = 0
#while(1):
# ret, frame2 = cap.read()
# ret, frame2 = cap.read()
# frame2 = cv2.GaussianBlur(frame2,(9,9),0)
# fgmask = fgbg.apply(frame2)
# fgmask = fgbg.apply(frame2,fgmask, 0)
#fgmask = cv2.dilate(fgmask,kernel,iterations = 5)
#fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
'''
^^ Line above may or may not be good
'''
#if (i > 10 and i % 2 == 0):
#cv2.imwrite(str(i) + ".png",fgmask)
# show_image(fgmask)
# k = cv2.waitKey(30) & 0xff
# if k == 27:
# break
#i += 1
#cap.release()
#cv2.destroyAllWindows()
#errorCount = 0
np.random.seed(133)
numLabels = 101
image_size_x = 240
image_size_y = 320
dataRoot = "./UCF-101/"
def processFolder(folder):
#tick = 0
#global errorCount
print(dataRoot + folder)
try:
videoFileNames = os.listdir(dataRoot + folder)
except:
print("Not a directory, moving along.")
return None, None
#i = 0
#data = np.zeros(shape=(len(videoFileNames)*1, image_size_x, image_size_y), dtype=np.float32)
#labels = np.zeros(shape=(len(videoFileNames)*1, 101), dtype=np.float32)
for videoName in videoFileNames:
#if tick < 2:
# tick = tick + 1
# continue
#tick = 0
if random.random() < 0.98:
continue
try:
print(videoName)
cap = cv2.VideoCapture(dataRoot + folder + "/" + videoName)
#ret, frame1 = cap.read()
#prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
#hsv = np.zeros_like(frame1)
#hsv[...,1] = 255
fgbg = cv2.createBackgroundSubtractorMOG2(50, 16, False)
fgbg.setBackgroundRatio(0.8) # frames before object becomes foreground
fgbg.setVarInit(500) # speed of adaption of new components
i = 0
frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
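            # Stop a few frames before the reported end; CAP_PROP_FRAME_COUNT is only an estimate for some containers.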
while(cap.get(cv2.CAP_PROP_POS_FRAMES) < frames - 3):
#ret, frame2 = cap.read()
ret, frame2 = cap.read()
if ret == False:
continue
show_imageOrig(frame2)
frame2 = cv2.GaussianBlur(frame2,(9,9),0)
fgmask = fgbg.apply(frame2)
fgmask = fgbg.apply(frame2,fgmask, 0)
show_image(fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
except IOError as e:
            print('Could not read:', videoName, ':', e, '- it\'s ok, skipping.')
#return data, labels
def iterData(folder):
labelNames = os.listdir(folder)
for i in range(len(labelNames)):#len(labelNames)
processFolder(labelNames[i])
iterData(dataRoot)
|
ltecot/humanMotionClassification
|
img_processing.py
|
img_processing.py
|
py
| 3,562 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cv2.createBackgroundSubtractorMOG2",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_COUNT",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_POS_FRAMES",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 120,
"usage_type": "call"
}
] |
36259278100
|
import requests
from bs4 import BeautifulSoup
import json
import secrets
from requests_oauthlib import OAuth1
from operator import itemgetter
import sqlite3
import csv
import base64
import itertools
import plotly.plotly as py
import plotly.graph_objs as go
import webbrowser
spotifybase = "https://accounts.spotify.com/api/token"
spotifyplay = "https://api.spotify.com/v1/search"
foodnet = "https://www.foodnetwork.com/profiles/talent"
spotify_client = secrets.client_id
spotify_secret = secrets.client_secret
auth = (spotify_client, spotify_secret)
grant_type = 'client_credentials'
CACHE_FNAME = 'final_cache.json'
DBNAME = 'food.db'
CHEFS = 'chefs.json'
DISHES = 'dishes.json'
flavor_dict = {'Aaron McCargo Jr.': 'American',
'Aarti Sequeira': 'South Asian',
'Aarón Sánchez': 'Latin',
'Adam Gertler': 'BBQ',
'Aida Mollenkamp': 'Innovative',
'Alex Guarnaschelli': 'Traditional Home-Cooking',
'Amanda Freitag': 'Traditional Home-Cooking',
'Amy Thielen': 'Traditional Home-Cooking',
'Andrew Zimmern': 'Innovative',
'Anne Burrell': 'Rustic',
'Anne Thornton': 'Sweet Treats',
'Ayesha Curry': 'Home-Cooking',
'Bob Blumer': 'Innovative',
'Bobby Flay': 'American',
'Brian Boitano': 'Innovative',
'Buddy Valastro': 'Sweet Treats',
'Carla Hall': 'Southern Comfort',
'Cat Cora': 'Misc.',
'Chris Santos': 'Innovative',
'Claire Robinson': 'Home-Cooking',
'Curtis Stone': 'Home-Cooking',
'Daisy Martinez': 'Latin',
'Damaris Phillips': 'Southern Comfort',
'Danny Boome': 'Healthy',
'Daphne Brogdon': 'Home-Cooking',
'Dave Lieberman': 'Home-Cooking',
'Donatella Arpaia': 'Home-Cooking',
'Duff Goldman': 'Sweet Treats',
'Eddie Jackson': 'Healthy',
'Ellie Krieger': 'Healthy',
'Emeril Lagasse': 'Misc.',
'Food Network Kitchen': 'Misc.',
'Geoffrey Zakarian': 'Modern American',
'George Duran': 'Global Cuisine',
'Giada De Laurentiis': 'Italian',
'Graham Elliot': 'Misc.',
'Guy Fieri': 'American',
'Ina Garten': 'Home-Cooking',
'Ingrid Hoffmann': 'Misc.',
'Jamie Deen': 'BBQ',
'Jamie Oliver': 'Healthy',
'Janet Johnston': 'Home-Cooked',
'Jeff Corwin': 'Latin',
'Jeff Mauro': 'Misc.',
'Jet Tila': 'East Asian',
'Joey Fatone': 'American',
'Jose Garces': 'Latin',
'Judy Joo': 'Misc.',
'Katie Lee': 'Misc.',
'Keegan Gerhard': 'Sweet Treats',
'Kerry Vincent': 'Sweet Treats',
'Lorraine Pascale': 'Home-Cooking',
'Maneet Chauhan': 'South Asian',
'Marc Murphy': 'Modern American',
'Marcela Valladolid': 'Latin',
'Marcus Samuelsson': 'Misc.',
'Mario Batali': 'Italian',
'Mary Nolan': 'Everyday',
'Masaharu Morimoto': 'East Asian',
"Melissa d'Arabian": 'Healthy',
'Michael Chiarello': 'Italian',
'Michael Symon': 'Misc.',
'Nancy Fuller': 'Southern Comfort',
'Nigella Lawson': 'Home-Cooking',
'Patricia Heaton': 'American',
'Paula Deen': 'Southern',
'Rachael Ray': 'Everyday',
'Ree Drummond': 'Southern Comfort',
'Robert Irvine': 'American',
'Robin Miller': 'Everyday',
'Roger Mooking': 'Global Cuisine',
'Ron Ben-Israel': 'Sweet Treats',
'Sandra Lee': 'American',
'Scott Conant': 'Italian',
'Sherry Yard': 'Sweet Treats',
'Sunny Anderson': 'Southern Comfort',
'Ted Allen': 'American',
'The Hearty Boys': 'Innovative',
'The Neelys': 'BBQ',
'Tia Mowry': 'Everyday',
'Tregaye Fraser': 'Innovative',
'Trisha Yearwood': 'Southern Comfort',
'Tyler Florence': 'Home-Cooking',
'Valerie Bertinelli': 'Misc.',
'Warren Brown': 'Sweet Treats'}
try:
cache_file = open(CACHE_FNAME, 'r')
cache_contents = cache_file.read()
CACHE_DICTION = json.loads(cache_contents)
cache_file.close()
except:
CACHE_DICTION = {}
try:
cache_file = open(CHEFS, 'r')
cache_contents = cache_file.read()
CHEF_DICTION = json.loads(cache_contents)
cache_file.close()
except:
CHEF_DICTION = {}
try:
cache_file = open(DISHES, 'r')
cache_contents = cache_file.read()
DISH_DICTION = json.loads(cache_contents)
cache_file.close()
except:
DISH_DICTION = {}
def get_spotify_token(url, auth):
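    # Spotify client-credentials flow: POST the client id/secret and receive a short-lived bearer access token.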
params = {'grant_type': grant_type}
# if url in CACHE_DICTION:
# access_token = CACHE_DICTION[url][17:100]
# return access_token
# else:
resp = requests.post(url, data=params, auth=auth)
resp_data = json.loads(resp.text)
access_token = resp_data["access_token"]
CACHE_DICTION[url] = resp.text
dumped_json_cache = json.dumps(CACHE_DICTION)
fw = open(CACHE_FNAME,"w")
fw.write(dumped_json_cache)
fw.close()
return access_token
def make_request_using_cache(url, headers=None):
if url in CACHE_DICTION:
return CACHE_DICTION[url]
else:
if headers is None:
resp = requests.get(url)
else:
resp = requests.get(url, headers=headers)
CACHE_DICTION[url] = resp.text
dumped_json_cache = json.dumps(CACHE_DICTION)
fw = open(CACHE_FNAME,"w")
fw.write(dumped_json_cache)
fw.close()
return CACHE_DICTION[url]
def get_spotify_playlist(search_term):
end = ["party", "graph", "term"]
params = {'q': search_term}
url = "{}?type=playlist&limit=5&q=".format(spotifyplay) + search_term
access_token = get_spotify_token(spotifybase, auth)
authorization_header = {"Authorization":"Bearer {}".format(access_token)}
response_string = make_request_using_cache(url, authorization_header)
response = json.loads(response_string)
num = 0
spotify_list = []
for r in response:
for i in range(5):
num += 1
spotify_list.append((response[r]["items"][i]["name"], str(response[r]["items"][i]["tracks"]["total"])))
print(str(num) + ". " + response[r]["items"][i]["name"] + " --- " + str(response[r]["items"][i]["tracks"]["total"]))
print("Do you want to see a bar graph comparing these playlist's lengths,"
"look up another term, or"
" do you want to go start throwing your awesome party?")
response = input("Please enter 'party', 'term', or 'graph': ")
while response not in end:
response = input("Please enter 'party', 'term', or 'graph': ")
if response == 'party':
print("Bye! Have fun!")
exit()
elif response == 'graph':
bar_graph_spotify(spotify_list)
print("Alright! Time for you to go throw the best party out there! See you later!")
exit()
elif response == 'term':
response = input("Please enter a new search term! ")
get_spotify_playlist(response)
return spotify_list
def init_db():
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
statement = '''
DROP TABLE IF EXISTS 'Chefs';
'''
cur.execute(statement)
statement = '''
DROP TABLE IF EXISTS 'Dishes';
'''
cur.execute(statement)
conn.commit()
statement = '''
CREATE TABLE 'Chefs' (
'Id' INTEGER PRIMARY KEY AUTOINCREMENT,
'FirstName' TEXT NOT NULL,
'LastName' TEXT NOT NULL,
'ChefUrl' TEXT NOT NULL,
'PopularRecipe' TEXT,
'FlavorProfile' TEXT
);
'''
cur.execute(statement)
statement = '''
CREATE TABLE 'Dishes' (
'Id' INTEGER PRIMARY KEY AUTOINCREMENT,
'DishName' TEXT NOT NULL,
'DishUrl' TEXT NOT NULL,
'ChefID' INTEGER,
'Type' TEXT NOT NULL,
'LevelDifficulty' TEXT NOT NULL,
'Rating' INTEGER
);
'''
cur.execute(statement)
conn.commit()
conn.close()
class Chef:
def __init__(self, FirstName, LastName, ChefUrl=None):
self.FirstName = FirstName
self.LastName = LastName
self.ChefUrl = ChefUrl
self.full_name = FirstName + " " + LastName
if ChefUrl is not None:
unique_page_text = make_request_using_cache(ChefUrl)
unique_page_soup = BeautifulSoup(unique_page_text, 'html.parser')
if self.full_name in flavor_dict:
try:
most_popular_block = unique_page_soup.find(class_ = "m-MediaBlock o-Capsule__m-MediaBlock m-MediaBlock--recipe")
most_popular = most_popular_block.find(class_="m-MediaBlock__a-HeadlineText").text
self.FlavorProfile = flavor_dict[self.full_name]
if self.full_name == "Bobby Flay" or self.full_name == "Duff Goldman" or self.full_name == "Melissa D'Arabian" or self.full_name == "Nigella Lawson":
recipes_url = ChefUrl + "/recipes"
recipes_text = make_request_using_cache(recipes_url)
recipes_soup = BeautifulSoup(recipes_text, 'html.parser')
recipes_list = recipes_soup.find(class_ = "l-List")
most_popular = recipes_list.find(class_ = "m-MediaBlock__a-HeadlineText").text
except:
most_popular = "N/A"
else:
most_popular = "N/A"
self.FlavorProfile = "N/A"
self.PopularRecipe = most_popular
else:
self.PopularRecipe = "N/A"
class Dish:
def __init__(self, DishName, DishUrl, Rating, Chef):
dish_types = ["Side Dish", "Main Dish", "Snack Dish", "Dessert"]
self.DishName = DishName
self.DishUrl = "http:" + DishUrl
self.Rating = Rating
self.Chef = Chef
dish_type = "Unknown"
dish_page_text = make_request_using_cache(self.DishUrl)
dish_page_soup = BeautifulSoup(dish_page_text, 'html.parser')
try:
level_all = dish_page_soup.find(class_ = "o-RecipeInfo o-Level")
level = level_all.find(class_ = "o-RecipeInfo__a-Description").text
except:
level = "Unknown"
try:
tags = dish_page_soup.find_all(class_ = "o-Capsule__a-Tag a-Tag")
for t in tags:
if t.text in dish_types:
dish_type = t.text
else:
dish_type = "Unknown"
except:
dish_type = "Unknown"
pass
self.Type = dish_type
self.LevelDifficulty = level
pass
def get_chef_info():
init_page_text = make_request_using_cache(foodnet)
init_page_soup = BeautifulSoup(init_page_text, 'html.parser')
name_list = init_page_soup.find_all(class_="m-PromoList__a-ListItem")
chef_list = []
num = 0
for n in name_list:
first_name = n.text.split(" ")[0]
second_word = n.text.split(" ")[1]
last_name = n.text.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = n.text.split(" ")[0] + " and " + n.text.split(" ")[2]
last_name = n.text.split(" ")[3]
chef_url = "https:" + n.find('a')['href']
n = Chef(first_name, last_name, chef_url)
chef_list.append(n)
chef = {"FirstName": n.FirstName,
"LastName": n.LastName,
"ChefUrl": n.ChefUrl,
"PopularRecipe": n.PopularRecipe,
"FlavorProfile": n.FlavorProfile}
CHEF_DICTION[n.full_name] = chef
chef_string = json.dumps(CHEF_DICTION, indent = 4)
fw = open(CHEFS,"w")
fw.write(chef_string)
fw.close()
return chef_list
def get_dish_info():
chefs = get_chef_info()
dishes_list = []
for c in chefs:
chef_dishes = []
if c.full_name in flavor_dict:
dishes_url = c.ChefUrl + "/recipes"
init_page_text = make_request_using_cache(dishes_url)
init_page_soup = BeautifulSoup(init_page_text, 'html.parser')
try:
next_button = init_page_soup.find(class_ = "o-Pagination__a-Button o-Pagination__a-NextButton")
except:
next_button = "No"
big_list = init_page_soup.find(class_="l-List")
ratings_list = []
try:
dish_list = big_list.find_all(class_ = "m-MediaBlock__a-Headline")
except:
pass
try:
ratings = big_list.find_all(class_ = "gig-rating-stars")['title']
for r in ratings:
print(r)
ratings_list.append(ratings)
except:
ratings = "Unknown"
ratings_list.append(ratings)
try:
for d in dish_list:
dish_name = d.text
dish_url = d.find('a')["href"]
dish_rating = "5 out of 5"
d = Dish(dish_name, dish_url, dish_rating, c.full_name)
dishes_list.append(d)
dish = {"DishName": d.DishName,
"DishUrl": d.DishUrl,
"DishRating": d.Rating,
"Type": d.Type,
"LevelDifficulty": d.LevelDifficulty}
chef_dishes.append(dish)
except:
pass
# num = 1
# while next_button != "No":
# num += 1
# next_url = dishes_url + "/trending-/p/" + str(num)
# next_page = make_request_using_cache(next_url)
# next_page_soup = BeautifulSoup(next_page, 'html.parser')
# try:
# next_button = init_page_soup.find(class_ = "o-Pagination__a-Button o-Pagination__a-NextButton")
# except:
# next_button = "No"
# big_list = next_page_soup.find(class_="l-List")
# ratings_list = []
# try:
# dish_list = big_list.find_all(class_ = "m-MediaBlock__a-Headline")
# except:
# dish_list = "no dishes"
# try:
# ratings = big_list.find_all(class_ = "gig-rating-stars")['title']
# for r in ratings:
# print(r)
# ratings_list.append(ratings)
# except:
# ratings = "Unknown"
# ratings_list.append(ratings)
# try:
# for d in dish_list:
# dish_name = d.text
# dish_url = d.find('a')["href"]
# dish_rating = ""
# d = Dish(dish_name, dish_url, dish_rating, c.full_name)
# dishes_list.append(d)
# dish = {"DishName": d.DishName,
# "DishUrl": d.DishUrl,
# "DishRating": d.Rating,
# "Type": d.Type,
# "LevelDifficulty": d.LevelDifficulty}
# chef_dishes.append(dish)
# except:
# pass
# if num == 2:
# break
# try:
# next_button = next_page_soup.find(class_ = "o-Pagination__a-Button o-Pagination__a-NextButton").text
# except:
# next_button = "No"
DISH_DICTION[c.full_name] = chef_dishes
dish_string = json.dumps(DISH_DICTION, indent = 4)
fw = open(DISHES,"w")
fw.write(dish_string)
fw.close()
#print(dishes_list[:30])
return dishes_list
def insert_data():
try:
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
except Error as e:
print(e)
#
# #print('Inserting Data.')
with open(CHEFS) as json_data:
cjson = json.load(json_data)
for c, d in cjson.items():
insertion = (None, d["FirstName"], d["LastName"], d["ChefUrl"], d["PopularRecipe"], d["FlavorProfile"])
statement = 'INSERT INTO "Chefs" '
statement += 'VALUES (?, ?, ?, ?, ?, ?)'
cur.execute(statement, insertion)
chef_dict = {}
statement = '''SELECT Id, FirstName, LastName FROM Chefs'''
cur.execute(statement)
for chef_info in cur:
full_name = chef_info[1] + " " + chef_info [2]
chef_dict[full_name] = chef_info[0]
with open(DISHES) as json_data:
cjson = json.load(json_data)
for c, d in cjson.items():
full_name = c
for i in d:
insertion = (None, i["DishName"].replace("\n", ""), i["DishUrl"], chef_dict[full_name], i["Type"], i["LevelDifficulty"].replace("\n", ""), i["DishRating"])
statement = 'INSERT INTO "Dishes" '
statement += 'VALUES (?, ?, ?, ?, ?, ?, ?)'
cur.execute(statement, insertion)
conn.commit()
conn.close()
def pie_chart(flavor_chef):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
labels = []
values = []
for f in flavor_chef:
labels.append(f)
first_name = f.split(" ")[0]
second_word = f.split(" ")[1]
last_name = f.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = f.split(" ")[0] + " and " + f.split(" ")[2]
last_name = f.split(" ")[3]
query = '''
SELECT COUNT(*)
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE c.FirstName = "{}" AND c.LastName = "{}"
GROUP BY c.ID
'''.format(first_name, last_name)
value = cur.execute(query)
for v in value:
values.append(v[0])
trace = go.Pie(labels=labels, values=values)
py.plot([trace], filename='Flavors')
def bar_graph_spotify(spotify):
x = []
y = []
for w, z in spotify:
x.append(w)
y.append(z)
data = [go.Bar(
x = x,
y = y
)]
py.plot(data, filename='bar-Spotify')
def bar_graph_type(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
chef_types = {}
first_name = command.split(" ")[0]
second_word = command.split(" ")[1]
last_name = command.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = command.split(" ")[0] + " and " + command.split(" ")[2]
last_name = command.split(" ")[3]
query = '''
SELECT COUNT(*), d.Type
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE c.FirstName = "{}" AND c.LastName = "{}"
GROUP BY d.Type
'''.format(first_name, last_name)
types = cur.execute(query)
x = []
y = []
for t in types:
print(t)
x.append(t[1])
y.append(t[0])
data = [go.Bar(
x = x,
y = y
)]
py.plot(data, filename='bar-Type')
def process_flavors(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
flavor_chef = []
query = '''
SELECT FirstName, LastName
FROM Chefs
WHERE FlavorProfile = "{}"
'''.format(command)
chefs = cur.execute(query)
for c in chefs:
full_name = c[0] + " " + c[1]
flavor_chef.append(full_name)
return flavor_chef
conn.close()
def process_chef(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
dishes_o_chefs = []
first_name = command.split(" ")[0]
second_word = command.split(" ")[1]
last_name = command.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = command.split(" ")[0] + " and " + command.split(" ")[2]
last_name = command.split(" ")[3]
query = '''
SELECT d.DishName, d.DishUrl, d.Rating, d.Type, d.LevelDifficulty
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE c.FirstName = "{}" AND c.LastName = "{}"
'''.format(first_name, last_name)
dishes = cur.execute(query)
for d in dishes:
dish = {}
formatted = d[0] + "--- " + d[3] + ", " + d[2] + ", Level: " + d[4]
dish[d[0]] = [d[1], d[2], d[3], d[4]]
dishes_o_chefs.append(dish)
conn.close()
return dishes_o_chefs
def process_dish(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
dish = []
query = '''
SELECT d.DishName, d.DishUrl, d.Rating, d.Type, d.LevelDifficulty
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE d.Type = "{}"
LIMIT 1
'''.format(command)
dishes = cur.execute(query)
for d in dishes:
one_dish = {}
formatted = d[0] + "--- " + d[3] + ", " + d[2] + ", Level: " + d[4]
one_dish[d[0]] = [d[1], d[2], d[3], d[4]]
dish.append(one_dish)
conn.close()
return dish
def flavors():
flavors = ["American", "BBQ", "East Asian", "Everyday", "Global Cuisine", "Healthy",
"Home-Cooking","Innovative","Italian","Latin","Misc.","Modern American",
"Rustic","Southern Comfort","South Asian","Sweet Treats","Trad. Home-Cooking", "exit"]
one_two = ["1", "2", "exit"]
print("Here are the flavors we've put together for your absolutely amazing party: \n"
"American BBQ East Asian\n"
"Everyday Global Cuisine Healthy\n"
"Home-Cooking Innovative Italian\n"
"Latin Misc. Modern American\n"
"Rustic Southern Comfort South Asian\n"
"Sweet Treats Trad. Home-Cooking")
response = input("Please enter a single flavor so we can pull up a list "
"of chefs from FoodNetwork for you! ")
while response not in flavors:
response = input("Whoops! That doesn't look quite right, please try again! ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
flavor_chef = process_flavors(response)
num_chef = 0
print("-"*40, "\n", "CHEFS WITH A ", response, " FLAVOR", "\n", "-"*40)
for f in flavor_chef:
num_chef +=1
print(str(num_chef) + ". " + f)
    print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you "
    "a dish from this flavor! Path 2: You can open a plotly pie chart showing the number of recipes "
    "each of these chefs has! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
elif response == '2':
pie_chart(flavor_chef)
print("Alright now let's choose a chef/dish!")
chef_dish(flavor_chef)
elif response == 'exit':
print("Bye! Hope your party's a blast!")
exit()
return flavor_chef
def chef_dish(flavor_chef):
chef_dish = ["chef", "dish", "exit"]
kinds = ["Snack", "Side Dish", "Main Dish", "Dessert", "exit"]
response = input("Enter 'chef' or 'dish': ")
while response not in chef_dish:
response = input("Please enter 'chef' or 'dish': ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
elif response == 'chef':
response = input("Nice! Type in the name of the chef you want to look at: ")
while response not in flavor_chef:
response = input("Oops! Did you type that in right? Try again: ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
chef(response)
elif response == 'dish':
print("Solid! Do you want a snack, side, main dish, or dessert?")
response = input("Please enter 'Snack', 'Side Dish', 'Main Dish', or 'Dessert': ")
while response not in kinds:
response = input("Oops! Did you type that in right? Try again: ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
dish(response)
return 0
def dish(kind):
music_flavor = ["music", "flavor"]
yes_no = ["yes", "no", "exit"]
one_two = ["1", "2", "exit"]
print("-"*15, "\n", "A ", kind, "DISH" "\n", "-"*15)
dish = process_dish(kind)
for d in dish:
for i in d:
formatted = i + " --- " + d[i][2] + ", " + d[i][1] + ", Level: " + d[i][3].replace(" ", "")
print(formatted)
print("\n Do you want to go to the url for this dish?")
response = input("Enter 'yes' to go to the url or enter 'no' to go back to flavors: ")
while response not in yes_no:
response = input("Please enter 'yes' or 'no': ")
if response == "yes":
for d in dish:
url = d[i][0]
print("Launching " + url + " in browser!")
webbrowser.open(url)
print("Are you satisfied with your recipe? Do you want to go look at music?")
response = input("Enter 'music' for music or enter 'flavor' to go back to the flavors ")
while response not in music_flavor:
response = input("Please try again: ")
if response == 'music':
response = input("Enter a search term for Spotify: ")
spotify = get_spotify_playlist(response)
bar_graph_spotify(spotify)
elif response == 'flavor':
flavor_chef = flavors()
        print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you "
        "a dish from this flavor! Path 2: You can open a plotly pie chart showing the number of recipes "
        "each of these chefs has! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
if response == '2':
pie_chart(flavor_chef)
elif response == "no":
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
return 0
def chef(name):
music_flavor = ["music", "flavor", "exit"]
one_two = ["one", "two", "exit"]
num_chef_dish = 0
print("-"*30, "\n", "DISHES BY ", name, "\n" + "-"*30)
dishes_o_chefs = process_chef(name)
dish_nums = []
for d in dishes_o_chefs:
for i in d:
num_chef_dish += 1
            formatted = str(num_chef_dish) + ". " + i + " --- " + d[i][2] + ", Type: " + d[i][1] + ", Level: " + d[i][3].replace(" ", "")
print(formatted)
dish_nums.append((num_chef_dish - 1, d[i][0]))
    response = input("Enter a number to go to that dish's url, enter 'flavor' to go back to the flavors, or "
    "enter 'graph' to see a graph of this chef's number of main, side, snack, and dessert dishes! ")
if response == "flavor":
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response.isdigit() == True:
# try:
url = dish_nums[(int(response)-1)][1]
print(url)
print("Launching " + url + " in browser!")
webbrowser.open(url)
# except:
# print("URL Unknown")
print("Are you satisfied with your recipe? Do you want to go look at music?")
response = input("Enter 'music' for music or enter 'flavor' to go back to the flavors ")
while response not in music_flavor:
response = input("Please try again: ")
if response == 'music':
response = input("Enter a search term for Spotify: ")
get_spotify_playlist(response)
elif response == 'flavor':
flavor_chef = flavors()
            print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you"
            " a dish from this flavor! Path 2: You can open a plotly pie chart showing the number of recipes"
            " each of these chefs has! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
elif response == '2':
pie_chart(flavor_chef)
print("Great! Let's go look at some chef/dishes from this flavor now!")
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
elif response == 'graph':
bar_graph_type(name)
print("Nice!")
        response = input("Enter a number to go to that dish's url, enter 'flavor' to go back to the flavors, or "
        "enter 'graph' to see a graph of this chef's number of main, side, snack, and dessert dishes! ")
if response == "flavor":
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response.isdigit() == True:
#try:
url = dish_nums[(int(response)-1)][1]
print(url)
print("Launching " + url + " in browser!")
webbrowser.open(url)
# except:
# print("URL Unknown")
print("Are you satisfied with your recipe? Do you want to go look at music?")
response = input("Enter 'music' for music or enter 'flavor' to go back to the flavors ")
while response not in music_flavor:
response = input("Please try again: ")
if response == 'music':
response = input("Enter a search term for Spotify: ")
get_spotify_playlist(response)
elif response == 'flavor':
flavor_chef = flavors()
            print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you "
            "a dish from this flavor! Path 2: You can open a plotly pie chart showing the number of recipes "
            "each of these chefs has! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
if response == '2':
pie_chart(flavor_chef)
print("Great! Let's go look at some chef/dishes from this flavor now!")
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
else:
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
while response != 'flavor':
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
else:
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
while response != 'flavor':
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
flavor_chef = flavors()
chef_dish(flavor_chef)
def interactive_prompt():
one_two = ["1", "2", "exit"]
print("-"*30, "\n", "PARTY PLANNING PROGRAM \n", "-"*30)
print("Hey! So you wanna plan a party? Don't know where to start? Look no "
"further! We'll help you with the two most important parts of any party: "
"food and music! (You've gotta take care of the conversation on your own, "
"though, sorry!)")
response = input("Enter anything if this is the program you've been looking for "
"your whole life (enter 'exit' if you want to leave!): ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
print("With P^3 you can get delicious recipes and great music for the "
"best party you've ever thrown. Yes, even better than your neighbor Janet's "
"Halloween party last year.")
response = input("Cool right? ")
if response == 'exit':
print("Bye! Hope your party's a blast!")
exit()
print("Yea, we think so too. Let's get started.")
flavor_chef = flavors()
if __name__=="__main__":
#get_dish_info()
#init_db()
#insert_data()
interactive_prompt()
#get_spotify_playlist("country")
|
jntoma/finalproj206
|
final_food.py
|
final_food.py
|
py
| 33,382 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "secrets.client_id",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "secrets.client_secret",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Pie",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "plotly.plotly.plot",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "plotly.plotly",
"line_number": 508,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Bar",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 516,
"usage_type": "name"
},
{
"api_name": "plotly.plotly.plot",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "plotly.plotly",
"line_number": 520,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Bar",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 553,
"usage_type": "name"
},
{
"api_name": "plotly.plotly.plot",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "plotly.plotly",
"line_number": 557,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 608,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 717,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 770,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 813,
"usage_type": "call"
}
] |
73574084347
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "Deuces Poker Client",
version = "1.0",
author = "Daniel Fonseca Yarochewsky",
    description = ("A client to simulate a Texas Holdem Poker Table"),
license = "Free",
packages=['deuces-master', 'termcolor'],
long_description=read('README')
)
|
yarochewsky/poker-client
|
setup.py
|
setup.py
|
py
| 409 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
}
] |
72699334907
|
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModelForMaskedLM
from torch import nn
import numpy as np
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from torch.optim import Adam
from tqdm import tqdm
import torch
import os
import logging
# ************** Load the data and models ************
data = pd.read_csv("../dataset/train.csv")
data_part = data.sample(n=60000, random_state=42, replace=True)
data_shuffled = data_part.sample(frac=1, random_state=42) # shuffle the data
train_data, test_data = train_test_split(
    data_shuffled, test_size=0.3, random_state=42
) # split into training and test sets
K_FOLDS = 6 # K-fold training
# model for K-fold training
kf = StratifiedKFold(n_splits=K_FOLDS, shuffle=True, random_state=42)
# *************** Download the models *****************
if 1:  # download the models
    print("Downloading models...")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    tokenizer.save_pretrained("../model/Tokenizer")
    bert = AutoModelForMaskedLM.from_pretrained("bert-base-cased")
    bert.save_pretrained("../model/BERT_ROW")
    bert_basic = BertModel.from_pretrained("bert-base-cased")
    bert_basic.save_pretrained("../model/BERT_BASIC")
    print("Finished downloading the models!")
if 0:  # load the models from local files instead
    print("Loading models...")
    tokenizer = AutoTokenizer.from_pretrained("../model/Tokenizer")
    bert = AutoModelForMaskedLM.from_pretrained("../model/BERT_ROW")
    bert_basic = BertModel.from_pretrained("../model/BERT_BASIC")
    print("Models loaded.")
# *************** Constants, classes and helper functions ************
LABELS = {
"Literature & Fiction": 0,
"Animals": 1,
"Growing Up & Facts of Life": 2,
"Humor": 3,
"Cars, Trains & Things That Go": 4,
"Fairy Tales, Folk Tales & Myths": 5,
"Activities, Crafts & Games": 6,
"Science Fiction & Fantasy": 7,
"Classics": 8,
"Mysteries & Detectives": 9,
"Action & Adventure": 10,
"Geography & Cultures": 11,
"Education & Reference": 12,
"Arts, Music & Photography": 13,
"Holidays & Celebrations": 14,
"Science, Nature & How It Works": 15,
"Early Learning": 16,
"Biographies": 17,
"History": 18,
"Children's Cookbooks": 19,
"Religions": 20,
"Sports & Outdoors": 21,
"Comics & Graphic Novels": 22,
"Computers & Technology": 23,
}
# Output path for the log file
logging.basicConfig(filename="../log/train.log", level=logging.INFO)
# *** Wrapper class for convenient data-type conversion ***************
class Dataset(torch.utils.data.Dataset):
def __init__(self, df):
self.labels = [LABELS[label] for label in df["category"]]
self.texts = [
tokenizer(
text,
padding="max_length",
max_length=512,
truncation=True,
return_tensors="pt",
)
for text in df["text"]
]
def classes(self):
return self.labels
def __len__(self):
return len(self.labels)
def get_batch_labels(self, idx):
# Fetch a batch of labels
return np.array(self.labels[idx])
def get_batch_texts(self, idx):
# Fetch a batch of inputs
return self.texts[idx]
def __getitem__(self, idx):
batch_texts = self.get_batch_texts(idx)
batch_y = self.get_batch_labels(idx)
return batch_texts, batch_y
class BertClassifier(nn.Module):
def __init__(self, dropout=0.5):
super(BertClassifier, self).__init__()
self.bert = bert_basic
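        # bert_basic is a plain BertModel, so calling it with return_dict=False yields (sequence_output, pooled_output).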
self.dropout = nn.Dropout(dropout)
self.linear = nn.Linear(768, 24)
self.relu = nn.ReLU()
def forward(self, input_id, mask):
_, pooled_output = self.bert(
input_ids=input_id, attention_mask=mask, return_dict=False
)
dropout_output = self.dropout(pooled_output)
linear_output = self.linear(dropout_output)
final_layer = self.relu(linear_output)
return final_layer
def train(model, train_data, val_data, learning_rate, epochs):
    # Decide whether to use the GPU
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # Wrap the training and validation sets with the Dataset class
    train, val = Dataset(train_data), Dataset(val_data)
    # DataLoader fetches data in batches of batch_size; shuffle the samples during training
    train_dataloader = torch.utils.data.DataLoader(train, batch_size=8, shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val, batch_size=8)
    # Define the loss function and the optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=learning_rate)
    if use_cuda:
        print("Using the GPU")
        model = model.to(device)
        criterion = criterion.to(device)
    # Enter the training loop
    for epoch_num in range(epochs):
        # Two accumulators for the training set's accuracy and loss
total_acc_train = 0
total_loss_train = 0
for train_input, train_label in tqdm(train_dataloader):
train_label = train_label.to(device)
train_label = train_label.to(torch.long)
mask = train_input["attention_mask"].to(device)
input_id = train_input["input_ids"].squeeze(1).to(device)
            # Run the model to get the output
            output = model(input_id, mask)
            # Compute the loss
            batch_loss = criterion(output, train_label)
            total_loss_train += batch_loss.item()
            # Compute the accuracy
            acc = (output.argmax(dim=1) == train_label).sum().item()
            total_acc_train += acc
            # Update the model
            model.zero_grad()
            batch_loss.backward()
            optimizer.step()
        # ------ Validate the model -----------
        # Two accumulators for the validation set's accuracy and loss
        total_acc_val = 0
        total_loss_val = 0
        # No gradient computation needed
        with torch.no_grad():
            # Iterate over the validation set and score it with the trained model
for val_input, val_label in val_dataloader:
val_label = val_label.to(device)
val_label = val_label.to(torch.long)
mask = val_input["attention_mask"].to(device)
input_id = val_input["input_ids"].squeeze(1).to(device)
output = model(input_id, mask)
batch_loss = criterion(output, val_label)
total_loss_val += batch_loss.item()
acc = (output.argmax(dim=1) == val_label).sum().item()
total_acc_val += acc
logging.info(
"\n| Epochs: %d \n| Train Loss: %.3f \n| Train Accuracy: %.3f \n| Val Loss: %.3f \n| Val Accuracy: %.3f \n",
epoch_num + 1,
total_loss_train / len(train_data),
total_acc_train / len(train_data),
total_loss_val / len(val_data),
total_acc_val / len(val_data),
)
# ************** Run ********************
model = BertClassifier()
model.load_state_dict(torch.load("../model/BERT-1"))
learning_rate = 5e-6  # learning rate
epochs = 1  # number of training epochs
train(model, train_data, test_data, learning_rate, epochs)
torch.save(model.state_dict(), "../model/BERT-1")
|
zzhaire/dig-dig-books
|
code/train.py
|
train.py
|
py
| 7,354 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.StratifiedKFold",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "transformers.AutoModelForMaskedLM.from_pretrained",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForMaskedLM",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "transformers.BertModel",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "transformers.AutoModelForMaskedLM.from_pretrained",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForMaskedLM",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "transformers.BertModel",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "torch.utils",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 214,
"usage_type": "call"
}
] |
3929047732
|
from .wav import write_sine_wave_wav_file
def test_sine():
import io
import time
buffer_size = io.DEFAULT_BUFFER_SIZE
filename = "test-5min-512hz-sr48khz-s24le-pcmdatagen.wav"
frequency = 512
sample_rate = 48000
duration = 5 * 60 * sample_rate # 5 minutes
bit_depth = 24
start_time = time.time()
with open(filename, "wb") as fp:
write_sine_wave_wav_file(
fp=fp,
frequency=frequency,
buffer_size=buffer_size,
sample_rate=sample_rate,
num_samples=duration,
bits_per_sample=bit_depth,
)
end_time = time.time()
print(f"Time taken: {end_time - start_time}")
def main():
return test_sine()
if __name__ == "__main__":
main()
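# --- Hedged sketch (assumption, not part of the original module) ---
# The size of the generated file can be sanity-checked from the WAV parameters
# alone: header bytes plus num_samples * bytes_per_sample for a mono PCM
# stream. The 44-byte canonical header is an assumption about how
# write_sine_wave_wav_file lays out the RIFF chunks.
def expected_wav_size(num_samples: int, bits_per_sample: int, channels: int = 1) -> int:
    bytes_per_sample = bits_per_sample // 8
    return 44 + num_samples * bytes_per_sample * channels

# e.g. 5 minutes at 48 kHz, 24-bit mono: 44 + 14_400_000 * 3 bytes ~= 43.2 MB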
|
louie-github/morsel
|
morsel/test_sine.py
|
test_sine.py
|
py
| 772 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "io.DEFAULT_BUFFER_SIZE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "wav.write_sine_wave_wav_file",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 25,
"usage_type": "call"
}
] |
29818611165
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : get_content_data.py
# @Description: Fetch the tag-stripped text data
# @Time : 2020-5-30 11:09 AM
# @Author : Hou
import os
import pandas as pd
import pymysql.cursors
def get_id_list():
original_data = pd.read_excel(os.path.join(os.path.abspath('../..'), 'data', 'raw', 'filtered_data.xlsx'))
id_series = original_data['id']
id_list = id_series.to_numpy()
return id_list
def get_content_data(id_list):
"""获取去标签后的文本数据"""
connection = pymysql.connect(host='58.59.18.101',
port=3306,
user='data',
password='data12399123',
database='bidding_data',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
content_df = pd.DataFrame(columns=('bulletin_id', 'content', 'partition_key'))
try:
with connection.cursor() as cursor:
sql = "SELECT * FROM `bidding_bulletin_text` where bulletin_id= %s"
            # Fetch the first 2001 rows (indices 0 through 2000) for testing
for index in range(2001):
cursor.execute(sql, (id_list[index],))
result = cursor.fetchone()
# print(result)
content_df.loc[index] = result
finally:
connection.close()
return content_df
if __name__ == '__main__':
id_list = get_id_list()
content_df = get_content_data(id_list)
content_df.to_excel(os.path.join(os.path.abspath('../..'), 'data', 'processed', 'content_text_data.xlsx'))
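# --- Hedged alternative sketch (assumption, not part of the original script) ---
# Fetching rows one at a time costs one round trip per id; a single
# parameterized IN-clause query via pandas.read_sql does it in one call.
# It reuses a pymysql connection opened the same way as in get_content_data.
def get_content_data_batch(connection, id_list):
    placeholders = ", ".join(["%s"] * len(id_list))
    sql = f"SELECT * FROM `bidding_bulletin_text` WHERE bulletin_id IN ({placeholders})"
    return pd.read_sql(sql, connection, params=list(id_list))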
|
Kidron-Hou/category_division
|
src/data/get_content_data.py
|
get_content_data.py
|
py
| 1,684 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_excel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.cursors",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 50,
"usage_type": "call"
}
] |
23609310998
|
import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
import pymysql
from db_setting import db
# Import the time module, used to wait for page loads
import time
# Option to keep the browser window from closing
chrome_options = Options()
chrome_options.add_experimental_option("detach", True)
# URL of the theater page
CGV_URL = 'http://www.cgv.co.kr/movies/?lt=1&ft=1'
driver = webdriver.Chrome(options=chrome_options)
driver.delete_all_cookies()
driver.get(url=CGV_URL)
# Wait 0.3 seconds so the page can finish loading
time.sleep(0.3)
# Check whether any "load more" buttons exist
btn_mores = driver.find_elements(By.CLASS_NAME, 'btn-more-fontbold')
if btn_mores:
for btn in btn_mores:
btn.click()
time.sleep(0.3)
# Click through to each movie
box_elements = driver.find_elements(By.CLASS_NAME, 'box-image')
href_list = []
for element in box_elements:
href_list.append(element.find_element(By.TAG_NAME, 'a').get_attribute('href'))
links = []
for href in href_list:
driver.get(href)
try:
director_dt = driver.find_element(By.XPATH, "//dt[contains(., '감독')]")
director_as = director_dt.find_elements(By.XPATH, "./following-sibling::dd[1]/a")
for director_a in director_as:
new_link = director_a.get_attribute("href")
if new_link not in links:
links.append(new_link)
actor_dt = driver.find_element(By.XPATH, "//dt[contains(., '배우')]")
actor_as = actor_dt.find_elements(By.XPATH, "./following-sibling::dd[1]/a")
for actor_a in actor_as:
new_link = actor_a.get_attribute("href")
if new_link not in links:
links.append(new_link)
except NoSuchElementException:
print("정보 없음")
time.sleep(0.1)
names = []
births = []
nations = []
for link in links:
driver.get(link)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
    # Name
name_tag = soup.find(class_='title').find('strong').get_text(strip=True)
names.append(name_tag)
    # Fetch birth and nationality in one pass
tags = soup.find(class_='spec').find('dl')
    # Birth
    birth_tag_sibling = tags.find('dt', text=lambda text: text and '출생' in text)
    if birth_tag_sibling:
        birth_tag = birth_tag_sibling.find_next_sibling().get_text(strip=True)
    else:
        birth_tag = ""
births.append(birth_tag)
    # Nationality
    nation_tag_sibling = tags.find('dt', text=lambda text: text and '국적' in text)
    if nation_tag_sibling:
        nation_tag = nation_tag_sibling.find_next_sibling().get_text(strip=True)
    else:
        nation_tag = ""
nations.append(nation_tag)
print("name : ", name_tag)
print("birth : ", birth_tag)
print("nation : ", nation_tag)
print("================================")
conn = pymysql.connect(host=db['host'], port=db['port'], user=db['user'], password=db['password'], db=db['db'], charset=db['charset'])
curs = conn.cursor(pymysql.cursors.DictCursor)
for name, birth, nation in zip(names, births, nations):
sql = "INSERT INTO person (name, birth, nation) VALUES (%s, %s, %s)"
val = (name, birth, nation)
curs.execute(sql, val)
conn.commit()
conn.close()
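# --- Hedged sketch (assumption, not part of the original script) ---
# The row-by-row INSERT loop above can be collapsed into a single
# executemany() call, which pymysql turns into one multi-row statement.
def bulk_insert_people(conn, names, births, nations):
    sql = "INSERT INTO person (name, birth, nation) VALUES (%s, %s, %s)"
    with conn.cursor() as curs:
        curs.executemany(sql, list(zip(names, births, nations)))
    conn.commit()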
|
Ticket-Cinema/real-time-crawling
|
first_chart_crawling/actor_crawling.py
|
actor_crawling.py
|
py
| 3,333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.TAG_NAME",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "db_setting.db",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors",
"line_number": 117,
"usage_type": "attribute"
}
] |
27535780328
|
import time
from functools import wraps
from typing import Dict
import requests
from constants import GITHUB_ROOT, RENDER_ROOT
from logging_config import logger
from render_api.utils import get_headers, get_github_status
session = requests.Session()
# Decorator for logging and error handling
def log_and_handle_errors(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
logger.error(f"Exception in {func.__name__}| {exc}")
return None
return wrapper
@log_and_handle_errors
def manage_deployment_status(data: Dict):
pr = data["pull_request"]
repo_data = data["repository"]
state, merged = pr["state"], pr["merged"]
user_repo, repo_url = repo_data["full_name"], repo_data["html_url"]
owner, repo = repo_data["owner"]["login"], repo_data["name"]
if not (merged and state == "closed"):
return
service_id = get_render_service_id(repo_url)
if not service_id:
logger.error("Render service ID is null")
return
deployment_status = get_render_deployment_status(service_id)
if not deployment_status:
return
process_deployment_status(user_repo, repo, owner, deployment_status, service_id)
@log_and_handle_errors
def process_deployment_status(user_repo, repo, owner, deployment_status, service_id):
github_status = get_github_status(deployment_status["status"])
deployment_id = deployment_status["id"]
github_deployment_id = create_github_deployment(user_repo, repo, owner)
if not github_deployment_id:
logger.error("Failed to create GitHub deployment")
return
update_github_deployment_status(
owner, repo, github_status, deployment_id, user_repo, github_deployment_id, service_id
)
@log_and_handle_errors
def update_github_deployment_status(
owner, repo, status, deployment_id, user_repo, github_deployment_id, service_id
):
create_github_deployment_status(
owner, repo, status, deployment_id, user_repo, github_deployment_id
)
new_status = ""
while new_status not in ["failure", "success"]:
new_render_deployment_status = get_render_deployment_status(service_id)
new_status = get_github_status(new_render_deployment_status["status"])
        time.sleep(10)  # Optional, but avoids spamming the Render API (limit: 400 GET requests/minute)
create_github_deployment_status(
owner, repo, new_status, deployment_id, user_repo, github_deployment_id
)
@log_and_handle_errors
def get_render_deployment_status(service_id: str) -> Dict:
url = f"{RENDER_ROOT}/services/{service_id}/deploys"
response = session.get(url, headers=get_headers("render"))
logger.info(f"GET: {url} executed with status_code: {response.status_code}")
data = response.json()[0]["deploy"]
return {"status": data["status"], "id": data["id"]}
@log_and_handle_errors
def get_render_service_id(repo: str) -> str:
url = f"{RENDER_ROOT}/services"
response = session.get(url, headers=get_headers("render"))
logger.info(f"GET: {url} executed with status_code: {response.status_code}")
for service in response.json():
if service["service"]["repo"] == repo:
return service["service"]["id"]
@log_and_handle_errors
def create_github_deployment(user_repo: str, repo: str, owner: str) -> str:
url = f"{GITHUB_ROOT}/repos/{user_repo}/deployments"
data = {
"owner": owner,
"repo": repo,
"ref": "main",
"environment": "Production",
"production_environment": True,
"description": "Deployment status from Render",
}
response = session.post(url, headers=get_headers("github"), json=data)
logger.info(f"POST: {url} executed with status_code: {response.status_code}")
return response.json().get("id")
@log_and_handle_errors
def create_github_deployment_status(
owner: str,
repo: str,
status: str,
render_deployment_id: str,
user_repo: str,
github_deployment_id: str,
):
url = f"{GITHUB_ROOT}/repos/{user_repo}/deployments/{github_deployment_id}/statuses"
data = {
"owner": owner,
"repo": repo,
"state": status,
"deployment_id": render_deployment_id,
"environment": "Production",
"description": "Deployment status from Render",
}
response = session.post(url, headers=get_headers("github"), json=data)
logger.info(f"POST: {url} executed with status_code: {response.status_code}")
|
Fyleek/render-api
|
render_api/services/deployment_status_service.py
|
deployment_status_service.py
|
py
| 4,593 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.Session",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging_config.logger.error",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging_config.logger",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "logging_config.logger.error",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging_config.logger",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "render_api.utils.get_github_status",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging_config.logger.error",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "logging_config.logger",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "render_api.utils.get_github_status",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "constants.RENDER_ROOT",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "render_api.utils.get_headers",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "logging_config.logger.info",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "logging_config.logger",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "constants.RENDER_ROOT",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "render_api.utils.get_headers",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "logging_config.logger.info",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "logging_config.logger",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "constants.GITHUB_ROOT",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "render_api.utils.get_headers",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "logging_config.logger.info",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "logging_config.logger",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "constants.GITHUB_ROOT",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "render_api.utils.get_headers",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "logging_config.logger.info",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "logging_config.logger",
"line_number": 138,
"usage_type": "name"
}
] |
35614869771
|
"""
This will fetch data from the database
"""
from typing import List
from copy import deepcopy
from codegen.table.python_free_connex_table import PythonFreeConnexTable
from codegen.database import DatabaseDriver
from os import path
class DataFetcher:
def __init__(self, db_driver: DatabaseDriver):
"""
        Construct a db fetcher instance. It requires a db driver as input
        in order to fetch different files.
:param db_driver: A db_driver, can be postgres_db_driver
"""
self.db_driver = db_driver
def store_data(self, output_dir: str, tables: List[PythonFreeConnexTable], should_write=True) -> List[
PythonFreeConnexTable]:
"""
        Perform a select on all tables and store the output data into the [output_dir].
        Will also return a new list of tables which has the data_paths and data_sizes set.
        :param should_write: Whether the query results should actually be written to disk
        :param output_dir: Output dir
        :param tables: List of tables
        :return: A copy of [tables] with data paths and sizes filled in
"""
new_tables = deepcopy(tables)
for i, table in enumerate(tables):
if len(table.annotations) > 0:
annotations = ""
for index, annotation in enumerate(table.annotations):
annotations += f"{annotation} as {table.get_annotation_name(index)}"
if index < len(table.annotations) - 1:
annotations += ","
sql = f"select *, {annotations} from {table._table_name};"
else:
sql = f"select * from {table._table_name};"
output_path = path.join(output_dir, table.variable_table_name) + '.tbl'
size = 0
if should_write:
size = self.db_driver.execute_save(sql=sql, output_filename=output_path)
new_tables[i].data_paths = [output_path]
new_tables[i].data_sizes = [size]
return new_tables
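# --- Hedged usage sketch (assumption, not part of the original module) ---
# A typical call site, assuming a concrete DatabaseDriver (e.g. a Postgres
# driver) and PythonFreeConnexTable objects prepared elsewhere:
#
#   fetcher = DataFetcher(db_driver=postgres_driver)
#   tables_with_data = fetcher.store_data(output_dir="/tmp/data", tables=tables)
#   for t in tables_with_data:
#       print(t.variable_table_name, t.data_paths, t.data_sizes)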
|
secyan/secyan_gen
|
codegen/utils/DataFetcher.py
|
DataFetcher.py
|
py
| 1,945 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "codegen.database.DatabaseDriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "codegen.table.python_free_connex_table.PythonFreeConnexTable",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "codegen.table.python_free_connex_table.PythonFreeConnexTable",
"line_number": 22,
"usage_type": "name"
}
] |
45641177766
|
import streamlit as st
import pandas as pd
import numpy as np
import umap
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
from sklearn.decomposition import PCA
import webbrowser
# Set width mode to wide to display plots better
st.set_page_config(layout="wide")
# Streamlit Configuration
st.set_option('deprecation.showPyplotGlobalUse', False)
# Sidebar
st.sidebar.header("Schizophrenia Data Analysis")
uploaded_file = st.sidebar.file_uploader("Choose a CSV file", type="csv")
# Sliders for UMAP and KMeans parameters
st.sidebar.subheader("UMAP Parameters")
n_neighbors = st.sidebar.slider("Number of Neighbors", 2, 50, 5)
min_dist = st.sidebar.slider("Minimum Distance", 0.0, 1.0, 0.3, 0.1)
st.sidebar.subheader("Clustering Parameters")
n_clusters = st.sidebar.slider("Number of Clusters (KMeans)", 2, 20, 5)
n_dendro_clusters = st.sidebar.slider("Number of Clusters (Dendrogram)", 2, 20, 5)
# Add option to choose linkage method for dendrogram
linkage_methods = ["ward", "single", "complete", "average"]
selected_linkage_method = st.sidebar.selectbox("Linkage Method for Dendrogram", linkage_methods, 0)
# Checkbox to toggle PCA and UMAP visualization
show_pca = st.sidebar.checkbox("Show PCA Visualization", False)
show_umap = st.sidebar.checkbox("Show UMAP Visualization", False)
# Load the data
def load_data(uploaded_file):
data = pd.read_csv(uploaded_file)
return data
# Function to perform UMAP embedding and K-means clustering
def umap_and_kmeans(band_data, n_neighbors=n_neighbors, min_dist=min_dist, n_clusters=n_clusters):
embedding = umap.UMAP(n_neighbors=n_neighbors, min_dist=min_dist, random_state=42).fit_transform(band_data)
kmeans_labels = KMeans(n_init=4, n_clusters=n_clusters, random_state=42).fit(embedding).labels_
return embedding, kmeans_labels
# Function to plot UMAP embedding results
def plot_umap_embedding(embedding, kmeans_labels, ax, title):
ax.scatter(embedding[:, 0], embedding[:, 1], c=kmeans_labels, cmap='rainbow', s=20)
# add a text with umap parameters and kmeans cluster number
ax.text(0.99, 0.01, f"n_neighbors={n_neighbors}, min_dist={min_dist}, n_clusters={n_clusters}",
transform=ax.transAxes, ha='right', va='bottom', size=10)
ax.set_title(title)
def plot_dendrogram_colored_ticks(band_data, ax, title, method='ward'):
"""
Plot the dendrogram with correctly colored tick numbers for the "All Subjects" group.
"""
# Hierarchical clustering
Z = linkage(band_data, method=method)
# Plot the dendrogram
ddata = dendrogram(Z, ax=ax, leaf_rotation=90)
ax.set_title(title + " Dendrogram (" + method + " linkage)")
ax.set_xlabel("Sample Index")
ax.set_ylabel("Distance")
# Color the tick numbers based on control and schizophrenia subjects
control_indices = data_control.index.to_list()
schizophrenia_indices = data_schizophrenia.index.to_list()
# Get the x-tick labels (leaf labels) from the dendrogram
leaf_labels = ddata['leaves']
# Iterate through x-ticks and color them based on the group
for idx, label in enumerate(ax.get_xticklabels()):
label_idx = leaf_labels[idx]
if label_idx in control_indices:
label.set_color('black')
elif label_idx in schizophrenia_indices:
label.set_color('red')
def plot_dendrogram_and_pca_with_correct_colored_ticks(band_data, ax_dendro, title, color_ticks=False, method='ward'):
"""
    Plot the dendrogram with optionally colored tick numbers and return the linkage matrix for later PCA plotting.
"""
# Hierarchical clustering
Z = linkage(band_data, method=method)
# Plot the dendrogram
ddata = dendrogram(Z, ax=ax_dendro, leaf_rotation=90)
    ax_dendro.set_title(str(title) + " Dendrogram (" + str(method) + " linkage)")
ax_dendro.set_xlabel("Sample Index")
ax_dendro.set_ylabel("Distance")
if color_ticks:
# Color the tick numbers based on control and schizophrenia subjects
control_indices = data_control.index.to_list()
schizophrenia_indices = data_schizophrenia.index.to_list()
# Get the x-tick labels (leaf labels) from the dendrogram
leaf_labels = ddata['leaves']
# Iterate through x-ticks and color them based on the group
for idx, label in enumerate(ax_dendro.get_xticklabels()):
label_idx = leaf_labels[idx]
if label_idx in control_indices:
label.set_color('black')
elif label_idx in schizophrenia_indices:
label.set_color('red')
return Z
def plot_band_pca(band_data, Z, ax_pca, title):
# Cut the dendrogram to obtain 3 clusters
labels = fcluster(Z, t=n_dendro_clusters, criterion='maxclust')
band_data['Cluster'] = labels
# Use PCA to reduce the data to 2D
pca = PCA(n_components=2)
band_pca = pca.fit_transform(band_data.drop('Cluster', axis=1))
# return band_pca
# Create a scatter plot for PCA reduced data
ax_pca.scatter(band_pca[:, 0], band_pca[:, 1], c=band_data['Cluster'], cmap='rainbow')
ax_pca.set_title(title + " 2D PCA")
ax_pca.set_xlabel("Principal Component 1")
ax_pca.set_ylabel("Principal Component 2")
# If a CSV file is uploaded
if uploaded_file:
st.write("Dataset loaded successfully!")
# Load the data
data = load_data(uploaded_file)
# Split data into control and schizophrenia groups
data_control = data[data['Group'] == 0]
data_schizophrenia = data[data['Group'] == 1]
data_full = data
# Combined dendrogram for "All Subjects"
all_bands_data = pd.concat([
data.loc[:, data.columns.str.startswith('avpp_delta')],
data.loc[:, data.columns.str.startswith('avpp_theta')],
data.loc[:, data.columns.str.startswith('avpp_alpha')],
data.loc[:, data.columns.str.startswith('avpp_beta')],
data.loc[:, data.columns.str.startswith('avpp_gamma')]
], axis=1)
fig, ax = plt.subplots(figsize=(16, 8))
plot_dendrogram_colored_ticks(all_bands_data, ax, "All Bands Combined", method=selected_linkage_method)
plt.tight_layout()
# Save the dendrogram plot to a PNG file
dendrogram_filename = "Combined_Dendrogram_plot.png"
fig.savefig(dendrogram_filename, dpi=300)
# Provide a download button for the dendrogram PNG file
with open(dendrogram_filename, "rb") as f:
btn = st.download_button(
label="Download Combined Dendrogram Plot",
data=f,
file_name=dendrogram_filename,
mime="image/png"
)
st.pyplot(fig)
st.write("EDA - Exploratory Data Analysis")
# Detect available bands from column names
bands_list = ['delta', 'theta', 'alpha', 'beta', 'gamma']
available_bands = [band for band in bands_list if any(data.columns.str.startswith(f'avpp_{band}'))]
# Note: Replace all `plt.show()` with `st.pyplot()`
# Create the plots with dendrogram, PCA, and UMAP visualizations
nrows = 3 if show_pca and show_umap else 2 if show_pca or show_umap else 1 # Number of rows in the plot
    height = 15 if show_pca and show_umap else 10 if show_pca or show_umap else 5 # Height of the plot
for data_group, title in zip([data_schizophrenia, data_control, data_full], ["Schizophrenia", "Control", "All Subjects"]):
        fig, axes = plt.subplots(nrows=nrows, ncols=len(available_bands), figsize=(36, height))
fig.suptitle(title, fontsize=25)
# Ensure axes is 2D
if nrows == 1:
axes = axes.reshape(1, -1)
# Create band data based on detected bands for the current data group
bands = [(band.capitalize(), data_group.loc[:, data_group.columns.str.startswith(f'avpp_{band}')]) for band in available_bands]
# Configure the axes based on the selected visualizations
axes_mapping = [0] # dendrogram axes index is always 0
if show_pca:
axes_mapping.append(len(axes_mapping))
if show_umap:
axes_mapping.append(len(axes_mapping))
# Plot dendrogram, PCA, and UMAP visualizations for each band
for col, (band_name, band_data) in enumerate(bands):
ax_dendro = axes[axes_mapping[0]][col]
ax_dendro.set_title(band_name)
color_ticks = True if title == "All Subjects" else False
# Dendrogram plots using previous functions
Z = plot_dendrogram_and_pca_with_correct_colored_ticks(band_data.copy(), ax_dendro, band_name, color_ticks, method=selected_linkage_method)
if show_pca:
ax_pca = axes[axes_mapping[1]][col]
plot_band_pca(band_data.copy(), Z, ax_pca, title)
if show_umap:
ax_umap = axes[axes_mapping[-1]][col]
embedding, kmeans_labels = umap_and_kmeans(band_data)
plot_umap_embedding(embedding, kmeans_labels, ax_umap, band_name + " 2D UMAP")
plt.tight_layout()
plt.subplots_adjust(top=0.85)
# Save the plot to a PNG file
plot_filename = f"{title.replace(' ', '_')}_plot.png"
fig.savefig(plot_filename, dpi=600)
# plt.show()
# st.pyplot()
# st.image(plot_filename, use_column_width=True, clamp=True)
st.pyplot(fig)
plt.close(fig)
# Provide a download button for the PNG file
with open(plot_filename, "rb") as f:
btn = st.download_button(
label=f"Download {title} Plot",
data=f,
file_name=plot_filename,
mime="image/png"
)
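# --- Hedged note (assumption, not part of the original app) ---
# Streamlit reruns the whole script on every widget interaction, so load_data
# re-parses the CSV each time. Wrapping it in Streamlit's cache decorator
# avoids that (st.cache_data is the modern replacement for st.cache):
#
#   @st.cache_data
#   def load_data(uploaded_file):
#       return pd.read_csv(uploaded_file)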
|
furmanlukasz/clusteringSchizphrenia
|
app.py
|
app.py
|
py
| 9,731 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "streamlit.set_page_config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.set_option",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.header",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.file_uploader",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.subheader",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.slider",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.slider",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.subheader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.slider",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.slider",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.checkbox",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.checkbox",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "umap.UMAP",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.linkage",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.dendrogram",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.linkage",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.dendrogram",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.fcluster",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "streamlit.download_button",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "streamlit.pyplot",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "streamlit.pyplot",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "streamlit.download_button",
"line_number": 229,
"usage_type": "call"
}
] |
10701337998
|
import tensorflow as tf
import re
import time, datetime
import os
import data_helper
TOWER_NAME = 'tower'
class CNNClassify(object):
"""CNN图像分类
"""
def __init__(self, batch_size, num_classes, num_train_examples, initial_lr=0.1, lr_decay_factor=0.1,
moving_average_decay=0.9999, num_epochs_per_decay=300, log_frequency=10,
max_steps=200000, checkpoint_every=5000, num_gpus=4, session_conf=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.3))):
self.batch_size = batch_size
self.num_classes = num_classes
        self.moving_average_decay = moving_average_decay # decay used for the moving average
        self.initial_lr = initial_lr # initial learning rate
        self.lr_decay_factor = lr_decay_factor # learning-rate decay factor
        self.num_epochs_per_decay = num_epochs_per_decay # how many epochs between decays
        self.num_train_examples = num_train_examples # number of training examples
        self.log_frequency = log_frequency # how often (in steps) to print results to the console
        self.max_steps = max_steps
        self.checkpoint_every = checkpoint_every # how often (in steps) to save the model
self.num_checkpoints = 5
self.num_gpus = num_gpus
self.session_conf = session_conf
def _variable_on_cpu(self, name, shape, initializer):
"""帮助创建存储在CPU内存上的变量。"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(self, name, shape, stddev, wd):
"""初始化权重变量
Args:
name: name of the variable
shape: list of ints
stddev: 高斯函数标准差
wd: 添加L2范数损失权重衰减系数。如果没有,该变量不添加重量衰减。
Returns:权重变量
"""
dtype = tf.float32
var = self._variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def _activation_summary(self, x):
"""创建tensorboard摘要 好可视化查看
"""
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def average_gradients(self, tower_grads):
"""计算所有tower上所有变量的平均梯度
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
            # Each grads-and-vars entry looks like:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
                # Add a leading 0th dimension to represent the tower: [grad0_gpuN]
expanded_g = tf.expand_dims(g, 0)
# [[grad0_gpu1],...,[grad0_gpuN]]
grads.append(expanded_g)
            # Average over the towers: the leading dimension added above is what
            # concat stacks along, so reduce_mean can average across towers.
            grad = tf.concat(axis=0, values=grads) # [grad0_gpu1,..., grad0_gpuN]
            grad = tf.reduce_mean(grad, 0) # averaged gradient
            # Reattach the variable
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def inference(self, images):
"""向前传播
"""
# 第一层卷积
with tf.variable_scope('conv1') as scope:
            kernel = self._variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0) # weight kernel
            # 2-D convolution
            conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') # zero-pad to keep the shape unchanged
biases = self._variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
            conv1 = tf.nn.relu(pre_activation, name=scope.name) # ReLU activation
self._activation_summary(conv1)
        # pool1: max pooling
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
        # norm1: add an LRN step, which can improve the model's generalization
        norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
        # Second convolutional layer
with tf.variable_scope('conv2') as scope:
kernel = self._variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = self._variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
self._activation_summary(conv2)
        # Apply LRN first this time
        norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        # Max pooling
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
        # Fully connected hidden layer mapping to a 384-d vector
with tf.variable_scope('local3') as scope:
            # Flatten the preceding max-pool output into a single matrix for the fully connected layer
reshape = tf.reshape(pool2, [self.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = self._variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004)
biases = self._variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
self._activation_summary(local3)
        # Another fully connected layer mapping to a 192-d vector
with tf.variable_scope('local4') as scope:
weights = self._variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004)
biases = self._variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
self._activation_summary(local4)
        # Linear output layer; no softmax here because the loss function applies it internally, which is more efficient
with tf.variable_scope('softmax_linear') as scope:
weights = self._variable_with_weight_decay('weights', [192, self.num_classes], stddev=1 / 192.0, wd=0.0)
biases = self._variable_on_cpu('biases', [self.num_classes], tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
self._activation_summary(softmax_linear)
return softmax_linear
def loss(self, logits, labels):
"""损失函数
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def tower_loss(self, scope, logits, labels):
_ = self.loss(logits, labels)
        # Gather all the losses onto the current tower
losses = tf.get_collection('losses', scope)
total_loss = tf.add_n(losses, name='total_loss')
for l in losses + [total_loss]:
            # Strip the tower_[0-9] variable-name prefix so names match the single-GPU case
loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
tf.summary.scalar(loss_name, l)
return total_loss
def evaluation(self, logits, labels, k=1):
"""评估函数
:param logits: 预测
:param labels: 标签
"""
correct = tf.nn.in_top_k(logits, labels, k=k)
# correct = tf.equal(self.predictions, tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.add_to_collection('accuracy', accuracy)
return tf.add_n(tf.get_collection('accuracy'), name='accuracy')
def tower_evaluation(self, scope, logits, labels, k=1):
"""多gpu的评估函数
"""
_ = self.evaluation(logits, labels, k)
accuracy = tf.get_collection('accuracy', scope)
total_accuracy = tf.reduce_mean(accuracy, axis=0, name='total_accuracy')
return total_accuracy
def _add_loss_summaries(self, total_loss):
"""增加损失摘要
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train_operation(self, total_loss, global_step):
"""训练操作
"""
num_batches_per_epoch = self.num_train_examples / self.batch_size # 每轮的批次数
decay_steps = int(num_batches_per_epoch * self.num_epochs_per_decay) # 多少步衰减
# 基于步数,以指数方式衰减学习率。
lr = tf.train.exponential_decay(self.initial_lr, global_step, decay_steps, self.lr_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', lr)
        # Moving average of the losses
loss_averages_op = self._add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
            opt = tf.train.GradientDescentOptimizer(lr) # optimizer
            grads = opt.compute_gradients(total_loss) # gradients
        # Apply the gradients
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # training op
        # Add histograms for the trainable variables
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
        # Add histograms for the gradients
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
        # Track the moving averages of all trainable variables
variable_averages = tf.train.ExponentialMovingAverage(self.moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def train_step(self, sess, summary_writer):
"""单步训练
"""
_, step, cur_loss, cur_acc = sess.run([self.train_op, self.global_step, self._loss, self.accuracy])
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, cur_loss, cur_acc))
        # Write summaries
if step % 100 == 0:
summary_str = sess.run(self.summary)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
def train(self, filename, out_dir):
"""训练
"""
with tf.Graph().as_default():
sess = tf.Session(config=self.session_conf)
with sess.as_default():
self.global_step = tf.contrib.framework.get_or_create_global_step()
with tf.device('/cpu:0'):
images, labels = data_helper.distorted_inputs(filename, self.batch_size)
logits = self.inference(images)
self._loss = self.loss(logits, labels)
self.train_op = self.train_operation(self._loss, self.global_step)
self.accuracy = self.evaluation(logits, labels)
self.summary = tf.summary.merge_all()
                # Checkpoint setup
                checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
                checkpoint_prefix = os.path.join(checkpoint_dir, "model") # model checkpoint prefix
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.num_checkpoints)
summary_writer = tf.summary.FileWriter(out_dir + "/summary", sess.graph)
                # Initialize all variables
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
for step in range(self.max_steps):
                    self.train_step(sess, summary_writer) # train one step
                    cur_step = tf.train.global_step(sess, self.global_step)
                    # Save the model every checkpoint_every iterations
if cur_step % self.checkpoint_every == 0 and cur_step != 0:
path = saver.save(sess, checkpoint_prefix, global_step=cur_step)
print("Saved model checkpoint to {}\n".format(path))
def multi_gpu_train(self, filename, out_dir):
with tf.Graph().as_default(), tf.device('/cpu:0'):
sess = tf.Session(config=self.session_conf)
with sess.as_default():
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
self.global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
                # Learning-rate decay setup
num_batches_per_epoch = self.num_train_examples / self.batch_size
decay_steps = int(num_batches_per_epoch * self.num_epochs_per_decay)
                # Decay the learning rate based on the step count
lr = tf.train.exponential_decay(self.initial_lr, self.global_step, decay_steps, self.lr_decay_factor,
staircase=True)
                # Optimizer that performs gradient descent
                opt = tf.train.GradientDescentOptimizer(lr)
                images, labels = data_helper.distorted_inputs(filename, self.batch_size) # fetch the data
                # Batch queue that prefetches input batches for the GPUs
batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue([images, labels], capacity=2 * self.num_gpus)
tower_grads = []
summaries = None
with tf.variable_scope(tf.get_variable_scope()):
for i in range(self.num_gpus):
with tf.device('/gpu:{}'.format(i)):
with tf.name_scope('{}_{}'.format(TOWER_NAME, i)) as scope:
                                # Dequeue one batch for this GPU
                                image_batch, label_batch = batch_queue.dequeue()
                                # Compute one tower's loss; every tower shares the weight variables
                                logits = self.inference(image_batch)
                                self._loss = self.tower_loss(scope, logits, label_batch)
                                self.accuracy = self.tower_evaluation(scope, logits, label_batch)
                                # Reuse the variables for the next tower
                                tf.get_variable_scope().reuse_variables()
                                # Keep the summaries from the final tower
                                summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
                                # Compute the gradients
                                grads = opt.compute_gradients(self._loss)
                                # Track the gradients across all towers
tower_grads.append(grads)
                grads = self.average_gradients(tower_grads) # average the gradients
                # Add a summary for the learning rate
                summaries.append(tf.summary.scalar('learning_rate', lr))
                # Add gradient histograms
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
                # Apply the gradients to update the shared variables
                apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)
                # Add histograms for all trainable variables
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
                # Track the moving averages of all trainable variables
variable_averages = tf.train.ExponentialMovingAverage(self.moving_average_decay, self.global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
                # Group all updates into a single training op
                self.train_op = tf.group(apply_gradient_op, variables_averages_op)
                # Merge the summaries from the last tower
                self.summary = tf.summary.merge(summaries)
                # Checkpoint setup
                checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
                checkpoint_prefix = os.path.join(checkpoint_dir, "model") # model checkpoint prefix
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.num_checkpoints)
summary_writer = tf.summary.FileWriter(out_dir + "/summary", sess.graph)
                # Initialize all variables
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
                # Start the queue runners
tf.train.start_queue_runners(sess=sess)
for step in range(self.max_steps):
                    self.train_step(sess, summary_writer) # train one step
                    cur_step = tf.train.global_step(sess, self.global_step)
                    # Save the model every checkpoint_every iterations
if cur_step % self.checkpoint_every == 0 and cur_step != 0:
path = saver.save(sess, checkpoint_prefix, global_step=cur_step)
print("Saved model checkpoint to {}\n".format(path))
|
mikuh/tf_code
|
cnn/cnn_model.py
|
cnn_model.py
|
py
| 19,630 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "tensorflow.ConfigProto",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.GPUOptions",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow.device",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.multiply",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.l2_loss",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.add_to_collection",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.zero_fraction",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "tensorflow.concat",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.conv2d",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.constant_initializer",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.bias_add",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.max_pool",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.lrn",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.conv2d",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.constant_initializer",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.bias_add",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.lrn",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.max_pool",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant_initializer",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.matmul",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant_initializer",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.matmul",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant_initializer",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "tensorflow.add",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "tensorflow.cast",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "tensorflow.int64",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "tensorflow.add_to_collection",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "tensorflow.add_n",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "tensorflow.add_n",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.in_top_k",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "tensorflow.cast",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.add_to_collection",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "tensorflow.add_n",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.ExponentialMovingAverage",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.exponential_decay",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.control_dependencies",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.GradientDescentOptimizer",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.ExponentialMovingAverage",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "tensorflow.control_dependencies",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "tensorflow.no_op",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Graph",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.framework.get_or_create_global_step",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.device",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "data_helper.distorted_inputs",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.merge_all",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.get_checkpoint_state",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.checkpoint_exists",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.start_queue_runners",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.global_step",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Graph",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "tensorflow.device",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant_initializer",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.exponential_decay",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.GradientDescentOptimizer",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "data_helper.distorted_inputs",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.slim.prefetch_queue.prefetch_queue",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable_scope",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "tensorflow.device",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable_scope",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphKeys",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.ExponentialMovingAverage",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "tensorflow.group",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.merge",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 382,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 390,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.get_checkpoint_state",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.checkpoint_exists",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.start_queue_runners",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 402,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.global_step",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 406,
"usage_type": "attribute"
}
] |
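The `api` entries above collectively index a TensorFlow 1.x training script: a cross-entropy loss assembled with `tf.add_to_collection`/`tf.add_n`, an exponentially decayed learning rate, plain SGD, and an `ExponentialMovingAverage` over the trainable variables. As a hedged illustration of that pattern only — the shapes, decay constants, and names below are assumptions, not values recovered from the indexed source file — a minimal sketch assuming TensorFlow 1.x:

```python
# Minimal TF1-style sketch of the pattern the api records index.
# All hyperparameters and names are illustrative assumptions.
import tensorflow as tf  # assumes TensorFlow 1.x APIs


def loss(logits, labels):
    # Cast labels before the sparse softmax op, as the indexed calls suggest.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name="cross_entropy_per_example")
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name="cross_entropy")
    tf.add_to_collection("losses", cross_entropy_mean)
    # Total loss = cross entropy + any L2 terms added to the collection.
    return tf.add_n(tf.get_collection("losses"), name="total_loss")


def train_op(total_loss, global_step):
    # Decay the learning rate exponentially with the global step.
    lr = tf.train.exponential_decay(
        0.1, global_step, decay_steps=1000, decay_rate=0.96, staircase=True)
    tf.summary.scalar("learning_rate", lr)
    opt = tf.train.GradientDescentOptimizer(lr)
    apply_op = opt.apply_gradients(
        opt.compute_gradients(total_loss), global_step=global_step)
    # Track a moving average of all trainable variables.
    ema = tf.train.ExponentialMovingAverage(0.9999, global_step)
    with tf.control_dependencies([apply_op]):
        return tf.group(ema.apply(tf.trainable_variables()), name="train")
```

Every API called in the sketch (`tf.cast`, `tf.nn.sparse_softmax_cross_entropy_with_logits`, `tf.add_to_collection`, `tf.add_n`, `tf.train.exponential_decay`, `tf.train.GradientDescentOptimizer`, `tf.train.ExponentialMovingAverage`, `tf.control_dependencies`) appears in the record above; how the original file wired them together beyond that is not recoverable from the index alone.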