{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \"\"\" % (url(\"other\"), url(\"else\"),\n url(\"files/made_with_cherrypy_small.png\"))\n index.exposed = True\n \n def default(self, *args, **kwargs):\n return \"args: %s kwargs: %s\" % (args, kwargs)\n default.exposed = True\n \n def other(self, a=2, b='bananas', c=None):\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n if c is None:\n return \"Have %d %s.\" % (int(a), b)\n else:\n return \"Have %d %s, %s.\" % (int(a), b, c)\n other.exposed = True\n \n files = cherrypy.tools.staticdir.handler(\n section=\"/files\",\n dir=os.path.join(local_dir, \"static\"),\n # Ignore .php files, etc.\n match=r'\\.(css|gif|html?|ico|jpe?g|js|png|swf|xml)$',\n )\n\n\nroot = Root()\n\n# Uncomment the following to use your own favicon instead of CP's default.\n#favicon_path = os.path.join(local_dir, \"favicon.ico\")\n#root.favicon_ico = tools.staticfile.handler(filename=favicon_path)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":1162,"cells":{"repo_name":{"kind":"string","value":"tboyce021/home-assistant"},"path":{"kind":"string","value":"homeassistant/components/mcp23017/switch.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"2770"},"content":{"kind":"string","value":"\"\"\"Support for switch sensor using I2C MCP23017 chip.\"\"\"\nfrom adafruit_mcp230xx.mcp23017 import MCP23017 # pylint: disable=import-error\nimport board # pylint: disable=import-error\nimport busio # pylint: disable=import-error\nimport digitalio # pylint: disable=import-error\nimport voluptuous as vol\n\nfrom homeassistant.components.switch import PLATFORM_SCHEMA\nfrom homeassistant.const import DEVICE_DEFAULT_NAME\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import ToggleEntity\n\nCONF_INVERT_LOGIC = \"invert_logic\"\nCONF_I2C_ADDRESS = \"i2c_address\"\nCONF_PINS = \"pins\"\nCONF_PULL_MODE = \"pull_mode\"\n\nDEFAULT_INVERT_LOGIC = False\nDEFAULT_I2C_ADDRESS = 0x20\n\n_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Required(CONF_PINS): _SWITCHES_SCHEMA,\n vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,\n vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),\n }\n)\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the MCP23017 devices.\"\"\"\n invert_logic = config.get(CONF_INVERT_LOGIC)\n i2c_address = config.get(CONF_I2C_ADDRESS)\n\n i2c = busio.I2C(board.SCL, board.SDA)\n mcp = MCP23017(i2c, address=i2c_address)\n\n switches = []\n pins = config.get(CONF_PINS)\n for pin_num, pin_name in pins.items():\n pin = mcp.get_pin(pin_num)\n switches.append(MCP23017Switch(pin_name, pin, invert_logic))\n add_entities(switches)\n\n\nclass MCP23017Switch(ToggleEntity):\n \"\"\"Representation of a MCP23017 output pin.\"\"\"\n\n def __init__(self, name, pin, invert_logic):\n \"\"\"Initialize the pin.\"\"\"\n self._name = name or DEVICE_DEFAULT_NAME\n self._pin = pin\n self._invert_logic = invert_logic\n self._state = False\n\n self._pin.direction = digitalio.Direction.OUTPUT\n self._pin.value = self._invert_logic\n\n @property\n def name(self):\n \"\"\"Return the name of the switch.\"\"\"\n return self._name\n\n @property\n def should_poll(self):\n \"\"\"No polling needed.\"\"\"\n return False\n\n @property\n def is_on(self):\n \"\"\"Return true if device is on.\"\"\"\n return self._state\n\n @property\n def assumed_state(self):\n \"\"\"Return true if optimistic updates are used.\"\"\"\n return True\n\n def turn_on(self, **kwargs):\n \"\"\"Turn the device on.\"\"\"\n self._pin.value = not self._invert_logic\n self._state = True\n self.schedule_update_ha_state()\n\n def turn_off(self, **kwargs):\n \"\"\"Turn the device off.\"\"\"\n self._pin.value = self._invert_logic\n self._state = False\n self.schedule_update_ha_state()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":1163,"cells":{"repo_name":{"kind":"string","value":"cryptica/slapnet"},"path":{"kind":"string","value":"benchmarks/scalable/LeaderElectionCR79/make_net.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2184"},"content":{"kind":"string","value":"#!/usr/bin/python3\n\nimport sys\nimport random\n\ndef make_net(n,order):\n def previous(i):\n return (((i-2) % n) + 1)\n print('petri net \"leader election %i\" {' % n)\n print(' places {')\n for i in range(1,n+1):\n print(' ', end='')\n for j in range(1,n+1):\n print('s%in%i ' % (i,j), end='')\n print()\n print(' ', end='')\n for j in range(1,n+1):\n print('s%im%i ' % (i,j), end='')\n print()\n print()\n print(' lead')\n print(' }')\n print(' transitions {')\n for i in range(1,n+1):\n print(' ', end='')\n for j in range(1,n+1):\n print('s%isend%i ' % (i,j), end='')\n print()\n print(' ', end='')\n for j in range(1,n+1):\n if j < i:\n print('s%idisc%i ' % (i,j), end='')\n elif i == j:\n print('s%iacpt%i ' % (i,j), end='')\n else:\n print('s%ipass%i ' % (i,j), end='')\n print()\n print()\n print(' newleader')\n print(' }')\n print(' arcs {')\n for i in range(1,n+1):\n for j in range(1,n+1):\n print(' s%in%i -> s%isend%i -> s%im%i' % (i,j,i,j,i,j))\n print()\n for j in range(1,n+1):\n print(' s%im%i -> ' % (previous(i),j), end='')\n if j < i:\n print('s%idisc%i ' % (i,j))\n elif i == j:\n print('s%iacpt%i -> lead' % (i,j))\n else:\n print('s%ipass%i -> s%im%i' % (i,j,i,j))\n print()\n print()\n print(' lead -> newleader -> { ', end='')\n for i in range(1,n+1):\n print('s%in%i ' % (i,order[i-1]), end='')\n print('}')\n print(' }')\n print(' initial { ', end='')\n for i in range(1,n+1):\n print('s%in%i ' % (i,order[i-1]), end='')\n print('}')\n print('}')\n #print('safety property {')\n #print(' lead >= 2')\n #print('}')\n print('liveness property {')\n print(' newleader = 0')\n print('}')\n\nn = int(sys.argv[1])\no = sys.argv[2]\n\norder = list(range(1,n+1))\nif o == 'rand':\n random.shuffle(order)\nelif o == 'rev':\n order.reverse()\nmake_net(n,order)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":1164,"cells":{"repo_name":{"kind":"string","value":"Distrotech/intellij-community"},"path":{"kind":"string","value":"python/lib/Lib/site-packages/django/contrib/gis/db/models/manager.py"},"copies":{"kind":"string","value":"505"},"size":{"kind":"string","value":"3578"},"content":{"kind":"string","value":"from django.db.models.manager import Manager\nfrom django.contrib.gis.db.models.query import GeoQuerySet\n\nclass GeoManager(Manager):\n \"Overrides Manager to return Geographic QuerySets.\"\n\n # This manager should be used for queries on related fields\n # so that geometry columns on Oracle and MySQL are selected\n # properly.\n use_for_related_fields = True\n\n def get_query_set(self):\n return GeoQuerySet(self.model, using=self._db)\n\n def area(self, *args, **kwargs):\n return self.get_query_set().area(*args, **kwargs)\n\n def centroid(self, *args, **kwargs):\n return self.get_query_set().centroid(*args, **kwargs)\n\n def collect(self, *args, **kwargs):\n return self.get_query_set().collect(*args, **kwargs)\n\n def difference(self, *args, **kwargs):\n return self.get_query_set().difference(*args, **kwargs)\n\n def distance(self, *args, **kwargs):\n return self.get_query_set().distance(*args, **kwargs)\n\n def envelope(self, *args, **kwargs):\n return self.get_query_set().envelope(*args, **kwargs)\n\n def extent(self, *args, **kwargs):\n return self.get_query_set().extent(*args, **kwargs)\n\n def extent3d(self, *args, **kwargs):\n return self.get_query_set().extent3d(*args, **kwargs)\n\n def force_rhr(self, *args, **kwargs):\n return self.get_query_set().force_rhr(*args, **kwargs)\n\n def geohash(self, *args, **kwargs):\n return self.get_query_set().geohash(*args, **kwargs)\n\n def geojson(self, *args, **kwargs):\n return self.get_query_set().geojson(*args, **kwargs)\n\n def gml(self, *args, **kwargs):\n return self.get_query_set().gml(*args, **kwargs)\n\n def intersection(self, *args, **kwargs):\n return self.get_query_set().intersection(*args, **kwargs)\n\n def kml(self, *args, **kwargs):\n return self.get_query_set().kml(*args, **kwargs)\n\n def length(self, *args, **kwargs):\n return self.get_query_set().length(*args, **kwargs)\n\n def make_line(self, *args, **kwargs):\n return self.get_query_set().make_line(*args, **kwargs)\n\n def mem_size(self, *args, **kwargs):\n return self.get_query_set().mem_size(*args, **kwargs)\n\n def num_geom(self, *args, **kwargs):\n return self.get_query_set().num_geom(*args, **kwargs)\n\n def num_points(self, *args, **kwargs):\n return self.get_query_set().num_points(*args, **kwargs)\n\n def perimeter(self, *args, **kwargs):\n return self.get_query_set().perimeter(*args, **kwargs)\n\n def point_on_surface(self, *args, **kwargs):\n return self.get_query_set().point_on_surface(*args, **kwargs)\n\n def reverse_geom(self, *args, **kwargs):\n return self.get_query_set().reverse_geom(*args, **kwargs)\n\n def scale(self, *args, **kwargs):\n return self.get_query_set().scale(*args, **kwargs)\n\n def snap_to_grid(self, *args, **kwargs):\n return self.get_query_set().snap_to_grid(*args, **kwargs)\n\n def svg(self, *args, **kwargs):\n return self.get_query_set().svg(*args, **kwargs)\n\n def sym_difference(self, *args, **kwargs):\n return self.get_query_set().sym_difference(*args, **kwargs)\n\n def transform(self, *args, **kwargs):\n return self.get_query_set().transform(*args, **kwargs)\n\n def translate(self, *args, **kwargs):\n return self.get_query_set().translate(*args, **kwargs)\n\n def union(self, *args, **kwargs):\n return self.get_query_set().union(*args, **kwargs)\n\n def unionagg(self, *args, **kwargs):\n return self.get_query_set().unionagg(*args, **kwargs)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":1165,"cells":{"repo_name":{"kind":"string","value":"xinwu/horizon"},"path":{"kind":"string","value":"openstack_dashboard/dashboards/project/networks/views.py"},"copies":{"kind":"string","value":"43"},"size":{"kind":"string","value":"5560"},"content":{"kind":"string","value":"# Copyright 2012 NEC Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nViews for managing Neutron Networks.\n\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import exceptions\nfrom horizon import forms\nfrom horizon import tables\nfrom horizon.utils import memoized\nfrom horizon import workflows\n\nfrom openstack_dashboard import api\n\nfrom openstack_dashboard.dashboards.project.networks \\\n import forms as project_forms\nfrom openstack_dashboard.dashboards.project.networks.ports \\\n import tables as port_tables\nfrom openstack_dashboard.dashboards.project.networks.subnets \\\n import tables as subnet_tables\nfrom openstack_dashboard.dashboards.project.networks \\\n import tables as project_tables\nfrom openstack_dashboard.dashboards.project.networks \\\n import workflows as project_workflows\n\n\nclass IndexView(tables.DataTableView):\n table_class = project_tables.NetworksTable\n template_name = 'project/networks/index.html'\n page_title = _(\"Networks\")\n\n def get_data(self):\n try:\n tenant_id = self.request.user.tenant_id\n networks = api.neutron.network_list_for_tenant(self.request,\n tenant_id)\n except Exception:\n networks = []\n msg = _('Network list can not be retrieved.')\n exceptions.handle(self.request, msg)\n return networks\n\n\nclass CreateView(workflows.WorkflowView):\n workflow_class = project_workflows.CreateNetwork\n ajax_template_name = 'project/networks/create.html'\n\n\nclass UpdateView(forms.ModalFormView):\n context_object_name = 'network'\n form_class = project_forms.UpdateNetwork\n form_id = \"update_network_form\"\n modal_header = _(\"Edit Network\")\n submit_label = _(\"Save Changes\")\n submit_url = \"horizon:project:networks:update\"\n success_url = reverse_lazy(\"horizon:project:networks:index\")\n template_name = 'project/networks/update.html'\n page_title = _(\"Update Network\")\n\n def get_context_data(self, **kwargs):\n context = super(UpdateView, self).get_context_data(**kwargs)\n args = (self.kwargs['network_id'],)\n context[\"network_id\"] = self.kwargs['network_id']\n context[\"submit_url\"] = reverse(self.submit_url, args=args)\n return context\n\n @memoized.memoized_method\n def _get_object(self, *args, **kwargs):\n network_id = self.kwargs['network_id']\n try:\n return api.neutron.network_get(self.request, network_id)\n except Exception:\n redirect = self.success_url\n msg = _('Unable to retrieve network details.')\n exceptions.handle(self.request, msg, redirect=redirect)\n\n def get_initial(self):\n network = self._get_object()\n return {'network_id': network['id'],\n 'tenant_id': network['tenant_id'],\n 'name': network['name'],\n 'admin_state': network['admin_state_up']}\n\n\nclass DetailView(tables.MultiTableView):\n table_classes = (subnet_tables.SubnetsTable, port_tables.PortsTable)\n template_name = 'project/networks/detail.html'\n page_title = _(\"Network Details: {{ network.name }}\")\n\n def get_subnets_data(self):\n try:\n network = self._get_data()\n subnets = api.neutron.subnet_list(self.request,\n network_id=network.id)\n except Exception:\n subnets = []\n msg = _('Subnet list can not be retrieved.')\n exceptions.handle(self.request, msg)\n return subnets\n\n def get_ports_data(self):\n try:\n network_id = self.kwargs['network_id']\n ports = api.neutron.port_list(self.request, network_id=network_id)\n except Exception:\n ports = []\n msg = _('Port list can not be retrieved.')\n exceptions.handle(self.request, msg)\n return ports\n\n @memoized.memoized_method\n def _get_data(self):\n try:\n network_id = self.kwargs['network_id']\n network = api.neutron.network_get(self.request, network_id)\n network.set_id_as_name_if_empty(length=0)\n except Exception:\n msg = _('Unable to retrieve details for network \"%s\".') \\\n % (network_id)\n exceptions.handle(self.request, msg,\n redirect=self.get_redirect_url())\n return network\n\n def get_context_data(self, **kwargs):\n context = super(DetailView, self).get_context_data(**kwargs)\n network = self._get_data()\n context[\"network\"] = network\n table = project_tables.NetworksTable(self.request)\n context[\"url\"] = self.get_redirect_url()\n context[\"actions\"] = table.render_row_actions(network)\n return context\n\n @staticmethod\n def get_redirect_url():\n return reverse_lazy('horizon:project:networks:index')\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":1166,"cells":{"repo_name":{"kind":"string","value":"kmackenzieii/marauders-map"},"path":{"kind":"string","value":"capture/make_fingerprint.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1582"},"content":{"kind":"string","value":"import re\nimport os\nimport kirk\nimport numpy as n\nimport pickle\ndef trimmean(arr, percent):\n n = len(arr)\n k = int(round(n*(float(percent)/100)/2))\n return n.mean(arr[k+1:n-k])\n\nFile = kirk.File\nwidth = kirk.width\nheight = kirk.height\nbox_size = kirk.box_size\n\n#Dictionary data structure to hold out parsed data\n#For each MAC address there is a multidimensional array of size [x][y]\n#In each of those arrays is a list of RSSI values found at that location\nrssi = {}\n#Loop through every file in our data directory and extract data into rssi\nfor filename in os.listdir('./fingerprint'):\n\tdata = re.split('_',filename)\n\tx = int(data[0])\n\ty = int(data[1])\n\tf = open('./fingerprint/'+filename)\n\n\tfor line in f:\n\t\tread = line.split()\n\t\tif len(read)==3 and read[0] == read[1]:\n\t\t\tmac = read[0]\n\t\t\tif read[2] != '':\n\t\t\t\tstrength = int(read[2].strip())\n\t\t\t\tif mac in rssi:\n\t\t\t\t\trssi[mac][x][y].append(strength)\n\t\t\t\telse:\n\t\t\t\t\tif mac != \"48:5a:3f:45:21:0f\": #Filter out my cellphone\n\t\t\t\t\t\tarr = [[[] for _ in range(kirk.x)] for _ in range(kirk.y)]\n\t\t\t\t\t\trssi.update({mac:arr})\n\t\t\t\t\t\trssi[mac][x][y].append(strength)\n#Now that we have the data, calculate averages for each location\nfingerprint = {}\nfor mac in rssi:\n\tavg = [[None for _ in range(kirk.x)] for _ in range(kirk.y)]\n\tfor x in range(len(rssi[mac])):\n\t\tfor y in range(len(rssi[mac][x])):\n\t\t\tl = rssi[mac][x][y]\n\t\t\tif len(l) > 0:\n\t\t\t\tavg[x][y] = n.mean(l)\n\t\t\t\t#avg[x][y] = trimmean(l, 80)\n\tfingerprint.update({mac:avg})\n\nfinger_file = open(r'fingerprint.pkl', 'wb')\npickle.dump(fingerprint, finger_file)\nfinger_file.close()\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":1167,"cells":{"repo_name":{"kind":"string","value":"cwu2011/seaborn"},"path":{"kind":"string","value":"seaborn/timeseries.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"13239"},"content":{"kind":"string","value":"\"\"\"Timeseries plotting functions.\"\"\"\nfrom __future__ import division\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats, interpolate\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom .external.six import string_types\n\n\nfrom . import utils\nfrom . import algorithms as algo\nfrom .palettes import color_palette\n\n\ndef tsplot(data, time=None, unit=None, condition=None, value=None,\n err_style=\"ci_band\", ci=68, interpolate=True, color=None,\n estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,\n legend=True, ax=None, **kwargs):\n \"\"\"Plot one or more timeseries with flexible representation of uncertainty.\n\n This function can take data specified either as a long-form (tidy)\n DataFrame or as an ndarray with dimensions for sampling unit, time, and\n (optionally) condition. The interpretation of some of the other parameters\n changes depending on the type of object passed as data.\n\n Parameters\n ----------\n data : DataFrame or ndarray\n Data for the plot. Should either be a \"long form\" dataframe or an\n array with dimensions (unit, time, condition). In both cases, the\n condition field/dimension is optional. The type of this argument\n determines the interpretation of the next few parameters.\n time : string or series-like\n Either the name of the field corresponding to time in the data\n DataFrame or x values for a plot when data is an array. If a Series,\n the name will be used to label the x axis.\n unit : string\n Field in the data DataFrame identifying the sampling unit (e.g.\n subject, neuron, etc.). The error representation will collapse over\n units at each time/condition observation. This has no role when data\n is an array.\n value : string\n Either the name of the field corresponding to the data values in\n the data DataFrame (i.e. the y coordinate) or a string that forms\n the y axis label when data is an array.\n condition : string or Series-like\n Either the name of the field identifying the condition an observation\n falls under in the data DataFrame, or a sequence of names with a length\n equal to the size of the third dimension of data. There will be a\n separate trace plotted for each condition. If condition is a Series\n with a name attribute, the name will form the title for the plot\n legend (unless legend is set to False).\n err_style : string or list of strings or None\n Names of ways to plot uncertainty across units from set of\n {ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.\n Can use one or more than one method.\n ci : float or list of floats in [0, 100]\n Confidence interaval size(s). If a list, it will stack the error\n plots for each confidence interval. Only relevant for error styles\n with \"ci\" in the name.\n interpolate : boolean\n Whether to do a linear interpolation between each timepoint when\n plotting. The value of this parameter also determines the marker\n used for the main plot traces, unless marker is specified as a keyword\n argument.\n color : seaborn palette or matplotlib color name or dictionary\n Palette or color for the main plots and error representation (unless\n plotting by unit, which can be separately controlled with err_palette).\n If a dictionary, should map condition name to color spec.\n estimator : callable\n Function to determine central tendency and to pass to bootstrap\n must take an ``axis`` argument.\n n_boot : int\n Number of bootstrap iterations.\n err_palette: seaborn palette\n Palette name or list of colors used when plotting data for each unit.\n err_kws : dict, optional\n Keyword argument dictionary passed through to matplotlib function\n generating the error plot,\n ax : axis object, optional\n Plot in given axis; if None creates a new figure\n kwargs :\n Other keyword arguments are passed to main plot() call\n\n Returns\n -------\n ax : matplotlib axis\n axis with plot data\n\n \"\"\"\n # Sort out default values for the parameters\n if ax is None:\n ax = plt.gca()\n\n if err_kws is None:\n err_kws = {}\n\n # Handle different types of input data\n if isinstance(data, pd.DataFrame):\n\n xlabel = time\n ylabel = value\n\n # Condition is optional\n if condition is None:\n condition = pd.Series(np.ones(len(data)))\n legend = False\n legend_name = None\n n_cond = 1\n else:\n legend = True and legend\n legend_name = condition\n n_cond = len(data[condition].unique())\n\n else:\n data = np.asarray(data)\n\n # Data can be a timecourse from a single unit or\n # several observations in one condition\n if data.ndim == 1:\n data = data[np.newaxis, :, np.newaxis]\n elif data.ndim == 2:\n data = data[:, :, np.newaxis]\n n_unit, n_time, n_cond = data.shape\n\n # Units are experimental observations. Maybe subjects, or neurons\n if unit is None:\n units = np.arange(n_unit)\n unit = \"unit\"\n units = np.repeat(units, n_time * n_cond)\n ylabel = None\n\n # Time forms the xaxis of the plot\n if time is None:\n times = np.arange(n_time)\n else:\n times = np.asarray(time)\n xlabel = None\n if hasattr(time, \"name\"):\n xlabel = time.name\n time = \"time\"\n times = np.tile(np.repeat(times, n_cond), n_unit)\n\n # Conditions split the timeseries plots\n if condition is None:\n conds = range(n_cond)\n legend = False\n if isinstance(color, dict):\n err = \"Must have condition names if using color dict.\"\n raise ValueError(err)\n else:\n conds = np.asarray(condition)\n legend = True and legend\n if hasattr(condition, \"name\"):\n legend_name = condition.name\n else:\n legend_name = None\n condition = \"cond\"\n conds = np.tile(conds, n_unit * n_time)\n\n # Value forms the y value in the plot\n if value is None:\n ylabel = None\n else:\n ylabel = value\n value = \"value\"\n\n # Convert to long-form DataFrame\n data = pd.DataFrame(dict(value=data.ravel(),\n time=times,\n unit=units,\n cond=conds))\n\n # Set up the err_style and ci arguments for the loop below\n if isinstance(err_style, string_types):\n err_style = [err_style]\n elif err_style is None:\n err_style = []\n if not hasattr(ci, \"__iter__\"):\n ci = [ci]\n\n # Set up the color palette\n if color is None:\n current_palette = mpl.rcParams[\"axes.color_cycle\"]\n if len(current_palette) < n_cond:\n colors = color_palette(\"husl\", n_cond)\n else:\n colors = color_palette(n_colors=n_cond)\n elif isinstance(color, dict):\n colors = [color[c] for c in data[condition].unique()]\n else:\n try:\n colors = color_palette(color, n_cond)\n except ValueError:\n color = mpl.colors.colorConverter.to_rgb(color)\n colors = [color] * n_cond\n\n # Do a groupby with condition and plot each trace\n for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):\n\n df_c = df_c.pivot(unit, time, value)\n x = df_c.columns.values.astype(np.float)\n\n # Bootstrap the data for confidence intervals\n boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,\n axis=0, func=estimator)\n cis = [utils.ci(boot_data, v, axis=0) for v in ci]\n central_data = estimator(df_c.values, axis=0)\n\n # Get the color for this condition\n color = colors[c]\n\n # Use subroutines to plot the uncertainty\n for style in err_style:\n\n # Allow for null style (only plot central tendency)\n if style is None:\n continue\n\n # Grab the function from the global environment\n try:\n plot_func = globals()[\"_plot_%s\" % style]\n except KeyError:\n raise ValueError(\"%s is not a valid err_style\" % style)\n\n # Possibly set up to plot each observation in a different color\n if err_palette is not None and \"unit\" in style:\n orig_color = color\n color = color_palette(err_palette, len(df_c.values))\n\n # Pass all parameters to the error plotter as keyword args\n plot_kwargs = dict(ax=ax, x=x, data=df_c.values,\n boot_data=boot_data,\n central_data=central_data,\n color=color, err_kws=err_kws)\n\n # Plot the error representation, possibly for multiple cis\n for ci_i in cis:\n plot_kwargs[\"ci\"] = ci_i\n plot_func(**plot_kwargs)\n\n if err_palette is not None and \"unit\" in style:\n color = orig_color\n\n # Plot the central trace\n kwargs.setdefault(\"marker\", \"\" if interpolate else \"o\")\n ls = kwargs.pop(\"ls\", \"-\" if interpolate else \"\")\n kwargs.setdefault(\"linestyle\", ls)\n label = cond if legend else \"_nolegend_\"\n ax.plot(x, central_data, color=color, label=label, **kwargs)\n\n # Pad the sides of the plot only when not interpolating\n ax.set_xlim(x.min(), x.max())\n x_diff = x[1] - x[0]\n if not interpolate:\n ax.set_xlim(x.min() - x_diff, x.max() + x_diff)\n\n # Add the plot labels\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n if legend:\n ax.legend(loc=0, title=legend_name)\n\n return ax\n\n# Subroutines for tsplot errorbar plotting\n# ----------------------------------------\n\n\ndef _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):\n \"\"\"Plot translucent error bands around the central tendancy.\"\"\"\n low, high = ci\n if \"alpha\" not in err_kws:\n err_kws[\"alpha\"] = 0.2\n ax.fill_between(x, low, high, color=color, **err_kws)\n\n\ndef _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):\n \"\"\"Plot error bars at each data point.\"\"\"\n for x_i, y_i, (low, high) in zip(x, central_data, ci.T):\n ax.plot([x_i, x_i], [low, high], color=color,\n solid_capstyle=\"round\", **err_kws)\n\n\ndef _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):\n \"\"\"Plot 250 traces from bootstrap.\"\"\"\n err_kws.setdefault(\"alpha\", 0.25)\n err_kws.setdefault(\"linewidth\", 0.25)\n if \"lw\" in err_kws:\n err_kws[\"linewidth\"] = err_kws.pop(\"lw\")\n ax.plot(x, boot_data.T, color=color, label=\"_nolegend_\", **err_kws)\n\n\ndef _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):\n \"\"\"Plot a trace for each observation in the original data.\"\"\"\n if isinstance(color, list):\n if \"alpha\" not in err_kws:\n err_kws[\"alpha\"] = .5\n for i, obs in enumerate(data):\n ax.plot(x, obs, color=color[i], label=\"_nolegend_\", **err_kws)\n else:\n if \"alpha\" not in err_kws:\n err_kws[\"alpha\"] = .2\n ax.plot(x, data.T, color=color, label=\"_nolegend_\", **err_kws)\n\n\ndef _plot_unit_points(ax, x, data, color, err_kws, **kwargs):\n \"\"\"Plot each original data point discretely.\"\"\"\n if isinstance(color, list):\n for i, obs in enumerate(data):\n ax.plot(x, obs, \"o\", color=color[i], alpha=0.8, markersize=4,\n label=\"_nolegend_\", **err_kws)\n else:\n ax.plot(x, data.T, \"o\", color=color, alpha=0.5, markersize=4,\n label=\"_nolegend_\", **err_kws)\n\n\ndef _plot_boot_kde(ax, x, boot_data, color, **kwargs):\n \"\"\"Plot the kernal density estimate of the bootstrap distribution.\"\"\"\n kwargs.pop(\"data\")\n _ts_kde(ax, x, boot_data, color, **kwargs)\n\n\ndef _plot_unit_kde(ax, x, data, color, **kwargs):\n \"\"\"Plot the kernal density estimate over the sample.\"\"\"\n _ts_kde(ax, x, data, color, **kwargs)\n\n\ndef _ts_kde(ax, x, data, color, **kwargs):\n \"\"\"Upsample over time and plot a KDE of the bootstrap distribution.\"\"\"\n kde_data = []\n y_min, y_max = data.min(), data.max()\n y_vals = np.linspace(y_min, y_max, 100)\n upsampler = interpolate.interp1d(x, data)\n data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))\n for pt_data in data_upsample.T:\n pt_kde = stats.kde.gaussian_kde(pt_data)\n kde_data.append(pt_kde(y_vals))\n kde_data = np.transpose(kde_data)\n rgb = mpl.colors.ColorConverter().to_rgb(color)\n img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))\n img[:, :, :3] = rgb\n kde_data /= kde_data.max(axis=0)\n kde_data[kde_data > 1] = 1\n img[:, :, 3] = kde_data\n ax.imshow(img, interpolation=\"spline16\", zorder=2,\n extent=(x.min(), x.max(), y_min, y_max),\n aspect=\"auto\", origin=\"lower\")\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":1168,"cells":{"repo_name":{"kind":"string","value":"dllsf/odootest"},"path":{"kind":"string","value":"addons/l10n_be_invoice_bba/invoice.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"12783"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\r\n##############################################################################\r\n#\r\n# OpenERP, Open Source Management Solution\r\n#\r\n# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public License as\r\n# published by the Free Software Foundation, either version 3 of the\r\n# License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with this program. If not, see .\r\n#\r\n##############################################################################\r\n\r\nimport re, time, random\r\nfrom openerp import api\r\nfrom openerp.osv import fields, osv\r\nfrom openerp.tools.translate import _\r\nimport logging\r\n_logger = logging.getLogger(__name__)\r\n\r\n\"\"\"\r\naccount.invoice object:\r\n - Add support for Belgian structured communication\r\n - Rename 'reference' field labels to 'Communication'\r\n\"\"\"\r\n\r\nclass account_invoice(osv.osv):\r\n _inherit = 'account.invoice'\r\n\r\n @api.cr_uid_context\r\n def _get_reference_type(self, cursor, user, context=None):\r\n \"\"\"Add BBA Structured Communication Type and change labels from 'reference' into 'communication' \"\"\"\r\n res = super(account_invoice, self)._get_reference_type(cursor, user,\r\n context=context)\r\n res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')\r\n res.append(('bba', 'BBA Structured Communication'))\r\n #l_logger.warning('reference_type = %s' %res )\r\n return res\r\n\r\n def check_bbacomm(self, val):\r\n supported_chars = '0-9+*/ '\r\n pattern = re.compile('[^' + supported_chars + ']')\r\n if pattern.findall(val or ''):\r\n return False\r\n bbacomm = re.sub('\\D', '', val or '')\r\n if len(bbacomm) == 12:\r\n base = int(bbacomm[:10])\r\n mod = base % 97 or 97\r\n if mod == int(bbacomm[-2:]):\r\n return True\r\n return False\r\n\r\n def _check_communication(self, cr, uid, ids):\r\n for inv in self.browse(cr, uid, ids):\r\n if inv.reference_type == 'bba':\r\n return self.check_bbacomm(inv.reference)\r\n return True\r\n\r\n def onchange_partner_id(self, cr, uid, ids, type, partner_id,\r\n date_invoice=False, payment_term=False,\r\n partner_bank_id=False, company_id=False,\r\n context=None):\r\n result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,\r\n date_invoice, payment_term, partner_bank_id, company_id, context)\r\n# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']\r\n# _logger.warning('partner_id %s' % partner_id)\r\n reference = False\r\n reference_type = 'none'\r\n if partner_id:\r\n if (type == 'out_invoice'):\r\n reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).out_inv_comm_type\r\n if reference_type:\r\n reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context=context)['value']['reference']\r\n res_update = {\r\n 'reference_type': reference_type or 'none',\r\n 'reference': reference,\r\n }\r\n result['value'].update(res_update)\r\n return result\r\n\r\n def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):\r\n partner_obj = self.pool.get('res.partner')\r\n reference = reference or ''\r\n algorithm = False\r\n if partner_id:\r\n algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm\r\n algorithm = algorithm or 'random'\r\n if (type == 'out_invoice'):\r\n if reference_type == 'bba':\r\n if algorithm == 'date':\r\n if not self.check_bbacomm(reference):\r\n doy = time.strftime('%j')\r\n year = time.strftime('%Y')\r\n seq = '001'\r\n seq_ids = self.search(cr, uid,\r\n [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),\r\n ('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')\r\n if seq_ids:\r\n prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])\r\n if prev_seq < 999:\r\n seq = '%03d' % (prev_seq + 1)\r\n else:\r\n raise osv.except_osv(_('Warning!'),\r\n _('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \\\r\n '\\nPlease create manually a unique BBA Structured Communication.'))\r\n bbacomm = doy + year + seq\r\n base = int(bbacomm)\r\n mod = base % 97 or 97\r\n reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)\r\n elif algorithm == 'partner_ref':\r\n if not self.check_bbacomm(reference):\r\n partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref\r\n partner_ref_nr = re.sub('\\D', '', partner_ref or '')\r\n if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):\r\n raise osv.except_osv(_('Warning!'),\r\n _('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \\\r\n '\\nPlease correct the Partner record.'))\r\n else:\r\n partner_ref_nr = partner_ref_nr.ljust(7, '0')\r\n seq = '001'\r\n seq_ids = self.search(cr, uid,\r\n [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),\r\n ('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')\r\n if seq_ids:\r\n prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])\r\n if prev_seq < 999:\r\n seq = '%03d' % (prev_seq + 1)\r\n else:\r\n raise osv.except_osv(_('Warning!'),\r\n _('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \\\r\n '\\nPlease create manually a unique BBA Structured Communication.'))\r\n bbacomm = partner_ref_nr + seq\r\n base = int(bbacomm)\r\n mod = base % 97 or 97\r\n reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)\r\n elif algorithm == 'random':\r\n if not self.check_bbacomm(reference):\r\n base = random.randint(1, 9999999999)\r\n bbacomm = str(base).rjust(10, '0')\r\n base = int(bbacomm)\r\n mod = base % 97 or 97\r\n mod = str(mod).rjust(2, '0')\r\n reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)\r\n else:\r\n raise osv.except_osv(_('Error!'),\r\n _(\"Unsupported Structured Communication Type Algorithm '%s' !\" \\\r\n \"\\nPlease contact your OpenERP support channel.\") % algorithm)\r\n return {'value': {'reference': reference}}\r\n\r\n def create(self, cr, uid, vals, context=None):\r\n reference = vals.get('reference', False)\r\n reference_type = vals.get('reference_type', False)\r\n if vals.get('type') == 'out_invoice' and not reference_type:\r\n # fallback on default communication type for partner\r\n reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type\r\n if reference_type == 'bba':\r\n reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']\r\n vals.update({\r\n 'reference_type': reference_type or 'none',\r\n 'reference': reference,\r\n })\r\n\r\n if reference_type == 'bba':\r\n if not reference:\r\n raise osv.except_osv(_('Warning!'),\r\n _('Empty BBA Structured Communication!' \\\r\n '\\nPlease fill in a unique BBA Structured Communication.'))\r\n if self.check_bbacomm(reference):\r\n reference = re.sub('\\D', '', reference)\r\n vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'\r\n same_ids = self.search(cr, uid,\r\n [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),\r\n ('reference', '=', vals['reference'])])\r\n if same_ids:\r\n raise osv.except_osv(_('Warning!'),\r\n _('The BBA Structured Communication has already been used!' \\\r\n '\\nPlease create manually a unique BBA Structured Communication.'))\r\n return super(account_invoice, self).create(cr, uid, vals, context=context)\r\n\r\n def write(self, cr, uid, ids, vals, context=None):\r\n if isinstance(ids, (int, long)):\r\n ids = [ids]\r\n for inv in self.browse(cr, uid, ids, context):\r\n if vals.has_key('reference_type'):\r\n reference_type = vals['reference_type']\r\n else:\r\n reference_type = inv.reference_type or ''\r\n if reference_type == 'bba':\r\n if vals.has_key('reference'):\r\n bbacomm = vals['reference']\r\n else:\r\n bbacomm = inv.reference or ''\r\n if self.check_bbacomm(bbacomm):\r\n reference = re.sub('\\D', '', bbacomm)\r\n vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'\r\n same_ids = self.search(cr, uid,\r\n [('id', '!=', inv.id), ('type', '=', 'out_invoice'),\r\n ('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])\r\n if same_ids:\r\n raise osv.except_osv(_('Warning!'),\r\n _('The BBA Structured Communication has already been used!' \\\r\n '\\nPlease create manually a unique BBA Structured Communication.'))\r\n return super(account_invoice, self).write(cr, uid, ids, vals, context)\r\n\r\n def copy(self, cr, uid, id, default=None, context=None):\r\n default = default or {}\r\n invoice = self.browse(cr, uid, id, context=context)\r\n if invoice.type in ['out_invoice']:\r\n reference_type = invoice.reference_type or 'none'\r\n default['reference_type'] = reference_type\r\n if reference_type == 'bba':\r\n partner = invoice.partner_id\r\n default['reference'] = self.generate_bbacomm(cr, uid, id,\r\n invoice.type, reference_type,\r\n partner.id, '', context=context)['value']['reference']\r\n return super(account_invoice, self).copy(cr, uid, id, default, context=context)\r\n\r\n _columns = {\r\n 'reference': fields.char('Communication', help=\"The partner reference of this invoice.\"),\r\n 'reference_type': fields.selection(_get_reference_type, 'Communication Type',\r\n required=True),\r\n }\r\n _constraints = [\r\n (_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),\r\n ]\r\n\r\naccount_invoice()\r\n\r\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\r\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":1169,"cells":{"repo_name":{"kind":"string","value":"WillieMaddox/numpy"},"path":{"kind":"string","value":"numpy/linalg/tests/test_regression.py"},"copies":{"kind":"string","value":"78"},"size":{"kind":"string","value":"3097"},"content":{"kind":"string","value":"\"\"\" Test functions for linalg module\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom numpy import linalg, arange, float64, array, dot, transpose\nfrom numpy.testing import (\n TestCase, run_module_suite, assert_equal, assert_array_equal,\n assert_array_almost_equal, assert_array_less\n)\n\n\nrlevel = 1\n\n\nclass TestRegression(TestCase):\n\n def test_eig_build(self, level=rlevel):\n # Ticket #652\n rva = array([1.03221168e+02 + 0.j,\n -1.91843603e+01 + 0.j,\n -6.04004526e-01 + 15.84422474j,\n -6.04004526e-01 - 15.84422474j,\n -1.13692929e+01 + 0.j,\n -6.57612485e-01 + 10.41755503j,\n -6.57612485e-01 - 10.41755503j,\n 1.82126812e+01 + 0.j,\n 1.06011014e+01 + 0.j,\n 7.80732773e+00 + 0.j,\n -7.65390898e-01 + 0.j,\n 1.51971555e-15 + 0.j,\n -1.51308713e-15 + 0.j])\n a = arange(13 * 13, dtype=float64)\n a.shape = (13, 13)\n a = a % 17\n va, ve = linalg.eig(a)\n va.sort()\n rva.sort()\n assert_array_almost_equal(va, rva)\n\n def test_eigh_build(self, level=rlevel):\n # Ticket 662.\n rvals = [68.60568999, 89.57756725, 106.67185574]\n\n cov = array([[77.70273908, 3.51489954, 15.64602427],\n [3.51489954, 88.97013878, -1.07431931],\n [15.64602427, -1.07431931, 98.18223512]])\n\n vals, vecs = linalg.eigh(cov)\n assert_array_almost_equal(vals, rvals)\n\n def test_svd_build(self, level=rlevel):\n # Ticket 627.\n a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])\n m, n = a.shape\n u, s, vh = linalg.svd(a)\n\n b = dot(transpose(u[:, n:]), a)\n\n assert_array_almost_equal(b, np.zeros((2, 2)))\n\n def test_norm_vector_badarg(self):\n # Regression for #786: Froebenius norm for vectors raises\n # TypeError.\n self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')\n\n def test_lapack_endian(self):\n # For bug #1482\n a = array([[5.7998084, -2.1825367],\n [-2.1825367, 9.85910595]], dtype='>f8')\n b = array(a, dtype=' 0.5)\n assert_equal(c, 1)\n assert_equal(np.linalg.matrix_rank(a), 1)\n assert_array_less(1, np.linalg.norm(a, ord=2))\n\n\nif __name__ == '__main__':\n run_module_suite()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":1170,"cells":{"repo_name":{"kind":"string","value":"pcostell/apitools"},"path":{"kind":"string","value":"apitools/base/py/exceptions.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"4111"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Exceptions for generated client libraries.\"\"\"\n\n\nclass Error(Exception):\n\n \"\"\"Base class for all exceptions.\"\"\"\n\n\nclass TypecheckError(Error, TypeError):\n\n \"\"\"An object of an incorrect type is provided.\"\"\"\n\n\nclass NotFoundError(Error):\n\n \"\"\"A specified resource could not be found.\"\"\"\n\n\nclass UserError(Error):\n\n \"\"\"Base class for errors related to user input.\"\"\"\n\n\nclass InvalidDataError(Error):\n\n \"\"\"Base class for any invalid data error.\"\"\"\n\n\nclass CommunicationError(Error):\n\n \"\"\"Any communication error talking to an API server.\"\"\"\n\n\nclass HttpError(CommunicationError):\n\n \"\"\"Error making a request. Soon to be HttpError.\"\"\"\n\n def __init__(self, response, content, url,\n method_config=None, request=None):\n super(HttpError, self).__init__()\n self.response = response\n self.content = content\n self.url = url\n self.method_config = method_config\n self.request = request\n\n def __str__(self):\n content = self.content\n if isinstance(content, bytes):\n content = self.content.decode('ascii', 'replace')\n return 'HttpError accessing <%s>: response: <%s>, content <%s>' % (\n self.url, self.response, content)\n\n @property\n def status_code(self):\n # TODO(craigcitro): Turn this into something better than a\n # KeyError if there is no status.\n return int(self.response['status'])\n\n @classmethod\n def FromResponse(cls, http_response):\n return cls(http_response.info, http_response.content,\n http_response.request_url)\n\n\nclass InvalidUserInputError(InvalidDataError):\n\n \"\"\"User-provided input is invalid.\"\"\"\n\n\nclass InvalidDataFromServerError(InvalidDataError, CommunicationError):\n\n \"\"\"Data received from the server is malformed.\"\"\"\n\n\nclass BatchError(Error):\n\n \"\"\"Error generated while constructing a batch request.\"\"\"\n\n\nclass ConfigurationError(Error):\n\n \"\"\"Base class for configuration errors.\"\"\"\n\n\nclass GeneratedClientError(Error):\n\n \"\"\"The generated client configuration is invalid.\"\"\"\n\n\nclass ConfigurationValueError(UserError):\n\n \"\"\"Some part of the user-specified client configuration is invalid.\"\"\"\n\n\nclass ResourceUnavailableError(Error):\n\n \"\"\"User requested an unavailable resource.\"\"\"\n\n\nclass CredentialsError(Error):\n\n \"\"\"Errors related to invalid credentials.\"\"\"\n\n\nclass TransferError(CommunicationError):\n\n \"\"\"Errors related to transfers.\"\"\"\n\n\nclass TransferRetryError(TransferError):\n\n \"\"\"Retryable errors related to transfers.\"\"\"\n\n\nclass TransferInvalidError(TransferError):\n\n \"\"\"The given transfer is invalid.\"\"\"\n\n\nclass RequestError(CommunicationError):\n\n \"\"\"The request was not successful.\"\"\"\n\n\nclass RetryAfterError(HttpError):\n\n \"\"\"The response contained a retry-after header.\"\"\"\n\n def __init__(self, response, content, url, retry_after):\n super(RetryAfterError, self).__init__(response, content, url)\n self.retry_after = int(retry_after)\n\n @classmethod\n def FromResponse(cls, http_response):\n return cls(http_response.info, http_response.content,\n http_response.request_url, http_response.retry_after)\n\n\nclass BadStatusCodeError(HttpError):\n\n \"\"\"The request completed but returned a bad status code.\"\"\"\n\n\nclass NotYetImplementedError(GeneratedClientError):\n\n \"\"\"This functionality is not yet implemented.\"\"\"\n\n\nclass StreamExhausted(Error):\n\n \"\"\"Attempted to read more bytes from a stream than were available.\"\"\"\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":1171,"cells":{"repo_name":{"kind":"string","value":"akretion/odoo"},"path":{"kind":"string","value":"addons/stock/tests/common.py"},"copies":{"kind":"string","value":"15"},"size":{"kind":"string","value":"5095"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nfrom odoo.tests import common\n\n\nclass TestStockCommon(common.TransactionCase):\n\n def setUp(self):\n super(TestStockCommon, self).setUp()\n\n self.ProductObj = self.env['product.product']\n self.UomObj = self.env['uom.uom']\n self.PartnerObj = self.env['res.partner']\n self.ModelDataObj = self.env['ir.model.data']\n self.StockPackObj = self.env['stock.move.line']\n self.StockQuantObj = self.env['stock.quant']\n self.PickingObj = self.env['stock.picking']\n self.MoveObj = self.env['stock.move']\n self.InvObj = self.env['stock.inventory']\n self.InvLineObj = self.env['stock.inventory.line']\n self.LotObj = self.env['stock.production.lot']\n\n # Model Data\n self.partner_agrolite_id = self.ModelDataObj.xmlid_to_res_id('base.res_partner_2')\n self.partner_delta_id = self.ModelDataObj.xmlid_to_res_id('base.res_partner_4')\n self.picking_type_in = self.ModelDataObj.xmlid_to_res_id('stock.picking_type_in')\n self.picking_type_out = self.ModelDataObj.xmlid_to_res_id('stock.picking_type_out')\n self.supplier_location = self.ModelDataObj.xmlid_to_res_id('stock.stock_location_suppliers')\n self.stock_location = self.ModelDataObj.xmlid_to_res_id('stock.stock_location_stock')\n pack_location = self.env.ref('stock.location_pack_zone')\n pack_location.active = True\n self.pack_location = pack_location.id\n output_location = self.env.ref('stock.stock_location_output')\n output_location.active = True\n self.output_location = output_location.id\n self.customer_location = self.ModelDataObj.xmlid_to_res_id('stock.stock_location_customers')\n self.categ_unit = self.ModelDataObj.xmlid_to_res_id('uom.product_uom_categ_unit')\n self.categ_kgm = self.ModelDataObj.xmlid_to_res_id('uom.product_uom_categ_kgm')\n\n # Product Created A, B, C, D\n self.productA = self.ProductObj.create({'name': 'Product A', 'type': 'product'})\n self.productB = self.ProductObj.create({'name': 'Product B', 'type': 'product'})\n self.productC = self.ProductObj.create({'name': 'Product C', 'type': 'product'})\n self.productD = self.ProductObj.create({'name': 'Product D', 'type': 'product'})\n self.productE = self.ProductObj.create({'name': 'Product E', 'type': 'product'})\n\n # Configure unit of measure.\n self.uom_kg = self.env['uom.uom'].search([('category_id', '=', self.categ_kgm), ('uom_type', '=', 'reference')], limit=1)\n self.uom_kg.write({\n 'name': 'Test-KG',\n 'rounding': 0.000001})\n self.uom_tone = self.UomObj.create({\n 'name': 'Test-Tone',\n 'category_id': self.categ_kgm,\n 'uom_type': 'bigger',\n 'factor_inv': 1000.0,\n 'rounding': 0.001})\n self.uom_gm = self.UomObj.create({\n 'name': 'Test-G',\n 'category_id': self.categ_kgm,\n 'uom_type': 'smaller',\n 'factor': 1000.0,\n 'rounding': 0.001})\n self.uom_mg = self.UomObj.create({\n 'name': 'Test-MG',\n 'category_id': self.categ_kgm,\n 'uom_type': 'smaller',\n 'factor': 100000.0,\n 'rounding': 0.001})\n # Check Unit\n self.uom_unit = self.env['uom.uom'].search([('category_id', '=', self.categ_unit), ('uom_type', '=', 'reference')], limit=1)\n self.uom_unit.write({\n 'name': 'Test-Unit',\n 'rounding': 1.0})\n self.uom_dozen = self.UomObj.create({\n 'name': 'Test-DozenA',\n 'category_id': self.categ_unit,\n 'factor_inv': 12,\n 'uom_type': 'bigger',\n 'rounding': 0.001})\n self.uom_sdozen = self.UomObj.create({\n 'name': 'Test-SDozenA',\n 'category_id': self.categ_unit,\n 'factor_inv': 144,\n 'uom_type': 'bigger',\n 'rounding': 0.001})\n self.uom_sdozen_round = self.UomObj.create({\n 'name': 'Test-SDozenA Round',\n 'category_id': self.categ_unit,\n 'factor_inv': 144,\n 'uom_type': 'bigger',\n 'rounding': 1.0})\n\n # Product for different unit of measure.\n self.DozA = self.ProductObj.create({'name': 'Dozon-A', 'type': 'product', 'uom_id': self.uom_dozen.id, 'uom_po_id': self.uom_dozen.id})\n self.SDozA = self.ProductObj.create({'name': 'SuperDozon-A', 'type': 'product', 'uom_id': self.uom_sdozen.id, 'uom_po_id': self.uom_sdozen.id})\n self.SDozARound = self.ProductObj.create({'name': 'SuperDozenRound-A', 'type': 'product', 'uom_id': self.uom_sdozen_round.id, 'uom_po_id': self.uom_sdozen_round.id})\n self.UnitA = self.ProductObj.create({'name': 'Unit-A', 'type': 'product'})\n self.kgB = self.ProductObj.create({'name': 'kg-B', 'type': 'product', 'uom_id': self.uom_kg.id, 'uom_po_id': self.uom_kg.id})\n self.gB = self.ProductObj.create({'name': 'g-B', 'type': 'product', 'uom_id': self.uom_gm.id, 'uom_po_id': self.uom_gm.id})\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":1172,"cells":{"repo_name":{"kind":"string","value":"boundary/boundary-plugin-aws-elb"},"path":{"kind":"string","value":"boundary_aws_plugin/cloudwatch_plugin.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"4606"},"content":{"kind":"string","value":"from __future__ import (absolute_import, division, print_function, unicode_literals)\nimport logging\nimport datetime\nimport time\n\nfrom . import boundary_plugin\nfrom . import status_store\n\n\"\"\"\nIf getting statistics from CloudWatch fails, we will retry up to this number of times before\ngiving up and aborting the plugin. Use 0 for unlimited retries.\n\"\"\"\nPLUGIN_RETRY_COUNT = 0\n\"\"\"\nIf getting statistics from CloudWatch fails, we will wait this long (in seconds) before retrying.\nThis value must not be greater than 30 seconds, because the Boundary Relay will think we've\ntimed out and terminate us after 30 seconds of inactivity.\n\"\"\"\nPLUGIN_RETRY_DELAY = 5\n\n\nclass CloudwatchPlugin(object):\n def __init__(self, cloudwatch_metrics_type, boundary_metric_prefix, status_store_filename):\n self.cloudwatch_metrics_type = cloudwatch_metrics_type\n self.boundary_metric_prefix = boundary_metric_prefix\n self.status_store_filename = status_store_filename\n\n def get_metric_data_with_retries(self, *args, **kwargs):\n \"\"\"\n Calls the get_metric_data function, taking into account retry configuration.\n \"\"\"\n retry_range = xrange(PLUGIN_RETRY_COUNT) if PLUGIN_RETRY_COUNT > 0 else iter(int, 1)\n for _ in retry_range:\n try:\n return self.cloudwatch_metrics.get_metric_data(*args, **kwargs)\n except Exception as e:\n logging.error(\"Error retrieving CloudWatch data: %s\" % e)\n boundary_plugin.report_alive()\n time.sleep(PLUGIN_RETRY_DELAY)\n boundary_plugin.report_alive()\n\n logging.fatal(\"Max retries exceeded retrieving CloudWatch data\")\n raise Exception(\"Max retries exceeded retrieving CloudWatch data\")\n\n def handle_metrics(self, data, reported_metrics):\n # Data format:\n # (RegionId, EntityName, MetricName) -> [(Timestamp, Value, Statistic), (Timestamp, Value, Statistic), ...]\n for metric_key, metric_list in data.items():\n region_id, entity_name, metric_name = metric_key\n\n for metric_list_item in metric_list:\n # Do not report duplicate or past samples (note: we are comparing tuples here, which\n # amounts to comparing their timestamps).\n if reported_metrics.get(metric_key, (datetime.datetime.min,)) >= metric_list_item:\n continue\n\n metric_timestamp, metric_value, metric_statistic = metric_list_item\n\n boundary_plugin.boundary_report_metric(self.boundary_metric_prefix + metric_name,\n metric_value, entity_name, metric_timestamp)\n reported_metrics[metric_key] = metric_list_item\n\n status_store.save_status_store(self.status_store_filename, reported_metrics)\n\n def main(self):\n settings = boundary_plugin.parse_params()\n reported_metrics = status_store.load_status_store(self.status_store_filename) or dict()\n\n logging.basicConfig(level=logging.ERROR, filename=settings.get('log_file', None))\n reports_log = settings.get('report_log_file', None)\n if reports_log:\n boundary_plugin.log_metrics_to_file(reports_log)\n boundary_plugin.start_keepalive_subprocess()\n\n self.cloudwatch_metrics = self.cloudwatch_metrics_type(settings['access_key_id'], settings['secret_key'])\n\n # Bring us up to date! Get all data since the last time we know we reported valid data\n # (minus 20 minutes as a buffer), and report it now, so that we report data on any time\n # this plugin was down for any reason.\n try:\n earliest_timestamp = max(reported_metrics.values(), key=lambda v: v[0])[0] - datetime.timedelta(minutes=20)\n except ValueError:\n # Probably first run or someone deleted our status store file - just start from now\n logging.error(\"No status store data; starting data collection from now\")\n pass\n else:\n logging.error(\"Starting historical data collection from %s\" % earliest_timestamp)\n data = self.get_metric_data_with_retries(only_latest=False,\n start_time=earliest_timestamp, end_time=datetime.datetime.utcnow())\n self.handle_metrics(data, reported_metrics)\n logging.error(\"Historical data collection complete\")\n\n while True:\n data = self.get_metric_data_with_retries()\n self.handle_metrics(data, reported_metrics)\n boundary_plugin.sleep_interval()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":1173,"cells":{"repo_name":{"kind":"string","value":"ryfeus/lambda-packs"},"path":{"kind":"string","value":"Tensorflow/source/tensorflow/python/keras/wrappers/scikit_learn/__init__.py"},"copies":{"kind":"string","value":"73"},"size":{"kind":"string","value":"1062"},"content":{"kind":"string","value":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras scikit-learn API wrapper.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.keras._impl.keras.wrappers.scikit_learn import KerasClassifier\nfrom tensorflow.python.keras._impl.keras.wrappers.scikit_learn import KerasRegressor\n\ndel absolute_import\ndel division\ndel print_function\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":1174,"cells":{"repo_name":{"kind":"string","value":"Lonchadepavo/EstacionCisma"},"path":{"kind":"string","value":"tools/expand_filedir_paths.py"},"copies":{"kind":"string","value":"166"},"size":{"kind":"string","value":"3839"},"content":{"kind":"string","value":"#!/usr/bin/env python\r\n\r\nimport re, os, sys, fnmatch\r\n\r\n\r\n# Regex pattern to extract the directory path in a #define FILE_DIR\r\nfiledir_pattern = re.compile(r'^#define\\s*FILE_DIR\\s*\"(.*?)\"')\r\n\r\n# Regex pattern to extract any single quoted piece of text. This can also\r\n# match single quoted strings inside of double quotes, which is part of a\r\n# regular text string and should not be replaced. The replacement function\r\n# however will any match that doesn't appear to be a filename so these\r\n# extra matches should not be a problem.\r\nrename_pattern = re.compile(r\"'(.+?)'\")\r\n\r\n# Only filenames matching this pattern will have their resources renamed\r\nsource_pattern = re.compile(r\"^.*?\\.(dm|dmm)$\")\r\n\r\n# Open the .dme file and return a list of all FILE_DIR paths in it\r\ndef read_filedirs(filename):\r\n result = []\r\n dme_file = file(filename, \"rt\")\r\n \r\n # Read each line from the file and check for regex pattern match\r\n for row in dme_file:\r\n match = filedir_pattern.match(row)\r\n if match:\r\n result.append(match.group(1))\r\n\r\n dme_file.close()\r\n return result\r\n\r\n# Search through a list of directories, and build a dictionary which\r\n# maps every file to its full pathname (relative to the .dme file)\r\n# If the same filename appears in more than one directory, the earlier\r\n# directory in the list takes preference.\r\ndef index_files(file_dirs):\r\n result = {}\r\n\r\n # Reverse the directory list so the earlier directories take precedence\r\n # by replacing the previously indexed file of the same name\r\n for directory in reversed(file_dirs):\r\n for name in os.listdir(directory):\r\n # Replace backslash path separators on Windows with forward slash\r\n # Force \"name\" to lowercase when used as a key since BYOND resource\r\n # names are case insensitive, even on Linux.\r\n if name.find(\".\") == -1:\r\n continue\r\n result[name.lower()] = directory.replace('\\\\', '/') + '/' + name\r\n\r\n return result\r\n\r\n# Recursively search for every .dm/.dmm file in the .dme file directory. For\r\n# each file, search it for any resource names in single quotes, and replace\r\n# them with the full path previously found by index_files()\r\ndef rewrite_sources(resources):\r\n # Create a closure for the regex replacement function to capture the\r\n # resources dictionary which can't be passed directly to this function\r\n def replace_func(name):\r\n key = name.group(1).lower()\r\n if key in resources:\r\n replacement = resources[key]\r\n else:\r\n replacement = name.group(1)\r\n return \"'\" + replacement + \"'\"\r\n \r\n # Search recursively for all .dm and .dmm files\r\n for (dirpath, dirs, files) in os.walk(\".\"):\r\n for name in files:\r\n if source_pattern.match(name):\r\n path = dirpath + '/' + name\r\n source_file = file(path, \"rt\")\r\n output_file = file(path + \".tmp\", \"wt\")\r\n\r\n # Read file one line at a time and perform replacement of all\r\n # single quoted resource names with the fullpath to that resource\r\n # file. Write the updated text back out to a temporary file.\r\n for row in source_file:\r\n row = rename_pattern.sub(replace_func, row)\r\n output_file.write(row)\r\n\r\n output_file.close()\r\n source_file.close()\r\n\r\n # Delete original source file and replace with the temporary\r\n # output. On Windows, an atomic rename() operation is not\r\n # possible like it is under POSIX.\r\n os.remove(path)\r\n os.rename(path + \".tmp\", path)\r\n\r\ndirs = read_filedirs(\"tgstation.dme\");\r\nresources = index_files(dirs)\r\nrewrite_sources(resources)\r\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":1175,"cells":{"repo_name":{"kind":"string","value":"fishscene/streamlink"},"path":{"kind":"string","value":"src/streamlink/plugins/ustreamtv.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"20932"},"content":{"kind":"string","value":"import re\n\nfrom collections import namedtuple\nfrom functools import partial\nfrom random import randint\nfrom time import sleep\n\nfrom streamlink.compat import urlparse, urljoin, range\nfrom streamlink.exceptions import StreamError, PluginError, NoStreamsError\nfrom streamlink.plugin import Plugin, PluginOptions\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import RTMPStream, HLSStream, HTTPStream, Stream\nfrom streamlink.stream.flvconcat import FLVTagConcat\nfrom streamlink.stream.segmented import (\n SegmentedStreamReader, SegmentedStreamWriter, SegmentedStreamWorker\n)\n\ntry:\n import librtmp\n HAS_LIBRTMP = True\nexcept ImportError:\n HAS_LIBRTMP = False\n\n_url_re = re.compile(\"\"\"\n http(s)?://(www\\.)?ustream.tv\n (?:\n (/embed/|/channel/id/)(?P\\d+)\n )?\n (?:\n /recorded/(?P\\d+)\n )?\n\"\"\", re.VERBOSE)\n_channel_id_re = re.compile(\"\\\"channelId\\\":(\\d+)\")\n\nHLS_PLAYLIST_URL = (\n \"http://iphone-streaming.ustream.tv\"\n \"/uhls/{0}/streams/live/iphone/playlist.m3u8\"\n)\nRECORDED_URL = \"http://tcdn.ustream.tv/video/{0}\"\nRTMP_URL = \"rtmp://r{0}-1-{1}-channel-live.ums.ustream.tv:1935/ustream\"\nSWF_URL = \"http://static-cdn1.ustream.tv/swf/live/viewer.rsl:505.swf\"\n\n_module_info_schema = validate.Schema(\n list,\n validate.length(1),\n validate.get(0),\n dict\n)\n_amf3_array = validate.Schema(\n validate.any(\n validate.all(\n {int: object},\n validate.transform(lambda a: list(a.values())),\n ),\n list\n )\n)\n_recorded_schema = validate.Schema({\n validate.optional(\"stream\"): validate.all(\n _amf3_array,\n [{\n \"name\": validate.text,\n \"streams\": validate.all(\n _amf3_array,\n [{\n \"streamName\": validate.text,\n \"bitrate\": float,\n }],\n ),\n validate.optional(\"url\"): validate.text,\n }]\n )\n})\n_stream_schema = validate.Schema(\n validate.any({\n \"name\": validate.text,\n \"url\": validate.text,\n \"streams\": validate.all(\n _amf3_array,\n [{\n \"chunkId\": validate.any(int, float),\n \"chunkRange\": {validate.text: validate.text},\n \"chunkTime\": validate.any(int, float),\n \"offset\": validate.any(int, float),\n \"offsetInMs\": validate.any(int, float),\n \"streamName\": validate.text,\n validate.optional(\"bitrate\"): validate.any(int, float),\n validate.optional(\"height\"): validate.any(int, float),\n validate.optional(\"description\"): validate.text,\n validate.optional(\"isTranscoded\"): bool\n }],\n )\n },\n {\n \"name\": validate.text,\n \"varnishUrl\": validate.text\n })\n)\n_channel_schema = validate.Schema({\n validate.optional(\"stream\"): validate.any(\n validate.all(\n _amf3_array,\n [_stream_schema],\n ),\n \"offline\"\n )\n})\n\nChunk = namedtuple(\"Chunk\", \"num url offset\")\n\n\nif HAS_LIBRTMP:\n from io import BytesIO\n from time import time\n\n from librtmp.rtmp import RTMPTimeoutError, PACKET_TYPE_INVOKE\n from streamlink.packages.flashmedia.types import AMF0Value\n\n def decode_amf(body):\n def generator():\n fd = BytesIO(body)\n while True:\n try:\n yield AMF0Value.read(fd)\n except IOError:\n break\n\n return list(generator())\n\n class FlashmediaRTMP(librtmp.RTMP):\n \"\"\"RTMP connection using python-flashmedia's AMF decoder.\n\n TODO: Move to python-librtmp instead.\n \"\"\"\n\n def process_packets(self, transaction_id=None, invoked_method=None,\n timeout=None):\n start = time()\n\n while self.connected and transaction_id not in self._invoke_results:\n if timeout and (time() - start) >= timeout:\n raise RTMPTimeoutError(\"Timeout\")\n\n packet = self.read_packet()\n if packet.type == PACKET_TYPE_INVOKE:\n try:\n decoded = decode_amf(packet.body)\n except IOError:\n continue\n\n try:\n method, transaction_id_, obj = decoded[:3]\n args = decoded[3:]\n except ValueError:\n continue\n\n if method == \"_result\":\n if len(args) > 0:\n result = args[0]\n else:\n result = None\n\n self._invoke_results[transaction_id_] = result\n else:\n handler = self._invoke_handlers.get(method)\n if handler:\n res = handler(*args)\n if res is not None:\n self.call(\"_result\", res,\n transaction_id=transaction_id_)\n\n if method == invoked_method:\n self._invoke_args[invoked_method] = args\n break\n\n if transaction_id_ == 1.0:\n self._connect_result = packet\n else:\n self.handle_packet(packet)\n else:\n self.handle_packet(packet)\n\n if transaction_id:\n result = self._invoke_results.pop(transaction_id, None)\n\n return result\n\n if invoked_method:\n args = self._invoke_args.pop(invoked_method, None)\n\n return args\n\n\ndef create_ums_connection(app, media_id, page_url, password,\n exception=PluginError):\n url = RTMP_URL.format(randint(0, 0xffffff), media_id)\n params = {\n \"application\": app,\n \"media\": str(media_id),\n \"password\": password\n }\n conn = FlashmediaRTMP(url,\n swfurl=SWF_URL,\n pageurl=page_url,\n connect_data=params)\n\n try:\n conn.connect()\n except librtmp.RTMPError:\n raise exception(\"Failed to connect to RTMP server\")\n\n return conn\n\n\nclass UHSStreamWriter(SegmentedStreamWriter):\n def __init__(self, *args, **kwargs):\n SegmentedStreamWriter.__init__(self, *args, **kwargs)\n\n self.concater = FLVTagConcat(flatten_timestamps=True,\n sync_headers=True)\n\n def fetch(self, chunk, retries=None):\n if not retries or self.closed:\n return\n\n try:\n params = {}\n if chunk.offset:\n params[\"start\"] = chunk.offset\n\n return http.get(chunk.url,\n timeout=self.timeout,\n params=params,\n exception=StreamError)\n except StreamError as err:\n self.logger.error(\"Failed to open chunk {0}: {1}\", chunk.num, err)\n return self.fetch(chunk, retries - 1)\n\n def write(self, chunk, res, chunk_size=8192):\n try:\n for data in self.concater.iter_chunks(buf=res.content,\n skip_header=not chunk.offset):\n self.reader.buffer.write(data)\n\n if self.closed:\n break\n else:\n self.logger.debug(\"Download of chunk {0} complete\", chunk.num)\n except IOError as err:\n self.logger.error(\"Failed to read chunk {0}: {1}\", chunk.num, err)\n\n\nclass UHSStreamWorker(SegmentedStreamWorker):\n def __init__(self, *args, **kwargs):\n SegmentedStreamWorker.__init__(self, *args, **kwargs)\n\n self.chunk_ranges = {}\n self.chunk_id = None\n self.chunk_id_max = None\n self.chunks = []\n self.filename_format = \"\"\n self.module_info_reload_time = 2\n self.process_module_info()\n\n def fetch_module_info(self):\n self.logger.debug(\"Fetching module info\")\n conn = create_ums_connection(\"channel\",\n self.stream.channel_id,\n self.stream.page_url,\n self.stream.password,\n exception=StreamError)\n\n try:\n result = conn.process_packets(invoked_method=\"moduleInfo\",\n timeout=10)\n except (IOError, librtmp.RTMPError) as err:\n raise StreamError(\"Failed to get module info: {0}\".format(err))\n finally:\n conn.close()\n\n result = _module_info_schema.validate(result)\n return _channel_schema.validate(result, \"module info\")\n\n def process_module_info(self):\n if self.closed:\n return\n\n try:\n result = self.fetch_module_info()\n except PluginError as err:\n self.logger.error(\"{0}\", err)\n return\n\n providers = result.get(\"stream\")\n if not providers or providers == \"offline\":\n self.logger.debug(\"Stream went offline\")\n self.close()\n return\n\n for provider in providers:\n if provider.get(\"name\") == self.stream.provider:\n break\n else:\n return\n\n try:\n stream = provider[\"streams\"][self.stream.stream_index]\n except IndexError:\n self.logger.error(\"Stream index not in result\")\n return\n\n filename_format = stream[\"streamName\"].replace(\"%\", \"%s\")\n filename_format = urljoin(provider[\"url\"], filename_format)\n\n self.filename_format = filename_format\n self.update_chunk_info(stream)\n\n def update_chunk_info(self, result):\n chunk_range = result[\"chunkRange\"]\n if not chunk_range:\n return\n\n chunk_id = int(result[\"chunkId\"])\n chunk_offset = int(result[\"offset\"])\n chunk_range = dict(map(partial(map, int), chunk_range.items()))\n\n self.chunk_ranges.update(chunk_range)\n self.chunk_id_min = sorted(chunk_range)[0]\n self.chunk_id_max = int(result[\"chunkId\"])\n self.chunks = [Chunk(i, self.format_chunk_url(i),\n not self.chunk_id and i == chunk_id and chunk_offset)\n for i in range(self.chunk_id_min, self.chunk_id_max + 1)]\n\n if self.chunk_id is None and self.chunks:\n self.chunk_id = chunk_id\n\n def format_chunk_url(self, chunk_id):\n chunk_hash = \"\"\n for chunk_start in sorted(self.chunk_ranges):\n if chunk_id >= chunk_start:\n chunk_hash = self.chunk_ranges[chunk_start]\n\n return self.filename_format % (chunk_id, chunk_hash)\n\n def valid_chunk(self, chunk):\n return self.chunk_id and chunk.num >= self.chunk_id\n\n def iter_segments(self):\n while not self.closed:\n for chunk in filter(self.valid_chunk, self.chunks):\n self.logger.debug(\"Adding chunk {0} to queue\", chunk.num)\n yield chunk\n\n # End of stream\n if self.closed:\n return\n\n self.chunk_id = chunk.num + 1\n\n if self.wait(self.module_info_reload_time):\n try:\n self.process_module_info()\n except StreamError as err:\n self.logger.warning(\"Failed to process module info: {0}\", err)\n\n\nclass UHSStreamReader(SegmentedStreamReader):\n __worker__ = UHSStreamWorker\n __writer__ = UHSStreamWriter\n\n def __init__(self, stream, *args, **kwargs):\n self.logger = stream.session.logger.new_module(\"stream.uhs\")\n\n SegmentedStreamReader.__init__(self, stream, *args, **kwargs)\n\n\nclass UHSStream(Stream):\n __shortname__ = \"uhs\"\n\n def __init__(self, session, channel_id, page_url, provider,\n stream_index, password=\"\"):\n Stream.__init__(self, session)\n\n self.channel_id = channel_id\n self.page_url = page_url\n self.provider = provider\n self.stream_index = stream_index\n self.password = password\n\n def __repr__(self):\n return \"\".format(\n self.channel_id, self.page_url, self.provider,\n self.stream_index, self.password\n )\n\n def __json__(self):\n json = Stream.__json__(self)\n json.update({\n \"channel_id\": self.channel_id,\n \"page_url\": self.page_url,\n \"provider\": self.provider,\n \"stream_index\": self.stream_index,\n \"password\": self.password\n })\n return json\n\n def open(self):\n reader = UHSStreamReader(self)\n reader.open()\n\n return reader\n\n\nclass UStreamTV(Plugin):\n options = PluginOptions({\n \"password\": \"\"\n })\n\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @classmethod\n def stream_weight(cls, stream):\n match = re.match(\"mobile_(\\w+)\", stream)\n if match:\n weight, group = Plugin.stream_weight(match.group(1))\n weight -= 1\n group = \"mobile_ustream\"\n elif stream == \"recorded\":\n weight, group = 720, \"ustream\"\n else:\n weight, group = Plugin.stream_weight(stream)\n\n return weight, group\n\n def _get_channel_id(self):\n res = http.get(self.url)\n match = _channel_id_re.search(res.text)\n if match:\n return int(match.group(1))\n\n def _get_hls_streams(self, channel_id, wait_for_transcode=False):\n # HLS streams are created on demand, so we may have to wait\n # for a transcode to be started.\n attempts = wait_for_transcode and 10 or 1\n playlist_url = HLS_PLAYLIST_URL.format(channel_id)\n streams = {}\n while attempts and not streams:\n try:\n streams = HLSStream.parse_variant_playlist(self.session,\n playlist_url,\n nameprefix=\"mobile_\")\n except IOError:\n # Channel is probably offline\n break\n\n attempts -= 1\n sleep(3)\n\n return streams\n\n def _create_rtmp_stream(self, cdn, stream_name):\n parsed = urlparse(cdn)\n params = {\n \"rtmp\": cdn,\n \"app\": parsed.path[1:],\n \"playpath\": stream_name,\n \"pageUrl\": self.url,\n \"swfUrl\": SWF_URL,\n \"live\": True\n }\n\n return RTMPStream(self.session, params)\n\n def _get_module_info(self, app, media_id, password=\"\", schema=None):\n self.logger.debug(\"Waiting for moduleInfo invoke\")\n conn = create_ums_connection(app, media_id, self.url, password)\n\n attempts = 3\n while conn.connected and attempts:\n try:\n result = conn.process_packets(invoked_method=\"moduleInfo\",\n timeout=10)\n except (IOError, librtmp.RTMPError) as err:\n raise PluginError(\"Failed to get stream info: {0}\".format(err))\n\n try:\n result = _module_info_schema.validate(result)\n break\n except PluginError:\n attempts -= 1\n\n conn.close()\n\n if schema:\n result = schema.validate(result)\n\n return result\n\n def _get_desktop_streams(self, channel_id):\n password = self.options.get(\"password\")\n channel = self._get_module_info(\"channel\", channel_id, password,\n schema=_channel_schema)\n\n if not isinstance(channel.get(\"stream\"), list):\n raise NoStreamsError(self.url)\n\n streams = {}\n for provider in channel[\"stream\"]:\n if provider[\"name\"] == u\"uhs_akamai\": # not heavily tested, but got a stream working\n continue\n provider_url = provider[\"url\"]\n provider_name = provider[\"name\"]\n for stream_index, stream_info in enumerate(provider[\"streams\"]):\n stream = None\n stream_height = int(stream_info.get(\"height\", 0))\n stream_name = stream_info.get(\"description\")\n if not stream_name:\n if stream_height > 0:\n if not stream_info.get(\"isTranscoded\"):\n stream_name = \"{0}p+\".format(stream_height)\n else:\n stream_name = \"{0}p\".format(stream_height)\n else:\n stream_name = \"live\"\n\n if stream_name in streams:\n provider_name_clean = provider_name.replace(\"uhs_\", \"\")\n stream_name += \"_alt_{0}\".format(provider_name_clean)\n\n if provider_name.startswith(\"uhs_\"):\n stream = UHSStream(self.session, channel_id,\n self.url, provider_name,\n stream_index, password)\n elif provider_url.startswith(\"rtmp\"):\n playpath = stream_info[\"streamName\"]\n stream = self._create_rtmp_stream(provider_url,\n playpath)\n\n if stream:\n streams[stream_name] = stream\n\n return streams\n\n def _get_live_streams(self, channel_id):\n has_desktop_streams = False\n if HAS_LIBRTMP:\n try:\n streams = self._get_desktop_streams(channel_id)\n # TODO: Replace with \"yield from\" when dropping Python 2.\n for stream in streams.items():\n has_desktop_streams = True\n yield stream\n except PluginError as err:\n self.logger.error(\"Unable to fetch desktop streams: {0}\", err)\n except NoStreamsError:\n pass\n else:\n self.logger.warning(\n \"python-librtmp is not installed, but is needed to access \"\n \"the desktop streams\"\n )\n\n try:\n streams = self._get_hls_streams(channel_id,\n wait_for_transcode=not has_desktop_streams)\n\n # TODO: Replace with \"yield from\" when dropping Python 2.\n for stream in streams.items():\n yield stream\n except PluginError as err:\n self.logger.error(\"Unable to fetch mobile streams: {0}\", err)\n except NoStreamsError:\n pass\n\n def _get_recorded_streams(self, video_id):\n if HAS_LIBRTMP:\n recording = self._get_module_info(\"recorded\", video_id,\n schema=_recorded_schema)\n\n if not isinstance(recording.get(\"stream\"), list):\n return\n\n for provider in recording[\"stream\"]:\n base_url = provider.get(\"url\")\n for stream_info in provider[\"streams\"]:\n bitrate = int(stream_info.get(\"bitrate\", 0))\n stream_name = (bitrate > 0 and \"{0}k\".format(bitrate) or\n \"recorded\")\n\n url = stream_info[\"streamName\"]\n if base_url:\n url = base_url + url\n\n if url.startswith(\"http\"):\n yield stream_name, HTTPStream(self.session, url)\n elif url.startswith(\"rtmp\"):\n params = dict(rtmp=url, pageUrl=self.url)\n yield stream_name, RTMPStream(self.session, params)\n\n else:\n self.logger.warning(\n \"The proper API could not be used without python-librtmp \"\n \"installed. Stream URL is not guaranteed to be valid\"\n )\n\n url = RECORDED_URL.format(video_id)\n random_hash = \"{0:02x}{1:02x}\".format(randint(0, 255),\n randint(0, 255))\n params = dict(hash=random_hash)\n stream = HTTPStream(self.session, url, params=params)\n yield \"recorded\", stream\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n\n video_id = match.group(\"video_id\")\n if video_id:\n return self._get_recorded_streams(video_id)\n\n channel_id = match.group(\"channel_id\") or self._get_channel_id()\n if channel_id:\n return self._get_live_streams(channel_id)\n\n__plugin__ = UStreamTV\n"},"license":{"kind":"string","value":"bsd-2-clause"}}},{"rowIdx":1176,"cells":{"repo_name":{"kind":"string","value":"khara914/cf-phpbuildpack"},"path":{"kind":"string","value":"lib/build_pack_utils/downloads.py"},"copies":{"kind":"string","value":"15"},"size":{"kind":"string","value":"4096"},"content":{"kind":"string","value":"import os\nimport urllib2\nimport re\nimport logging\nfrom subprocess import Popen\nfrom subprocess import PIPE\n\n\nclass Downloader(object):\n\n def __init__(self, config):\n self._ctx = config\n self._log = logging.getLogger('downloads')\n self._init_proxy()\n\n def _init_proxy(self):\n handlers = {}\n for key in self._ctx.keys():\n if key.lower().endswith('_proxy'):\n handlers[key.split('_')[0]] = self._ctx[key]\n self._log.debug('Loaded proxy handlers [%s]', handlers)\n openers = []\n if handlers:\n openers.append(urllib2.ProxyHandler(handlers))\n for handler in handlers.values():\n if '@' in handler:\n openers.append(urllib2.ProxyBasicAuthHandler())\n opener = urllib2.build_opener(*openers)\n urllib2.install_opener(opener)\n\n def download(self, url, toFile):\n path_to_download_executable = os.path.join(\n self._ctx['BP_DIR'],\n 'compile-extensions',\n 'bin',\n 'download_dependency')\n\n command_arguments = [\n path_to_download_executable,\n url,\n toFile]\n\n process = Popen(command_arguments, stdout=PIPE)\n exit_code = process.wait()\n translated_uri = process.stdout.read().rstrip()\n\n if exit_code == 0:\n print \"Downloaded [%s] to [%s]\" % (translated_uri, toFile)\n elif exit_code == 1:\n raise RuntimeError(\"Could not download dependency: %s\" % url)\n elif exit_code == 3:\n raise RuntimeError(\"MD5 of downloaded dependency does not match expected value\")\n\n def custom_extension_download(self, url, toFile):\n res = urllib2.urlopen(url)\n with open(toFile, 'w') as f:\n f.write(res.read())\n print 'Downloaded [%s] to [%s]' % (url, toFile)\n self._log.info('Downloaded [%s] to [%s]', url, toFile)\n\n def download_direct(self, url):\n buf = urllib2.urlopen(url).read()\n self._log.info('Downloaded [%s] to memory', url)\n self._log.debug(\"Downloaded [%s] [%s]\", url, buf)\n return buf\n\n\nclass CurlDownloader(object):\n\n def __init__(self, config):\n self._ctx = config\n self._status_pattern = re.compile(r'^(.*)$',\n re.DOTALL)\n self._log = logging.getLogger('downloads')\n\n def download(self, url, toFile):\n cmd = [\"curl\", \"-s\",\n \"-o\", toFile,\n \"-w\", '%{http_code}']\n for key in self._ctx.keys():\n if key.lower().endswith('_proxy'):\n cmd.extend(['-x', self._ctx[key]])\n cmd.append(url)\n self._log.debug(\"Running [%s]\", cmd)\n proc = Popen(cmd, stdout=PIPE)\n output, unused_err = proc.communicate()\n proc.poll()\n self._log.debug(\"Curl returned [%s]\", output)\n if output and \\\n (output.startswith('4') or\n output.startswith('5')):\n raise RuntimeError(\"curl says [%s]\" % output)\n print 'Downloaded [%s] to [%s]' % (url, toFile)\n self._log.info('Downloaded [%s] to [%s]', url, toFile)\n\n def download_direct(self, url):\n cmd = [\"curl\", \"-s\",\n \"-w\", '']\n for key in self._ctx.keys():\n if key.lower().endswith('_proxy'):\n cmd.extend(['-x', self._ctx[key]])\n cmd.append(url)\n self._log.debug(\"Running [%s]\", cmd)\n proc = Popen(cmd, stdout=PIPE)\n output, unused_err = proc.communicate()\n proc.poll()\n m = self._status_pattern.match(output)\n if m:\n resp = m.group(1)\n code = m.group(2)\n self._log.debug(\"Curl returned [%s]\", code)\n if (code.startswith('4') or code.startswith('5')):\n raise RuntimeError(\"curl says [%s]\" % output)\n self._log.info('Downloaded [%s] to memory', url)\n self._log.debug('Downloaded [%s] [%s]', url, resp)\n return resp\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":1177,"cells":{"repo_name":{"kind":"string","value":"SelenaProject/selena"},"path":{"kind":"string","value":"app/core/modules/weather/weather.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5028"},"content":{"kind":"string","value":"# !/usr/bin/env python3\n\nimport threading\nimport time\nimport urllib.request\nimport json\n\nfrom .. import modulebase\n\nweather_check_interval = 60 # check every minute\ncity = 'Kanata,ON'\ncur_weather_url = ('http://api.openweathermap.org/data/2.5/weather?q=%s&units=metric') % (city)\nforecast_url = ('http://api.openweathermap.org/data/2.5/forecast?q=%s&units=metric') % (city)\n\nclass weather(modulebase.ModuleBase):\n data = None\n encode = lambda x : json.dumps(x).encode('utf-8')\n\n def __init__(self) :\n weather.data = WeatherData()\n\n def deinit(self) :\n pass\n\n def GET_temperature(self):\n data = {\n 'temp' : weather.data.cur_temp()\n }\n return weather.encode(data)\n\n def GET_current(self) :\n wd = weather.data\n data = {\n 'city' : city,\n 'temp' : wd.cur_temp(),\n 'weather' : wd.cur_weather(),\n 'humidity' : wd.cur_humidity(),\n 'clouds' : wd.cur_clouds(),\n 'timestamp' : wd.timestamp()\n }\n return weather.encode(data)\n\n def GET_forecast(self) :\n data = weather.data.forecast()\n return weather.encode(data)\n\n def POST_test(self) :\n return \"Good!\"\n\n\nclass WeatherData :\n def __init__(self) :\n self.__cur_temp = -1\n self.__humidity = -1\n self.__clouds = -1\n self.__cur_weather = {}\n self.__forecast = []\n self.__timestamp = 0\n\n self.__lock = threading.Lock()\n self.__start_checker()\n\n\n '''\n Public getters\n '''\n\n def cur_temp(self) :\n with self.__lock :\n return self.__cur_temp\n\n def cur_weather(self) :\n with self.__lock :\n return self.__cur_weather\n\n def cur_humidity(self) :\n with self.__lock :\n return self.__humidity\n\n def cur_clouds(self) :\n with self.__lock :\n return self.__clouds\n\n def forecast(self) :\n with self.__lock :\n return self.__forecast\n\n def timestamp(self) :\n with self.__lock :\n return self.__timestamp\n\n '''\n Private setters\n '''\n\n def __set_cur_temp(self, temp) :\n with self.__lock :\n self.__cur_temp = temp\n\n def __set_cur_weather(self, weather_id, weather_descr) :\n with self.__lock :\n self.__cur_weather['id'] = weather_id\n self.__cur_weather['descr'] = weather_descr\n\n def __set_cur_humidity(self, hum) :\n with self.__lock :\n self.__humidity = hum\n\n def __set_cur_clouds(self, clouds) :\n with self.__lock :\n self.__clouds = clouds\n\n def __set_forecast(self, forecast) :\n with self.__lock :\n self.__forecast = forecast\n\n def __set_timestamp(self, timestamp) :\n with self.__lock :\n self.__timestamp = timestamp\n\n '''\n Threading\n '''\n\n def __start_checker(self) :\n print('Starting weather checker...')\n self.__checker = threading.Thread(target=self.__check_weather)\n self.__checker.daemon = True\n self.__checker.start()\n\n def __check_weather(self) :\n while True :\n print('Checking weather...')\n response = urllib.request.urlopen( urllib.request.Request(url=cur_weather_url) )\n json_obj = json.loads(response.read().decode('utf-8'))\n print (str(json_obj))\n\n self.__set_timestamp(int(time.time()))\n\n main = json_obj.get('main', {})\n temp = main.get('temp', -1)\n hum = main.get('humidity', -1)\n self.__set_cur_temp(temp)\n self.__set_cur_humidity(hum)\n\n weather = json_obj.get('weather', [])\n if len(weather) > 0 :\n wthr_id = weather[0].get('id', 0)\n wthr_descr = weather[0].get('main', '')\n self.__set_cur_weather(wthr_id, wthr_descr)\n\n clouds = json_obj.get('clouds', {}).get('all', -1)\n self.__set_cur_clouds(clouds)\n\n # get forecast\n response = urllib.request.urlopen( urllib.request.Request(url=forecast_url) )\n json_obj = json.loads(response.read().decode('utf-8'))\n\n # extract data\n data_list = json_obj.get('list', [])\n fc_data = []\n\n for list_item in data_list[:8] :\n fc_item = {}\n fc_item['timestamp'] = list_item.get('dt', 0)\n\n fc_main = list_item.get('main', {})\n fc_item['temp'] = fc_main.get('temp', -1)\n fc_item['humidity'] = fc_main.get('humidity', -1)\n\n fc_weather = list_item.get('weather', [])\n fc_item['weather'] = {\n 'id' : fc_weather[0].get('id', 0),\n 'descr' : fc_weather[0].get('main', '')\n } if len(fc_weather) > 0 else { 'id' : 0, 'descr': '' }\n\n fc_data.append(fc_item)\n\n self.__set_forecast(fc_data)\n\n time.sleep(weather_check_interval)\n\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":1178,"cells":{"repo_name":{"kind":"string","value":"sunsrising/xnhb"},"path":{"kind":"string","value":"contrib/devtools/fix-copyright-headers.py"},"copies":{"kind":"string","value":"80"},"size":{"kind":"string","value":"1348"},"content":{"kind":"string","value":"#!/usr/bin/env python\n'''\nRun this script to update all the copyright headers of files\nthat were changed this year.\n\nFor example:\n\n// Copyright (c) 2009-2012 The Bitcoin Core developers\n\nit will change it to\n\n// Copyright (c) 2009-2015 The Bitcoin Core developers\n'''\nimport os\nimport time\nimport re\n\nyear = time.gmtime()[0]\nCMD_GIT_DATE = 'git log --format=@%%at -1 %s | date +\"%%Y\" -u -f -'\nCMD_REGEX= \"perl -pi -e 's/(20\\d\\d)(?:-20\\d\\d)? The Bitcoin/$1-%s The Bitcoin/' %s\"\nREGEX_CURRENT= re.compile(\"%s The Bitcoin\" % year)\nCMD_LIST_FILES= \"find %s | grep %s\"\n\nFOLDERS = [\"./qa\", \"./src\"]\nEXTENSIONS = [\".cpp\",\".h\", \".py\"]\n\ndef get_git_date(file_path):\n r = os.popen(CMD_GIT_DATE % file_path)\n for l in r:\n # Result is one line, so just return\n return l.replace(\"\\n\",\"\")\n return \"\"\n\nn=1\nfor folder in FOLDERS:\n for extension in EXTENSIONS:\n for file_path in os.popen(CMD_LIST_FILES % (folder, extension)):\n file_path = os.getcwd() + file_path[1:-1]\n if file_path.endswith(extension):\n git_date = get_git_date(file_path)\n if str(year) == git_date:\n # Only update if current year is not found\n if REGEX_CURRENT.search(open(file_path, \"r\").read()) is None:\n print n,\"Last git edit\", git_date, \"-\", file_path\n os.popen(CMD_REGEX % (year,file_path))\n n = n + 1\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":1179,"cells":{"repo_name":{"kind":"string","value":"malexzx/grpc"},"path":{"kind":"string","value":"examples/python/multiplex/helloworld_pb2.py"},"copies":{"kind":"string","value":"29"},"size":{"kind":"string","value":"6763"},"content":{"kind":"string","value":"# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: helloworld.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='helloworld.proto',\n package='helloworld',\n syntax='proto3',\n serialized_pb=_b('\\n\\x10helloworld.proto\\x12\\nhelloworld\\\"\\x1c\\n\\x0cHelloRequest\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\\"\\x1d\\n\\nHelloReply\\x12\\x0f\\n\\x07message\\x18\\x01 \\x01(\\t2I\\n\\x07Greeter\\x12>\\n\\x08SayHello\\x12\\x18.helloworld.HelloRequest\\x1a\\x16.helloworld.HelloReply\\\"\\x00\\x42\\x36\\n\\x1bio.grpc.examples.helloworldB\\x0fHelloWorldProtoP\\x01\\xa2\\x02\\x03HLWb\\x06proto3')\n)\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\n\n\n\n_HELLOREQUEST = _descriptor.Descriptor(\n name='HelloRequest',\n full_name='helloworld.HelloRequest',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='name', full_name='helloworld.HelloRequest.name', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=32,\n serialized_end=60,\n)\n\n\n_HELLOREPLY = _descriptor.Descriptor(\n name='HelloReply',\n full_name='helloworld.HelloReply',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='message', full_name='helloworld.HelloReply.message', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=62,\n serialized_end=91,\n)\n\nDESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST\nDESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY\n\nHelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(\n DESCRIPTOR = _HELLOREQUEST,\n __module__ = 'helloworld_pb2'\n # @@protoc_insertion_point(class_scope:helloworld.HelloRequest)\n ))\n_sym_db.RegisterMessage(HelloRequest)\n\nHelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(\n DESCRIPTOR = _HELLOREPLY,\n __module__ = 'helloworld_pb2'\n # @@protoc_insertion_point(class_scope:helloworld.HelloReply)\n ))\n_sym_db.RegisterMessage(HelloReply)\n\n\nDESCRIPTOR.has_options = True\nDESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\\n\\033io.grpc.examples.helloworldB\\017HelloWorldProtoP\\001\\242\\002\\003HLW'))\nimport grpc\nfrom grpc.beta import implementations as beta_implementations\nfrom grpc.beta import interfaces as beta_interfaces\nfrom grpc.framework.common import cardinality\nfrom grpc.framework.interfaces.face import utilities as face_utilities\n\n\nclass GreeterStub(object):\n \"\"\"The greeting service definition.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.SayHello = channel.unary_unary(\n '/helloworld.Greeter/SayHello',\n request_serializer=HelloRequest.SerializeToString,\n response_deserializer=HelloReply.FromString,\n )\n\n\nclass GreeterServicer(object):\n \"\"\"The greeting service definition.\n \"\"\"\n\n def SayHello(self, request, context):\n \"\"\"Sends a greeting\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_GreeterServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'SayHello': grpc.unary_unary_rpc_method_handler(\n servicer.SayHello,\n request_deserializer=HelloRequest.FromString,\n response_serializer=HelloReply.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'helloworld.Greeter', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\nclass BetaGreeterServicer(object):\n \"\"\"The greeting service definition.\n \"\"\"\n def SayHello(self, request, context):\n \"\"\"Sends a greeting\n \"\"\"\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n\n\nclass BetaGreeterStub(object):\n \"\"\"The greeting service definition.\n \"\"\"\n def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n \"\"\"Sends a greeting\n \"\"\"\n raise NotImplementedError()\n SayHello.future = None\n\n\ndef beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):\n request_deserializers = {\n ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString,\n }\n response_serializers = {\n ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString,\n }\n method_implementations = {\n ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),\n }\n server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)\n return beta_implementations.server(method_implementations, options=server_options)\n\n\ndef beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString,\n }\n response_deserializers = {\n ('helloworld.Greeter', 'SayHello'): HelloReply.FromString,\n }\n cardinalities = {\n 'SayHello': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)\n# @@protoc_insertion_point(module_scope)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":1180,"cells":{"repo_name":{"kind":"string","value":"sve-odoo/odoo"},"path":{"kind":"string","value":"addons/website_sale/models/sale_order.py"},"copies":{"kind":"string","value":"26"},"size":{"kind":"string","value":"10438"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport random\n\nfrom openerp import SUPERUSER_ID\nfrom openerp.osv import osv, orm, fields\nfrom openerp.addons.web.http import request\n\n\nclass payment_transaction(orm.Model):\n _inherit = 'payment.transaction'\n\n _columns = {\n # link with the sale order\n 'sale_order_id': fields.many2one('sale.order', 'Sale Order'),\n }\n\nclass sale_order(osv.Model):\n _inherit = \"sale.order\"\n\n def _cart_qty(self, cr, uid, ids, field_name, arg, context=None):\n res = dict()\n for order in self.browse(cr, uid, ids, context=context):\n res[order.id] = int(sum(l.product_uom_qty for l in (order.website_order_line or [])))\n return res\n\n _columns = {\n 'website_order_line': fields.one2many(\n 'sale.order.line', 'order_id',\n string='Order Lines displayed on Website', readonly=True,\n help='Order Lines to be displayed on the website. They should not be used for computation purpose.',\n ),\n 'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'),\n 'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null'),\n 'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null'),\n }\n\n def _get_errors(self, cr, uid, order, context=None):\n return []\n\n def _get_website_data(self, cr, uid, order, context):\n return {\n 'partner': order.partner_id.id,\n 'order': order\n }\n\n def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):\n for so in self.browse(cr, uid, ids, context=context):\n domain = [('order_id', '=', so.id), ('product_id', '=', product_id)]\n if line_id:\n domain += [('id', '=', line_id)]\n return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)\n\n def _website_product_id_change(self, cr, uid, ids, order_id, product_id, line_id=None, context=None):\n so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)\n\n values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [],\n pricelist=so.pricelist_id.id,\n product=product_id,\n partner_id=so.partner_id.id,\n context=context\n )['value']\n\n if line_id:\n line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)\n values['name'] = line.name\n else:\n product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)\n values['name'] = product.description_sale or product.name\n\n values['product_id'] = product_id\n values['order_id'] = order_id\n if values.get('tax_id') != None:\n values['tax_id'] = [(6, 0, values['tax_id'])]\n return values\n\n def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):\n \"\"\" Add or set product quantity, add_qty can be negative \"\"\"\n sol = self.pool.get('sale.order.line')\n\n quantity = 0\n for so in self.browse(cr, uid, ids, context=context):\n if line_id != False:\n line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs)\n if line_ids:\n line_id = line_ids[0]\n\n # Create line if no line with product_id can be located\n if not line_id:\n values = self._website_product_id_change(cr, uid, ids, so.id, product_id, context=context)\n line_id = sol.create(cr, SUPERUSER_ID, values, context=context)\n if add_qty:\n add_qty -= 1\n\n # compute new quantity\n if set_qty:\n quantity = set_qty\n elif add_qty != None:\n quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0)\n\n # Remove zero of negative lines\n if quantity <= 0:\n sol.unlink(cr, SUPERUSER_ID, [line_id], context=context)\n else:\n # update line\n values = self._website_product_id_change(cr, uid, ids, so.id, product_id, line_id, context=context)\n values['product_uom_qty'] = quantity\n sol.write(cr, SUPERUSER_ID, [line_id], values, context=context)\n\n return {'line_id': line_id, 'quantity': quantity}\n\n def _cart_accessories(self, cr, uid, ids, context=None):\n for order in self.browse(cr, uid, ids, context=context):\n s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or []))\n s -= set(l.product_id.id for l in order.order_line)\n product_ids = random.sample(s, min(len(s),3))\n return self.pool['product.product'].browse(cr, uid, product_ids, context=context)\n\nclass website(orm.Model):\n _inherit = 'website'\n\n _columns = {\n 'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist',\n type='many2one', relation='product.pricelist', string='Default Pricelist'),\n 'currency_id': fields.related('pricelist_id','currency_id',\n type='many2one', relation='res.currency', string='Default Currency'),\n }\n\n def sale_product_domain(self, cr, uid, ids, context=None):\n return [(\"sale_ok\", \"=\", True)]\n\n def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None):\n sale_order_obj = self.pool['sale.order']\n sale_order_id = request.session.get('sale_order_id')\n sale_order = None\n # create so if needed\n if not sale_order_id and (force_create or code): \n # TODO cache partner_id session\n partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id\n\n for w in self.browse(cr, uid, ids):\n values = {\n 'user_id': w.user_id.id,\n 'partner_id': partner.id,\n 'pricelist_id': partner.property_product_pricelist.id,\n 'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1],\n }\n sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context)\n values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value']\n sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)\n request.session['sale_order_id'] = sale_order_id\n if sale_order_id:\n # TODO cache partner_id session\n partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id\n\n sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context)\n if not sale_order.exists():\n request.session['sale_order_id'] = None\n return None\n\n # check for change of pricelist with a coupon\n if code and code != sale_order.pricelist_id.code:\n pricelist_ids = self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context)\n if pricelist_ids:\n pricelist_id = pricelist_ids[0]\n request.session['sale_order_code_pricelist_id'] = pricelist_id\n update_pricelist = True\n request.session['sale_order_code_pricelist_id'] = False\n\n pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id\n\n # check for change of partner_id ie after signup\n if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id:\n flag_pricelist = False\n if pricelist_id != sale_order.pricelist_id.id:\n flag_pricelist = True\n fiscal_position = sale_order.fiscal_position and sale_order.fiscal_position.id or False\n\n values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [sale_order_id], partner.id, context=context)['value']\n if values.get('fiscal_position'):\n order_lines = map(int,sale_order.order_line)\n values.update(sale_order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [],\n values['fiscal_position'], [[6, 0, order_lines]], context=context)['value'])\n\n values['partner_id'] = partner.id\n sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)\n\n if flag_pricelist or values.get('fiscal_position') != fiscal_position:\n update_pricelist = True\n\n # update the pricelist\n if update_pricelist:\n values = {'pricelist_id': pricelist_id}\n values.update(sale_order.onchange_pricelist_id(pricelist_id, None)['value'])\n sale_order.write(values)\n for line in sale_order.order_line:\n sale_order._cart_update(product_id=line.product_id.id, add_qty=0)\n\n # update browse record\n if (code and code != sale_order.pricelist_id.code) or sale_order.partner_id.id != partner.id:\n sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order.id, context=context)\n\n return sale_order\n\n def sale_get_transaction(self, cr, uid, ids, context=None):\n transaction_obj = self.pool.get('payment.transaction')\n tx_id = request.session.get('sale_transaction_id')\n if tx_id:\n tx_ids = transaction_obj.search(cr, uid, [('id', '=', tx_id), ('state', 'not in', ['cancel'])], context=context)\n if tx_ids:\n return transaction_obj.browse(cr, uid, tx_ids[0], context=context)\n else:\n request.session['sale_transaction_id'] = False\n return False\n\n def sale_reset(self, cr, uid, ids, context=None):\n request.session.update({\n 'sale_order_id': False,\n 'sale_transaction_id': False,\n 'sale_order_code_pricelist_id': False,\n })\n\n\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":1181,"cells":{"repo_name":{"kind":"string","value":"saurabh6790/omnitech-libs"},"path":{"kind":"string","value":"core/doctype/custom_script/custom_script.py"},"copies":{"kind":"string","value":"34"},"size":{"kind":"string","value":"1208"},"content":{"kind":"string","value":"# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt \nfrom __future__ import unicode_literals\nimport webnotes\nfrom webnotes.utils import cstr\n\nclass DocType:\n\tdef __init__(self, d, dl):\n\t\tself.doc, self.doclist = d, dl\n\t\t\n\tdef autoname(self):\n\t\tself.doc.name = self.doc.dt + \"-\" + self.doc.script_type\n\n\tdef on_update(self):\n\t\twebnotes.clear_cache(doctype=self.doc.dt)\n\t\n\tdef on_trash(self):\n\t\twebnotes.clear_cache(doctype=self.doc.dt)\n\ndef make_custom_server_script_file(doctype, script=None):\n\timport os\n\tfrom webnotes.plugins import get_path\n\n\tfile_path = get_path(None, \"DocType\", doctype)\n\tif os.path.exists(file_path):\n\t\traise IOError(file_path + \" already exists\")\n\t\t\n\t# create folder if not exists\n\twebnotes.create_folder(os.path.dirname(file_path))\n\t\n\t# create file\n\tcustom_script = \"\"\"from __future__ import unicode_literals\nimport webnotes\nfrom webnotes.utils import cint, cstr, flt\nfrom webnotes.model.doc import Document\nfrom webnotes.model.code import get_obj\nfrom webnotes import msgprint, _\n\nclass CustomDocType(DocType):\n{script}\"\"\".format(script=script or \"\\tpass\")\n\n\twith open(file_path, \"w\") as f:\n\t\tf.write(custom_script.encode(\"utf-8\"))"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":1182,"cells":{"repo_name":{"kind":"string","value":"BenSto/pybikes"},"path":{"kind":"string","value":"pybikes/bysykkel.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"1909"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport json\n\nfrom .base import BikeShareSystem, BikeShareStation\nfrom . import utils\n\n\nclass BySykkel(BikeShareSystem):\n\n authed = True\n\n meta = {\n 'system': 'BySykkel',\n 'company': ['Urban Infrastructure Partner']\n }\n\n def __init__(self, tag, meta, feed_url, feed_details_url, key):\n super(BySykkel, self).__init__(tag, meta)\n self.feed_url = feed_url\n self.feed_details_url = feed_details_url\n self.key = key\n\n def update(self, scraper=None):\n if scraper is None:\n scraper = utils.PyBikesScraper()\n\n scraper.headers['Client-Identifier'] = self.key\n\n self.stations = []\n\n\n stations_data = json.loads(scraper.request(self.feed_url))\n details_data = json.loads(scraper.request(self.feed_details_url))\n\n # Aggregate status and information by uid\n stations_data = {s['id']: s for s in stations_data['stations']}\n details_data = {s['id']: s for s in details_data['stations']}\n\n # Join stationsdata in stations\n stations = [\n (stations_data[id], details_data[id])\n for id in stations_data.keys()\n ]\n\n # append all data to info part of stations and create objects of this\n for info, status in stations:\n info.update(status)\n\n station = BySykkelStation(info)\n\n self.stations.append(station)\n\n\n\nclass BySykkelStation(BikeShareStation):\n def __init__(self, info):\n\n super(BySykkelStation, self).__init__()\n\n self.name = info['title']\n\n self.longitude = float(info['center']['longitude'])\n self.latitude = float(info['center']['latitude'])\n\n self.bikes = info['availability']['bikes']\n self.free = info['availability']['locks']\n self.extra = {\n 'uid': info['id'],\n 'placement': info['subtitle'],\n }\n"},"license":{"kind":"string","value":"lgpl-3.0"}}},{"rowIdx":1183,"cells":{"repo_name":{"kind":"string","value":"yourcelf/btb"},"path":{"kind":"string","value":"scanblog/profiles/models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"18215"},"content":{"kind":"string","value":"import os\nimport datetime\nimport itertools\nimport string\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.contrib.auth.models import User, Group\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.defaultfilters import slugify\nfrom django.conf import settings\n\nfrom scanning.models import Document\nfrom comments.models import Comment\nfrom btb.utils import OrgManager, OrgQuerySet\n\nclass ProfileManager(OrgManager):\n \"\"\"\n For statuses based on letters (e.g. invited, waitlisted, etc.), any letter,\n whether sent or not, considers the status fulfilled. That is, one is\n \"invited\" if an Letter(type='invited') has been created for the person,\n whether or not it was sent. Creating a Letter is a contract to send it.\n\n This differs from the v1 implementation.\n \"\"\"\n def active(self):\n \"\"\" Everyone that hasn't been removed. \"\"\"\n return self.filter(user__is_active=True)\n\n def inactive(self):\n \"\"\" They have been removed for whatever reason. \"\"\"\n return self.filter(user__is_active=False)\n\n def inactive_commenters(self):\n return self.filter(user__is_active=False, blogger=False)\n\n def inactive_bloggers(self):\n return self.filter(user__is_active=False, blogger=True)\n\n def active_and_inactive_commenters(self):\n return self.filter(blogger=False)\n\n def commenters(self):\n \"\"\" They are not in prison. \"\"\"\n return self.active().filter(blogger=False)\n\n def bloggers(self): \n \"\"\" They are in prison. \"\"\"\n return self.active().filter(blogger=True)\n\n def bloggers_with_posts(self):\n return self.bloggers().select_related('user').filter(\n user__documents_authored__status=\"published\",\n user__documents_authored__type=\"post\",\n ).annotate(\n authored_posts_count=models.Count('user__documents_authored'),\n latest_post=models.Max(\n 'user__documents_authored__date_written'\n ),\n ).order_by('display_name')\n\n def bloggers_with_profiles(self):\n return self.bloggers().select_related('user').filter(\n user__documents_authored__status=\"published\",\n user__documents_authored__type=\"profile\",\n ).annotate(\n authored_posts_count=models.Count('user__documents_authored'),\n latest_post=models.Max(\n 'user__documents_authored__date_written'\n ),\n ).order_by('display_name')\n\n def bloggers_with_just_profiles(self):\n return self.bloggers().select_related('user').filter(\n user__documents_authored__status=\"published\",\n user__documents_authored__type=\"profile\",\n ).exclude(\n user__documents_authored__type=\"post\",\n user__documents_authored__status=\"published\",\n ).order_by('display_name')\n\n def bloggers_with_published_content(self):\n return self.bloggers().select_related('user').filter(\n Q(user__documents_authored__status=\"published\", \n user__documents_authored__type=\"profile\") |\n Q(user__documents_authored__status=\"published\",\n user__documents_authored__type=\"post\")\n ).distinct().order_by('display_name')\n\n def enrolled(self):\n \"\"\" They have returned a consent form. \"\"\"\n return self.bloggers().filter(consent_form_received=True)\n\n def enrolled_in_contact(self):\n \"\"\" They have returned a consent form, and we haven't lost contact. \"\"\"\n return self.enrolled().filter(lost_contact=False)\n\n #\n # Letter-based statuses\n #\n\n def invitable(self):\n \"\"\"\n No invitation letter has been created for them.\n \"\"\"\n return self.bloggers().filter(\n consent_form_received=False\n ).exclude(\n user__received_letters__type=\"consent_form\"\n )\n\n def invited(self):\n \"\"\" \n An invitation letter has been created, but not returned.\n \"\"\"\n return self.bloggers().filter(\n consent_form_received=False\n ).filter(\n user__received_letters__type=\"consent_form\"\n )\n\n def waitlistable(self):\n \"\"\"\n They have not been sent a consent form or waitlist postcard, and we\n haven't received a consent form.\n \"\"\"\n return self.bloggers().filter(\n consent_form_received=False,\n ).exclude(\n user__received_letters__type=\"waitlist\",\n ).exclude(\n user__received_letters__type=\"consent_form\",\n )\n\n def waitlisted(self):\n \"\"\" \n No invitation letter has been created, and a waitlist postcard has been\n created.\n \"\"\"\n return self.bloggers().filter(\n consent_form_received=False\n ).filter(\n user__received_letters__type=\"waitlist\"\n ).exclude(\n user__received_letters__type=\"consent_form\"\n )\n\n def needs_signup_complete_letter(self):\n return self.enrolled().exclude(user__received_letters__type=\"signup_complete\")\n\n def needs_first_post_letter(self):\n return (\n self.enrolled().filter(user__documents_authored__status=\"published\")\n ).exclude(user__received_letters__type=\"first_post\")\n\n def needs_comments_letter(self):\n # Couldn't figure out how to make this a flat ORM query. Using two\n # queries and custom SQL instead.\n pks = Comment.objects.unmailed().values_list('document__author__pk', flat=True)\n if pks:\n where = '\"{0}\".\"{1}\" in ({2})'.format(\n Profile._meta.db_table,\n Profile.user.field.get_attname_column()[0],\n \",\".join(\"%s\" for i in pks),\n )\n return self.enrolled().extra(\n where=[where],\n params=pks\n )\n return self.none()\n\n def recently_active(self, days=2*365):\n \"\"\"\n All bloggers with whom we haven't lost contact, are enrolled or have\n been invited, and have sent us something within the last N days.\n \"\"\"\n cutoff = datetime.datetime.now() - datetime.timedelta(days=days)\n return self.bloggers().filter(\n lost_contact=False\n ).filter(\n Q(consent_form_received=True) |\n Q(user__received_letters__type=\"consent_form\")\n ).filter(\n user__documents_authored__created__gte=cutoff\n ).distinct()\n\nclass Profile(models.Model):\n user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True)\n display_name = models.CharField(max_length=50)\n show_adult_content = models.BooleanField(\n default=False,\n help_text=_('Show posts and comments that have been marked as adult?')\n )\n\n blogger = models.BooleanField(default=False)\n managed = models.BooleanField(default=False)\n lost_contact = models.BooleanField(default=False)\n\n blog_name = models.CharField(blank=True, default=\"\", max_length=255)\n comments_disabled = models.BooleanField(default=False)\n mailing_address = models.TextField(blank=True, default=\"\")\n special_mail_handling = models.TextField(blank=True, default=\"\")\n\n consent_form_received = models.BooleanField(default=False)\n\n objects = ProfileManager()\n\n class QuerySet(OrgQuerySet):\n orgs = [\"user__organization\"]\n\n def light_dict(self):\n return {\n 'id': self.pk,\n 'username': self.user.username,\n 'email': self.user.email,\n 'is_active': self.user.is_active,\n 'date_joined': self.user.date_joined.isoformat(),\n 'blogger': self.blogger,\n 'managed': self.managed,\n 'lost_contact': self.lost_contact,\n 'comments_disabled': self.comments_disabled,\n 'blog_name': self.blog_name,\n 'display_name': self.display_name,\n 'mailing_address': self.mailing_address,\n 'special_mail_handling': self.special_mail_handling,\n 'consent_form_received': self.consent_form_received,\n 'blog_url': self.get_blog_url(),\n 'profile_url': self.get_absolute_url(),\n 'edit_url': self.get_edit_url(),\n 'is_public': self.is_public(),\n }\n\n def to_dict(self):\n scans_authored = getattr(self, \"user__scans_authored\", None)\n dct = self.light_dict()\n dct.update({\n u'organizations': [o.light_dict() for o in self.user.organization_set.all()],\n u'invited': Profile.objects.invited().filter(pk=self.pk).exists(),\n u'waitlisted': Profile.objects.waitlisted().filter(pk=self.pk).exists(),\n u'waitlistable': Profile.objects.waitlistable().filter(pk=self.pk).exists(),\n u'scans_authored': scans_authored,\n u'has_public_profile': self.has_public_profile(),\n })\n return dct\n \n def save(self, *args, **kwargs):\n if not self.display_name:\n self.display_name = self.user.username\n super(Profile, self).save(*args, **kwargs)\n # Since profile status (active/license) can impact publicness of\n # documents, we need to bump the documents if we save profiles.\n for doc in self.user.documents_authored.all():\n doc.set_publicness()\n\n def __unicode__(self):\n return self.display_name\n\n def get_absolute_url(self):\n return reverse('profiles.profile_show', args=[self.pk])\n\n def get_user_edit_url(self):\n return reverse('profiles.profile_edit', args=[self.pk])\n\n def get_edit_url(self):\n return \"%s#/users/%s\" % (reverse('moderation.home'), self.pk)\n\n def get_blog_url(self):\n return reverse('blogs.blog_show', args=[self.pk, self.get_blog_slug()])\n\n def get_bare_blog_url(self):\n return reverse('blogs.blog_show', args=[self.pk, \"\"])\n\n def get_blog_slug(self):\n return slugify(self.display_name)\n\n def full_address(self):\n return \"\\n\".join((\n self.display_name,\n self.mailing_address\n ))\n\n def is_public(self):\n return self.user.is_active and ((not self.blogger) or self.consent_form_received)\n\n def has_public_profile(self):\n return Document.objects.filter(author__pk=self.pk, type=\"profile\",\n status=\"published\").exists()\n\n def has_blog_posts(self):\n return Document.objects.filter(author__pk=self.pk, type=\"post\",\n status=\"published\").exists()\n\n def set_random_password(self):\n \"\"\"\n Set a random password on our associated user object. Does not save the user.\n \"\"\"\n chars = set(string.ascii_uppercase + string.digits)\n char_gen = (c for c in itertools.imap(os.urandom, itertools.repeat(1)) if c in chars)\n self.user.set_password(''.join(itertools.islice(char_gen, None, 32)))\n\n def all_published_posts_as_latex_list(self):\n from correspondence.utils import tex_escape\n posts = self.user.documents_authored.public().order_by('date_written')\n parts = [ur'\\begin{itemize}']\n for post in posts:\n if post.in_reply_to:\n try:\n orig = posts.get(reply_code=post.in_reply_to)\n except Document.DoesNotExist:\n title = post.get_title()\n else:\n title = u'{} (in reply to {})'.format(\n post.get_title(),\n orig.get_title()\n )\n else:\n title = post.get_title()\n\n parts.append(ur' \\item %s (\\emph{%s})' % (\n tex_escape(title), \n post.date_written.strftime('%Y-%m-%d')\n ))\n parts.append(ur'\\end{itemize}')\n return u\"\\n\".join(parts)\n\n\nclass OrganizationManager(OrgManager):\n def public(self):\n return self.filter(public=True)\n\nclass Organization(models.Model):\n name = models.CharField(max_length=255, unique=True)\n slug = models.SlugField(unique=True)\n personal_contact = models.CharField(max_length=255, blank=True)\n public = models.BooleanField(\n default=False,\n help_text=\"Check to make this organization appear in the 'Groups' tab\"\n )\n custom_intro_packet = models.FileField(upload_to=settings.UPLOAD_TO + \"/org_intro_packets\",\n help_text=\"Leave blank to use the default packet, formatted with your address.\",\n blank=True, null=True)\n mailing_address = models.TextField()\n outgoing_mail_handled_by = models.ForeignKey('self', blank=True, null=True)\n\n about = models.TextField(\n blank=True,\n help_text=\"Main text that will appear on the groups page.\",\n )\n footer = models.TextField(\n blank=True,\n help_text=\"Additional text that will appear at the bottom of each post by a member of this organization.\",\n )\n\n members = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True)\n moderators = models.ManyToManyField(settings.AUTH_USER_MODEL,\n related_name=\"organizations_moderated\",\n blank=True\n )\n\n objects = OrganizationManager()\n\n class QuerySet(OrgQuerySet):\n orgs = [\"\"]\n\n def to_dict(self):\n dct = self.light_dict()\n dct['moderators'] = [u.profile.light_dict() for u in self.moderators.select_related('profile').all()]\n dct['members'] = [u.profile.light_dict() for u in self.members.select_related('profile').all()]\n dct['about'] = self.about\n dct['footer'] = self.footer\n dct['mailing_address'] = self.mailing_address\n dct['personal_contact'] = self.personal_contact\n if self.custom_intro_packet:\n dct['custom_intro_packet_url'] = self.custom_intro_packet.url\n else:\n dct['custom_intro_packet_url'] = None\n if self.outgoing_mail_handled_by:\n dct['outgoing_mail_handled_by'] = self.outgoing_mail_handled_by.light_dict()\n else:\n dct['outgoing_mail_handled_by'] = {}\n return dct\n\n def light_dict(self):\n return {\n u'id': self.pk,\n u'slug': self.slug,\n u'name': self.name,\n u'public': self.public,\n u'mailing_address': self.mailing_address,\n }\n\n def members_count(self):\n return self.members.count()\n\n def moderators_list(self):\n return \", \".join(unicode(u.profile) for u in self.moderators.all())\n\n def get_absolute_url(self):\n return reverse(\"profiles.profile_list\", kwargs={'org_slug': self.slug})\n\n def __unicode__(self):\n return self.name\n\nclass AffiliationManager(OrgManager):\n def public(self): return self.all().public()\n def private(self): return self.all().private()\n\nclass Affiliation(models.Model):\n \"\"\"\n Affiliations are like a \"super tag\" for posts, which:\n 1. can append additional HTML to the top of list and detail views\n 2. is limited to use by particular org's.\n \"\"\"\n title = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True,\n help_text=\"Use this to identify this affiliation when editing documents.\")\n logo = models.ImageField(upload_to=\"public/uploads/affiliations/\",\n blank=True, null=True)\n list_body = models.TextField(\n help_text=\"HTML for the top of the group page.\")\n detail_body = models.TextField(\n help_text=\"HTML to append to individual posts for this group.\")\n organizations = models.ManyToManyField(Organization,\n help_text=\"Which organizations are allowed to mark posts\"\n \" as belonging to this affiliation?\")\n public = models.BooleanField(\n default=False,\n help_text=\"If false, the affiliation won't be listed publicly.\")\n\n order = models.IntegerField(\n default=0,\n help_text=\"Use to set the order for the list of affiliations on\"\n \" the categories view. Lower numbers come first.\")\n created = models.DateTimeField(default=datetime.datetime.now)\n modified = models.DateTimeField(blank=True)\n\n objects = AffiliationManager()\n\n class QuerySet(OrgQuerySet):\n orgs = [\"organizations\"]\n\n def public(self):\n return self.filter(public=True)\n def private(self):\n return self.filter(public=False)\n \n class Meta:\n ordering = ['order', '-created']\n\n def to_dict(self):\n return {\n u'id': self.pk,\n u'title': self.title,\n u'slug': self.slug,\n u'logo_url': self.logo.url if self.logo else None,\n u'list_body': self.list_body,\n u'detail_body': self.detail_body,\n u'organizations': [o.light_dict() for o in self.organizations.all()],\n u'public': self.public,\n u'order': self.order,\n }\n\n def total_num_responses(self):\n return self.document_set.count()\n\n def get_absolute_url(self):\n return reverse(\"blogs.show_affiliation\", args=[self.slug])\n\n def save(self, *args, **kwargs):\n self.modified = datetime.datetime.now()\n return super(Affiliation, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.slug\n\n@receiver(post_save, sender=User)\ndef create_profile(sender, instance=None, **kwargs):\n \"\"\"\n Creates a profile on the User's save signal, so we know every user has one.\n Add the user to the \"readers\" group.\n \"\"\"\n if instance is None:\n return\n profile, created = Profile.objects.get_or_create(user=instance)\n readers, created = Group.objects.get_or_create(name=\"readers\")\n profile.user.groups.add(readers)\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":1184,"cells":{"repo_name":{"kind":"string","value":"astropy/astropy"},"path":{"kind":"string","value":"astropy/io/ascii/tests/test_html.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"22379"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis module tests some of the methods related to the ``HTML``\nreader/writer and aims to document its functionality.\n\nRequires `BeautifulSoup `_\nto be installed.\n\"\"\"\n\nfrom io import StringIO\n\nfrom astropy.io.ascii import html\nfrom astropy.io.ascii import core\nfrom astropy.table import Table\n\nimport pytest\nimport numpy as np\n\nfrom .common import setup_function, teardown_function # noqa\nfrom astropy.io import ascii\n\nfrom astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 # noqa\n\nif HAS_BS4:\n from bs4 import BeautifulSoup, FeatureNotFound\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_soupstring():\n \"\"\"\n Test to make sure the class SoupString behaves properly.\n \"\"\"\n\n soup = BeautifulSoup('

foo

',\n 'html.parser')\n soup_str = html.SoupString(soup)\n assert isinstance(soup_str, str)\n assert isinstance(soup_str, html.SoupString)\n assert soup_str == '

foo

'\n assert soup_str.soup is soup\n\n\ndef test_listwriter():\n \"\"\"\n Test to make sure the class ListWriter behaves properly.\n \"\"\"\n\n lst = []\n writer = html.ListWriter(lst)\n\n for i in range(5):\n writer.write(i)\n for ch in 'abcde':\n writer.write(ch)\n\n assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e']\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_identify_table():\n \"\"\"\n Test to make sure that identify_table() returns whether the\n given BeautifulSoup tag is the correct table to process.\n \"\"\"\n\n # Should return False on non- tags and None\n soup = BeautifulSoup('', 'html.parser')\n assert html.identify_table(soup, {}, 0) is False\n assert html.identify_table(None, {}, 0) is False\n\n soup = BeautifulSoup('
'\n '
A
B
', 'html.parser').table\n assert html.identify_table(soup, {}, 2) is False\n assert html.identify_table(soup, {}, 1) is True # Default index of 1\n\n # Same tests, but with explicit parameter\n assert html.identify_table(soup, {'table_id': 2}, 1) is False\n assert html.identify_table(soup, {'table_id': 1}, 1) is True\n\n # Test identification by string ID\n assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False\n assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_missing_data():\n \"\"\"\n Test reading a table with missing data\n \"\"\"\n # First with default where blank => '0'\n table_in = ['',\n '',\n '',\n '',\n '
A
1
']\n dat = Table.read(table_in, format='ascii.html')\n assert dat.masked is False\n assert np.all(dat['A'].mask == [True, False])\n assert dat['A'].dtype.kind == 'i'\n\n # Now with a specific value '...' => missing\n table_in = ['',\n '',\n '',\n '',\n '
A
...
1
']\n dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')])\n assert dat.masked is False\n assert np.all(dat['A'].mask == [True, False])\n assert dat['A'].dtype.kind == 'i'\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_rename_cols():\n \"\"\"\n Test reading a table and renaming cols\n \"\"\"\n table_in = ['',\n '',\n '',\n '
A B
12
']\n\n # Swap column names\n dat = Table.read(table_in, format='ascii.html', names=['B', 'A'])\n assert dat.colnames == ['B', 'A']\n assert len(dat) == 1\n\n # Swap column names and only include A (the renamed version)\n dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A'])\n assert dat.colnames == ['A']\n assert len(dat) == 1\n assert np.all(dat['A'] == 2)\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_no_names():\n \"\"\"\n Test reading a table witn no column header\n \"\"\"\n table_in = ['',\n '',\n '',\n '
1
2
']\n dat = Table.read(table_in, format='ascii.html')\n assert dat.colnames == ['col1']\n assert len(dat) == 2\n\n dat = Table.read(table_in, format='ascii.html', names=['a'])\n assert dat.colnames == ['a']\n assert len(dat) == 2\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_identify_table_fail():\n \"\"\"\n Raise an exception with an informative error message if table_id\n is not found.\n \"\"\"\n table_in = ['',\n '
A
B
']\n\n with pytest.raises(core.InconsistentTableError) as err:\n Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'},\n guess=False)\n assert err.match(\"ERROR: HTML table id 'bad_id' not found$\")\n\n with pytest.raises(core.InconsistentTableError) as err:\n Table.read(table_in, format='ascii.html', htmldict={'table_id': 3},\n guess=False)\n assert err.match(\"ERROR: HTML table number 3 not found$\")\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_backend_parsers():\n \"\"\"\n Make sure the user can specify which back-end parser to use\n and that an error is raised if the parser is invalid.\n \"\"\"\n for parser in ('lxml', 'xml', 'html.parser', 'html5lib'):\n try:\n Table.read('data/html2.html', format='ascii.html',\n htmldict={'parser': parser}, guess=False)\n except FeatureNotFound:\n if parser == 'html.parser':\n raise\n # otherwise ignore if the dependency isn't present\n\n # reading should fail if the parser is invalid\n with pytest.raises(FeatureNotFound):\n Table.read('data/html2.html', format='ascii.html',\n htmldict={'parser': 'foo'}, guess=False)\n\n\n@pytest.mark.skipif('HAS_BS4')\ndef test_htmlinputter_no_bs4():\n \"\"\"\n This should return an OptionalTableImportError if BeautifulSoup\n is not installed.\n \"\"\"\n\n inputter = html.HTMLInputter()\n with pytest.raises(core.OptionalTableImportError):\n inputter.process_lines([])\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_htmlinputter():\n \"\"\"\n Test to ensure that HTMLInputter correctly converts input\n into a list of SoupStrings representing table elements.\n \"\"\"\n\n f = 'data/html.html'\n with open(f) as fd:\n table = fd.read()\n\n inputter = html.HTMLInputter()\n inputter.html = {}\n\n # In absence of table_id, defaults to the first table\n expected = ['Column 1Column 2Column 3',\n '1a1.05',\n '2b2.75',\n '3c-1.25']\n assert [str(x) for x in inputter.get_lines(table)] == expected\n\n # Should raise an InconsistentTableError if the table is not found\n inputter.html = {'table_id': 4}\n with pytest.raises(core.InconsistentTableError):\n inputter.get_lines(table)\n\n # Identification by string ID\n inputter.html['table_id'] = 'second'\n expected = ['Column AColumn BColumn C',\n '4d10.5',\n '5e27.5',\n '6f-12.5']\n assert [str(x) for x in inputter.get_lines(table)] == expected\n\n # Identification by integer index\n inputter.html['table_id'] = 3\n expected = ['C1C2C3',\n '7g105.0',\n '8h275.0',\n '9i-125.0']\n assert [str(x) for x in inputter.get_lines(table)] == expected\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_htmlsplitter():\n \"\"\"\n Test to make sure that HTMLSplitter correctly inputs lines\n of type SoupString to return a generator that gives all\n header and data elements.\n \"\"\"\n\n splitter = html.HTMLSplitter()\n\n lines = [html.SoupString(BeautifulSoup('
Col 1Col 2
',\n 'html.parser').tr),\n html.SoupString(BeautifulSoup('
Data 1Data 2
',\n 'html.parser').tr)]\n expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']]\n assert list(splitter(lines)) == expected_data\n\n # Make sure the presence of a non-SoupString triggers a TypeError\n lines.append('Data 3Data 4')\n with pytest.raises(TypeError):\n list(splitter(lines))\n\n # Make sure that passing an empty list triggers an error\n with pytest.raises(core.InconsistentTableError):\n list(splitter([]))\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_htmlheader_start():\n \"\"\"\n Test to ensure that the start_line method of HTMLHeader\n returns the first line of header data. Uses t/html.html\n for sample input.\n \"\"\"\n\n f = 'data/html.html'\n with open(f) as fd:\n table = fd.read()\n\n inputter = html.HTMLInputter()\n inputter.html = {}\n header = html.HTMLHeader()\n\n lines = inputter.get_lines(table)\n assert str(lines[header.start_line(lines)]) == \\\n 'Column 1Column 2Column 3'\n inputter.html['table_id'] = 'second'\n lines = inputter.get_lines(table)\n assert str(lines[header.start_line(lines)]) == \\\n 'Column AColumn BColumn C'\n inputter.html['table_id'] = 3\n lines = inputter.get_lines(table)\n assert str(lines[header.start_line(lines)]) == \\\n 'C1C2C3'\n\n # start_line should return None if no valid header is found\n lines = [html.SoupString(BeautifulSoup('
Data
',\n 'html.parser').tr),\n html.SoupString(BeautifulSoup('

Text

', 'html.parser').p)]\n assert header.start_line(lines) is None\n\n # Should raise an error if a non-SoupString is present\n lines.append('Header')\n with pytest.raises(TypeError):\n header.start_line(lines)\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_htmldata():\n \"\"\"\n Test to ensure that the start_line and end_lines methods\n of HTMLData returns the first line of table data. Uses\n t/html.html for sample input.\n \"\"\"\n\n f = 'data/html.html'\n with open(f) as fd:\n table = fd.read()\n\n inputter = html.HTMLInputter()\n inputter.html = {}\n data = html.HTMLData()\n\n lines = inputter.get_lines(table)\n assert str(lines[data.start_line(lines)]) == \\\n '1a1.05'\n # end_line returns the index of the last data element + 1\n assert str(lines[data.end_line(lines) - 1]) == \\\n '3c-1.25'\n\n inputter.html['table_id'] = 'second'\n lines = inputter.get_lines(table)\n assert str(lines[data.start_line(lines)]) == \\\n '4d10.5'\n assert str(lines[data.end_line(lines) - 1]) == \\\n '6f-12.5'\n\n inputter.html['table_id'] = 3\n lines = inputter.get_lines(table)\n assert str(lines[data.start_line(lines)]) == \\\n '7g105.0'\n assert str(lines[data.end_line(lines) - 1]) == \\\n '9i-125.0'\n\n # start_line should raise an error if no table data exists\n lines = [html.SoupString(BeautifulSoup('
', 'html.parser').div),\n html.SoupString(BeautifulSoup('

Text

', 'html.parser').p)]\n with pytest.raises(core.InconsistentTableError):\n data.start_line(lines)\n\n # end_line should return None if no table data exists\n assert data.end_line(lines) is None\n\n # Should raise an error if a non-SoupString is present\n lines.append('Data')\n with pytest.raises(TypeError):\n data.start_line(lines)\n with pytest.raises(TypeError):\n data.end_line(lines)\n\n\ndef test_multicolumn_write():\n \"\"\"\n Test to make sure that the HTML writer writes multidimensional\n columns (those with iterable elements) using the colspan\n attribute of .\n \"\"\"\n\n col1 = [1, 2, 3]\n col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]\n col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]\n table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))\n expected = \"\"\"\\\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
C1C2C3
11.01.0aaa
22.02.0bbb
33.03.0ccc
\n \n\n \"\"\"\n out = html.HTML().write(table)[0].strip()\n assert out == expected.strip()\n\n\n@pytest.mark.skipif('not HAS_BLEACH')\ndef test_multicolumn_write_escape():\n \"\"\"\n Test to make sure that the HTML writer writes multidimensional\n columns (those with iterable elements) using the colspan\n attribute of .\n \"\"\"\n\n col1 = [1, 2, 3]\n col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]\n col3 = [('', '', 'a'), ('', 'b', 'b'), ('c', 'c', 'c')]\n table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))\n expected = \"\"\"\\\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
C1C2C3
11.01.0a
22.02.0bb
33.03.0ccc
\n \n\n \"\"\"\n out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip()\n assert out == expected.strip()\n\n\ndef test_write_no_multicols():\n \"\"\"\n Test to make sure that the HTML writer will not use\n multi-dimensional columns if the multicol parameter\n is False.\n \"\"\"\n\n col1 = [1, 2, 3]\n col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]\n col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]\n table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))\n expected = \"\"\"\\\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
C1C2C3
11.0 .. 1.0a .. a
22.0 .. 2.0b .. b
33.0 .. 3.0c .. c
\n \n\n \"\"\"\n assert html.HTML({'multicol': False}).write(table)[0].strip() == \\\n expected.strip()\n\n\n@pytest.mark.skipif('not HAS_BS4')\ndef test_multicolumn_read():\n \"\"\"\n Test to make sure that the HTML reader inputs multidimensional\n columns (those with iterable elements) using the colspan\n attribute of .\n\n Ensure that any string element within a multidimensional column\n casts all elements to string prior to type conversion operations.\n \"\"\"\n\n table = Table.read('data/html2.html', format='ascii.html')\n str_type = np.dtype((str, 21))\n expected = Table(np.array([(['1', '2.5000000000000000001'], 3),\n (['1a', '1'], 3.5)],\n dtype=[('A', str_type, (2,)), ('B', 'x'], ['y']], names=['a', 'b'])\n\n # One column contains raw HTML (string input)\n out = StringIO()\n t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'})\n expected = \"\"\"\\\n \n x\n &lt;em&gt;y&lt;/em&gt;\n \"\"\"\n assert expected in out.getvalue()\n\n # One column contains raw HTML (list input)\n out = StringIO()\n t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']})\n assert expected in out.getvalue()\n\n # Two columns contains raw HTML (list input)\n out = StringIO()\n t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']})\n expected = \"\"\"\\\n \n x\n y\n \"\"\"\n assert expected in out.getvalue()\n\n\n@pytest.mark.skipif('not HAS_BLEACH')\ndef test_raw_html_write_clean():\n \"\"\"\n Test that columns can contain raw HTML which is not escaped.\n \"\"\"\n import bleach # noqa\n\n t = Table([[''], ['

y

'], ['y']], names=['a', 'b', 'c'])\n\n # Confirm that