{"rowIdx":284635,"cells":{"repo_name":{"kind":"string","value":"ContinuumIO/odo"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"odo/backends/tests/test_ssh.py"},"copies":{"kind":"string","value":"4"},"content":{"kind":"string","value":"from __future__ import absolute_import, division, print_function\n\nimport pytest\nparamiko = pytest.importorskip('paramiko')\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nimport sys\n\nfrom odo.utils import tmpfile, filetext\nfrom odo.directory import _Directory, Directory\nfrom odo.backends.ssh import SSH, resource, ssh_pattern, sftp, drop, connect\nfrom odo.backends.csv import CSV\nfrom odo import into, discover, CSV, JSONLines, JSON, convert\nfrom odo.temp import _Temp, Temp\nfrom odo.compatibility import ON_TRAVIS_CI\nimport socket\n\nskipif = pytest.mark.skipif\n\n# NOTE: this is a workaround for paramiko on Py2; connect() hangs without\n# raising an exception. Shows up on paramiko 1.16.0 and 2.0.2 with Py 2.7.\n# KWS: 2016-08-10\n# JJ: Still happens as of 2016-10-20\ntry_to_connect = sys.version_info[0] >= 3\npytestmark = skipif(not try_to_connect, reason='could not connect')\n\nif try_to_connect:\n try:\n ssh = connect(hostname='localhost')\n ssh.close()\n except socket.error:\n pytestmark = pytest.mark.skip('Could not connect')\n except paramiko.PasswordRequiredException as e:\n pytestmark = pytest.mark.skip(str(e))\n except paramiko.SSHException as e:\n pytestmark = pytest.mark.skip(str(e))\n except TypeError:\n # NOTE: This is a workaround for paramiko version 1.16.0 on Python 3.4,\n # that raises a TypeError due to improper indexing internally into\n # dict_keys when a ConnectionRefused error is raised.\n # KWS 2016-04-21.\n pytestmark = pytest.mark.skip('Could not connect')\n\n\ndef test_resource():\n r = resource('ssh://joe@localhost:/path/to/myfile.csv')\n assert isinstance(r, SSH(CSV))\n assert r.path == '/path/to/myfile.csv'\n assert r.auth['hostname'] == 'localhost'\n assert r.auth['username'] == 'joe'\n\n\ndef test_connect():\n a = connect(hostname='localhost')\n b = connect(hostname='localhost')\n assert a is b\n\n a.close()\n\n c = connect(hostname='localhost')\n assert a is c\n assert c.get_transport() and c.get_transport().is_active()\n\n\ndef test_resource_directory():\n r = resource('ssh://joe@localhost:/path/to/')\n assert issubclass(r.subtype, _Directory)\n\n r = resource('ssh://joe@localhost:/path/to/*.csv')\n assert r.subtype == Directory(CSV)\n assert r.path == '/path/to/'\n\n\ndef test_discover():\n with filetext('name,balance\\nAlice,100\\nBob,200') as fn:\n local = CSV(fn)\n remote = SSH(CSV)(fn, hostname='localhost')\n\n assert discover(local) == discover(remote)\n\n\ndef test_discover_from_resource():\n with filetext('name,balance\\nAlice,100\\nBob,200', extension='csv') as fn:\n local = CSV(fn)\n remote = resource('ssh://localhost:' + fn)\n\n assert discover(local) == discover(remote)\n\n\ndef test_ssh_pattern():\n uris = ['localhost:myfile.csv',\n '127.0.0.1:/myfile.csv',\n 'user@127.0.0.1:/myfile.csv',\n 'user@127.0.0.1:/*.csv',\n 'user@127.0.0.1:/my-dir/my-file3.csv']\n for uri in uris:\n assert re.match(ssh_pattern, uri)\n\n\ndef test_copy_remote_csv():\n with tmpfile('csv') as target:\n with 
filetext('name,balance\\nAlice,100\\nBob,200',\n extension='csv') as fn:\n csv = resource(fn)\n\n uri = 'ssh://localhost:%s.csv' % target\n scsv = into(uri, csv)\n\n assert isinstance(scsv, SSH(CSV))\n assert discover(scsv) == discover(csv)\n\n # Round trip\n csv2 = into(target, scsv)\n assert into(list, csv) == into(list, csv2)\n\n\ndef test_drop():\n with filetext('name,balance\\nAlice,100\\nBob,200', extension='csv') as fn:\n with tmpfile('csv') as target:\n scsv = SSH(CSV)(target, hostname='localhost')\n\n assert not os.path.exists(target)\n\n conn = sftp(**scsv.auth)\n conn.put(fn, target)\n\n assert os.path.exists(target)\n\n drop(scsv)\n drop(scsv)\n\n assert not os.path.exists(target)\n\n\ndef test_drop_of_csv_json_lines_use_ssh_version():\n from odo.backends.ssh import drop_ssh\n for typ in [CSV, JSON, JSONLines]:\n assert drop.dispatch(SSH(typ)) == drop_ssh\n\n\ndef test_convert_local_file_to_temp_ssh_file():\n with filetext('name,balance\\nAlice,100\\nBob,200', extension='csv') as fn:\n csv = CSV(fn)\n scsv = convert(Temp(SSH(CSV)), csv, hostname='localhost')\n\n assert into(list, csv) == into(list, scsv)\n\n\n@skipif(ON_TRAVIS_CI, reason=\"Don't know\")\ndef test_temp_ssh_files():\n with filetext('name,balance\\nAlice,100\\nBob,200', extension='csv') as fn:\n csv = CSV(fn)\n scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')\n assert discover(csv) == discover(scsv)\n\n assert isinstance(scsv, _Temp)\n\n\n@skipif(ON_TRAVIS_CI, reason=\"Don't know\")\ndef test_convert_through_temporary_local_storage():\n with filetext('name,quantity\\nAlice,100\\nBob,200', extension='csv') as fn:\n csv = CSV(fn)\n df = into(pd.DataFrame, csv)\n scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')\n\n assert into(list, csv) == into(list, scsv)\n\n scsv2 = into(Temp(SSH(CSV)), df, hostname='localhost')\n assert into(list, scsv2) == into(list, df)\n\n sjson = into(Temp(SSH(JSONLines)), df, hostname='localhost')\n assert (into(np.ndarray, sjson) == into(np.ndarray, df)).all()\n\n\n@skipif(ON_TRAVIS_CI and sys.version_info[0] == 3,\n reason='Strange hanging on travis for python33 and python34')\ndef test_ssh_csv_to_s3_csv():\n # for some reason this can only be run in the same file as other ssh tests\n # and must be a Temp(SSH(CSV)) otherwise tests above this one fail\n s3_bucket = pytest.importorskip('odo.backends.tests.test_aws').s3_bucket\n\n with filetext('name,balance\\nAlice,100\\nBob,200', extension='csv') as fn:\n remote = into(Temp(SSH(CSV)), CSV(fn), hostname='localhost')\n with s3_bucket('.csv') as b:\n result = into(b, remote)\n assert discover(result) == discover(resource(b))\n\n\n@skipif(ON_TRAVIS_CI and sys.version_info[0] == 3,\n reason='Strange hanging on travis for python33 and python34')\ndef test_s3_to_ssh():\n pytest.importorskip('boto')\n\n tips_uri = 's3://nyqpug/tips.csv'\n with tmpfile('.csv') as fn:\n result = into(Temp(SSH(CSV))(fn, hostname='localhost'), tips_uri)\n assert into(list, result) == into(list, tips_uri)\n assert discover(result) == discover(resource(tips_uri))\n"}}},{"rowIdx":284636,"cells":{"repo_name":{"kind":"string","value":"anfedorov/atom-script"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"examples/version_info.py"},"copies":{"kind":"string","value":"18"},"content":{"kind":"string","value":"#!/usr/bin/env python2.7\nimport 
sys\n\nprint(sys.version_info)\n"}}},{"rowIdx":284637,"cells":{"repo_name":{"kind":"string","value":"damonkohler/sl4a"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"python/gdata/src/gdata/tlslite/utils/OpenSSL_TripleDES.py"},"copies":{"kind":"string","value":"359"},"content":{"kind":"string","value":"\"\"\"OpenSSL/M2Crypto 3DES implementation.\"\"\"\n\nfrom cryptomath import *\nfrom TripleDES import *\n\nif m2cryptoLoaded:\n\n def new(key, mode, IV):\n return OpenSSL_TripleDES(key, mode, IV)\n\n class OpenSSL_TripleDES(TripleDES):\n\n def __init__(self, key, mode, IV):\n TripleDES.__init__(self, key, mode, IV, \"openssl\")\n self.key = key\n self.IV = IV\n\n def _createContext(self, encrypt):\n context = m2.cipher_ctx_new()\n cipherType = m2.des_ede3_cbc()\n m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)\n return context\n\n def encrypt(self, plaintext):\n TripleDES.encrypt(self, plaintext)\n context = self._createContext(1)\n ciphertext = m2.cipher_update(context, plaintext)\n m2.cipher_ctx_free(context)\n self.IV = ciphertext[-self.block_size:]\n return ciphertext\n\n def decrypt(self, ciphertext):\n TripleDES.decrypt(self, ciphertext)\n context = self._createContext(0)\n #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.\n #To work around this, we append sixteen zeros to the string, below:\n plaintext = m2.cipher_update(context, ciphertext+('\\0'*16))\n\n #If this bug is ever fixed, then plaintext will end up having a garbage\n #plaintext block on the end. That's okay - the below code will ignore it.\n plaintext = plaintext[:len(ciphertext)]\n m2.cipher_ctx_free(context)\n self.IV = ciphertext[-self.block_size:]\n return plaintext"}}},{"rowIdx":284638,"cells":{"repo_name":{"kind":"string","value":"bigswitch/neutron"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"neutron/tests/unit/agent/metadata/test_agent.py"},"copies":{"kind":"string","value":"1"},"content":{"kind":"string","value":"# Copyright 2012 New Dream Network, LLC (DreamHost)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nfrom neutron_lib import constants as n_const\nimport testtools\nimport webob\n\nfrom neutron.agent.linux import utils as agent_utils\nfrom neutron.agent.metadata import agent\nfrom neutron.agent.metadata import config\nfrom neutron.agent import metadata_agent\nfrom neutron.common import utils\nfrom neutron.tests import base\n\n\nclass FakeConf(object):\n auth_ca_cert = None\n nova_metadata_ip = '9.9.9.9'\n nova_metadata_port = 8775\n metadata_proxy_shared_secret = 'secret'\n nova_metadata_protocol = 'http'\n nova_metadata_insecure = True\n nova_client_cert = 'nova_cert'\n nova_client_priv_key = 'nova_priv_key'\n cache_url = ''\n\n\nclass FakeConfCache(FakeConf):\n cache_url = 'memory://?default_ttl=5'\n\n\nclass TestMetadataProxyHandlerBase(base.BaseTestCase):\n fake_conf = FakeConf\n\n def setUp(self):\n super(TestMetadataProxyHandlerBase, self).setUp()\n self.log_p = mock.patch.object(agent, 'LOG')\n self.log = self.log_p.start()\n self.handler = agent.MetadataProxyHandler(self.fake_conf)\n self.handler.plugin_rpc = mock.Mock()\n self.handler.context = mock.Mock()\n\n\nclass TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase):\n def test_get_port_filters(self):\n router_id = 'test_router_id'\n ip = '1.2.3.4'\n networks = ('net_id1', 'net_id2')\n expected = {'device_id': [router_id],\n 'device_owner': n_const.ROUTER_INTERFACE_OWNERS,\n 'network_id': networks,\n 'fixed_ips': {'ip_address': [ip]}}\n actual = self.handler._get_port_filters(router_id, ip, networks)\n self.assertEqual(expected, actual)\n\n def test_get_router_networks(self):\n router_id = 'router-id'\n expected = ('network_id1', 'network_id2')\n ports = [{'network_id': 'network_id1', 'something': 42},\n {'network_id': 'network_id2', 'something_else': 32}]\n self.handler.plugin_rpc.get_ports.return_value = ports\n networks = self.handler._get_router_networks(router_id)\n self.assertEqual(expected, networks)\n\n def test_get_ports_for_remote_address(self):\n ip = '1.1.1.1'\n networks = ('network_id1', 'network_id2')\n expected = [{'port_id': 'port_id1'},\n {'port_id': 'port_id2'}]\n self.handler.plugin_rpc.get_ports.return_value = expected\n ports = self.handler._get_ports_for_remote_address(ip, networks)\n self.assertEqual(expected, ports)\n\n\nclass TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase):\n fake_conf = FakeConfCache\n\n def test_call(self):\n req = mock.Mock()\n with mock.patch.object(self.handler,\n '_get_instance_and_tenant_id') as get_ids:\n get_ids.return_value = ('instance_id', 'tenant_id')\n with mock.patch.object(self.handler, '_proxy_request') as proxy:\n proxy.return_value = 'value'\n\n retval = self.handler(req)\n self.assertEqual(retval, 'value')\n\n def test_call_no_instance_match(self):\n req = mock.Mock()\n with mock.patch.object(self.handler,\n '_get_instance_and_tenant_id') as get_ids:\n get_ids.return_value = None, None\n retval = self.handler(req)\n self.assertIsInstance(retval, webob.exc.HTTPNotFound)\n\n def test_call_internal_server_error(self):\n req = mock.Mock()\n with mock.patch.object(self.handler,\n '_get_instance_and_tenant_id') as get_ids:\n get_ids.side_effect = Exception\n retval = self.handler(req)\n self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)\n self.assertEqual(len(self.log.mock_calls), 2)\n\n def test_get_router_networks(self):\n router_id = 'router-id'\n expected = ('network_id1', 'network_id2')\n ports = [{'network_id': 
'network_id1', 'something': 42},\n {'network_id': 'network_id2', 'something_else': 32}]\n mock_get_ports = self.handler.plugin_rpc.get_ports\n mock_get_ports.return_value = ports\n networks = self.handler._get_router_networks(router_id)\n mock_get_ports.assert_called_once_with(\n mock.ANY,\n {'device_id': [router_id],\n 'device_owner': n_const.ROUTER_INTERFACE_OWNERS})\n self.assertEqual(expected, networks)\n\n def _test_get_router_networks_twice_helper(self):\n router_id = 'router-id'\n ports = [{'network_id': 'network_id1', 'something': 42}]\n expected_networks = ('network_id1',)\n with mock.patch(\n 'oslo_utils.timeutils.utcnow_ts', return_value=0):\n mock_get_ports = self.handler.plugin_rpc.get_ports\n mock_get_ports.return_value = ports\n networks = self.handler._get_router_networks(router_id)\n mock_get_ports.assert_called_once_with(\n mock.ANY,\n {'device_id': [router_id],\n 'device_owner': n_const.ROUTER_INTERFACE_OWNERS})\n self.assertEqual(expected_networks, networks)\n networks = self.handler._get_router_networks(router_id)\n\n def test_get_router_networks_twice(self):\n self._test_get_router_networks_twice_helper()\n self.assertEqual(\n 1, self.handler.plugin_rpc.get_ports.call_count)\n\n def _get_ports_for_remote_address_cache_hit_helper(self):\n remote_address = 'remote_address'\n networks = ('net1', 'net2')\n mock_get_ports = self.handler.plugin_rpc.get_ports\n mock_get_ports.return_value = [{'network_id': 'net1', 'something': 42}]\n self.handler._get_ports_for_remote_address(remote_address, networks)\n mock_get_ports.assert_called_once_with(\n mock.ANY,\n {'network_id': networks,\n 'fixed_ips': {'ip_address': [remote_address]}}\n )\n self.assertEqual(1, mock_get_ports.call_count)\n self.handler._get_ports_for_remote_address(remote_address,\n networks)\n\n def test_get_ports_for_remote_address_cache_hit(self):\n self._get_ports_for_remote_address_cache_hit_helper()\n self.assertEqual(\n 1, self.handler.plugin_rpc.get_ports.call_count)\n\n def test_get_ports_network_id(self):\n network_id = 'network-id'\n router_id = 'router-id'\n remote_address = 'remote-address'\n expected = ['port1']\n networks = (network_id,)\n with mock.patch.object(self.handler,\n '_get_ports_for_remote_address'\n ) as mock_get_ip_addr,\\\n mock.patch.object(self.handler,\n '_get_router_networks'\n ) as mock_get_router_networks:\n mock_get_ip_addr.return_value = expected\n ports = self.handler._get_ports(remote_address, network_id,\n router_id)\n mock_get_ip_addr.assert_called_once_with(remote_address,\n networks)\n self.assertFalse(mock_get_router_networks.called)\n self.assertEqual(expected, ports)\n\n def test_get_ports_router_id(self):\n router_id = 'router-id'\n remote_address = 'remote-address'\n expected = ['port1']\n networks = ('network1', 'network2')\n with mock.patch.object(self.handler,\n '_get_ports_for_remote_address',\n return_value=expected\n ) as mock_get_ip_addr,\\\n mock.patch.object(self.handler,\n '_get_router_networks',\n return_value=networks\n ) as mock_get_router_networks:\n ports = self.handler._get_ports(remote_address,\n router_id=router_id)\n mock_get_router_networks.assert_called_once_with(router_id)\n mock_get_ip_addr.assert_called_once_with(remote_address, networks)\n self.assertEqual(expected, ports)\n\n def test_get_ports_no_id(self):\n self.assertRaises(TypeError, self.handler._get_ports, 'remote_address')\n\n def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,\n networks=None, router_id=None):\n remote_address = '192.168.1.1'\n 
headers['X-Forwarded-For'] = remote_address\n req = mock.Mock(headers=headers)\n\n def mock_get_ports(*args, **kwargs):\n return list_ports_retval.pop(0)\n\n self.handler.plugin_rpc.get_ports.side_effect = mock_get_ports\n instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)\n\n expected = []\n\n if router_id:\n expected.append(\n mock.call(\n mock.ANY,\n {'device_id': [router_id],\n 'device_owner': n_const.ROUTER_INTERFACE_OWNERS}\n )\n )\n\n expected.append(\n mock.call(\n mock.ANY,\n {'network_id': networks,\n 'fixed_ips': {'ip_address': ['192.168.1.1']}}\n )\n )\n\n self.handler.plugin_rpc.get_ports.assert_has_calls(expected)\n\n return (instance_id, tenant_id)\n\n def test_get_instance_id_router_id(self):\n router_id = 'the_id'\n headers = {\n 'X-Neutron-Router-ID': router_id\n }\n\n networks = ('net1', 'net2')\n ports = [\n [{'network_id': 'net1'}, {'network_id': 'net2'}],\n [{'device_id': 'device_id', 'tenant_id': 'tenant_id',\n 'network_id': 'net1'}]\n ]\n\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=networks,\n router_id=router_id),\n ('device_id', 'tenant_id')\n )\n\n def test_get_instance_id_router_id_no_match(self):\n router_id = 'the_id'\n headers = {\n 'X-Neutron-Router-ID': router_id\n }\n\n networks = ('net1', 'net2')\n ports = [\n [{'network_id': 'net1'}, {'network_id': 'net2'}],\n []\n ]\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=networks,\n router_id=router_id),\n (None, None)\n )\n\n def test_get_instance_id_network_id(self):\n network_id = 'the_id'\n headers = {\n 'X-Neutron-Network-ID': network_id\n }\n\n ports = [\n [{'device_id': 'device_id',\n 'tenant_id': 'tenant_id',\n 'network_id': 'the_id'}]\n ]\n\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=('the_id',)),\n ('device_id', 'tenant_id')\n )\n\n def test_get_instance_id_network_id_no_match(self):\n network_id = 'the_id'\n headers = {\n 'X-Neutron-Network-ID': network_id\n }\n\n ports = [[]]\n\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=('the_id',)),\n (None, None)\n )\n\n def _proxy_request_test_helper(self, response_code=200, method='GET'):\n hdrs = {'X-Forwarded-For': '8.8.8.8'}\n body = 'body'\n\n req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,\n method=method, body=body)\n resp = mock.MagicMock(status=response_code)\n req.response = resp\n with mock.patch.object(self.handler, '_sign_instance_id') as sign:\n sign.return_value = 'signed'\n with mock.patch('httplib2.Http') as mock_http:\n resp.__getitem__.return_value = \"text/plain\"\n mock_http.return_value.request.return_value = (resp, 'content')\n\n retval = self.handler._proxy_request('the_id', 'tenant_id',\n req)\n mock_http.assert_called_once_with(\n ca_certs=None, disable_ssl_certificate_validation=True)\n mock_http.assert_has_calls([\n mock.call().add_certificate(\n FakeConf.nova_client_priv_key,\n FakeConf.nova_client_cert,\n \"%s:%s\" % (FakeConf.nova_metadata_ip,\n FakeConf.nova_metadata_port)\n ),\n mock.call().request(\n 'http://9.9.9.9:8775/the_path',\n method=method,\n headers={\n 'X-Forwarded-For': '8.8.8.8',\n 'X-Instance-ID-Signature': 'signed',\n 'X-Instance-ID': 'the_id',\n 'X-Tenant-ID': 'tenant_id'\n },\n body=body\n )]\n )\n\n return retval\n\n def test_proxy_request_post(self):\n response = self._proxy_request_test_helper(method='POST')\n self.assertEqual(response.content_type, \"text/plain\")\n 
self.assertEqual(response.body, 'content')\n\n def test_proxy_request_200(self):\n response = self._proxy_request_test_helper(200)\n self.assertEqual(response.content_type, \"text/plain\")\n self.assertEqual(response.body, 'content')\n\n def test_proxy_request_400(self):\n self.assertIsInstance(self._proxy_request_test_helper(400),\n webob.exc.HTTPBadRequest)\n\n def test_proxy_request_403(self):\n self.assertIsInstance(self._proxy_request_test_helper(403),\n webob.exc.HTTPForbidden)\n\n def test_proxy_request_404(self):\n self.assertIsInstance(self._proxy_request_test_helper(404),\n webob.exc.HTTPNotFound)\n\n def test_proxy_request_409(self):\n self.assertIsInstance(self._proxy_request_test_helper(409),\n webob.exc.HTTPConflict)\n\n def test_proxy_request_500(self):\n self.assertIsInstance(self._proxy_request_test_helper(500),\n webob.exc.HTTPInternalServerError)\n\n def test_proxy_request_other_code(self):\n with testtools.ExpectedException(Exception):\n self._proxy_request_test_helper(302)\n\n def test_sign_instance_id(self):\n self.assertEqual(\n self.handler._sign_instance_id('foo'),\n '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'\n )\n\n\nclass TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache):\n fake_conf = FakeConf\n\n def test_get_router_networks_twice(self):\n self._test_get_router_networks_twice_helper()\n self.assertEqual(\n 2, self.handler.plugin_rpc.get_ports.call_count)\n\n def test_get_ports_for_remote_address_cache_hit(self):\n self._get_ports_for_remote_address_cache_hit_helper()\n self.assertEqual(\n 2, self.handler.plugin_rpc.get_ports.call_count)\n\n\nclass TestUnixDomainMetadataProxy(base.BaseTestCase):\n def setUp(self):\n super(TestUnixDomainMetadataProxy, self).setUp()\n self.cfg_p = mock.patch.object(agent, 'cfg')\n self.cfg = self.cfg_p.start()\n looping_call_p = mock.patch(\n 'oslo_service.loopingcall.FixedIntervalLoopingCall')\n self.looping_mock = looping_call_p.start()\n self.cfg.CONF.metadata_proxy_socket = '/the/path'\n self.cfg.CONF.metadata_workers = 0\n self.cfg.CONF.metadata_backlog = 128\n self.cfg.CONF.metadata_proxy_socket_mode = config.USER_MODE\n\n @mock.patch.object(utils, 'ensure_dir')\n def test_init_doesnot_exists(self, ensure_dir):\n agent.UnixDomainMetadataProxy(mock.Mock())\n ensure_dir.assert_called_once_with('/the')\n\n def test_init_exists(self):\n with mock.patch('os.path.isdir') as isdir:\n with mock.patch('os.unlink') as unlink:\n isdir.return_value = True\n agent.UnixDomainMetadataProxy(mock.Mock())\n unlink.assert_called_once_with('/the/path')\n\n def test_init_exists_unlink_no_file(self):\n with mock.patch('os.path.isdir') as isdir:\n with mock.patch('os.unlink') as unlink:\n with mock.patch('os.path.exists') as exists:\n isdir.return_value = True\n exists.return_value = False\n unlink.side_effect = OSError\n\n agent.UnixDomainMetadataProxy(mock.Mock())\n unlink.assert_called_once_with('/the/path')\n\n def test_init_exists_unlink_fails_file_still_exists(self):\n with mock.patch('os.path.isdir') as isdir:\n with mock.patch('os.unlink') as unlink:\n with mock.patch('os.path.exists') as exists:\n isdir.return_value = True\n exists.return_value = True\n unlink.side_effect = OSError\n\n with testtools.ExpectedException(OSError):\n agent.UnixDomainMetadataProxy(mock.Mock())\n unlink.assert_called_once_with('/the/path')\n\n @mock.patch.object(agent, 'MetadataProxyHandler')\n @mock.patch.object(agent_utils, 'UnixDomainWSGIServer')\n @mock.patch.object(utils, 'ensure_dir')\n def test_run(self, ensure_dir, 
server, handler):\n p = agent.UnixDomainMetadataProxy(self.cfg.CONF)\n p.run()\n\n ensure_dir.assert_called_once_with('/the')\n server.assert_has_calls([\n mock.call('neutron-metadata-agent'),\n mock.call().start(handler.return_value,\n '/the/path', workers=0,\n backlog=128, mode=0o644),\n mock.call().wait()]\n )\n\n def test_main(self):\n with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:\n with mock.patch.object(metadata_agent, 'config') as config:\n with mock.patch.object(metadata_agent, 'cfg') as cfg:\n with mock.patch.object(utils, 'cfg'):\n metadata_agent.main()\n\n self.assertTrue(config.setup_logging.called)\n proxy.assert_has_calls([\n mock.call(cfg.CONF),\n mock.call().run()]\n )\n\n def test_init_state_reporting(self):\n with mock.patch('os.makedirs'):\n proxy = agent.UnixDomainMetadataProxy(mock.Mock())\n self.looping_mock.assert_called_once_with(proxy._report_state)\n self.looping_mock.return_value.start.assert_called_once_with(\n interval=mock.ANY)\n\n def test_report_state(self):\n with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:\n with mock.patch('os.makedirs'):\n proxy = agent.UnixDomainMetadataProxy(mock.Mock())\n self.assertTrue(proxy.agent_state['start_flag'])\n proxy._report_state()\n self.assertNotIn('start_flag', proxy.agent_state)\n state_api_inst = state_api.return_value\n state_api_inst.report_state.assert_called_once_with(\n proxy.context, proxy.agent_state, use_call=True)\n"}}},{"rowIdx":284639,"cells":{"repo_name":{"kind":"string","value":"pixelrebel/st2"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"st2common/tests/unit/test_util_mistral_dsl_transform.py"},"copies":{"kind":"string","value":"7"},"content":{"kind":"string","value":"# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport six\nimport yaml\n\nfrom st2tests import DbTestCase\nfrom st2tests.fixturesloader import FixturesLoader\nfrom st2common.exceptions.workflow import WorkflowDefinitionException\nfrom st2common.models.api.action import ActionAPI, RunnerTypeAPI\nfrom st2common.persistence.action import Action\nfrom st2common.persistence.runner import RunnerType\nfrom st2common.util.workflow import mistral as utils\n\n\nWB_PRE_XFORM_FILE = 'wb_pre_xform.yaml'\nWB_POST_XFORM_FILE = 'wb_post_xform.yaml'\nWF_PRE_XFORM_FILE = 'wf_pre_xform.yaml'\nWF_POST_XFORM_FILE = 'wf_post_xform.yaml'\nWF_NO_REQ_PARAM_FILE = 'wf_missing_required_param.yaml'\nWF_UNEXP_PARAM_FILE = 'wf_has_unexpected_param.yaml'\n\nTEST_FIXTURES = {\n 'workflows': [\n WB_PRE_XFORM_FILE,\n WB_POST_XFORM_FILE,\n WF_PRE_XFORM_FILE,\n WF_POST_XFORM_FILE,\n WF_NO_REQ_PARAM_FILE,\n WF_UNEXP_PARAM_FILE\n ],\n 'actions': [\n 'local.yaml',\n 'a1.yaml',\n 'a2.yaml',\n 'action1.yaml'\n ],\n 'runners': [\n 'run-local.yaml',\n 'testrunner1.yaml',\n 'testrunner2.yaml'\n ]\n}\n\nPACK = 'generic'\nLOADER = FixturesLoader()\nFIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)\nWB_PRE_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB_PRE_XFORM_FILE)\nWB_PRE_XFORM_DEF = FIXTURES['workflows'][WB_PRE_XFORM_FILE]\nWB_POST_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB_POST_XFORM_FILE)\nWB_POST_XFORM_DEF = FIXTURES['workflows'][WB_POST_XFORM_FILE]\nWF_PRE_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_PRE_XFORM_FILE)\nWF_PRE_XFORM_DEF = FIXTURES['workflows'][WF_PRE_XFORM_FILE]\nWF_POST_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_POST_XFORM_FILE)\nWF_POST_XFORM_DEF = FIXTURES['workflows'][WF_POST_XFORM_FILE]\nWF_NO_REQ_PARAM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_NO_REQ_PARAM_FILE)\nWF_NO_REQ_PARAM_DEF = FIXTURES['workflows'][WF_NO_REQ_PARAM_FILE]\nWF_UNEXP_PARAM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_UNEXP_PARAM_FILE)\nWF_UNEXP_PARAM_DEF = FIXTURES['workflows'][WF_UNEXP_PARAM_FILE]\n\n\nclass DSLTransformTestCase(DbTestCase):\n\n @classmethod\n def setUpClass(cls):\n super(DSLTransformTestCase, cls).setUpClass()\n\n for _, fixture in six.iteritems(FIXTURES['runners']):\n instance = RunnerTypeAPI(**fixture)\n RunnerType.add_or_update(RunnerTypeAPI.to_model(instance))\n\n for _, fixture in six.iteritems(FIXTURES['actions']):\n instance = ActionAPI(**fixture)\n Action.add_or_update(ActionAPI.to_model(instance))\n\n def _read_file_content(self, path):\n with open(path, 'r') as f:\n return f.read()\n\n def _read_yaml_file_as_json(self, path):\n def_yaml = self._read_file_content(path)\n return yaml.safe_load(def_yaml)\n\n def test_transform_workbook_dsl_yaml(self):\n def_yaml = self._read_file_content(WB_PRE_XFORM_PATH)\n new_def = utils.transform_definition(def_yaml)\n actual = yaml.safe_load(new_def)\n expected = copy.deepcopy(WB_POST_XFORM_DEF)\n self.assertDictEqual(actual, expected)\n\n def test_transform_workbook_dsl_dict(self):\n def_dict = 
self._read_yaml_file_as_json(WB_PRE_XFORM_PATH)\n actual = utils.transform_definition(def_dict)\n expected = copy.deepcopy(WB_POST_XFORM_DEF)\n self.assertDictEqual(actual, expected)\n\n def test_transform_workflow_dsl_yaml(self):\n def_yaml = self._read_file_content(WF_PRE_XFORM_PATH)\n new_def = utils.transform_definition(def_yaml)\n actual = yaml.safe_load(new_def)\n expected = copy.deepcopy(WF_POST_XFORM_DEF)\n self.assertDictEqual(actual, expected)\n\n def test_transform_workflow_dsl_dict(self):\n def_dict = self._read_yaml_file_as_json(WF_PRE_XFORM_PATH)\n actual = utils.transform_definition(def_dict)\n expected = copy.deepcopy(WF_POST_XFORM_DEF)\n self.assertDictEqual(actual, expected)\n\n def test_required_action_params_failure(self):\n def_dict = self._read_yaml_file_as_json(WF_NO_REQ_PARAM_PATH)\n\n with self.assertRaises(WorkflowDefinitionException) as cm:\n utils.transform_definition(def_dict)\n\n self.assertIn('Missing required parameters', cm.exception.message)\n\n def test_unexpected_action_params_failure(self):\n def_dict = self._read_yaml_file_as_json(WF_UNEXP_PARAM_PATH)\n\n with self.assertRaises(WorkflowDefinitionException) as cm:\n utils.transform_definition(def_dict)\n\n self.assertIn('Unexpected parameters', cm.exception.message)\n\n def test_deprecated_callback_action(self):\n def_dict = self._read_yaml_file_as_json(WB_PRE_XFORM_PATH)\n def_dict['workflows']['main']['tasks']['callback'] = {'action': 'st2.callback'}\n def_yaml = yaml.safe_dump(def_dict)\n self.assertRaises(WorkflowDefinitionException, utils.transform_definition, def_yaml)\n"}}},{"rowIdx":284640,"cells":{"repo_name":{"kind":"string","value":"Davideddu/kivy-forkedtouch"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"kivy/core/video/video_null.py"},"copies":{"kind":"string","value":"81"},"content":{"kind":"string","value":"\n'''\nVideoNull: empty implementation of VideoBase for the no provider case\n'''\n\nfrom kivy.core.video import VideoBase\n\n\nclass VideoNull(VideoBase):\n '''VideoBase implementation when there is no provider.\n '''\n pass\n"}}},{"rowIdx":284641,"cells":{"repo_name":{"kind":"string","value":"LUTAN/tensorflow"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py"},"copies":{"kind":"string","value":"81"},"content":{"kind":"string","value":"# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Support for creating Stochastic Tensors.\n\nSee the @{$python/contrib.bayesflow.stochastic_tensor} guide.\n\n@@BaseStochasticTensor\n@@StochasticTensor\n@@MeanValue\n@@SampleValue\n@@value_type\n@@get_current_value_type\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.contrib.bayesflow.python.ops.stochastic_tensor_impl import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n\n_allowed_symbols = [\n \"BaseStochasticTensor\",\n \"StochasticTensor\",\n \"ObservedStochasticTensor\",\n \"MeanValue\",\n \"SampleValue\",\n \"value_type\",\n \"get_current_value_type\",\n]\n\nremove_undocumented(__name__, _allowed_symbols)\n"}}},{"rowIdx":284642,"cells":{"repo_name":{"kind":"string","value":"jsilhan/dnf"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"dnf/cli/commands/upgrade.py"},"copies":{"kind":"string","value":"1"},"content":{"kind":"string","value":"# upgrade.py\n# Upgrade CLI command.\n#\n# Copyright (C) 2014-2016 Red Hat, Inc.\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# the GNU General Public License v.2, or (at your option) any later version.\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY expressed or implied, including the implied warranties of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details. You should have received a copy of the\n# GNU General Public License along with this program; if not, write to the\n# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the\n# source code or documentation are not subject to the GNU General Public\n# License and may only be used or replicated with the express permission of\n# Red Hat, Inc.\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom dnf.cli import commands\nfrom dnf.i18n import _\nfrom dnf.cli.option_parser import OptionParser\n\nimport dnf.exceptions\nimport logging\n\nlogger = logging.getLogger('dnf')\n\n\nclass UpgradeCommand(commands.Command):\n \"\"\"A class containing methods needed by the cli to execute the\n update command.\n \"\"\"\n aliases = ('upgrade', 'update', 'upgrade-to', 'update-to')\n summary = _('upgrade a package or packages on your system')\n\n @staticmethod\n def set_argparser(parser):\n parser.add_argument('packages', nargs='*', help=_('Package to upgrade'),\n action=OptionParser.ParseSpecGroupFileCallback,\n metavar=_('PACKAGE'))\n\n def configure(self):\n \"\"\"Verify that conditions are met so that this command can run.\n\n These include that there are enabled repositories with gpg\n keys, and that this command is being run by the root user.\n \"\"\"\n demands = self.cli.demands\n demands.sack_activation = True\n demands.available_repos = True\n demands.resolving = True\n demands.root_user = True\n commands._checkGPGKey(self.base, self.cli)\n commands._checkEnabledRepo(self.base, self.opts.filenames)\n self.upgrade_minimal = None\n self.all_security = None\n\n def run(self):\n self.cli._populate_update_security_filter(self.opts,\n minimal=self.upgrade_minimal,\n all=self.all_security)\n done = False\n if self.opts.filenames or self.opts.pkg_specs or self.opts.grp_specs:\n # Update files.\n if self.opts.filenames:\n for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=False):\n try:\n self.base.package_upgrade(pkg)\n except dnf.exceptions.MarkingError as e:\n logger.info(_('No match for argument: %s'),\n self.base.output.term.bold(pkg.location))\n else:\n done = True\n\n # Update packages.\n for pkg_spec in self.opts.pkg_specs:\n try:\n self.base.upgrade(pkg_spec)\n except dnf.exceptions.MarkingError as e:\n logger.info(_('No match for argument: %s'),\n self.base.output.term.bold(pkg_spec))\n else:\n done = True\n\n # Update groups.\n if self.opts.grp_specs:\n self.base.read_comps(arch_filter=True)\n self.base.env_group_upgrade(self.opts.grp_specs)\n done = True\n else:\n # Update all packages.\n self.base.upgrade_all()\n done = True\n if not done:\n raise dnf.exceptions.Error(_('No packages marked for upgrade.'))\n"}}},{"rowIdx":284643,"cells":{"repo_name":{"kind":"string","value":"willprice/weboob"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"modules/ilmatieteenlaitos/pages.py"},"copies":{"kind":"string","value":"5"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# Copyright(C) 2015 Matthieu Weber\n#\n# This file is part of weboob.\n#\n# weboob is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# weboob is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with weboob. If not, see .\n\nfrom datetime import date\nfrom itertools import imap, ifilter\n\nfrom weboob.browser.pages import JsonPage, HTMLPage\nfrom weboob.browser.elements import ItemElement, ListElement, DictElement, method\nfrom weboob.capabilities.weather import Forecast, Current, City, Temperature\nfrom weboob.browser.filters.json import Dict\nfrom weboob.browser.filters.standard import Filter, CleanText, CleanDecimal, Regexp, Format, Date\n\n\nclass Id(Filter):\n def filter(self, txt):\n return txt.split(\", \")[0]\n\n\nclass SearchCitiesPage(JsonPage):\n @method\n class iter_cities(DictElement):\n item_xpath = '.'\n ignore_duplicate = True\n\n class item(ItemElement):\n klass = City\n\n obj_id = Id(Dict('id'))\n obj_name = Dict('value')\n\n\nclass WeatherPage(HTMLPage):\n @method\n class iter_forecast(ListElement):\n item_xpath = ('//div[contains(@class, \"mid\") and contains(@class, \"local-weather-forecast\")]//'\n 'tr[@class=\"meteogram-dates\"]/td')\n\n class item(ItemElement):\n klass = Forecast\n\n obj_id = CleanText('.//span/@title')\n\n def obj_date(self):\n months = [u'tammikuuta', u'helmikuuta', u'maaliskuuta', u'huhtikuuta', u'toukokuuta', u'kesäkuuta',\n u'heinäkuuta', u'elokuuta', u'syyskuuta', u'lokakuuta', u'marraskuuta', u'joulukuuta']\n d = CleanText('.//span/@title')(self).split()\n return date(int(d[2]), months.index(d[1])+1, int(d[0].strip(\".\")))\n\n def temperatures(self):\n offset = int(CleanText('string(sum(./preceding-sibling::td/@colspan))')(self))\n length = int(CleanText('@colspan')(self))\n temps = CleanText('../../../tbody/tr[@class=\"meteogram-temperatures\"]/td[position() > %d '\n 'and position() <= %d]/span' % (offset, offset+length))(self)\n return [float(_.strip(u'\\xb0')) for _ in temps.split()]\n\n def obj_low(self):\n return Temperature(min(self.temperatures()), u'C')\n\n def obj_high(self):\n return Temperature(max(self.temperatures()), u'C')\n\n def obj_text(self):\n offset = int(CleanText('string(sum(./preceding-sibling::td/@colspan))')(self))\n length = int(CleanText('@colspan')(self))\n hour_test = ('../../tr[@class=\"meteogram-times\"]/td[position() > %d and position() <= %d '\n 'and .//text() = \"%%s\"]' % (offset, offset+length))\n hour_offset = 'string(count(%s/preceding-sibling::td)+1)' % (hour_test)\n values = [\n '../../../tbody/tr[@class=\"meteogram-weather-symbols\"]/td[position() = %d]/div/@title',\n '../../../tbody/tr[@class=\"meteogram-apparent-temperatures\"]/td[position() = %d]/div/@title',\n '../../../tbody/tr[@class=\"meteogram-wind-symbols\"]/td[position() = %d]/div/@title',\n '../../../tbody/tr[@class=\"meteogram-probabilities-of-precipitation\"]/td[position() = %d]' +\n '/div/@title',\n '../../../tbody/tr[@class=\"meteogram-hourly-precipitation-values\"]/td[position() = %d]/span/@title',\n ]\n\n def descriptive_text_for_hour(hour):\n hour_exists = CleanText(hour_test % hour)(self) == hour\n if hour_exists:\n offset = int(CleanText(hour_offset % hour)(self))\n\n def info_for_value(value):\n return CleanText(value % offset)(self).replace(u'edeltävän tunnin ', u'')\n return (\"klo %s: \" % hour) + \", \".join(ifilter(bool, imap(info_for_value, values)))\n\n return u'\\n' + u'\\n'.join(ifilter(bool, imap(descriptive_text_for_hour, [\"02\", \"14\"])))\n\n @method\n class get_current(ItemElement):\n klass = Current\n\n obj_id = date.today()\n obj_date = 
Date(Regexp(CleanText('//table[@class=\"observation-text\"]//span[@class=\"time-stamp\"]'),\n r'^(\\d+\\.\\d+.\\d+)'))\n obj_text = Format(u'%s, %s, %s',\n CleanText(u'(//table[@class=\"observation-text\"])//tr[2]/td[2]'),\n CleanText(u'(//table[@class=\"observation-text\"])//tr[5]/td[1]'),\n CleanText(u'(//table[@class=\"observation-text\"])//tr[4]/td[2]'))\n\n def obj_temp(self):\n path = u'//table[@class=\"observation-text\"]//span[@class=\"parameter-name\" and text() = \"Lämpötila\"]' + \\\n u'/../span[@class=\"parameter-value\"]'\n temp = CleanDecimal(Regexp(CleanText(path), r'^([^ \\xa0]+)'), replace_dots=True)(self)\n unit = Regexp(CleanText(path), r'\\xb0(\\w)')(self)\n return Temperature(float(temp), unit)\n"}}},{"rowIdx":284644,"cells":{"repo_name":{"kind":"string","value":"dkodnik/Ant"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"addons/account_analytic_analysis/res_config.py"},"copies":{"kind":"string","value":"426"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Business Applications\n# Copyright (C) 2004-2012 OpenERP S.A. ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\nclass sale_configuration(osv.osv_memory):\n _inherit = 'sale.config.settings'\n\n _columns = {\n 'group_template_required': fields.boolean(\"Mandatory use of templates.\",\n implied_group='account_analytic_analysis.group_template_required',\n help=\"Allows you to set the template field as required when creating an analytic account or a contract.\"),\n }\n"}}},{"rowIdx":284645,"cells":{"repo_name":{"kind":"string","value":"Sodki/ansible-modules-extras"},"ref":{"kind":"string","value":"refs/heads/devel"},"path":{"kind":"string","value":"monitoring/pagerduty.py"},"copies":{"kind":"string","value":"132"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see .\n\nDOCUMENTATION = '''\n\nmodule: pagerduty\nshort_description: Create PagerDuty maintenance windows\ndescription:\n - This module will let you create PagerDuty maintenance windows\nversion_added: \"1.2\"\nauthor:\n - \"Andrew Newdigate (@suprememoocow)\"\n - \"Dylan Silva (@thaumos)\"\n - \"Justin Johns\"\n - \"Bruce Pennypacker\"\nrequirements:\n - PagerDuty API access\noptions:\n state:\n description:\n - Create a maintenance window or get a list of ongoing windows.\n required: true\n default: null\n choices: [ \"running\", \"started\", \"ongoing\", \"absent\" ]\n aliases: []\n name:\n description:\n - PagerDuty unique subdomain.\n required: true\n default: null\n choices: []\n aliases: []\n user:\n description:\n - PagerDuty user ID.\n required: true\n default: null\n choices: []\n aliases: []\n passwd:\n description:\n - PagerDuty user password.\n required: true\n default: null\n choices: []\n aliases: []\n token:\n description:\n - A pagerduty token, generated on the pagerduty site. Can be used instead of\n user/passwd combination.\n required: true\n default: null\n choices: []\n aliases: []\n version_added: '1.8'\n requester_id:\n description:\n - ID of user making the request. Only needed when using a token and creating a maintenance_window.\n required: true\n default: null\n choices: []\n aliases: []\n version_added: '1.8'\n service:\n description:\n - A comma separated list of PagerDuty service IDs.\n required: false\n default: null\n choices: []\n aliases: [ services ]\n hours:\n description:\n - Length of maintenance window in hours.\n required: false\n default: 1\n choices: []\n aliases: []\n minutes:\n description:\n - Maintenance window in minutes (this is added to the hours).\n required: false\n default: 0\n choices: []\n aliases: []\n version_added: '1.8'\n desc:\n description:\n - Short description of maintenance window.\n required: false\n default: Created by Ansible\n choices: []\n aliases: []\n validate_certs:\n description:\n - If C(no), SSL certificates will not be validated. 
This should only be used\n on personally controlled sites using self-signed certificates.\n required: false\n default: 'yes'\n choices: ['yes', 'no']\n version_added: 1.5.1\n'''\n\nEXAMPLES='''\n# List ongoing maintenance windows using a user/passwd\n- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing\n\n# List ongoing maintenance windows using a token\n- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing\n\n# Create a 1 hour maintenance window for service FOO123, using a user/passwd\n- pagerduty: name=companyabc\n user=example@example.com\n passwd=password123\n state=running\n service=FOO123\n\n# Create a 5 minute maintenance window for service FOO123, using a token\n- pagerduty: name=companyabc\n token=xxxxxxxxxxxxxx\n hours=0\n minutes=5\n state=running\n service=FOO123\n\n\n# Create a 4 hour maintenance window for service FOO123 with the description \"deployment\".\n- pagerduty: name=companyabc\n user=example@example.com\n passwd=password123\n state=running\n service=FOO123\n hours=4\n desc=deployment\n register: pd_window\n\n# Delete the previous maintenance window\n- pagerduty: name=companyabc\n user=example@example.com\n passwd=password123\n state=absent\n service={{ pd_window.result.maintenance_window.id }}\n'''\n\nimport datetime\nimport base64\n\ndef auth_header(user, passwd, token):\n if token:\n return \"Token token=%s\" % token\n\n auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\\n', '')\n return \"Basic %s\" % auth\n\ndef ongoing(module, name, user, passwd, token):\n url = \"https://\" + name + \".pagerduty.com/api/v1/maintenance_windows/ongoing\"\n headers = {\"Authorization\": auth_header(user, passwd, token)}\n\n response, info = fetch_url(module, url, headers=headers)\n if info['status'] != 200:\n module.fail_json(msg=\"failed to lookup the ongoing window: %s\" % info['msg'])\n\n try:\n json_out = json.loads(response.read())\n except:\n json_out = \"\"\n\n return False, json_out, False\n\n\ndef create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):\n now = datetime.datetime.utcnow()\n later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))\n start = now.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n end = later.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n url = \"https://\" + name + \".pagerduty.com/api/v1/maintenance_windows\"\n headers = {\n 'Authorization': auth_header(user, passwd, token),\n 'Content-Type' : 'application/json',\n }\n request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}\n \n if requester_id:\n request_data['requester_id'] = requester_id\n else:\n if token:\n module.fail_json(msg=\"requester_id is required when using a token\")\n\n data = json.dumps(request_data)\n response, info = fetch_url(module, url, data=data, headers=headers, method='POST')\n if info['status'] != 200:\n module.fail_json(msg=\"failed to create the window: %s\" % info['msg'])\n\n try:\n json_out = json.loads(response.read())\n except:\n json_out = \"\"\n\n return False, json_out, True\n\ndef absent(module, name, user, passwd, token, requester_id, service):\n url = \"https://\" + name + \".pagerduty.com/api/v1/maintenance_windows/\" + service[0]\n headers = {\n 'Authorization': auth_header(user, passwd, token),\n 'Content-Type' : 'application/json',\n }\n request_data = {}\n \n if requester_id:\n request_data['requester_id'] = requester_id\n else:\n if token:\n module.fail_json(msg=\"requester_id is required when using a 
token\")\n\n data = json.dumps(request_data)\n response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')\n if info['status'] != 200:\n module.fail_json(msg=\"failed to delete the window: %s\" % info['msg'])\n\n try:\n json_out = json.loads(response.read())\n except:\n json_out = \"\"\n\n return False, json_out, True\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec=dict(\n state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),\n name=dict(required=True),\n user=dict(required=False),\n passwd=dict(required=False),\n token=dict(required=False),\n service=dict(required=False, type='list', aliases=[\"services\"]),\n requester_id=dict(required=False),\n hours=dict(default='1', required=False),\n minutes=dict(default='0', required=False),\n desc=dict(default='Created by Ansible', required=False),\n validate_certs = dict(default='yes', type='bool'),\n )\n )\n\n state = module.params['state']\n name = module.params['name']\n user = module.params['user']\n passwd = module.params['passwd']\n token = module.params['token']\n service = module.params['service']\n hours = module.params['hours']\n minutes = module.params['minutes']\n token = module.params['token']\n desc = module.params['desc']\n requester_id = module.params['requester_id']\n\n if not token and not (user or passwd):\n module.fail_json(msg=\"neither user and passwd nor token specified\")\n\n if state == \"running\" or state == \"started\":\n if not service:\n module.fail_json(msg=\"service not specified\")\n (rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)\n if rc == 0:\n changed=True\n\n if state == \"ongoing\":\n (rc, out, changed) = ongoing(module, name, user, passwd, token)\n\n if state == \"absent\":\n (rc, out, changed) = absent(module, name, user, passwd, token, requester_id, service)\n\n if rc != 0:\n module.fail_json(msg=\"failed\", result=out)\n\n\n module.exit_json(msg=\"success\", result=out, changed=changed)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\n\nmain()\n"}}},{"rowIdx":284646,"cells":{"repo_name":{"kind":"string","value":"margaritis/iTerm2"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"tools/ply/ply-3.4/test/lex_rule2.py"},"copies":{"kind":"string","value":"174"},"content":{"kind":"string","value":"# lex_rule2.py\n#\n# Rule function with incorrect number of arguments\n\nimport sys\nif \"..\" not in sys.path: sys.path.insert(0,\"..\")\n\nimport ply.lex as lex\n\ntokens = [\n \"PLUS\",\n \"MINUS\",\n \"NUMBER\",\n ]\n\nt_PLUS = r'\\+'\nt_MINUS = r'-'\ndef t_NUMBER():\n r'\\d+'\n return t\n\ndef t_error(t):\n pass\n\n\n\nlex.lex()\n\n\n"}}},{"rowIdx":284647,"cells":{"repo_name":{"kind":"string","value":"JamesClough/networkx"},"ref":{"kind":"string","value":"refs/heads/inverse_line_graph"},"path":{"kind":"string","value":"networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py"},"copies":{"kind":"string","value":"7"},"content":{"kind":"string","value":"#!/usr/bin/env python\nfrom nose.tools import *\nfrom nose import SkipTest\nimport networkx as nx\nfrom nose.plugins.attrib import attr\n\nfrom networkx import edge_current_flow_betweenness_centrality \\\n as edge_current_flow\n\nfrom networkx import edge_current_flow_betweenness_centrality_subset \\\n as edge_current_flow_subset\n\nclass TestFlowBetweennessCentrality(object):\n numpy=1 # nosetests attribute, use 
nosetests -a 'not numpy' to skip test\n @classmethod\n def setupClass(cls):\n global np\n try:\n import numpy as np\n import scipy\n except ImportError:\n raise SkipTest('NumPy not available.')\n\n \n def test_K4_normalized(self):\n \"\"\"Betweenness centrality: K4\"\"\"\n G=nx.complete_graph(4)\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True)\n b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n\n\n def test_K4(self):\n \"\"\"Betweenness centrality: K4\"\"\"\n G=nx.complete_graph(4)\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True)\n b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n # test weighted network\n G.add_edge(0,1,weight=0.5,other=0.3)\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True,\n weight=None)\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True)\n b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True,\n weight='other')\n b_answer=nx.current_flow_betweenness_centrality(G,normalized=True,weight='other')\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n\n\n def test_P4_normalized(self):\n \"\"\"Betweenness centrality: P4 normalized\"\"\"\n G=nx.path_graph(4)\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True)\n b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n\n\n def test_P4(self):\n \"\"\"Betweenness centrality: P4\"\"\"\n G=nx.path_graph(4)\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True)\n b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n\n def test_star(self):\n \"\"\"Betweenness centrality: star \"\"\"\n G=nx.Graph()\n nx.add_star(G, ['a', 'b', 'c', 'd'])\n b=nx.current_flow_betweenness_centrality_subset(G,\n list(G),\n list(G),\n normalized=True)\n b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)\n for n in sorted(G):\n assert_almost_equal(b[n],b_answer[n])\n\n\n\n# class TestWeightedFlowBetweennessCentrality():\n# pass\n\n\nclass TestEdgeFlowBetweennessCentrality(object):\n numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test\n @classmethod\n def setupClass(cls):\n global np\n try:\n import numpy as np\n import scipy\n except ImportError:\n raise SkipTest('NumPy not available.')\n \n def test_K4_normalized(self):\n \"\"\"Betweenness centrality: K4\"\"\"\n G=nx.complete_graph(4)\n b=edge_current_flow_subset(G,list(G),list(G),normalized=True)\n b_answer=edge_current_flow(G,normalized=True)\n for (s,t),v1 in b_answer.items():\n v2=b.get((s,t),b.get((t,s)))\n assert_almost_equal(v1,v2)\n\n def test_K4(self):\n \"\"\"Betweenness centrality: K4\"\"\"\n G=nx.complete_graph(4)\n b=edge_current_flow_subset(G,list(G),list(G),normalized=False)\n b_answer=edge_current_flow(G,normalized=False)\n for (s,t),v1 in b_answer.items():\n v2=b.get((s,t),b.get((t,s)))\n assert_almost_equal(v1,v2)\n # test weighted network\n 
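# with weight=None the added weights should be ignored, so results must still match the unweighted answer\n 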
G.add_edge(0,1,weight=0.5,other=0.3)\n b=edge_current_flow_subset(G,list(G),list(G),normalized=False,weight=None)\n # weight is None => same as unweighted network\n for (s,t),v1 in b_answer.items():\n v2=b.get((s,t),b.get((t,s)))\n assert_almost_equal(v1,v2)\n\n b=edge_current_flow_subset(G,list(G),list(G),normalized=False)\n b_answer=edge_current_flow(G,normalized=False)\n for (s,t),v1 in b_answer.items():\n v2=b.get((s,t),b.get((t,s)))\n assert_almost_equal(v1,v2)\n\n b=edge_current_flow_subset(G,list(G),list(G),normalized=False,weight='other')\n b_answer=edge_current_flow(G,normalized=False,weight='other')\n for (s,t),v1 in b_answer.items():\n v2=b.get((s,t),b.get((t,s)))\n assert_almost_equal(v1,v2)\n\n\n def test_C4(self):\n \"\"\"Edge betweenness centrality: C4\"\"\"\n G=nx.cycle_graph(4)\n b=edge_current_flow_subset(G,list(G),list(G),normalized=True)\n b_answer=edge_current_flow(G,normalized=True)\n for (s,t),v1 in b_answer.items():\n v2=b.get((s,t),b.get((t,s)))\n assert_almost_equal(v1,v2)\n\n\n def test_P4(self):\n \"\"\"Edge betweenness centrality: P4\"\"\"\n G=nx.path_graph(4)\n b=edge_current_flow_subset(G, list(G), list(G), normalized=True)\n b_answer=edge_current_flow(G,normalized=True)\n for (s,t),v1 in b_answer.items():\n v2=b.get((s,t),b.get((t,s)))\n assert_almost_equal(v1,v2)\n\n"}}},{"rowIdx":284648,"cells":{"repo_name":{"kind":"string","value":"numba/numba"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"numba/misc/help/inspector.py"},"copies":{"kind":"string","value":"4"},"content":{"kind":"string","value":"\"\"\"\nThis file contains `__main__` so that it can be run as a commandline tool.\n\nThis file contains functions to inspect Numba's support for a given Python\nmodule or a Python package.\n\"\"\"\n\nimport argparse\nimport pkgutil\nimport warnings\nimport types as pytypes\n\nfrom numba.core import errors\nfrom numba._version import get_versions\nfrom numba.core.registry import cpu_target\nfrom numba.tests.support import captured_stdout\n\n\ndef _get_commit():\n full = get_versions()['full'].split('.')[0]\n if not full:\n warnings.warn(\n \"Cannot find git commit hash. 
Source links could be inaccurate.\",\n category=errors.NumbaWarning,\n )\n return 'master'\n return full\n\n\ncommit = _get_commit()\ngithub_url = 'https://github.com/numba/numba/blob/{commit}/{path}#L{firstline}-L{lastline}' # noqa: E501\n\n\ndef inspect_function(function, target=None):\n \"\"\"Return information about the support of a function.\n\n Returns\n -------\n info : dict\n Defined keys:\n - \"numba_type\": str or None\n The numba type object of the function if supported.\n - \"explained\": str\n A textual description of the support.\n - \"source_infos\": dict\n A dictionary containing the source location of each definition.\n \"\"\"\n target = target or cpu_target\n tyct = target.typing_context\n # Make sure we have loaded all extensions\n tyct.refresh()\n target.target_context.refresh()\n\n info = {}\n # Try getting the function type\n source_infos = {}\n try:\n nbty = tyct.resolve_value_type(function)\n except ValueError:\n nbty = None\n explained = 'not supported'\n else:\n # Make a longer explanation of the type\n explained = tyct.explain_function_type(nbty)\n for temp in nbty.templates:\n try:\n source_infos[temp] = temp.get_source_info()\n except AttributeError:\n source_infos[temp] = None\n\n info['numba_type'] = nbty\n info['explained'] = explained\n info['source_infos'] = source_infos\n return info\n\n\ndef inspect_module(module, target=None, alias=None):\n \"\"\"Inspect a module object and yield results from `inspect_function()`\n for each function object in the module.\n \"\"\"\n alias = {} if alias is None else alias\n # Walk the module\n for name in dir(module):\n if name.startswith('_'):\n # Skip\n continue\n obj = getattr(module, name)\n supported_types = (pytypes.FunctionType, pytypes.BuiltinFunctionType)\n\n if not isinstance(obj, supported_types):\n # Skip if it's not a function\n continue\n\n info = dict(module=module, name=name, obj=obj)\n if obj in alias:\n info['alias'] = alias[obj]\n else:\n alias[obj] = \"{module}.{name}\".format(module=module.__name__,\n name=name)\n info.update(inspect_function(obj, target=target))\n yield info\n\n\nclass _Stat(object):\n \"\"\"For gathering simple statistics of (un)supported functions\"\"\"\n def __init__(self):\n self.supported = 0\n self.unsupported = 0\n\n @property\n def total(self):\n total = self.supported + self.unsupported\n return total\n\n @property\n def ratio(self):\n ratio = self.supported / self.total * 100\n return ratio\n\n def describe(self):\n if self.total == 0:\n return \"empty\"\n return \"supported = {supported} / {total} = {ratio:.2f}%\".format(\n supported=self.supported,\n total=self.total,\n ratio=self.ratio,\n )\n\n def __repr__(self):\n return \"{clsname}({describe})\".format(\n clsname=self.__class__.__name__,\n describe=self.describe(),\n )\n\n\ndef filter_private_module(module_components):\n return not any(x.startswith('_') for x in module_components)\n\n\ndef filter_tests_module(module_components):\n return not any(x == 'tests' for x in module_components)\n\n\n_default_module_filters = (\n filter_private_module,\n filter_tests_module,\n)\n\n\ndef list_modules_in_package(package, module_filters=_default_module_filters):\n \"\"\"Yield all modules in a given package.\n\n Recursively walks the package tree.\n \"\"\"\n onerror_ignore = lambda _: None\n\n prefix = package.__name__ + \".\"\n package_walker = pkgutil.walk_packages(\n package.__path__,\n prefix,\n onerror=onerror_ignore,\n )\n\n def check_filter(modname):\n module_components = modname.split('.')\n return any(not 
filter_fn(module_components)\n for filter_fn in module_filters)\n\n modname = package.__name__\n if not check_filter(modname):\n yield package\n\n for pkginfo in package_walker:\n modname = pkginfo[1]\n if check_filter(modname):\n continue\n # In case importing of the module prints to stdout\n with captured_stdout():\n try:\n # Import the module\n mod = __import__(modname)\n except Exception:\n continue\n\n # Extract the module\n for part in modname.split('.')[1:]:\n try:\n mod = getattr(mod, part)\n except AttributeError:\n # Suppress error in getting the attribute\n mod = None\n break\n\n # Ignore if mod is not a module\n if not isinstance(mod, pytypes.ModuleType):\n # Skip non-module\n continue\n\n yield mod\n\n\nclass Formatter(object):\n \"\"\"Base class for formatters.\n \"\"\"\n def __init__(self, fileobj):\n self._fileobj = fileobj\n\n def print(self, *args, **kwargs):\n kwargs.setdefault('file', self._fileobj)\n print(*args, **kwargs)\n\n\nclass HTMLFormatter(Formatter):\n \"\"\"Formatter that outputs HTML\n \"\"\"\n\n def escape(self, text):\n import html\n return html.escape(text)\n\n def title(self, text):\n self.print('<h1>', text, '</h1>')\n\n def begin_module_section(self, modname):\n self.print('<h2>', modname, '</h2>')\n self.print('<ul>')\n\n def end_module_section(self):\n self.print('</ul>')\n\n def write_supported_item(self, modname, itemname, typename, explained,\n sources, alias):\n self.print('<li>')\n self.print('{}.{}'.format(\n modname,\n itemname,\n ))\n self.print(': {}'.format(typename))\n self.print('<pre>', explained, '</pre>')\n\n self.print(\"<ul>\")\n for tcls, source in sources.items():\n if source:\n self.print(\"<li>\")\n impl = source['name']\n sig = source['sig']\n filename = source['filename']\n lines = source['lines']\n self.print(\n \"<p>defined by {}{} at {}:{}-{}</p>\".format(\n self.escape(impl), self.escape(sig),\n self.escape(filename), lines[0], lines[1],\n ),\n )\n self.print('<p>{}</p>'.format(\n self.escape(source['docstring'] or '')\n ))\n else:\n self.print(\"<li>{}\".format(self.escape(str(tcls))))\n self.print(\"</li>\")\n self.print(\"</ul>\")\n self.print('</li>')\n\n def write_unsupported_item(self, modname, itemname):\n self.print('<li>')\n self.print('{}.{}: UNSUPPORTED'.format(\n modname,\n itemname,\n ))\n self.print('</li>')\n\n def write_statistic(self, stats):\n self.print('<p>{}</p>'.format(stats.describe()))\n\n\nclass ReSTFormatter(Formatter):\n \"\"\"Formatter that outputs ReSTructured text format for Sphinx docs.\n \"\"\"\n def escape(self, text):\n return text\n\n def title(self, text):\n self.print(text)\n self.print('=' * len(text))\n self.print()\n\n def begin_module_section(self, modname):\n self.print(modname)\n self.print('-' * len(modname))\n self.print()\n\n def end_module_section(self):\n self.print()\n\n def write_supported_item(self, modname, itemname, typename, explained,\n sources, alias):\n self.print('.. function:: {}.{}'.format(modname, itemname))\n self.print(' :noindex:')\n self.print()\n\n if alias:\n self.print(\" Alias to: ``{}``\".format(alias))\n self.print()\n\n for tcls, source in sources.items():\n if source:\n impl = source['name']\n sig = source['sig']\n filename = source['filename']\n lines = source['lines']\n source_link = github_url.format(\n commit=commit,\n path=filename,\n firstline=lines[0],\n lastline=lines[1],\n )\n self.print(\n \" - defined by ``{}{}`` at `{}:{}-{} <{}>`_\".format(\n impl, sig, filename, lines[0], lines[1], source_link,\n ),\n )\n\n else:\n self.print(\" - defined by ``{}``\".format(str(tcls)))\n self.print()\n\n def write_unsupported_item(self, modname, itemname):\n pass\n\n def write_statistic(self, stat):\n if stat.supported == 0:\n self.print(\"This module is not supported.\")\n else:\n msg = \"Not showing {} unsupported functions.\"\n self.print(msg.format(stat.unsupported))\n self.print()\n self.print(stat.describe())\n self.print()\n\n\ndef _format_module_infos(formatter, package_name, mod_sequence, target=None):\n \"\"\"Format modules.\n \"\"\"\n formatter.title('Listings for {}'.format(package_name))\n alias_map = {} # remember object seen to track alias\n for mod in mod_sequence:\n stat = _Stat()\n modname = mod.__name__\n formatter.begin_module_section(formatter.escape(modname))\n for info in inspect_module(mod, target=target, alias=alias_map):\n nbtype = info['numba_type']\n if nbtype is not None:\n stat.supported += 1\n formatter.write_supported_item(\n modname=formatter.escape(info['module'].__name__),\n itemname=formatter.escape(info['name']),\n typename=formatter.escape(str(nbtype)),\n explained=formatter.escape(info['explained']),\n sources=info['source_infos'],\n alias=info.get('alias'),\n )\n\n else:\n stat.unsupported += 1\n formatter.write_unsupported_item(\n modname=formatter.escape(info['module'].__name__),\n itemname=formatter.escape(info['name']),\n )\n\n formatter.write_statistic(stat)\n formatter.end_module_section()\n\n\ndef write_listings(package_name, filename, output_format):\n \"\"\"Write listing information into a file.\n\n Parameters\n ----------\n package_name : str\n Name of the package to inspect.\n filename : str\n Output filename. 
Always overwrite.\n output_format : str\n Support formats are \"html\" and \"rst\".\n \"\"\"\n package = __import__(package_name)\n if hasattr(package, '__path__'):\n mods = list_modules_in_package(package)\n else:\n mods = [package]\n\n if output_format == 'html':\n with open(filename + '.html', 'w') as fout:\n fmtr = HTMLFormatter(fileobj=fout)\n _format_module_infos(fmtr, package_name, mods)\n elif output_format == 'rst':\n with open(filename + '.rst', 'w') as fout:\n fmtr = ReSTFormatter(fileobj=fout)\n _format_module_infos(fmtr, package_name, mods)\n else:\n raise ValueError(\n \"Output format '{}' is not supported\".format(output_format))\n\n\nprogram_description = \"\"\"\nInspect Numba support for a given top-level package.\n\"\"\".strip()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=program_description)\n parser.add_argument(\n 'package', metavar='package', type=str,\n help='Package to inspect',\n )\n parser.add_argument(\n '--format', dest='format', default='html',\n help='Output format; i.e. \"html\", \"rst\"',\n )\n parser.add_argument(\n '--file', dest='file', default='inspector_output',\n help='Output filename. Defaults to \"inspector_output.\"',\n )\n\n args = parser.parse_args()\n package_name = args.package\n output_format = args.format\n filename = args.file\n write_listings(package_name, filename, output_format)\n\n\nif __name__ == '__main__':\n main()\n"}}},{"rowIdx":284649,"cells":{"repo_name":{"kind":"string","value":"hydralabs/pyamf"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"doc/tutorials/examples/gateways/appengine/demo/simplejson/__init__.py"},"copies":{"kind":"string","value":"10"},"content":{"kind":"string","value":"r\"\"\"\nA simple, fast, extensible JSON encoder and decoder\n\nJSON (JavaScript Object Notation) is a subset of\nJavaScript syntax (ECMA-262 3rd edition) used as a lightweight data\ninterchange format.\n\nsimplejson exposes an API familiar to uses of the standard library\nmarshal and pickle modules.\n\nEncoding basic Python object hierarchies::\n \n >>> import simplejson\n >>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])\n '[\"foo\", {\"bar\": [\"baz\", null, 1.0, 2]}]'\n >>> print simplejson.dumps(\"\\\"foo\\bar\")\n \"\\\"foo\\bar\"\n >>> print simplejson.dumps(u'\\u1234')\n \"\\u1234\"\n >>> print simplejson.dumps('\\\\')\n \"\\\\\"\n >>> print simplejson.dumps({\"c\": 0, \"b\": 0, \"a\": 0}, sort_keys=True)\n {\"a\": 0, \"b\": 0, \"c\": 0}\n >>> from StringIO import StringIO\n >>> io = StringIO()\n >>> simplejson.dump(['streaming API'], io)\n >>> io.getvalue()\n '[\"streaming API\"]'\n\nCompact encoding::\n\n >>> import simplejson\n >>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))\n '[1,2,3,{\"4\":5,\"6\":7}]'\n\nPretty printing::\n\n >>> import simplejson\n >>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)\n {\n \"4\": 5, \n \"6\": 7\n }\n\nDecoding JSON::\n \n >>> import simplejson\n >>> simplejson.loads('[\"foo\", {\"bar\":[\"baz\", null, 1.0, 2]}]')\n [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]\n >>> simplejson.loads('\"\\\\\"foo\\\\bar\"')\n u'\"foo\\x08ar'\n >>> from StringIO import StringIO\n >>> io = StringIO('[\"streaming API\"]')\n >>> simplejson.load(io)\n [u'streaming API']\n\nSpecializing JSON object decoding::\n\n >>> import simplejson\n >>> def as_complex(dct):\n ... if '__complex__' in dct:\n ... return complex(dct['real'], dct['imag'])\n ... return dct\n ... 
\n >>> simplejson.loads('{\"__complex__\": true, \"real\": 1, \"imag\": 2}',\n ... object_hook=as_complex)\n (1+2j)\n >>> import decimal\n >>> simplejson.loads('1.1', parse_float=decimal.Decimal)\n decimal.Decimal(1.1)\n\nExtending JSONEncoder::\n \n >>> import simplejson\n >>> class ComplexEncoder(simplejson.JSONEncoder):\n ... def default(self, obj):\n ... if isinstance(obj, complex):\n ... return [obj.real, obj.imag]\n ... return simplejson.JSONEncoder.default(self, obj)\n ... \n >>> dumps(2 + 1j, cls=ComplexEncoder)\n '[2.0, 1.0]'\n >>> ComplexEncoder().encode(2 + 1j)\n '[2.0, 1.0]'\n >>> list(ComplexEncoder().iterencode(2 + 1j))\n ['[', '2.0', ', ', '1.0', ']']\n \n\nUsing simplejson from the shell to validate and\npretty-print::\n \n $ echo '{\"json\":\"obj\"}' | python -msimplejson\n {\n \"json\": \"obj\"\n }\n $ echo '{ 1.2:3.4}' | python -msimplejson\n Expecting property name: line 1 column 2 (char 2)\n\nNote that the JSON produced by this module's default settings\nis a subset of YAML, so it may be used as a serializer for that as well.\n\"\"\"\n__version__ = '1.8.1'\n__all__ = [\n 'dump', 'dumps', 'load', 'loads',\n 'JSONDecoder', 'JSONEncoder',\n]\n\nif __name__ == '__main__':\n from simplejson.decoder import JSONDecoder\n from simplejson.encoder import JSONEncoder\nelse:\n from decoder import JSONDecoder\n from encoder import JSONEncoder\n\n_default_encoder = JSONEncoder(\n skipkeys=False,\n ensure_ascii=True,\n check_circular=True,\n allow_nan=True,\n indent=None,\n separators=None,\n encoding='utf-8',\n default=None,\n)\n\ndef dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,\n allow_nan=True, cls=None, indent=None, separators=None,\n encoding='utf-8', default=None, **kw):\n \"\"\"\n Serialize ``obj`` as a JSON formatted stream to ``fp`` (a\n ``.write()``-supporting file-like object).\n\n If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types\n (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) \n will be skipped instead of raising a ``TypeError``.\n\n If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``\n may be ``unicode`` instances, subject to normal Python ``str`` to\n ``unicode`` coercion rules. Unless ``fp.write()`` explicitly\n understands ``unicode`` (as in ``codecs.getwriter()``) this is likely\n to cause an error.\n\n If ``check_circular`` is ``False``, then the circular reference check\n for container types will be skipped and a circular reference will\n result in an ``OverflowError`` (or worse).\n\n If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to\n serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)\n in strict compliance of the JSON specification, instead of using the\n JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).\n\n If ``indent`` is a non-negative integer, then JSON array elements and object\n members will be pretty-printed with that indent level. An indent level\n of 0 will only insert newlines. ``None`` is the most compact representation.\n\n If ``separators`` is an ``(item_separator, dict_separator)`` tuple\n then it will be used instead of the default ``(', ', ': ')`` separators.\n ``(',', ':')`` is the most compact JSON representation.\n\n ``encoding`` is the character encoding for str instances, default is UTF-8.\n\n ``default(obj)`` is a function that should return a serializable version\n of obj or raise TypeError. The default simply raises TypeError.\n\n To use a custom ``JSONEncoder`` subclass (e.g. 
one that overrides the\n ``.default()`` method to serialize additional types), specify it with\n the ``cls`` kwarg.\n \"\"\"\n # cached encoder\n if (skipkeys is False and ensure_ascii is True and\n check_circular is True and allow_nan is True and\n cls is None and indent is None and separators is None and\n encoding == 'utf-8' and default is None and not kw):\n iterable = _default_encoder.iterencode(obj)\n else:\n if cls is None:\n cls = JSONEncoder\n iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,\n check_circular=check_circular, allow_nan=allow_nan, indent=indent,\n separators=separators, encoding=encoding,\n default=default, **kw).iterencode(obj)\n # could accelerate with writelines in some versions of Python, at\n # a debuggability cost\n for chunk in iterable:\n fp.write(chunk)\n\n\ndef dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,\n allow_nan=True, cls=None, indent=None, separators=None,\n encoding='utf-8', default=None, **kw):\n \"\"\"\n Serialize ``obj`` to a JSON formatted ``str``.\n\n If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types\n (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) \n will be skipped instead of raising a ``TypeError``.\n\n If ``ensure_ascii`` is ``False``, then the return value will be a\n ``unicode`` instance subject to normal Python ``str`` to ``unicode``\n coercion rules instead of being escaped to an ASCII ``str``.\n\n If ``check_circular`` is ``False``, then the circular reference check\n for container types will be skipped and a circular reference will\n result in an ``OverflowError`` (or worse).\n\n If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to\n serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in\n strict compliance of the JSON specification, instead of using the\n JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).\n\n If ``indent`` is a non-negative integer, then JSON array elements and\n object members will be pretty-printed with that indent level. An indent\n level of 0 will only insert newlines. ``None`` is the most compact\n representation.\n\n If ``separators`` is an ``(item_separator, dict_separator)`` tuple\n then it will be used instead of the default ``(', ', ': ')`` separators.\n ``(',', ':')`` is the most compact JSON representation.\n\n ``encoding`` is the character encoding for str instances, default is UTF-8.\n\n ``default(obj)`` is a function that should return a serializable version\n of obj or raise TypeError. The default simply raises TypeError.\n\n To use a custom ``JSONEncoder`` subclass (e.g. 
one that overrides the\n ``.default()`` method to serialize additional types), specify it with\n the ``cls`` kwarg.\n \"\"\"\n # cached encoder\n if (skipkeys is False and ensure_ascii is True and\n check_circular is True and allow_nan is True and\n cls is None and indent is None and separators is None and\n encoding == 'utf-8' and default is None and not kw):\n return _default_encoder.encode(obj)\n if cls is None:\n cls = JSONEncoder\n return cls(\n skipkeys=skipkeys, ensure_ascii=ensure_ascii,\n check_circular=check_circular, allow_nan=allow_nan, indent=indent,\n separators=separators, encoding=encoding, default=default,\n **kw).encode(obj)\n\n_default_decoder = JSONDecoder(encoding=None, object_hook=None)\n\ndef load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,\n parse_int=None, parse_constant=None, **kw):\n \"\"\"\n Deserialize ``fp`` (a ``.read()``-supporting file-like object containing\n a JSON document) to a Python object.\n\n If the contents of ``fp`` is encoded with an ASCII based encoding other\n than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must\n be specified. Encodings that are not ASCII based (such as UCS-2) are\n not allowed, and should be wrapped with\n ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``\n object and passed to ``loads()``\n\n ``object_hook`` is an optional function that will be called with the\n result of any object literal decode (a ``dict``). The return value of\n ``object_hook`` will be used instead of the ``dict``. This feature\n can be used to implement custom decoders (e.g. JSON-RPC class hinting).\n \n To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``\n kwarg.\n \"\"\"\n return loads(fp.read(),\n encoding=encoding, cls=cls, object_hook=object_hook,\n parse_float=parse_float, parse_int=parse_int,\n parse_constant=parse_constant, **kw)\n\ndef loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,\n parse_int=None, parse_constant=None, **kw):\n \"\"\"\n Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON\n document) to a Python object.\n\n If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding\n other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name\n must be specified. Encodings that are not ASCII based (such as UCS-2)\n are not allowed and should be decoded to ``unicode`` first.\n\n ``object_hook`` is an optional function that will be called with the\n result of any object literal decode (a ``dict``). The return value of\n ``object_hook`` will be used instead of the ``dict``. This feature\n can be used to implement custom decoders (e.g. JSON-RPC class hinting).\n\n ``parse_float``, if specified, will be called with the string\n of every JSON float to be decoded. By default this is equivalent to\n float(num_str). This can be used to use another datatype or parser\n for JSON floats (e.g. decimal.Decimal).\n\n ``parse_int``, if specified, will be called with the string\n of every JSON int to be decoded. By default this is equivalent to\n int(num_str). This can be used to use another datatype or parser\n for JSON integers (e.g. 
float).\n\n ``parse_constant``, if specified, will be called with one of the\n following strings: -Infinity, Infinity, NaN, null, true, false.\n This can be used to raise an exception if invalid JSON numbers\n are encountered.\n\n To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``\n kwarg.\n \"\"\"\n if (cls is None and encoding is None and object_hook is None and\n parse_int is None and parse_float is None and\n parse_constant is None and not kw):\n return _default_decoder.decode(s)\n if cls is None:\n cls = JSONDecoder\n if object_hook is not None:\n kw['object_hook'] = object_hook\n if parse_float is not None:\n kw['parse_float'] = parse_float\n if parse_int is not None:\n kw['parse_int'] = parse_int\n if parse_constant is not None:\n kw['parse_constant'] = parse_constant\n return cls(encoding=encoding, **kw).decode(s)\n\n#\n# Compatibility cruft from other libraries\n#\n\ndef decode(s):\n \"\"\"\n demjson, python-cjson API compatibility hook. Use loads(s) instead.\n \"\"\"\n import warnings\n warnings.warn(\"simplejson.loads(s) should be used instead of decode(s)\",\n DeprecationWarning)\n return loads(s)\n\ndef encode(obj):\n \"\"\"\n demjson, python-cjson compatibility hook. Use dumps(s) instead.\n \"\"\"\n import warnings\n warnings.warn(\"simplejson.dumps(s) should be used instead of encode(s)\",\n DeprecationWarning)\n return dumps(obj)\n\ndef read(s):\n \"\"\"\n jsonlib, JsonUtils, python-json, json-py API compatibility hook.\n Use loads(s) instead.\n \"\"\"\n import warnings\n warnings.warn(\"simplejson.loads(s) should be used instead of read(s)\",\n DeprecationWarning)\n return loads(s)\n\ndef write(obj):\n \"\"\"\n jsonlib, JsonUtils, python-json, json-py API compatibility hook.\n Use dumps(s) instead.\n \"\"\"\n import warnings\n warnings.warn(\"simplejson.dumps(s) should be used instead of write(s)\",\n DeprecationWarning)\n return dumps(obj)\n\n#\n# Pretty printer:\n# curl http://mochikit.com/examples/ajax_tables/domains.json | python -msimplejson\n#\n\ndef main():\n import sys\n if len(sys.argv) == 1:\n infile = sys.stdin\n outfile = sys.stdout\n elif len(sys.argv) == 2:\n infile = open(sys.argv[1], 'rb')\n outfile = sys.stdout\n elif len(sys.argv) == 3:\n infile = open(sys.argv[1], 'rb')\n outfile = open(sys.argv[2], 'wb')\n else:\n raise SystemExit(\"%s [infile [outfile]]\" % (sys.argv[0],))\n try:\n obj = load(infile)\n except ValueError, e:\n raise SystemExit(e)\n dump(obj, outfile, sort_keys=True, indent=4)\n outfile.write('\\n')\n\nif __name__ == '__main__':\n main()\n"}}},{"rowIdx":284650,"cells":{"repo_name":{"kind":"string","value":"z2care/sample-code-in-the-cloud"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"secure-chat/tchat.py"},"copies":{"kind":"string","value":"1"},"content":{"kind":"string","value":"import webapp2\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nimport datetime\nimport os\n\nfrom google.appengine.ext.webapp import template\n\n#START: ChatMessage\nclass ChatMessage(db.Model):\n user = db.StringProperty(required=True)\n timestamp = db.DateTimeProperty(auto_now_add=True)\n message = db.TextProperty(required=True)\n chat = db.StringProperty(required=True)\n\nCHATS = ['main', 'book', 'flame' ]\n#END: ChatMessage\n\n#START: UserRoles\nclass UserRole(db.Model):\n name = db.StringProperty(required=True)\n role = db.StringProperty(choices=[\"User\", \"admin\", \"privileged\"],\n default=\"User\")\n\n @staticmethod\n def GetUserRole(name):\n user_record = 
db.GqlQuery(\"SELECT * from UserRole WHERE \" +\n \"name = :1\",\n name).get()\n if user_record is not None:\n return user_record.role\n else:\n return \"User\"\n#END: UserRoles \n\n#START: ValidateRole\ndef ValidateUserRole(actual, required):\n if required == \"admin\": #\n return actual == \"admin\"\n elif required == \"privileged\": #\n return actual in (\"admin\", \"privileged\")\n elif required == \"User\":\n return True\n else: #\n return False\n#END: ValidateRole\n\n#START: NewChatRoom\nclass NewChatRoomHandler(webapp2.RequestHandler):\n '''@login_required''' #\n #http://djangosnippets.org/snippets/691/\n #http://flask.pocoo.org/docs/patterns/viewdecorators/\n def get(self):\n user = users.get_current_user()\n role = UserRole.GetUserRole(user.nickname()) #\n if not ValidateUserRole(role, \"privileged\"):\n self.response.headers[\"Content-Type\"] = \"text/html\"\n self.response.write(\n \"<html><head>\\n\" +\n \"<title>Insufficient Privileges</title>\\n\" +\n \"</head><body>\\n\" +\n \"<h1>Insufficient Privileges</h1>\\n\" +\n \"<p>I'm sorry but you aren't allowed to \" +\n \"access this page</p>\\n\" +\n \"</body></html>\\n\")\n else:\n self.response.headers[\"Content-Type\"] = \"text/html\"\n template_values = {\n 'title': \"MarkCC's AppEngine Chat Room\",\n }\n path = os.path.join(os.path.dirname(__file__), 'new-chat.html')\n page = template.render(path, template_values)\n self.response.write(page)\n#END: NewChatRoom\n\n#START: NewChatRoomPost\nclass NewChatRoomPostHandler(webapp2.RequestHandler):\n '''@login_required'''\n def post(self):\n import cgi # needed below for cgi.escape; not imported at module top\n user = users.get_current_user()\n role = UserRole.GetUserRole(user.nickname())\n if not ValidateUserRole(role, \"privileged\"):\n self.response.headers[\"Content-Type\"] = \"text/html\"\n self.response.write(\n \"<html><head><title>Insufficient Privileges</title></head><body>\\n\" +\n \"<h1>Insufficient Privileges</h1>\\n\" +\n \"<p>I'm sorry but you aren't allowed to access this page</p>\\n\" +\n \"</body></html>\\n\")\n else:\n newchat = cgi.escape(self.request.get(\"newchat\"))\n CreateChat(user, newchat)\n self.response.write(\n \"<html><head><title>Chat Room Created</title></head><body>\\n\" +\n \"<h1>Chat Room Created</h1>\\n\" +\n \"<p>New chat room %s created.</p>\\n\"\n \"</body></html>\\n\" % newchat)\n#END: NewChatRoomPost\n\n#START: GenericChat\nclass GenericChatPage(webapp2.RequestHandler):\n def get(self):\n requested_chat = self.request.get(\"chat\", default_value=\"none\")\n if requested_chat == \"none\" or requested_chat not in CHATS:\n template_params = {\n 'title': \"Error! Requested chat not found!\",\n 'chatname': requested_chat,\n }\n error_template = os.path.join(os.path.dirname(__file__), 'error.html')\n page = template.render(error_template, template_params)\n self.response.write(page)\n else:\n messages = db.GqlQuery(\"SELECT * from ChatMessage WHERE chat = :1 \"\n \"ORDER BY timestamp\", requested_chat)\n template_params = {\n 'title': \"MarkCC's AppEngine Chat Room\",\n 'msg_list': messages,\n 'chat': requested_chat\n }\n path = os.path.join(os.path.dirname(__file__), 'multichat.html')\n page = template.render(path, template_params)\n self.response.write(page)\n#END: GenericChat \n\n#START: ChatRoomCounted\nclass ChatRoomCountedHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user is None:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n self.response.headers[\"Content-Type\"] = \"text/html\"\n messages = db.GqlQuery(\"SELECT * From ChatMessage ORDER BY timestamp \"\n \"DESC LIMIT 20\")\n msglist = list(messages)\n msglist.reverse()\n for msg in msglist:\n msg.deltatime = datetime.datetime.now() - msg.timestamp\n template_values = {\n 'title': \"MarkCC's AppEngine Chat Room\",\n 'msg_list': msglist,\n }\n path = os.path.join(os.path.dirname(__file__), 'count.html')\n page = template.render(path, template_values)\n self.response.write(page)\n#END: ChatRoomCounted\n\n\n#START: LandingPage\nclass ChatRoomLandingPage(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user is None:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n self.response.headers[\"Content-Type\"] = \"text/html\"\n messages = db.GqlQuery(\"SELECT * From ChatMessage ORDER BY timestamp \"\n \"DESC LIMIT 20\")\n# msglist = list(messages).reverse()\n template_values = {\n 'title': \"MarkCC's AppEngine Chat Room\",\n 'msg_list': messages,\n }\n path = os.path.join(os.path.dirname(__file__), 'landing.html')\n page = template.render(path, template_values)\n self.response.write(page)\n#END: LandingPage\n\n#START: ChatRoomPoster\nclass ChatRoomPoster(webapp2.RequestHandler):\n def post(self):\n user = users.get_current_user()\n msgtext = self.request.get(\"message\")\n chat = self.request.get(\"chat\")\n msg = ChatMessage(user=user.nickname(), message=msgtext, chat=chat)\n msg.put()\n # Now that we've added the message to the chat, we'll redirect\n # to the chat's page.\n self.redirect('/enterchat?chat=%s' % chat)\n#END: ChatRoomPoster\n\n# START: Frame\nchatapp = webapp2.WSGIApplication([('/', ChatRoomLandingPage),\n ('/talk', ChatRoomPoster),\n ('/enterchat', GenericChatPage)])\n\n# END: Frame\n\n"}}},{"rowIdx":284651,"cells":{"repo_name":{"kind":"string","value":"ericpre/hyperspy"},"ref":{"kind":"string","value":"refs/heads/RELEASE_next_minor"},"path":{"kind":"string","value":"hyperspy/tests/io/test_bruker.py"},"copies":{"kind":"string","value":"2"},"content":{"kind":"string","value":"import json\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom hyperspy import signals\nfrom hyperspy.io import load\nfrom hyperspy.misc.test_utils import assert_deep_almost_equal\n\ntest_files = ['30x30_instructively_packed_16bit_compressed.bcf',\n '16x16_12bit_packed_8bit.bcf',\n 
'P45_the_default_job.bcf',\n 'test_TEM.bcf',\n 'Hitachi_TM3030Plus.bcf',\n 'over16bit.bcf',\n 'bcf_v2_50x50px.bcf',\n 'bcf-edx-ebsd.bcf']\nnp_file = ['30x30_16bit.npy', '30x30_16bit_ds.npy']\nspx_files = ['extracted_from_bcf.spx',\n 'bruker_nano.spx']\n\nmy_path = os.path.dirname(__file__)\n\n\ndef test_load_16bit():\n # test bcf from hyperspy load function level\n # some of functions can be not covered\n # it cant use cython parsing implementation, as it is not compiled\n filename = os.path.join(my_path, 'bruker_data', test_files[0])\n print('testing bcf instructively packed 16bit...')\n s = load(filename)\n bse, hype = s\n # Bruker saves all images in true 16bit:\n assert bse.data.dtype == np.uint16\n assert bse.data.shape == (30, 30)\n np_filename = os.path.join(my_path, 'bruker_data', np_file[0])\n np.testing.assert_array_equal(hype.data[:, :, 222:224],\n np.load(np_filename))\n assert hype.data.shape == (30, 30, 2048)\n\n\ndef test_load_16bit_reduced():\n filename = os.path.join(my_path, 'bruker_data', test_files[0])\n print('testing downsampled 16bit bcf...')\n s = load(filename, downsample=4, cutoff_at_kV=10)\n bse, hype = s\n # sem images are never downsampled\n assert bse.data.shape == (30, 30)\n np_filename = os.path.join(my_path, 'bruker_data', np_file[1])\n np.testing.assert_array_equal(hype.data[:, :, 222:224],\n np.load(np_filename))\n assert hype.data.shape == (8, 8, 1047)\n # Bruker saves all images in true 16bit:\n assert bse.data.dtype == np.uint16\n # hypermaps should always return unsigned integers:\n assert str(hype.data.dtype)[0] == 'u'\n\n\ndef test_load_8bit():\n for bcffile in test_files[1:3]:\n filename = os.path.join(my_path, 'bruker_data', bcffile)\n print('testing simple 8bit bcf...')\n s = load(filename)\n bse, hype = s[0], s[-1]\n # Bruker saves all images in true 16bit:\n assert bse.data.dtype == np.uint16\n # hypermaps should always return unsigned integers:\n assert str(hype.data.dtype)[0] == 'u'\n\n\ndef test_hyperspy_wrap():\n filename = os.path.join(my_path, 'bruker_data', test_files[0])\n print('testing bcf wrap to hyperspy signal...')\n from hyperspy.exceptions import VisibleDeprecationWarning\n with pytest.warns(VisibleDeprecationWarning):\n hype = load(filename, select_type='spectrum')\n hype = load(filename, select_type='spectrum_image')\n np.testing.assert_allclose(\n hype.axes_manager[0].scale,\n 1.66740910949362,\n atol=1E-12)\n np.testing.assert_allclose(\n hype.axes_manager[1].scale,\n 1.66740910949362,\n atol=1E-12)\n assert hype.axes_manager[1].units == 'µm'\n np.testing.assert_allclose(hype.axes_manager[2].scale, 0.009999)\n np.testing.assert_allclose(hype.axes_manager[2].offset, -0.47225277)\n assert hype.axes_manager[2].units == 'keV'\n assert hype.axes_manager[2].is_binned == True\n\n md_ref = {\n 'Acquisition_instrument': {\n 'SEM': {\n 'beam_energy': 20,\n 'magnification': 1819.22595,\n 'Detector': {\n 'EDS': {\n 'elevation_angle': 35.0,\n 'detector_type': 'XFlash 6|10',\n 'azimuth_angle': 90.0,\n 'real_time': 70.07298,\n 'energy_resolution_MnKa': 130.0}},\n 'Stage': {\n 'tilt_alpha': 0.0,\n 'rotation': 326.10089,\n 'x': 66940.81,\n 'y': 54233.16,\n 'z': 39194.77}}},\n 'General': {\n 'original_filename':\n '30x30_instructively_packed_16bit_compressed.bcf',\n 'title': 'EDX',\n 'date': '2018-10-04',\n 'time': '13:02:07'},\n 'Sample': {\n 'name': 'chevkinite',\n 'elements': ['Al', 'C', 'Ca', 'Ce', 'Fe', 'Gd', 'K', 'Mg', 'Na',\n 'Nd', 'O', 'P', 'Si', 'Sm', 'Th', 'Ti'],\n 'xray_lines': ['Al_Ka', 'C_Ka', 'Ca_Ka', 'Ce_La', 'Fe_Ka',\n 'Gd_La', 
'K_Ka', 'Mg_Ka', 'Na_Ka', 'Nd_La',\n 'O_Ka', 'P_Ka', 'Si_Ka', 'Sm_La', 'Th_Ma',\n 'Ti_Ka']},\n 'Signal': {\n 'quantity': 'X-rays (Counts)',\n 'signal_type': 'EDS_SEM'},\n '_HyperSpy': {\n 'Folding': {'original_axes_manager': None,\n 'original_shape': None,\n 'signal_unfolded': False,\n 'unfolded': False}}}\n\n filename_omd = os.path.join(my_path,\n 'bruker_data',\n '30x30_original_metadata.json')\n with open(filename_omd) as fn:\n # original_metadata:\n omd_ref = json.load(fn)\n assert_deep_almost_equal(hype.metadata.as_dictionary(), md_ref)\n assert_deep_almost_equal(hype.original_metadata.as_dictionary(), omd_ref)\n assert hype.metadata.General.date == \"2018-10-04\"\n assert hype.metadata.General.time == \"13:02:07\"\n assert hype.metadata.Signal.quantity == \"X-rays (Counts)\"\n\n\ndef test_hyperspy_wrap_downsampled():\n filename = os.path.join(my_path, 'bruker_data', test_files[0])\n print('testing bcf wrap to hyperspy signal...')\n hype = load(filename, select_type='spectrum_image', downsample=5)\n np.testing.assert_allclose(\n hype.axes_manager[0].scale,\n 8.337045547468101,\n atol=1E-12)\n np.testing.assert_allclose(\n hype.axes_manager[1].scale,\n 8.337045547468101,\n atol=1E-12)\n assert hype.axes_manager[1].units == 'µm'\n\n\ndef test_get_mode():\n filename = os.path.join(my_path, 'bruker_data', test_files[0])\n s = load(filename, select_type='spectrum_image', instrument='SEM')\n assert s.metadata.Signal.signal_type == \"EDS_SEM\"\n assert isinstance(s, signals.EDSSEMSpectrum)\n\n filename = os.path.join(my_path, 'bruker_data', test_files[0])\n s = load(filename, select_type='spectrum_image', instrument='TEM')\n assert s.metadata.Signal.signal_type == \"EDS_TEM\"\n assert isinstance(s, signals.EDSTEMSpectrum)\n\n filename = os.path.join(my_path, 'bruker_data', test_files[0])\n s = load(filename, select_type='spectrum_image')\n assert s.metadata.Signal.signal_type == \"EDS_SEM\"\n assert isinstance(s, signals.EDSSEMSpectrum)\n\n filename = os.path.join(my_path, 'bruker_data', test_files[3])\n s = load(filename, select_type='spectrum_image')\n assert s.metadata.Signal.signal_type == \"EDS_TEM\"\n assert isinstance(s, signals.EDSTEMSpectrum)\n\n\ndef test_wrong_file():\n filename = os.path.join(my_path, 'bruker_data', 'Nope.bcf')\n with pytest.raises(TypeError):\n load(filename)\n\n\ndef test_fast_bcf():\n thingy = pytest.importorskip(\"hyperspy.io_plugins.unbcf_fast\")\n from hyperspy.io_plugins import bruker\n for bcffile in test_files:\n filename = os.path.join(my_path, 'bruker_data', bcffile)\n thingy = bruker.BCF_reader(filename)\n for j in range(2, 5, 1):\n print('downsampling:', j)\n bruker.fast_unbcf = True # manually enabling fast parsing\n hmap1 = thingy.parse_hypermap(downsample=j) # using cython\n bruker.fast_unbcf = False # manually disabling fast parsing\n hmap2 = thingy.parse_hypermap(downsample=j) # py implementation\n np.testing.assert_array_equal(hmap1, hmap2)\n\n\ndef test_decimal_regex():\n from hyperspy.io_plugins.bruker import fix_dec_patterns\n dummy_xml_positive = [b'85,658',\n b'85,658E-8',\n b'-85,658E-8',\n b'-85.658', # negative check\n b'85.658E-8'] # negative check\n dummy_xml_negative = [b'12,25,23,45,56,12,45',\n b'12e1,23,-24E-5']\n for i in dummy_xml_positive:\n assert b'85.658' in fix_dec_patterns.sub(b'\\\\1.\\\\2', i)\n for j in dummy_xml_negative:\n assert b'.' 
not in fix_dec_patterns.sub(b'\\\\1.\\\\2', j)\n\n\ndef test_all_spx_loads():\n for spxfile in spx_files:\n filename = os.path.join(my_path, 'bruker_data', spxfile)\n s = load(filename)\n assert s.data.dtype == np.uint64\n assert s.metadata.Signal.signal_type == 'EDS_SEM'\n\n\ndef test_stand_alone_spx():\n filename = os.path.join(my_path, 'bruker_data', 'bruker_nano.spx')\n s = load(filename)\n assert s.metadata.Sample.elements == ['Fe', 'S', 'Cu']\n assert s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == 7.385\n\n\ndef test_bruker_XRF():\n # See https://github.com/hyperspy/hyperspy/issues/2689\n # Bruker M6 Jetstream SPX\n filename = os.path.join(my_path, 'bruker_data',\n 'bruker_m6_jetstream_file_example.spx')\n s = load(filename)\n assert s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == 28.046\n assert s.metadata.Acquisition_instrument.TEM.beam_energy == 50\n"}}},{"rowIdx":284652,"cells":{"repo_name":{"kind":"string","value":"ksrajkumar/openerp-6.1"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"openerp/pychart/afm/Courier_Oblique.py"},"copies":{"kind":"string","value":"15"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# AFM font Courier-Oblique (path: /usr/share/fonts/afms/adobe/pcrro8a.afm).\n# Derived from Ghostscript distribution.\n# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.\nimport dir\ndir.afm[\"Courier-Oblique\"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 600, 600, 600, 500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 600, 500, 600, 600, 600, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 500, 600, 500, 500, 500, 500, 600, 600, 600, 600, 500, 500, 500, 500, 500, 600, 500, 500, 500, 600, 500, 500, 600, 600, 600, 600, )\n"}}},{"rowIdx":284653,"cells":{"repo_name":{"kind":"string","value":"40223246/0622W17test2"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"static/Brython3.1.1-20150328-091302/Lib/browser/indexed_db.py"},"copies":{"kind":"string","value":"632"},"content":{"kind":"string","value":"class EventListener:\n def __init__(self, events=[]):\n self._events=events\n\n def append(self, event):\n self._events.append(event)\n\n def fire(self, e):\n for _event in self._events:\n _event(e)\n\nclass IndexedDB:\n def __init__(self):\n if not __BRYTHON__.has_indexedDB:\n raise NotImplementedError(\"Your browser doesn't support indexedDB\")\n return\n\n self._indexedDB=__BRYTHON__.indexedDB()\n self._db=None\n self._version=None\n\n def _onsuccess(self, event):\n self._db=event.target.result\n\n def 
open(self, name, onsuccess, version=1.0, onerror=None, \n onupgradeneeded=None):\n self._version=version\n _result=self._indexedDB.open(name, version)\n _success=EventListener([self._onsuccess, onsuccess])\n _result.onsuccess=_success.fire\n _result.onupgradeneeded=onupgradeneeded\n\n #if onerror is None:\n def onerror(e):\n print(\"onerror: %s:%s\" % (e.type, e.target.result))\n\n def onblocked(e):\n print(\"blocked: %s:%s\" % (e.type, e.result))\n\n _result.onerror=onerror\n _result.onblocked=onblocked\n\n def transaction(self, entities, mode='read'):\n return Transaction(self._db.transaction(entities, mode))\n\nclass Transaction:\n\n def __init__(self, transaction):\n self._transaction=transaction\n\n def objectStore(self, name):\n return ObjectStore(self._transaction.objectStore(name))\n\nclass ObjectStore:\n\n def __init__(self, objectStore):\n self._objectStore=objectStore\n self._data=[]\n\n def clear(self, onsuccess=None, onerror=None):\n _result=self._objectStore.clear()\n\n if onsuccess is not None:\n _result.onsuccess=onsuccess\n\n if onerror is not None:\n _result.onerror=onerror\n\n def _helper(self, func, object, onsuccess=None, onerror=None):\n _result=func(object)\n\n if onsuccess is not None:\n _result.onsuccess=onsuccess\n\n if onerror is not None:\n _result.onerror=onerror\n\n def put(self, obj, key=None, onsuccess=None, onerror=None):\n _r = self._objectStore.put(obj, key)\n _r.onsuccess = onsuccess\n _r.onerror = onerror\n\n def add(self, obj, key, onsuccess=None, onerror=None):\n _r = self._objectStore.add(obj, key)\n _r.onsuccess = onsuccess\n _r.onerror = onerror\n #self._helper(self._objectStore.add, object, onsuccess, onerror)\n\n def delete(self, index, onsuccess=None, onerror=None): \n self._helper(self._objectStore.delete, index, onsuccess, onerror)\n \n def query(self, *args):\n self._data=[]\n def onsuccess(event):\n cursor=event.target.result\n if cursor is not None:\n self._data.append(cursor.value)\n getattr(cursor,\"continue\")() # cursor.continue() is illegal\n\n self._objectStore.openCursor(args).onsuccess=onsuccess\n\n def fetchall(self):\n yield self._data\n\n def get(self, key, onsuccess=None, onerror=None):\n self._helper(self._objectStore.get, key, onsuccess, onerror)\n"}}},{"rowIdx":284654,"cells":{"repo_name":{"kind":"string","value":"jruiperezv/ANALYSE"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"common/djangoapps/enrollment/urls.py"},"copies":{"kind":"string","value":"8"},"content":{"kind":"string","value":"\"\"\"\nURLs for the Enrollment API\n\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import patterns, url\n\nfrom .views import get_course_enrollment, list_student_enrollments\n\nurlpatterns = []\n\nif settings.FEATURES.get('ENABLE_COMBINED_LOGIN_REGISTRATION'):\n urlpatterns += patterns(\n 'enrollment.views',\n url(r'^student$', list_student_enrollments, name='courseenrollments'),\n url(\n r'^course/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),\n get_course_enrollment,\n name='courseenrollment'\n ),\n )\n"}}},{"rowIdx":284655,"cells":{"repo_name":{"kind":"string","value":"NobleNoob/buildpack"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"lib/build_pack_utils/cloudfoundry.py"},"copies":{"kind":"string","value":"12"},"content":{"kind":"string","value":"import os\nimport sys\nimport json\nimport tempfile\nimport shutil\nimport utils\nimport logging\nfrom urlparse import urlparse\nfrom zips import UnzipUtil\nfrom hashes import 
# ===== jruiperezv/ANALYSE :: common/djangoapps/enrollment/urls.py =====
"""
URLs for the Enrollment API

"""
from django.conf import settings
from django.conf.urls import patterns, url

from .views import get_course_enrollment, list_student_enrollments

urlpatterns = []

if settings.FEATURES.get('ENABLE_COMBINED_LOGIN_REGISTRATION'):
    urlpatterns += patterns(
        'enrollment.views',
        url(r'^student$', list_student_enrollments, name='courseenrollments'),
        url(
            r'^course/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),
            get_course_enrollment,
            name='courseenrollment'
        ),
    )

# ===== NobleNoob/buildpack :: lib/build_pack_utils/cloudfoundry.py =====
import os
import sys
import json
import tempfile
import shutil
import utils
import logging
from urlparse import urlparse
from zips import UnzipUtil
from hashes import HashUtil
from cache import DirectoryCacheManager
from downloads import Downloader
from downloads import CurlDownloader
from utils import safe_makedirs


_log = logging.getLogger('cloudfoundry')


class CloudFoundryUtil(object):
    @staticmethod
    def initialize():
        # Open stdout unbuffered
        if hasattr(sys.stdout, 'fileno'):
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
        ctx = utils.FormattedDict()
        # Add environment variables
        ctx.update(os.environ)
        # Convert JSON env variables
        ctx['VCAP_APPLICATION'] = json.loads(ctx.get('VCAP_APPLICATION',
                                                     '{}',
                                                     format=False))
        ctx['VCAP_SERVICES'] = json.loads(ctx.get('VCAP_SERVICES',
                                                  '{}',
                                                  format=False))
        # Build Pack Location
        ctx['BP_DIR'] = os.path.dirname(os.path.dirname(sys.argv[0]))
        # User's Application Files, build droplet here
        ctx['BUILD_DIR'] = sys.argv[1]
        # Cache space for the build pack
        ctx['CACHE_DIR'] = (len(sys.argv) == 3) and sys.argv[2] or None
        # Temp space
        if 'TMPDIR' not in ctx.keys():
            ctx['TMPDIR'] = tempfile.gettempdir()
        # Make sure cache & build directories exist
        if not os.path.exists(ctx['BUILD_DIR']):
            os.makedirs(ctx['BUILD_DIR'])
        if ctx['CACHE_DIR'] and not os.path.exists(ctx['CACHE_DIR']):
            os.makedirs(ctx['CACHE_DIR'])
        # Add place holder for extensions
        ctx['EXTENSIONS'] = []
        # Init Logging
        CloudFoundryUtil.init_logging(ctx)
        _log.info('CloudFoundry Initialized.')
        _log.debug("CloudFoundry Context Setup [%s]", ctx)
        return ctx

    @staticmethod
    def init_logging(ctx):
        logFmt = '%(asctime)s [%(levelname)s] %(name)s - %(message)s'
        if ctx.get('BP_DEBUG', False):
            logging.basicConfig(level=logging.DEBUG, format=logFmt)
        else:
            logLevelStr = ctx.get('BP_LOG_LEVEL', 'INFO')
            logLevel = getattr(logging, logLevelStr, logging.INFO)
            logDir = os.path.join(ctx['BUILD_DIR'], '.bp', 'logs')
            safe_makedirs(logDir)
            logging.basicConfig(level=logLevel, format=logFmt,
                                filename=os.path.join(logDir, 'bp.log'))

    @staticmethod
    def load_json_config_file_from(folder, cfgFile):
        return CloudFoundryUtil.load_json_config_file(os.path.join(folder,
                                                                   cfgFile))

    @staticmethod
    def load_json_config_file(cfgPath):
        if os.path.exists(cfgPath):
            _log.debug("Loading config from [%s]", cfgPath)
            with open(cfgPath, 'rt') as cfgFile:
                return json.load(cfgFile)
        return {}


class CloudFoundryInstaller(object):
    def __init__(self, ctx):
        self._log = _log
        self._ctx = ctx
        self._unzipUtil = UnzipUtil(ctx)
        self._hashUtil = HashUtil(ctx)
        self._dcm = DirectoryCacheManager(ctx)
        self._dwn = self._get_downloader(ctx)(ctx)

    def _get_downloader(self, ctx):
        method = ctx.get('DOWNLOAD_METHOD', 'python')
        if method == 'python':
            self._log.debug('Using python downloader.')
            return Downloader
        elif method == 'curl':
            self._log.debug('Using cURL downloader.')
            return CurlDownloader
        elif method == 'custom':
            fullClsName = ctx['DOWNLOAD_CLASS']
            self._log.debug('Using custom downloader [%s].', fullClsName)
            dotLoc = fullClsName.rfind('.')
            if dotLoc >= 0:
                clsName = fullClsName[dotLoc + 1: len(fullClsName)]
                modName = fullClsName[0:dotLoc]
                m = __import__(modName, globals(), locals(), [clsName])
                try:
                    return getattr(m, clsName)
                except AttributeError:
                    self._log.exception(
                        'WARNING: DOWNLOAD_CLASS not found!')
            else:
                self._log.error(
                    'WARNING: DOWNLOAD_CLASS invalid, must include '
                    'package name!')
            return Downloader

    def _is_url(self, val):
        return urlparse(val).scheme != ''

    def install_binary_direct(self, url, hsh, installDir,
                              fileName=None, strip=False):
        self._log.debug("Installing direct [%s]", url)
        if not fileName:
            fileName = url.split('/')[-1]
        if self._is_url(hsh):
            digest = self._dwn.download_direct(hsh)
        else:
            digest = hsh
        self._log.debug(
            "Installing [%s] with digest [%s] into [%s] with "
            "name [%s] stripping [%s]",
            url, digest, installDir, fileName, strip)
        fileToInstall = self._dcm.get(fileName, digest)
        if fileToInstall is None:
            self._log.debug('File [%s] not in cache.', fileName)
            fileToInstall = os.path.join(self._ctx['TMPDIR'], fileName)
            self._dwn.download(url, fileToInstall)
            digest = self._hashUtil.calculate_hash(fileToInstall)
            fileToInstall = self._dcm.put(fileName, fileToInstall, digest)
        return self._unzipUtil.extract(fileToInstall,
                                       installDir,
                                       strip)

    def install_binary(self, installKey):
        self._log.debug('Installing [%s]', installKey)
        url = self._ctx['%s_DOWNLOAD_URL' % installKey]
        hashUrl = self._ctx.get(
            '%s_HASH_DOWNLOAD_URL' % installKey,
            "%s.%s" % (url, self._ctx['CACHE_HASH_ALGORITHM']))
        installDir = os.path.join(self._ctx['BUILD_DIR'],
                                  self._ctx.get(
                                      '%s_PACKAGE_INSTALL_DIR' % installKey,
                                      installKey.lower()))
        strip = self._ctx.get('%s_STRIP' % installKey, False)
        return self.install_binary_direct(url, hashUrl, installDir,
                                          strip=strip)

    def _install_from(self, fromPath, fromLoc, toLocation=None, ignore=None):
        """Copy file or directory from a location to the droplet

        Copies a file or directory from a location to the application
        droplet.  Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore
        parameter.

            fromPath   -> file to copy, relative to the build pack
            fromLoc    -> root of the from path.  Full path to the file or
                          directory to be copied is fromLoc + fromPath
            toLocation -> optional location where to copy the file,
                          relative to the app droplet.  If not specified,
                          uses fromPath.
            ignore     -> an optional callable that is passed to
                          the ignore argument of shutil.copytree.
        """
        self._log.debug("Install file [%s] from [%s]", fromPath, fromLoc)
        fullPathFrom = os.path.join(fromLoc, fromPath)
        if os.path.exists(fullPathFrom):
            fullPathTo = os.path.join(
                self._ctx['BUILD_DIR'],
                ((toLocation is None) and fromPath or toLocation))
            safe_makedirs(os.path.dirname(fullPathTo))
            self._log.debug("Copying [%s] to [%s]", fullPathFrom, fullPathTo)
            if os.path.isfile(fullPathFrom):
                shutil.copy(fullPathFrom, fullPathTo)
            else:
                utils.copytree(fullPathFrom, fullPathTo, ignore=ignore)

    def install_from_build_pack(self, fromPath, toLocation=None, ignore=None):
        """Copy file or directory from the build pack to the droplet

        Copies a file or directory from the build pack to the application
        droplet.  Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore
        parameter.

            fromPath   -> file to copy, relative to the build pack
            toLocation -> optional location where to copy the file,
                          relative to the app droplet.  If not specified,
                          uses fromPath.
            ignore     -> an optional callable that is passed to
                          the ignore argument of shutil.copytree.
        """
        self._install_from(
            fromPath,
            self._ctx['BP_DIR'],
            toLocation,
            ignore)

    def install_from_application(self, fromPath, toLocation, ignore=None):
        """Copy file or directory from one place to another in the application

        Copies a file or directory from one place to another place within the
        application droplet.

            fromPath   -> file or directory to copy, relative
                          to the application droplet.
            toLocation -> location where to copy the file,
                          relative to the app droplet.
            ignore     -> optional callable that is passed to the
                          ignore argument of shutil.copytree.
        """
        self._install_from(
            fromPath,
            self._ctx['BUILD_DIR'],
            toLocation,
            ignore)
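# How the 'custom' DOWNLOAD_METHOD branch above resolves DOWNLOAD_CLASS,
# shown in isolation; the module and class names here are hypothetical:
fullClsName = 'my_pkg.downloaders.S3Downloader'
dotLoc = fullClsName.rfind('.')
clsName = fullClsName[dotLoc + 1:]
modName = fullClsName[:dotLoc]
m = __import__(modName, globals(), locals(), [clsName])
DownloaderCls = getattr(m, clsName)  # same lookup _get_downloader() performs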
# ===== brendangregg/bcc :: tools/tcpsubnet.py =====
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpsubnet   Summarize TCP bytes sent to different subnets.
#             For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpsubnet [-h] [-v] [-J] [-f FORMAT] [-i INTERVAL] [subnets]
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# This is an adaptation of tcptop, written by Brendan Gregg.
#
# WARNING: This traces all sends at the TCP level, and while it
# summarizes data in-kernel to reduce overhead, there may still be some
# overhead at high TCP send/receive rates (eg, ~13% of one CPU at 100k TCP
# events/sec. This is not the same as packet rate: funccount can be used to
# count the kprobes below to find out the TCP rate). Test in a lab environment
# first. If your send rate is low (eg, <1k/sec) then the overhead is
# expected to be negligible.
#
# Copyright 2017 Rodrigo Manyari
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 03-Oct-2017   Rodrigo Manyari   Created this based on tcptop.
# 13-Feb-2018   Rodrigo Manyari   Fix pep8 errors, some refactoring.
# 05-Mar-2018   Rodrigo Manyari   Add date time to output.

import argparse
import json
import logging
import struct
import socket
from bcc import BPF
from datetime import datetime as dt
from time import sleep

# arguments
examples = """examples:
    ./tcpsubnet                 # Trace TCP sent to the default subnets:
                                # 127.0.0.1/32,10.0.0.0/8,172.16.0.0/12,
                                # 192.168.0.0/16,0.0.0.0/0
    ./tcpsubnet -f K            # Trace TCP sent to the default subnets
                                # aggregated in KBytes.
    ./tcpsubnet 10.80.0.0/24    # Trace TCP sent to 10.80.0.0/24 only
    ./tcpsubnet -J              # Format the output in JSON.
"""

default_subnets = "127.0.0.1/32,10.0.0.0/8," \
    "172.16.0.0/12,192.168.0.0/16,0.0.0.0/0"

parser = argparse.ArgumentParser(
    description="Summarize TCP send and aggregate by subnet",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("subnets", help="comma separated list of subnets",
    type=str, nargs="?", default=default_subnets)
parser.add_argument("-v", "--verbose", action="store_true",
    help="output debug statements")
parser.add_argument("-J", "--json", action="store_true",
    help="format output in JSON")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
parser.add_argument("-f", "--format", default="B",
    help="[bkmBKM] format to report: bits, Kbits, Mbits, bytes, " +
    "KBytes, MBytes (default B)", choices=["b", "k", "m", "B", "K", "M"])
parser.add_argument("-i", "--interval", default=1, type=int,
    help="output interval, in seconds (default 1)")
args = parser.parse_args()

level = logging.INFO
if args.verbose:
    level = logging.DEBUG

logging.basicConfig(level=level)

logging.debug("Starting with the following args:")
logging.debug(args)

# args checking
if int(args.interval) <= 0:
    logging.error("Invalid interval, must be > 0. Exiting.")
    exit(1)
else:
    args.interval = int(args.interval)

# map of supported formats
formats = {
    "b": lambda x: (x * 8),
    "k": lambda x: ((x * 8) / 1024),
    "m": lambda x: ((x * 8) / pow(1024, 2)),
    "B": lambda x: x,
    "K": lambda x: x / 1024,
    "M": lambda x: x / pow(1024, 2)
}

# Let's swap the string with the actual numeric value
# once here so we don't have to do it on every interval
formatFn = formats[args.format]

# define the basic structure of the BPF program
# (the three kernel headers below were lost to HTML-escaping in transit;
# they are the standard ones this BPF program needs)
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>

struct index_key_t {
    u32 index;
};

BPF_HASH(ipv4_send_bytes, struct index_key_t);

int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
    struct msghdr *msg, size_t size)
{
    u16 family = sk->__sk_common.skc_family;

    if (family == AF_INET) {
        u32 dst = sk->__sk_common.skc_daddr;
        unsigned categorized = 0;
        __SUBNETS__
    }
    return 0;
}
"""


# Takes in a mask and returns the integer equivalent
# e.g.
# mask_to_int(8) returns 4278190080
def mask_to_int(n):
    return ((1 << n) - 1) << (32 - n)

# Takes in a list of subnets and returns a list
# of tuple-3 containing:
#  - The subnet info at index 0
#  - The addr portion as an int at index 1
#  - The mask portion as an int at index 2
#
# e.g.
# parse_subnets(['10.10.0.0/24']) returns
# [
#   ['10.10.0.0/24', 168427520, 4294967040],
# ]
def parse_subnets(subnets):
    m = []
    for s in subnets:
        parts = s.split("/")
        if len(parts) != 2:
            msg = "Subnet [%s] is invalid, please refer to the examples." % s
            raise ValueError(msg)
        netaddr_int = 0
        mask_int = 0
        try:
            netaddr_int = struct.unpack("!I", socket.inet_aton(parts[0]))[0]
        except:
            msg = ("Invalid net address in subnet [%s], " +
                "please refer to the examples.") % s
            raise ValueError(msg)
        try:
            mask_int = int(parts[1])
        except:
            msg = "Invalid mask in subnet [%s]. Mask must be an int" % s
            raise ValueError(msg)
        if mask_int < 0 or mask_int > 32:
            msg = ("Invalid mask in subnet [%s]. Must be an " +
                "int between 0 and 32.") % s
            raise ValueError(msg)
        mask_int = mask_to_int(int(parts[1]))
        m.append([s, netaddr_int, mask_int])
    return m

def generate_bpf_subnets(subnets):
    template = """
        if (!categorized && (__NET_ADDR__ & __NET_MASK__) ==
              (dst & __NET_MASK__)) {
          struct index_key_t key = {.index = __POS__};
          ipv4_send_bytes.increment(key, size);
          categorized = 1;
        }
    """
    bpf = ''
    for i, s in enumerate(subnets):
        branch = template
        branch = branch.replace("__NET_ADDR__", str(socket.htonl(s[1])))
        branch = branch.replace("__NET_MASK__", str(socket.htonl(s[2])))
        branch = branch.replace("__POS__", str(i))
        bpf += branch
    return bpf

subnets = []
if args.subnets:
    subnets = args.subnets.split(",")

subnets = parse_subnets(subnets)

logging.debug("Packets are going to be categorized in the following subnets:")
logging.debug(subnets)

bpf_subnets = generate_bpf_subnets(subnets)

# initialize BPF
bpf_text = bpf_text.replace("__SUBNETS__", bpf_subnets)

logging.debug("Done preprocessing the BPF program, " +
    "this is what will actually get executed:")
logging.debug(bpf_text)

if args.ebpf:
    print(bpf_text)
    exit()

b = BPF(text=bpf_text)

ipv4_send_bytes = b["ipv4_send_bytes"]

if not args.json:
    print("Tracing... Output every %d secs. Hit Ctrl-C to end" % args.interval)

# output
exiting = 0
while (1):

    try:
        sleep(args.interval)
    except KeyboardInterrupt:
        exiting = 1

    # IPv4: build dict of all seen keys
    keys = ipv4_send_bytes
    for k, v in ipv4_send_bytes.items():
        if k not in keys:
            keys[k] = v

    # to hold json data
    data = {}

    # output
    now = dt.now()
    data['date'] = now.strftime('%x')
    data['time'] = now.strftime('%X')
    data['entries'] = {}
    if not args.json:
        print(now.strftime('[%x %X]'))
    for k, v in reversed(sorted(keys.items(), key=lambda keys: keys[1].value)):
        send_bytes = 0
        if k in ipv4_send_bytes:
            send_bytes = int(ipv4_send_bytes[k].value)
        subnet = subnets[k.index][0]
        send = formatFn(send_bytes)
        if args.json:
            data['entries'][subnet] = send
        else:
            print("%-21s %6d" % (subnet, send))

    if args.json:
        print(json.dumps(data))

    ipv4_send_bytes.clear()

    if exiting:
        exit(0)
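# A quick, BPF-free sanity check of the subnet helpers above; the expected
# values match the docstring examples in the file:
assert mask_to_int(8) == 4278190080            # 255.0.0.0 as an int
assert parse_subnets(["10.10.0.0/24"]) == \
    [["10.10.0.0/24", 168427520, 4294967040]]  # addr and mask as ints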
# ===== postlund/home-assistant :: homeassistant/components/opentherm_gw/__init__.py =====
"""Support for OpenTherm Gateway devices."""
import asyncio
from datetime import date, datetime
import logging

import pyotgw
import pyotgw.vars as gw_vars
import voluptuous as vol

from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as COMP_CLIMATE
from homeassistant.components.sensor import DOMAIN as COMP_SENSOR
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
    ATTR_DATE,
    ATTR_ID,
    ATTR_MODE,
    ATTR_TEMPERATURE,
    ATTR_TIME,
    CONF_DEVICE,
    CONF_ID,
    CONF_NAME,
    EVENT_HOMEASSISTANT_STOP,
    PRECISION_HALVES,
    PRECISION_TENTHS,
    PRECISION_WHOLE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send

from .const import (
    ATTR_DHW_OVRD,
    ATTR_GW_ID,
    ATTR_LEVEL,
    CONF_CLIMATE,
    CONF_FLOOR_TEMP,
    CONF_PRECISION,
    DATA_GATEWAYS,
    DATA_OPENTHERM_GW,
    DOMAIN,
    SERVICE_RESET_GATEWAY,
    SERVICE_SET_CLOCK,
    SERVICE_SET_CONTROL_SETPOINT,
    SERVICE_SET_GPIO_MODE,
    SERVICE_SET_HOT_WATER_OVRD,
    SERVICE_SET_LED_MODE,
    SERVICE_SET_MAX_MOD,
    SERVICE_SET_OAT,
    SERVICE_SET_SB_TEMP,
)

_LOGGER = logging.getLogger(__name__)

CLIMATE_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_PRECISION): vol.In(
            [PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
        ),
        vol.Optional(CONF_FLOOR_TEMP, default=False): cv.boolean,
    }
)

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: cv.schema_with_slug_keys(
            {
                vol.Required(CONF_DEVICE): cv.string,
                vol.Optional(CONF_CLIMATE, default={}): CLIMATE_SCHEMA,
                vol.Optional(CONF_NAME): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)


async def options_updated(hass, entry):
    """Handle options update."""
    gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
    async_dispatcher_send(hass, gateway.options_update_signal, entry)


async def async_setup_entry(hass, config_entry):
    """Set up the OpenTherm Gateway component."""
    if DATA_OPENTHERM_GW not in hass.data:
        hass.data[DATA_OPENTHERM_GW] = {DATA_GATEWAYS: {}}

    gateway = OpenThermGatewayDevice(hass, config_entry)
    hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]] = gateway

    config_entry.add_update_listener(options_updated)

    # Schedule directly on the loop to avoid blocking HA startup.
    hass.loop.create_task(gateway.connect_and_subscribe())

    for comp in [COMP_BINARY_SENSOR, COMP_CLIMATE, COMP_SENSOR]:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, comp)
        )

    register_services(hass)
    return True


async def async_setup(hass, config):
    """Set up the OpenTherm Gateway component."""
    if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config:
        conf = config[DOMAIN]
        for device_id, device_config in conf.items():
            device_config[CONF_ID] = device_id

            hass.async_create_task(
                hass.config_entries.flow.async_init(
                    DOMAIN, context={"source": SOURCE_IMPORT}, data=device_config
                )
            )
    return True


def register_services(hass):
    """Register services for the component."""
    service_reset_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            )
        }
    )
    service_set_clock_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Optional(ATTR_DATE, default=date.today()): cv.date,
            vol.Optional(ATTR_TIME, default=datetime.now().time()): cv.time,
        }
    )
    service_set_control_setpoint_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_TEMPERATURE): vol.All(
                vol.Coerce(float), vol.Range(min=0, max=90)
            ),
        }
    )
    service_set_hot_water_ovrd_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_DHW_OVRD): vol.Any(
                vol.Equal("A"), vol.All(vol.Coerce(int), vol.Range(min=0, max=1))
            ),
        }
    )
    service_set_gpio_mode_schema = vol.Schema(
        vol.Any(
            vol.Schema(
                {
                    vol.Required(ATTR_GW_ID): vol.All(
                        cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
                    ),
                    vol.Required(ATTR_ID): vol.Equal("A"),
                    vol.Required(ATTR_MODE): vol.All(
                        vol.Coerce(int), vol.Range(min=0, max=6)
                    ),
                }
            ),
            vol.Schema(
                {
                    vol.Required(ATTR_GW_ID): vol.All(
                        cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
                    ),
                    vol.Required(ATTR_ID): vol.Equal("B"),
                    vol.Required(ATTR_MODE): vol.All(
                        vol.Coerce(int), vol.Range(min=0, max=7)
                    ),
                }
            ),
        )
    )
    service_set_led_mode_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_ID): vol.In("ABCDEF"),
            vol.Required(ATTR_MODE): vol.In("RXTBOFHWCEMP"),
        }
    )
    service_set_max_mod_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_LEVEL): vol.All(
                vol.Coerce(int), vol.Range(min=-1, max=100)
            ),
        }
    )
    service_set_oat_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_TEMPERATURE): vol.All(
                vol.Coerce(float), vol.Range(min=-40, max=99)
            ),
        }
    )
    service_set_sb_temp_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_TEMPERATURE): vol.All(
                vol.Coerce(float), vol.Range(min=0, max=30)
            ),
        }
    )

    async def reset_gateway(call):
        """Reset the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        mode_rst = gw_vars.OTGW_MODE_RESET
        status = await gw_dev.gateway.set_mode(mode_rst)
        gw_dev.status = status
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_RESET_GATEWAY, reset_gateway, service_reset_schema
    )

    async def set_control_setpoint(call):
        """Set the control setpoint on the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.DATA_CONTROL_SETPOINT
        value = await gw_dev.gateway.set_control_setpoint(call.data[ATTR_TEMPERATURE])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN,
        SERVICE_SET_CONTROL_SETPOINT,
        set_control_setpoint,
        service_set_control_setpoint_schema,
    )

    async def set_dhw_ovrd(call):
        """Set the domestic hot water override on the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.OTGW_DHW_OVRD
        value = await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN,
        SERVICE_SET_HOT_WATER_OVRD,
        set_dhw_ovrd,
        service_set_hot_water_ovrd_schema,
    )

    async def set_device_clock(call):
        """Set the clock on the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        attr_date = call.data[ATTR_DATE]
        attr_time = call.data[ATTR_TIME]
        await gw_dev.gateway.set_clock(datetime.combine(attr_date, attr_time))

    hass.services.async_register(
        DOMAIN, SERVICE_SET_CLOCK, set_device_clock, service_set_clock_schema
    )

    async def set_gpio_mode(call):
        """Set the OpenTherm Gateway GPIO modes."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gpio_id = call.data[ATTR_ID]
        gpio_mode = call.data[ATTR_MODE]
        mode = await gw_dev.gateway.set_gpio_mode(gpio_id, gpio_mode)
        gpio_var = getattr(gw_vars, f"OTGW_GPIO_{gpio_id}")
        gw_dev.status.update({gpio_var: mode})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_GPIO_MODE, set_gpio_mode, service_set_gpio_mode_schema
    )

    async def set_led_mode(call):
        """Set the OpenTherm Gateway LED modes."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        led_id = call.data[ATTR_ID]
        led_mode = call.data[ATTR_MODE]
        mode = await gw_dev.gateway.set_led_mode(led_id, led_mode)
        led_var = getattr(gw_vars, f"OTGW_LED_{led_id}")
        gw_dev.status.update({led_var: mode})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_LED_MODE, set_led_mode, service_set_led_mode_schema
    )

    async def set_max_mod(call):
        """Set the max modulation level."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD
        level = call.data[ATTR_LEVEL]
        if level == -1:
            # Backend only clears setting on non-numeric values.
            level = "-"
        value = await gw_dev.gateway.set_max_relative_mod(level)
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_MAX_MOD, set_max_mod, service_set_max_mod_schema
    )

    async def set_outside_temp(call):
        """Provide the outside temperature to the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.DATA_OUTSIDE_TEMP
        value = await gw_dev.gateway.set_outside_temp(call.data[ATTR_TEMPERATURE])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_OAT, set_outside_temp, service_set_oat_schema
    )

    async def set_setback_temp(call):
        """Set the OpenTherm Gateway SetBack temperature."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.OTGW_SB_TEMP
        value = await gw_dev.gateway.set_setback_temp(call.data[ATTR_TEMPERATURE])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_SB_TEMP, set_setback_temp, service_set_sb_temp_schema
    )


async def async_unload_entry(hass, entry):
    """Cleanup and disconnect from gateway."""
    await asyncio.gather(
        hass.config_entries.async_forward_entry_unload(entry, COMP_BINARY_SENSOR),
        hass.config_entries.async_forward_entry_unload(entry, COMP_CLIMATE),
        hass.config_entries.async_forward_entry_unload(entry, COMP_SENSOR),
    )
    gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
    await gateway.cleanup()
    return True


class OpenThermGatewayDevice:
    """OpenTherm Gateway device class."""

    def __init__(self, hass, config_entry):
        """Initialize the OpenTherm Gateway."""
        self.hass = hass
        self.device_path = config_entry.data[CONF_DEVICE]
        self.gw_id = config_entry.data[CONF_ID]
        self.name = config_entry.data[CONF_NAME]
        self.climate_config = config_entry.options
        self.status = {}
        self.update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_update"
        self.options_update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_options_update"
        self.gateway = pyotgw.pyotgw()
        self.gw_version = None

    async def cleanup(self, event=None):
        """Reset overrides on the gateway."""
        await self.gateway.set_control_setpoint(0)
        await self.gateway.set_max_relative_mod("-")
        await self.gateway.disconnect()

    async def connect_and_subscribe(self):
        """Connect to serial device and subscribe report handler."""
        self.status = await self.gateway.connect(self.hass.loop, self.device_path)
        _LOGGER.debug("Connected to OpenTherm Gateway at %s", self.device_path)
        self.gw_version = self.status.get(gw_vars.OTGW_BUILD)

        self.hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, self.cleanup)

        async def handle_report(status):
            """Handle reports from the OpenTherm Gateway."""
            _LOGGER.debug("Received report: %s", status)
            self.status = status
            async_dispatcher_send(self.hass, self.update_signal, status)

        self.gateway.subscribe(handle_report)
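# A minimal sanity check of CONFIG_SCHEMA above; the gateway id and serial
# path are illustrative, not part of the integration:
CONFIG_SCHEMA({DOMAIN: {"gw1": {CONF_DEVICE: "/dev/ttyUSB0"}}})
# schema_with_slug_keys() accepts any slug ("gw1") as the gateway id, and the
# optional climate options default to an empty CLIMATE_SCHEMA dict.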
# ===== sunlianqiang/kbengine :: kbe/res/scripts/common/Lib/test/leakers/test_selftype.py =====
# Reference cycles involving only the ob_type field are rather uncommon
# but possible.  Inspired by SF bug 1469629.

import gc

def leak():
    class T(type):
        pass
    class U(type, metaclass=T):
        pass
    U.__class__ = U
    del U
    gc.collect(); gc.collect(); gc.collect()

# ===== Grogdor/CouchPotatoServer :: libs/dateutil/__init__.py =====
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2010  Gustavo Niemeyer <gustavo@niemeyer.net>

This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__version__ = "2.1"

# ===== collmot/ardupilot :: Tools/scripts/build_binaries_history.py =====
#!/usr/bin/env python

from __future__ import print_function

import os
import sqlite3


class BuildBinariesHistory():
    def __init__(self, db_filepath):
        self.db_filepath = db_filepath
        self.assure_db_present()

    def progress(self, msg):
        print("BBHIST: %s" % msg)

    def conn(self):
        return sqlite3.connect(self.db_filepath)

    def create_schema(self, c):
        '''create our tables and whatnot'''
        schema_version = 1
        c.execute("create table version (version integer)")
        c.execute("insert into version (version) values (?)", (schema_version,))
        # at some stage we should probably directly associate build with runs....
        c.execute("create table build (hash text, tag text, vehicle text, board text, "
                  "frame text, text integer, data integer, bss integer, start_time real, duration real)")
        c.execute("create table run (hash text, tag text, start_time real, duration real)")
        c.commit()

    def sizes_for_file(self, filepath):
        cmd = "size %s" % (filepath,)
        stuff = os.popen(cmd).read()
        lines = stuff.split("\n")
        sizes = lines[1].split("\t")
        text = int(sizes[0])
        data = int(sizes[1])
        bss = int(sizes[2])
        self.progress("Binary size of %s:" % filepath)
        self.progress("text=%u" % text)
        self.progress("data=%u" % data)
        self.progress("bss=%u" % bss)
        return (text, data, bss)

    def assure_db_present(self):
        c = self.conn()
        need_schema_create = False
        try:
            version_cursor = c.execute("select version from version")
        except sqlite3.OperationalError as e:
            if "no such table" in str(e):  # FIXME: do better here?  what's in "e"?
                print("need schema create")
                need_schema_create = True

        if need_schema_create:
            self.create_schema(c)
            version_cursor = c.execute("select version from version")

        version_results = version_cursor.fetchall()

        if len(version_results) == 0:
            raise IOError("No version number?")
        if len(version_results) > 1:
            raise IOError("More than one version result?")
        first = version_results[0]
        want_version = 1
        got_version = first[0]
        if got_version != want_version:
            raise IOError("Bad version number (want=%u got=%u)" %
                          (want_version, got_version))
        self.progress("Got history version %u" % got_version)

    def record_build(self, hash, tag, vehicle, board, frame, bare_path, start_time, duration):
        if bare_path is None:
            (text, data, bss) = (None, None, None)
        else:
            (text, data, bss) = self.sizes_for_file(bare_path)
        c = self.conn()
        c.execute("replace into build (hash, tag, vehicle, board, frame, text, data, bss, start_time, duration) "
                  "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                  (hash, tag, vehicle, board, frame, text, data, bss, start_time, duration))
        c.commit()

    def record_run(self, hash, tag, start_time, duration):
        c = self.conn()
        c.execute("replace into run (hash, tag, start_time, duration) "
                  "values (?, ?, ?, ?)",
                  (hash, tag, start_time, duration))
        c.commit()
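# Ad-hoc inspection of the history database built above; the file name is
# illustrative, and the columns come straight from create_schema():
hist = BuildBinariesHistory("build_binaries_history.sqlite")
c = hist.conn()
for row in c.execute("select vehicle, board, text, data, bss from build "
                     "order by start_time desc limit 5"):
    print(row)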
# ===== gerv/bedrock :: tests/functional/firefox/desktop/test_all.py =====
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import pytest

from pages.firefox.desktop.all import FirefoxDesktopBasePage


@pytest.mark.skip_if_firefox(reason='Download button is not shown for up-to-date Firefox browsers.')
@pytest.mark.nondestructive
@pytest.mark.parametrize(('slug', 'locale'), [
    ('customize', None),
    ('fast', 'de'),
    ('trust', None)])
def test_download_button_is_displayed(slug, locale, base_url, selenium):
    locale = locale or 'en-US'
    page = FirefoxDesktopBasePage(selenium, base_url, locale, slug=slug).open()
    assert page.download_button.is_displayed
# ===== kerr-huang/SL4A :: python/src/Lib/glob.py =====
"""Filename globbing utility."""

import sys
import os
import re
import fnmatch

__all__ = ["glob", "iglob"]

def glob(pathname):
    """Return a list of paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la fnmatch.

    """
    return list(iglob(pathname))

def iglob(pathname):
    """Return an iterator which yields the paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la fnmatch.

    """
    if not has_magic(pathname):
        if os.path.lexists(pathname):
            yield pathname
        return
    dirname, basename = os.path.split(pathname)
    if not dirname:
        for name in glob1(os.curdir, basename):
            yield name
        return
    if has_magic(dirname):
        dirs = iglob(dirname)
    else:
        dirs = [dirname]
    if has_magic(basename):
        glob_in_dir = glob1
    else:
        glob_in_dir = glob0
    for dirname in dirs:
        for name in glob_in_dir(dirname, basename):
            yield os.path.join(dirname, name)

# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).

def glob1(dirname, pattern):
    if not dirname:
        dirname = os.curdir
    if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
        dirname = unicode(dirname, sys.getfilesystemencoding() or
                                   sys.getdefaultencoding())
    try:
        names = os.listdir(dirname)
    except os.error:
        return []
    if pattern[0] != '.':
        names = filter(lambda x: x[0] != '.', names)
    return fnmatch.filter(names, pattern)

def glob0(dirname, basename):
    if basename == '':
        # `os.path.split()` returns an empty basename for paths ending with a
        # directory separator.  'q*x/' should match only directories.
        if os.path.isdir(dirname):
            return [basename]
    else:
        if os.path.lexists(os.path.join(dirname, basename)):
            return [basename]
    return []


magic_check = re.compile('[*?[]')

def has_magic(s):
    return magic_check.search(s) is not None
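# The public API in action; only glob() and iglob() are exported via __all__
# (file names below are illustrative):
from glob import glob, iglob
print(glob('*.py'))               # eager: the full list of matches at once
for name in iglob('[a-c]*.txt'):  # lazy: one match at a time via a generator
    print(name)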
# ===== rasata/ansible :: lib/ansible/executor/task_queue_manager.py =====
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import multiprocessing
import os
import socket
import sys
import tempfile

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar

__all__ = ['TaskQueueManager']

class TaskQueueManager:

    '''
    This class handles the multiprocessing requirements of Ansible by
    creating a pool of worker forks, a result handler fork, and a
    manager object with shared datastructures/queues for coordinating
    work between all processes.

    The queue manager is responsible for loading the play strategy plugin,
    which dispatches the Play's tasks to hosts.
    '''

    def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):

        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self._stats = AggregateStats()
        self.passwords = passwords
        self._stdout_callback = stdout_callback

        self._callbacks_loaded = False
        self._callback_plugins = []

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # create the pool of worker threads, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        # A temporary file (opened pre-fork) used by connection
        # plugins for inter-process locking.
        self._connection_lockfile = tempfile.TemporaryFile()

        self._workers = []
        for i in range(self._options.forks):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()

    def _initialize_notified_handlers(self, handlers):
        '''
        Clears and initializes the shared notified handlers dict with entries
        for each handler in the play, which is an empty array that will contain
        inventory hostnames for those hosts triggering the handler.
        '''

        # Zero the dictionary first by removing any entries there.
        # Proxied dicts don't support iteritems, so we have to use keys()
        for key in self._notified_handlers.keys():
            del self._notified_handlers[key]

        # FIXME: there is a block compile helper for this...
        handler_list = []
        for handler_block in handlers:
            for handler in handler_block.block:
                handler_list.append(handler)

        # then initialize it with the handler names from the handler list
        for handler in handler_list:
            self._notified_handlers[handler.get_name()] = []

    def load_callbacks(self):
        '''
        Loads all available callbacks, with the exception of those which
        utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
        only one such callback plugin will be loaded.
        '''

        if self._callbacks_loaded:
            return

        stdout_callback_loaded = False
        if self._stdout_callback is None:
            self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK

        if self._stdout_callback not in callback_loader:
            raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)

        for callback_plugin in callback_loader.all(class_only=True):
            if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
                # we only allow one callback of type 'stdout' to be loaded, so check
                # the name of the current plugin and type to see if we need to skip
                # loading this callback plugin
                callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
                (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
                if callback_type == 'stdout':
                    if callback_name != self._stdout_callback or stdout_callback_loaded:
                        continue
                    stdout_callback_loaded = True
                elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST:
                    continue

                self._callback_plugins.append(callback_plugin(self._display))
            else:
                self._callback_plugins.append(callback_plugin())

        self._callbacks_loaded = True

    def run(self, play):
        '''
        Iterates over the roles/tasks in a play, using the given (or default)
        strategy for queueing tasks. The default is the linear strategy, which
        operates like classic Ansible by keeping all hosts in lock-step with
        a given task (meaning no hosts move on to the next task until all hosts
        are done with the current task).
        '''

        if not self._callbacks_loaded:
            self.load_callbacks()

        all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
        templar = Templar(loader=self._loader, variables=all_vars)

        new_play = play.copy()
        new_play.post_validate(templar)

        play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
        for callback_plugin in self._callback_plugins:
            if hasattr(callback_plugin, 'set_play_context'):
                callback_plugin.set_play_context(play_context)

        self.send_callback('v2_playbook_on_play_start', new_play)

        # initialize the shared dictionary containing the notified handlers
        self._initialize_notified_handlers(new_play.handlers)

        # load the specified strategy (or the default linear one)
        strategy = strategy_loader.get(new_play.strategy, self)
        if strategy is None:
            raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

        # build the iterator
        iterator = PlayIterator(
            inventory=self._inventory,
            play=new_play,
            play_context=play_context,
            variable_manager=self._variable_manager,
            all_vars=all_vars,
        )

        # and run the play using the strategy
        return strategy.run(iterator, play_context)

    def cleanup(self):
        self._display.debug("RUNNING CLEANUP")

        self.terminate()

        self._final_q.close()
        self._result_prc.terminate()

        for (worker_prc, main_q, rslt_q) in self._workers:
            rslt_q.close()
            main_q.close()
            worker_prc.terminate()

    def clear_failed_hosts(self):
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

    def get_inventory(self):
        return self._inventory

    def get_variable_manager(self):
        return self._variable_manager

    def get_loader(self):
        return self._loader

    def get_notified_handlers(self):
        return self._notified_handlers

    def get_workers(self):
        return self._workers[:]

    def terminate(self):
        self._terminated = True

    def send_callback(self, method_name, *args, **kwargs):
        for callback_plugin in self._callback_plugins:
            # a plugin that set self.disabled to True will not be called
            # see osx_say.py example for such a plugin
            if getattr(callback_plugin, 'disabled', False):
                continue
            methods = [
                getattr(callback_plugin, method_name, None),
                getattr(callback_plugin, 'v2_on_any', None)
            ]
            for method in methods:
                if method is not None:
                    try:
                        method(*args, **kwargs)
                    except Exception as e:
                        self._display.warning('Error when using %s: %s' % (method, str(e)))
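# Sketch of a v2-style callback plugin that send_callback() above would
# dispatch to, assuming it is enabled via DEFAULT_CALLBACK_WHITELIST; the
# class body is illustrative, not part of the original file:
class CallbackModule(object):
    CALLBACK_VERSION = 2.0       # load_callbacks() checks for >= 2.0
    CALLBACK_TYPE = 'aggregate'  # anything but 'stdout' here

    def __init__(self, display=None):
        # v2 plugins are instantiated with the display object
        self._display = display

    def v2_playbook_on_play_start(self, play):
        # invoked by TaskQueueManager.run() right before the strategy runs
        print("starting play: %s" % play.get_name())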
# ===== yumaokao/gdrv :: gdrv/commands/command_base.py =====
#!/usr/bin/python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
import os
import sys
import logging
import fnmatch
import httplib2
from apiclient import errors
from apiclient.discovery import build
from oauth2client.file import Storage

lg = logging.getLogger("BASE")
# lg.setLevel(logging.INFO)


class DriveCommand():
    """ A Drive Command Class """

    def __init__(self, pconfig):
        self.config = pconfig
        self.msgout = sys.stdout

    @staticmethod
    def static_add_sub_command_parser(psub_par):
        pass

    def __call__(self, args=None):
        if args is not None:
            self.args = args
        self.do_drive_command()

    def do_drive_command(self):
        pass

# ## base command methods ##
    def info(self, *args):
        try:
            self.msgout.write(*args)
            self.msgout.write('\n')
            self.msgout.flush()
        except UnicodeError:
            pass

    def info_append(self, *args):
        try:
            self.msgout.write(*args)
            self.msgout.flush()
            # self.msgout.write('\n')
        except UnicodeError:
            pass

    def parse_input_string(self, pinstr, pmaxlen):
        idxs = []
        if pinstr == 'a':
            return range(pmaxlen)
        for acom in pinstr.split(','):
            arange = acom.split('-')
            # lg.debug("aidx ")
            # lg.debug(arange)
            try:
                if len(arange) == 1:
                    aidx = int(arange[0])
                    idxs.append(aidx)
                elif len(arange) == 2:
                    aidx = int(arange[0])
                    bidx = int(arange[1])
                    idxs.extend(range(aidx, bidx + 1))
            except ValueError:
                pass
            # lg.debug("aidx %d bidx %d") % (aidx, bidx)
        # ridx = filter(lambda x: x < pmaxlen, idxs)
        # lg.debug(ridx)
        return set(filter(lambda x: x < pmaxlen, idxs))


class DriveServiceCommand(DriveCommand):
    """ A Drive Service Command Class """

    def get_storage(self):
        self.storage = Storage(
            os.path.expanduser(self.config.get('api', 'storage')))

    def get_credentials(self):
        self.credentials = None
        self.get_storage()
        self.credentials = self.storage.get()

    def get_service(self):
        self.service = None
        self.get_credentials()
        if self.credentials is None or self.credentials.invalid:
            print "Please init oauth2 flow first"
        else:
            http = httplib2.Http()
            http = self.credentials.authorize(http)
            self.service = build('drive', 'v2', http=http)

    def do_drive_command(self):
        self.get_service()
        if self.service is not None:
            self.do_service_command()

    def do_service_command(self):
        pass

# ## helper drive apis ##
    def find_drive_files(self, psrcdir, pname,
                         hidedir=False, hidetrashed=True):
        matches = []
        files = self.get_all_children(psrcdir,
                                      hidedir=hidedir, hidetrashed=hidetrashed)
        for afile in files:
            if fnmatch.fnmatch(afile['title'], pname):
                matches.append(afile)
        return matches

    def get_all_children(self, psrcdir, hidedir=False, hidetrashed=True):
        parentid = self.find_parent_id(psrcdir)
        if parentid is None:
            lg.error("Can't find directory %s in drive" % psrcdir)
            sys.exit("Can't find directory %s in drive" % psrcdir)
        query = "'%s' in parents" % parentid
        if hidedir is True:
            query += " and mimeType != 'application/vnd.google-apps.folder'"
        if hidetrashed is True:
            query += " and trashed = false"
        return self.file_list(query)

    def find_parent_id(self, pdir, pmkdir=False):
        dirs = pdir.split('/')
        parentid = 'root'
        # for aidx in range(len(dirs)):
        for adir in dirs:
            # lg.debug("dirs %s" % (adir))
            if adir == '':
                continue
            children_dirs = self.check_children_dirs(adir, parentid)
            dirs_nums = len(children_dirs)
            if dirs_nums == 0:
                lg.error("Can't find directory %s" % (adir))
                return None
            elif dirs_nums > 1:
                lg.warn("Find %d instances of directory %s" % (
                    dirs_nums, adir))
            parentid = children_dirs[0]['id']
        return parentid

    def check_children_dirs(self, dirname, parent="root"):
        query = "mimeType = 'application/vnd.google-apps.folder'"
        query += " and title = '%s'" % dirname
        query += " and '%s' in parents" % parent
        # lg.debug("query %s" % query)
        children_dirs = self.file_list(query)
        # for adir in children_dirs:
        #     lg.debug("children %s id %s" % (adir['title'], adir['id']))
        return children_dirs

# ## basic drive apis ##
    def file_list(self, query=""):
        """Retrieve a list of File resources.

        Args:
            service: Drive API service instance.
        Returns:
            List of File resources.
        """
        # lg.debug("file_list query %s" % query)
        result = []
        page_token = None
        while True:
            try:
                param = {}
                if query != "":
                    param['q'] = query
                if page_token:
                    param['pageToken'] = page_token
                files = self.service.files().list(**param).execute()

                result.extend(files['items'])
                page_token = files.get('nextPageToken')
                if not page_token:
                    break
            except errors.HttpError, error:
                print 'An error occurred: %s' % error
                break
        return result

    def permission_list(self, pfile="root"):
        """Retrieve a list of permissions of the file

        Args:
            pfile: drive file id
        Returns:
            list of file permissions
        """
        # lg.debug("permission_list query %s" % query)
        result = []
        page_token = None
        while True:
            try:
                param = {}
                if page_token:
                    param['pageToken'] = page_token
                perms = self.service.permissions().list(fileId=pfile).execute()

                result.extend(perms['items'])
                page_token = perms.get('nextPageToken')
                if not page_token:
                    break
            except errors.HttpError, error:
                print 'An error occurred: %s' % error
                break
        return result

    # deprecated
    def children_list(self, parent="root", query=""):
        """Retrieve a list of File resources.

        Args:
            parent: parent id or alias 'root'
            query: query string
        Returns:
            List of File resources.
        """
        result = []
        page_token = None
        while True:
            try:
                param = {}
                if query != "":
                    param['q'] = query
                if page_token:
                    param['pageToken'] = page_token
                files = self.service.children().list(
                    folderId=parent, **param).execute()

                result.extend(files['items'])
                page_token = files.get('nextPageToken')
                if not page_token:
                    break
            except errors.HttpError, error:
                print 'An error occurred: %s' % error
                break
        return result
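# parse_input_string() walks comma-separated indices and ranges; 'a' selects
# everything. A quick illustration (the config argument is unused here, so
# None is fine; this is Python 2, matching the file above):
cmd = DriveCommand(None)
print cmd.parse_input_string('0,2-4,9', 6)  # -> set([0, 2, 3, 4]); 9 is filtered out
print cmd.parse_input_string('a', 3)        # -> [0, 1, 2]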
# ===== Ahn1/Clinq :: web/clinq/management/commands/updateindex.py =====
import os
import clinq.models as model

from optparse import make_option
from django.core.management.base import BaseCommand, CommandError

from django.conf import settings

import clinq.management.commands.tagHandler as handler

class Command(BaseCommand):
    # option_list = BaseCommand.option_list + (
    #     make_option('--long', '-l', dest='long',
    #         help='Help for the long options'),
    # )
    help = 'Refresh media index'

    def handle(self, **options):
        path = settings.MEDIA_PATH
        print path

        self.IndexFolder(path)

    def IndexFolder(self, path):
        oslist = os.listdir(path)
        oslist = [os.path.join(path, f) for f in oslist]

        files = [f for f in oslist if os.path.isfile(f)]
        dirs = [f for f in oslist if os.path.isdir(f)]

        for subdir in dirs:
            self.IndexFolder(subdir)
            # print subdir

        for targetFile in files:
            self.IndexFile(targetFile)

    def IndexFile(self, path):
        try:
            fileName, fileExtension = os.path.splitext(path)

            relPath = os.path.relpath(path, settings.MEDIA_PATH)

            dbObj = None
            if model.File.objects.filter(path=relPath).count() == 0:
                dbObj = model.File()
                dbObj.path = relPath
            else:
                dbObj = model.File.objects.filter(path=relPath)[:1][0]

            lastEditTime = os.stat(path).st_mtime

            if dbObj.changeDate < lastEditTime:
                dbObj.changeDate = lastEditTime
                dbObj.save()

                if fileExtension in [".mp3"]:
                    self.HandleAudioFile(path, dbObj)
            else:
                print("Skip file '%s'" % path)

        except Exception, e:
            print e

    def HandleAudioFile(self, path, refdbFile):
        print "Try to handle {0}".format(path)

        fileName, fileExtension = os.path.splitext(path)

        tagObject = None
        if model.AudioFile.objects.filter(refFile=refdbFile).count() == 0:
            tagObject = model.AudioFile()
            tagObject.refFile = refdbFile
            print "Create new mp3 Tag"
        else:
            tagObject = model.AudioFile.objects.filter(refFile=refdbFile)[:1][0]
            print "Load mp3 Tag"

        if fileExtension in handler.audio:
            handlerClass = handler.audio[fileExtension]

            handlerObj = handlerClass()

            handlerObj.UpdateTags(path, tagObject)

            print [tagObject]

        tagObject.save()
# ===== bqbn/addons-server :: src/olympia/addons/api_urls.py =====
from django.conf.urls import include, url

from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter

from olympia.activity.views import VersionReviewNotesViewSet

from .views import (
    AddonAutoCompleteSearchView, AddonFeaturedView, AddonRecommendationView,
    AddonSearchView, AddonVersionViewSet, AddonViewSet, CompatOverrideView,
    LanguageToolsView, ReplacementAddonView, StaticCategoryView)


addons = SimpleRouter()
addons.register(r'addon', AddonViewSet, basename='addon')

# Router for children of /addons/addon/{addon_pk}/.
sub_addons = NestedSimpleRouter(addons, r'addon', lookup='addon')
sub_addons.register('versions', AddonVersionViewSet, basename='addon-version')
sub_versions = NestedSimpleRouter(sub_addons, r'versions', lookup='version')
sub_versions.register(r'reviewnotes', VersionReviewNotesViewSet,
                      basename='version-reviewnotes')

urls = [
    url(r'', include(addons.urls)),
    url(r'', include(sub_addons.urls)),
    url(r'', include(sub_versions.urls)),
    url(r'^autocomplete/$', AddonAutoCompleteSearchView.as_view(),
        name='addon-autocomplete'),
    url(r'^search/$', AddonSearchView.as_view(), name='addon-search'),
    url(r'^categories/$', StaticCategoryView.as_view(), name='category-list'),
    url(r'^language-tools/$', LanguageToolsView.as_view(),
        name='addon-language-tools'),
    url(r'^replacement-addon/$', ReplacementAddonView.as_view(),
        name='addon-replacement-addon'),

    url(r'^recommendations/$', AddonRecommendationView.as_view(),
        name='addon-recommendations'),
]

addons_v3 = urls + [
    url(r'^compat-override/$', CompatOverrideView.as_view(),
        name='addon-compat-override'),
    url(r'^featured/$', AddonFeaturedView.as_view(), name='addon-featured'),
]

addons_v4 = urls
# ===== kthordarson/youtube-dl-ruv :: youtube_dl/extractor/comedycentral.py =====

from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .mtv import MTVServicesInfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,
    ExtractorError,
    float_or_none,
    unified_strdate,
)


class ComedyCentralIE(MTVServicesInfoExtractor):
    _VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
        (video-clips|episodes|cc-studios|video-collections|full-episodes)
        /(?P<title>.*)'''
    _FEED_URL = 'http://comedycentral.com/feeds/mrss/'

    _TEST = {
        'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
        'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
        'info_dict': {
            'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
            'ext': 'mp4',
            'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
            'description': 'After a certain point, breastfeeding becomes c**kblocking.',
        },
    }


class ComedyCentralShowsIE(InfoExtractor):
    IE_DESC = 'The Daily Show / The Colbert Report'
    # urls can be abbreviations like :thedailyshow or :colbert
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                      |https?://(:www\.)?
                          (?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
                         ((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
                          (?P<clip>
                              (?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
                              |(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
                          )|
                          (?P<interview>
                              extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
                     (?:[?#].*|$)'''
    _TESTS = [{
        'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
        'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
        'info_dict': {
            'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
            'ext': 'mp4',
            'upload_date': '20121213',
            'description': 'Kristen Stewart learns to let loose in "On the Road."',
            'uploader': 'thedailyshow',
            'title': 'thedailyshow kristen-stewart part 1',
        }
    }, {
        'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
        'only_matching': True,
    }, {
        'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
        'only_matching': True,
    }, {
        'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
        'only_matching': True,
    }, {
        'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
        'only_matching': True,
    }]

    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']

    _video_extensions = {
        '3500': 'mp4',
        '2200': 'mp4',
        '1700': 'mp4',
        '1200': 'mp4',
        '750': 'mp4',
        '400': 'mp4',
    }
    _video_dimensions = {
        '3500': (1280, 720),
        '2200': (960, 540),
        '1700': (768, 432),
        '1200': (640, 360),
        '750': (512, 288),
        '400': (384, 216),
    }

    @staticmethod
    def _transform_rtmp_url(rtmp_video_url):
        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
        if not m:
            raise ExtractorError('Cannot transform RTMP url')
        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
        return base + m.group('finalid')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)

        if mobj.group('shortname'):
            if mobj.group('shortname') in ('tds', 'thedailyshow'):
                url = 'http://thedailyshow.cc.com/full-episodes/'
            else:
                url = 'http://thecolbertreport.cc.com/full-episodes/'
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            assert mobj is not None

        if mobj.group('clip'):
            if mobj.group('videotitle'):
                epTitle = mobj.group('videotitle')
            elif mobj.group('showname') == 'thedailyshow':
                epTitle = mobj.group('tdstitle')
            else:
                epTitle = mobj.group('cntitle')
            dlNewest = False
        elif mobj.group('interview'):
            epTitle = mobj.group('interview_title')
            dlNewest = False
        else:
            dlNewest = not mobj.group('episode')
            if dlNewest:
                epTitle = mobj.group('showname')
            else:
                epTitle = mobj.group('episode')
        show_name = mobj.group('showname')

        webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
        if dlNewest:
            url = htmlHandle.geturl()
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            if mobj is None:
                raise ExtractorError('Invalid redirected URL: ' + url)
            if mobj.group('episode') == '':
                raise ExtractorError('Redirected URL is still not specific: ' + url)
            epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]

        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
        if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a without
            # a URL prefix; so extract the alternate reference
            # and then add the URL prefix manually.

            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
            if len(altMovieParams) == 0:
                raise ExtractorError('unable to find Flash URL in webpage ' + url)
            else:
                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]

        uri = mMovieParams[0][1]
        # Correct cc.com in uri
        uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)

        index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
        idoc = self._download_xml(
            index_url, epTitle,
            'Downloading show index', 'Unable to download episode index')

        title = idoc.find('./channel/title').text
        description = idoc.find('./channel/description').text

        entries = []
        item_els = idoc.findall('.//item')
        for part_num, itemEl in enumerate(item_els):
            upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
            thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')

            content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
            duration = float_or_none(content.attrib.get('duration'))
            mediagen_url = content.attrib['url']
            guid = itemEl.find('./guid').text.rpartition(':')[-1]

            cdoc = self._download_xml(
                mediagen_url, epTitle,
                'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))

            turls = []
            for rendition in cdoc.findall('.//rendition'):
                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
                turls.append(finfo)

            formats = []
            for format, rtmp_video_url in turls:
                w, h = self._video_dimensions.get(format, (None, None))
                formats.append({
                    'format_id': 'vhttp-%s' % format,
                    'url': self._transform_rtmp_url(rtmp_video_url),
                    'ext': self._video_extensions.get(format, 'mp4'),
                    'height': h,
                    'width': w,

                    'format_note': 'HTTP 400 at the moment (patches welcome!)',
                    'preference': -100,
                })
                formats.append({
                    'format_id': 'rtmp-%s' % format,
                    'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
                    'ext': self._video_extensions.get(format, 'mp4'),
                    'height': h,
                    'width': w,
                })
            self._sort_formats(formats)

            virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
            entries.append({
                'id': guid,
                'title': virtual_id,
                'formats': formats,
                'uploader': show_name,
                'upload_date': upload_date,
                'duration': duration,
                'thumbnail': thumbnail,
                'description': description,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': show_name + ' ' + title,
            'description': description,
        }
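`_transform_rtmp_url` above is what makes the 'vhttp' formats possible: it lifts the `gsp.comedystor/...` path out of the RTMP URL and grafts it onto an HTTP mirror. A worked example of the mapping (the input URL is invented for illustration):

# rtmp_video_url = 'rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/com/dailyshow/clip_480.mp4'
# m.group('finalid') -> 'gsp.comedystor/com/dailyshow/clip_480.mp4'
# return value       -> 'http://mtvnmobile.vo.llnwd.net/kip0/..._pxE=mp4/44620/mtvnorigin/gsp.comedystor/com/dailyshow/clip_480.mp4'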
# ===== MaizerGomes/youtube-dl :: youtube_dl/extractor/collegerama.py =====

from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
    float_or_none,
    int_or_none,
)


class CollegeRamaIE(InfoExtractor):
    _VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
    _TESTS = [
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
            'md5': '481fda1c11f67588c0d9d8fbdced4e39',
            'info_dict': {
                'id': '585a43626e544bdd97aeb71a0ec907a01d',
                'ext': 'mp4',
                'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
                'description': '',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 7713.088,
                'timestamp': 1413309600,
                'upload_date': '20141014',
            },
        },
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
            'md5': 'ef1fdded95bdf19b12c5999949419c92',
            'info_dict': {
                'id': '86a9ea9f53e149079fbdb4202b521ed21d',
                'ext': 'wmv',
                'title': '64ste Vakantiecursus: Afvalwater',
                'description': 'md5:7fd774865cc69d972f542b157c328305',
                'duration': 10853,
                'timestamp': 1326446400,
                'upload_date': '20120113',
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        player_options_request = {
            "getPlayerOptionsRequest": {
                "ResourceId": video_id,
                "QueryString": "",
            }
        }

        request = compat_urllib_request.Request(
            'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
            json.dumps(player_options_request))
        request.add_header('Content-Type', 'application/json')

        player_options = self._download_json(request, video_id)

        presentation = player_options['d']['Presentation']
        title = presentation['Title']
        description = presentation.get('Description')
        thumbnail = None
        duration = float_or_none(presentation.get('Duration'), 1000)
        timestamp = int_or_none(presentation.get('UnixTime'), 1000)

        formats = []
        for stream in presentation['Streams']:
            for video in stream['VideoUrls']:
                thumbnail_url = stream.get('ThumbnailUrl')
                if thumbnail_url:
                    thumbnail = 'http://collegerama.tudelft.nl' + thumbnail_url
                format_id = video['MediaType']
                if format_id == 'SS':
                    continue
                formats.append({
                    'url': video['Location'],
                    'format_id': format_id,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }
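This extractor drives Mediasite's JSON `PlayerService` endpoint rather than scraping HTML; the whole exchange is one POST. A hedged sketch of the same call outside the extractor, standard library only, with the endpoint and payload shape copied from the code above:

import json
import urllib2

# Build the same getPlayerOptionsRequest body the extractor sends.
body = json.dumps({"getPlayerOptionsRequest": {
    "ResourceId": "585a43626e544bdd97aeb71a0ec907a01d",  # a video id from the tests above
    "QueryString": "",
}})
req = urllib2.Request(
    'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
    body, {'Content-Type': 'application/json'})
options = json.load(urllib2.urlopen(req))
title = options['d']['Presentation']['Title']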
# ===== DVSBA/ajenti :: ajenti/feedback.py =====

"""
Module for sending usage statistics to ajenti.org
"""

__all__ = ['send_stats', 'check_uid']

import os
import base64
import random

from ajenti.utils import *
from ajenti import version


global uid
uid = ''


def send_stats(server, plugins, addplugin=None, delplugin=None):
    """
    Sends usage statistics to the server. Statistics include: OS name, list of
    installed plugins and Ajenti version.

    :param  server:     server URL
    :type   server:     str
    :param  addplugin:  plugin being currently installed or None
    :type   addplugin:  str
    :param  delplugin:  plugin being currently removed or None
    :type   delplugin:  str
    """
    plugs = []
    plugs.extend(plugins)
    if addplugin:
        plugs.append(addplugin)
    if delplugin and delplugin in plugs:
        plugs.remove(delplugin)
    plugs = ','.join(plugs)
    data = '1|%s|%s|%s|,%s,' % (uid, version(), detect_platform(mapping=False), plugs)
    data = base64.b64encode(data)
    download('http://%s/api/submit?data=%s' % (server, data))


def check_uid():
    """
    Checks that installation UID is present and generates it if it's not.
    """
    global uid
    file = '/var/lib/ajenti/installation-uid'
    if not os.path.exists(file):
        uid = str(random.randint(1, 9000*9000))
        try:
            open(file, 'w').write(uid)
        except:
            uid = '0'
    else:
        uid = open(file).read()
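The payload `send_stats` ships is just a pipe-separated record wrapped in base64 and passed as a query parameter. Decoding one by hand makes the format obvious (the field values below are made up for illustration):

import base64

record = '1|%s|%s|%s|,%s,' % ('4242', '1.0', 'debian', 'core,dashboard')
encoded = base64.b64encode(record)           # what ends up in ?data=...
print base64.b64decode(encoded).split('|')   # ['1', '4242', '1.0', 'debian', ',core,dashboard,']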
# ===== tshirtman/zine_ad_sense :: __init__.py =====

# -*- coding: utf-8 -*-
"""
    zine.plugins.ad_sense
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    :copyright: (c) 2011 by gabriel pettier
    :license: GPL, see LICENSE for more details.
"""
from os.path import dirname, join
from random import choice

from zine.api import *
from zine.views.admin import render_admin_response
from zine.utils.admin import flash
from zine.utils.http import redirect
from zine.utils.forms import TextField
from zine.config import ConfigurationTransactionError
from zine.privileges import BLOG_ADMIN

TEMPLATES = join(dirname(__file__), 'templates')

def add_ad_sense_link(req, navigation_bar):
    if not req.user.has_privilege(BLOG_ADMIN):
        return
    for link_id, url, title, children in navigation_bar:
        if link_id == 'options':
            children.insert(-3, ('ad sense', url_for('ad_sense/config'),
                            _('Ad sense')))


@require_privilege(BLOG_ADMIN)
def view_ad_sense_config(req):
    client_code = req.args.get('client_code')
    banner_slot = req.args.get('banner_slot')
    width = req.args.get('width')
    height = req.args.get('height')
    if client_code and banner_slot and width and height:
        try:
            req.app.cfg.change_single('ad_sense/client_code', client_code)
            req.app.cfg.change_single('ad_sense/banner_slot', banner_slot)
            req.app.cfg.change_single('ad_sense/width', width)
            req.app.cfg.change_single('ad_sense/height', height)
            flash(_('Config updated!'), 'info')
        except ConfigurationTransactionError, e:
            flash(_('The code could not be changed.'), 'error')
        return redirect(url_for('ad_sense/config'))

    return render_admin_response('admin/ad_sense.html',
                                 'config.ad_sense',
                                 client_code=req.app.cfg['ad_sense/client_code'],
                                 banner_slot=req.app.cfg['ad_sense/banner_slot'],
                                 width=req.app.cfg['ad_sense/width'],
                                 height=req.app.cfg['ad_sense/height']
                                 )


def add_adsense_banner(post):
    conf = get_application().cfg
    client_code = conf['ad_sense/client_code']
    banner_slot = conf['ad_sense/banner_slot']
    banner_width = conf['ad_sense/width']
    banner_height = conf['ad_sense/height']
    if choice((True, False)):
        return '''
        <span class="ad">
        <script type="text/javascript"><!--
        google_ad_client = "'''+client_code+'''";
        google_ad_slot = "'''+banner_slot+'''";
        google_ad_width = '''+banner_width+''';
        google_ad_height = '''+banner_height+''';
        //-->
        </script>
        <script type="text/javascript"
        src="http://pagead2.googlesyndication.com/pagead/show_ads.js">
        </script>
        </span>
        '''
    else:
        return ''

def insert_header_js(metadata):
    metadata.append('''
    <script type="text/javascript">

    var _gaq = _gaq || [];
    _gaq.push(['_setAccount', 'UA-23430110-1']);
    _gaq.push(['_trackPageview']);

    (function() {
    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
    })();

    </script>
    ''')

def setup(app, plugin):
    """This function is called by Zine in the application initialization
    phase. Here we connect to the events and register our template paths,
    url rules, views etc.
    """
    app.connect_event('after-entry-rendered', add_adsense_banner)

    # our fish has a configurable skin. So we register one for it which
    # defaults to blue.
    app.add_config_var('ad_sense/client_code', TextField(default=''))
    app.add_config_var('ad_sense/banner_slot', TextField(default=''))
    app.add_config_var('ad_sense/width', TextField(default=''))
    app.add_config_var('ad_sense/height', TextField(default=''))

    app.connect_event('modify-admin-navigation-bar', add_ad_sense_link)
    app.connect_event('before-metadata-assembled', insert_header_js)

    # for the admin panel we add a url rule. Because it's an admin panel
    # page located in options we add such an url rule.
    app.add_url_rule('/options/ad_sense', prefix='admin',
                     endpoint='ad_sense/config',
                     view=view_ad_sense_config)

    # add our templates to the searchpath so that Zine can find the
    # admin panel template for the fish config panel.
    app.add_template_searchpath(TEMPLATES)
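Worth noting in `add_adsense_banner`: `choice((True, False))` is a coin flip per rendered entry, so the banner is injected on roughly half of all entry views, and the `add_config_var` calls in `setup` are what make the `ad_sense/*` keys readable from `req.app.cfg` in the view above. A tiny sketch of the frequency behaviour (illustrative only):

from random import choice

shown = sum(1 for _ in range(10000) if choice((True, False)))
# 'shown' hovers around 5000: the ad is served on about 50% of renders.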
# ===== jonyroda97/redbot-amigosprovaveis :: lib/pip/_vendor/urllib3/contrib/socks.py =====

# -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.

The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:

- SOCKS4
- SOCKS4a
- SOCKS5
- Usernames and passwords for the SOCKS proxy

Known Limitations:

- Currently PySocks does not support contacting remote websites via literal
  IPv6 addresses. Any such connection attempt will fail. You must use a domain
  name.
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
  such connection attempt will fail.
"""
from __future__ import absolute_import

try:
    import socks
except ImportError:
    import warnings
    from ..exceptions import DependencyWarning

    warnings.warn((
        'SOCKS support in urllib3 requires the installation of optional '
        'dependencies: specifically, PySocks. For more information, see '
        'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
    ),
        DependencyWarning
    )
    raise

from socket import error as SocketError, timeout as SocketTimeout

from ..connection import (
    HTTPConnection, HTTPSConnection
)
from ..connectionpool import (
    HTTPConnectionPool, HTTPSConnectionPool
)
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url

try:
    import ssl
except ImportError:
    ssl = None


class SOCKSConnection(HTTPConnection):
    """
    A plain-text HTTP connection that connects via a SOCKS proxy.
    """
    def __init__(self, *args, **kwargs):
        self._socks_options = kwargs.pop('_socks_options')
        super(SOCKSConnection, self).__init__(*args, **kwargs)

    def _new_conn(self):
        """
        Establish a new connection via the SOCKS proxy.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address

        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options

        try:
            conn = socks.create_connection(
                (self.host, self.port),
                proxy_type=self._socks_options['socks_version'],
                proxy_addr=self._socks_options['proxy_host'],
                proxy_port=self._socks_options['proxy_port'],
                proxy_username=self._socks_options['username'],
                proxy_password=self._socks_options['password'],
                proxy_rdns=self._socks_options['rdns'],
                timeout=self.timeout,
                **extra_kw
            )

        except SocketTimeout as e:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))

        except socks.ProxyError as e:
            # This is fragile as hell, but it seems to be the only way to raise
            # useful errors here.
            if e.socket_err:
                error = e.socket_err
                if isinstance(error, SocketTimeout):
                    raise ConnectTimeoutError(
                        self,
                        "Connection to %s timed out. (connect timeout=%s)" %
                        (self.host, self.timeout)
                    )
                else:
                    raise NewConnectionError(
                        self,
                        "Failed to establish a new connection: %s" % error
                    )
            else:
                raise NewConnectionError(
                    self,
                    "Failed to establish a new connection: %s" % e
                )

        except SocketError as e:  # Defensive: PySocks should catch all these.
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)

        return conn


# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
    pass


class SOCKSHTTPConnectionPool(HTTPConnectionPool):
    ConnectionCls = SOCKSConnection


class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
    ConnectionCls = SOCKSHTTPSConnection


class SOCKSProxyManager(PoolManager):
    """
    A version of the urllib3 ProxyManager that routes connections via the
    defined SOCKS proxy.
    """
    pool_classes_by_scheme = {
        'http': SOCKSHTTPConnectionPool,
        'https': SOCKSHTTPSConnectionPool,
    }

    def __init__(self, proxy_url, username=None, password=None,
                 num_pools=10, headers=None, **connection_pool_kw):
        parsed = parse_url(proxy_url)

        if username is None and password is None and parsed.auth is not None:
            split = parsed.auth.split(':')
            if len(split) == 2:
                username, password = split
        if parsed.scheme == 'socks5':
            socks_version = socks.PROXY_TYPE_SOCKS5
            rdns = False
        elif parsed.scheme == 'socks5h':
            socks_version = socks.PROXY_TYPE_SOCKS5
            rdns = True
        elif parsed.scheme == 'socks4':
            socks_version = socks.PROXY_TYPE_SOCKS4
            rdns = False
        elif parsed.scheme == 'socks4a':
            socks_version = socks.PROXY_TYPE_SOCKS4
            rdns = True
        else:
            raise ValueError(
                "Unable to determine SOCKS version from %s" % proxy_url
            )

        self.proxy_url = proxy_url

        socks_options = {
            'socks_version': socks_version,
            'proxy_host': parsed.host,
            'proxy_port': parsed.port,
            'username': username,
            'password': password,
            'rdns': rdns
        }
        connection_pool_kw['_socks_options'] = socks_options

        super(SOCKSProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw
        )

        self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
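For orientation, typical use of this module looks like the following (standard urllib3 usage; the Tor-style port is just an example, and outside pip's vendored tree you would install `urllib3[socks]` and import `urllib3.contrib.socks` instead):

from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager

# 'socks5h' resolves hostnames on the proxy side (rdns=True above);
# plain 'socks5' resolves them locally before connecting.
proxy = SOCKSProxyManager('socks5h://127.0.0.1:9050/')
response = proxy.request('GET', 'http://example.com/')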
# ===== acshan/odoo (8.0) :: addons/google_calendar/google_calendar.py =====

# -*- coding: utf-8 -*-

import operator
import simplejson
import urllib2

import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, exception_to_unicode

from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv

import logging
_logger = logging.getLogger(__name__)


def status_response(status, substr=False):
    if substr:
        return int(str(status)[0])
    else:
        return status_response(status, substr=True) == 2


class Meta(type):
    """ This Meta class allow to define class as a structure, and so instancied variable
        in __init__ to avoid to have side effect alike 'static' variable """
    def __new__(typ, name, parents, attrs):
        methods = dict((k, v) for k, v in attrs.iteritems()
                       if callable(v))
        attrs = dict((k, v) for k, v in attrs.iteritems()
                     if not callable(v))

        def init(self, **kw):
            for k, v in attrs.iteritems():
                setattr(self, k, v)
            for k, v in kw.iteritems():
                assert k in attrs
                setattr(self, k, v)

        methods['__init__'] = init
        methods['__getitem__'] = getattr
        return type.__new__(typ, name, parents, methods)


class Struct(object):
    __metaclass__ = Meta


class OpenerpEvent(Struct):
    event = False
    found = False
    event_id = False
    isRecurrence = False
    isInstance = False
    update = False
    status = False
    attendee_id = False
    synchro = False


class GmailEvent(Struct):
    event = False
    found = False
    isRecurrence = False
    isInstance = False
    update = False
    status = False


class SyncEvent(object):
    def __init__(self):
        self.OE = OpenerpEvent()
        self.GG = GmailEvent()
        self.OP = None

    def __getitem__(self, key):
        return getattr(self, key)

    def compute_OP(self, modeFull=True):
        #If event are already in Gmail and in OpenERP
        if self.OE.found and self.GG.found:
            is_owner = self.OE.event.env.user.id == self.OE.event.user_id.id
            #If the event has been deleted from one side, we delete on other side !
            if self.OE.status != self.GG.status and is_owner:
                self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
                                 'The event has been deleted from one side, we delete on other side !')
            #If event is not deleted !
            elif self.OE.status and (self.GG.status or not is_owner):
                if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
                    if self.OE.update < self.GG.update:
                        tmpSrc = 'GG'
                    elif self.OE.update > self.GG.update:
                        tmpSrc = 'OE'
                    assert tmpSrc in ['GG', 'OE']

                    #if self.OP.action == None:
                    if self[tmpSrc].isRecurrence:
                        if self[tmpSrc].status:
                            self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
                        else:
                            self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')

                    elif self[tmpSrc].isInstance:
                        self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
                    else:
                        self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
                else:
                    if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
                        self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
                    else:
                        self.OP = NothingToDo("", 'Not update needed')
            else:
                self.OP = NothingToDo("", "Both are already deleted")

        # New in openERP... Create on create_events of synchronize function
        elif self.OE.found and not self.GG.found:
            if self.OE.status:
                self.OP = Delete('OE', 'Update or delete from GOOGLE')
            else:
                if not modeFull:
                    self.OP = Delete('GG', 'Deleted from Odoo, need to delete it from Gmail if already created')
                else:
                    self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in Odoo")
        elif self.GG.found and not self.OE.found:
            tmpSrc = 'GG'
            if not self.GG.status and not self.GG.isInstance:
                # don't need to make something... because event has been created and deleted before the synchronization
                self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
            else:
                if self.GG.isInstance:
                    if self[tmpSrc].status:
                        self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
                    else:
                        self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
                else:
                    self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        myPrint = "\n\n---- A SYNC EVENT ---"
        myPrint += "\n ID OE: %s " % (self.OE.event and self.OE.event.id)
        myPrint += "\n ID GG: %s " % (self.GG.event and self.GG.event.get('id', False))
        myPrint += "\n Name OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
        myPrint += "\n Name GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
        myPrint += "\n Found OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
        myPrint += "\n Recurrence OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
        myPrint += "\n Instance OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
        myPrint += "\n Synchro OE: %10s " % (self.OE.synchro)
        myPrint += "\n Update OE: %10s " % (self.OE.update)
        myPrint += "\n Update GG: %10s " % (self.GG.update)
        myPrint += "\n Status OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
        if (self.OP is None):
            myPrint += "\n Action %s" % "---!!!---NONE---!!!---"
        else:
            myPrint += "\n Action %s" % type(self.OP).__name__
            myPrint += "\n Source %s" % (self.OP.src)
            myPrint += "\n comment %s" % (self.OP.info)
        return myPrint


class SyncOperation(object):
    def __init__(self, src, info, **kw):
        self.src = src
        self.info = info
        for k, v in kw.items():
            setattr(self, k, v)

    def __str__(self):
        return 'in__STR__'


class Create(SyncOperation):
    pass


class Update(SyncOperation):
    pass


class Delete(SyncOperation):
    pass


class NothingToDo(SyncOperation):
    pass


class Exclude(SyncOperation):
    pass


class google_calendar(osv.AbstractModel):
    STR_SERVICE = 'calendar'
    _name = 'google.%s' % STR_SERVICE

    def generate_data(self, cr, uid, event, isCreating=False, context=None):
        if not context:
            context = {}
        if event.allday:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=1), context=context).isoformat('T').split('T')[0]
            type = 'date'
            vstype = 'dateTime'
        else:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            type = 'dateTime'
            vstype = 'date'
        attendee_list = []
        for attendee in event.attendee_ids:
            email = tools.email_split(attendee.email)
            email = email[0] if email else 'NoEmail@mail.com'
            attendee_list.append({
                'email': email,
                'displayName': attendee.partner_id.name,
                'responseStatus': attendee.state or 'needsAction',
            })

        reminders = []
        for alarm in event.alarm_ids:
            reminders.append({
                "method": "email" if alarm.type == "email" else "popup",
                "minutes": alarm.duration_minutes
            })
        data = {
            "summary": event.name or '',
            "description": event.description or '',
            "start": {
                type: start_date,
                vstype: None,
                'timeZone': context.get('tz', 'UTC'),
            },
            "end": {
                type: final_date,
                vstype: None,
                'timeZone': context.get('tz', 'UTC'),
            },
            "attendees": attendee_list,
            "reminders": {
                "overrides": reminders,
                "useDefault": "false"
            },
            "location": event.location or '',
            "visibility": event['class'] or 'public',
        }
        if event.recurrency and event.rrule:
            data["recurrence"] = ["RRULE:" + event.rrule]

        if not event.active:
            data["state"] = "cancelled"

        if not self.get_need_synchro_attendee(cr, uid, context=context):
            data.pop("attendees")
        if isCreating:
            other_google_ids = [other_att.google_internal_event_id for other_att in event.attendee_ids if other_att.google_internal_event_id]
            if other_google_ids:
                data["id"] = other_google_ids[0]
        return data

    def create_an_event(self, cr, uid, event, context=None):
        gs_pool = self.pool['google.service']
        data = self.generate_data(cr, uid, event, isCreating=True, context=context)

        url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data_json = simplejson.dumps(data)

        return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)

    def delete_an_event(self, cr, uid, event_id, context=None):
        gs_pool = self.pool['google.service']

        params = {
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)

        return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)

    def get_calendar_primary_id(self, cr, uid, context=None):
        params = {
            'fields': 'id',
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/primary"

        try:
            st, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
        except Exception, e:

            if (e.code == 401):  # Token invalid / Acces unauthorized
                error_msg = "Your token is invalid or has been revoked !"

                registry = openerp.modules.registry.RegistryManager.get(request.session.db)
                with registry.cursor() as cur:
                    self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)

                raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
            raise

        return (status_response(st), content['id'] or False, ask_time)

    def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
        if not token:
            token = self.get_token(cr, uid, context)

        params = {
            'fields': 'items,nextPageToken',
            'access_token': token,
            'maxResults': 1000,
            #'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
        }

        if lastSync:
            params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
            params['showDeleted'] = True
        else:
            params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")

        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/%s/events" % 'primary'
        if nextPageToken:
            params['pageToken'] = nextPageToken

        status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)

        google_events_dict = {}
        for google_event in content['items']:
            google_events_dict[google_event['id']] = google_event

        if content.get('nextPageToken'):
            google_events_dict.update(
                self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
            )

        return google_events_dict

    def get_one_event_synchro(self, cr, uid, google_id, context=None):
        token = self.get_token(cr, uid, context)

        params = {
            'access_token': token,
            'maxResults': 1000,
            'showDeleted': True,
        }

        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
        try:
            status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
        except:
            _logger.info("Calendar Synchro - In except of get_one_event_synchro")
            pass

        return status_response(status) and content or False

    def update_to_google(self, cr, uid, oe_event, google_event, context):
        calendar_event = self.pool['calendar.event']

        url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data = self.generate_data(cr, uid, oe_event, context=context)
        data['sequence'] = google_event.get('sequence', 0)
        data_json = simplejson.dumps(data)

        status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)

        update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
        calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})

        if context['curr_attendee']:
            self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)

    def update_an_event(self, cr, uid, event, context=None):
        data = self.generate_data(cr, uid, event, context=context)

        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
        headers = {}
        data['access_token'] = self.get_token(cr, uid, context)

        status, response, ask_time = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
        #TO_CHECK : , if http fail, no event, do DELETE ?
        return response

    def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
        gs_pool = self.pool['google.service']

        data = self.generate_data(cr, uid, event_new, context=context)

        data['recurringEventId'] = event_ori_google_id
        data['originalStartTime'] = event_new.recurrent_id_date

        url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json'}

        data['sequence'] = self.get_sequence(cr, uid, instance_id, context)

        data_json = simplejson.dumps(data)
        return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)

    def update_from_google(self, cr, uid, event, single_event_dict, type, context):
        if context is None:
            context = []

        calendar_event = self.pool['calendar.event']
        res_partner_obj = self.pool['res.partner']
        calendar_attendee_obj = self.pool['calendar.attendee']
        calendar_alarm_obj = self.pool['calendar.alarm']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
        attendee_record = []
        alarm_record = set()
        partner_record = [(4, myPartnerID)]
        result = {}

        if self.get_need_synchro_attendee(cr, uid, context=context):
            for google_attendee in single_event_dict.get('attendees', []):
                partner_email = google_attendee.get('email', False)
                if type == "write":
                    for oe_attendee in event['attendee_ids']:
                        if oe_attendee.email == partner_email:
                            calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
                            google_attendee['found'] = True
                            continue

                if google_attendee.get('found'):
                    continue

                attendee_id = res_partner_obj.search(cr, uid, [('email', '=', partner_email)], context=context)
                if not attendee_id:
                    data = {
                        'email': partner_email,
                        'customer': False,
                        'name': google_attendee.get("displayName", False) or partner_email
                    }
                    attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
                attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
                partner_record.append((4, attendee.get('id')))
                attendee['partner_id'] = attendee.pop('id')
                attendee['state'] = google_attendee['responseStatus']
                attendee_record.append((0, 0, attendee))
        for google_alarm in single_event_dict.get('reminders', {}).get('overrides', []):
            alarm_id = calendar_alarm_obj.search(
                cr,
                uid,
                [
                    ('type', '=', google_alarm['method'] if google_alarm['method'] == 'email' else 'notification'),
                    ('duration_minutes', '=', google_alarm['minutes'])
                ],
                context=context
            )
            if not alarm_id:
                data = {
                    'type': google_alarm['method'] if google_alarm['method'] == 'email' else 'notification',
                    'duration': google_alarm['minutes'],
                    'interval': 'minutes',
                    'name': "%s minutes - %s" % (google_alarm['minutes'], google_alarm['method'])
                }
                alarm_id = [calendar_alarm_obj.create(cr, uid, data, context=context)]
            alarm_record.add(alarm_id[0])

        UTC = pytz.timezone('UTC')
        if single_event_dict.get('start') and single_event_dict.get('end'):  # If not cancelled

            if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
                date = parser.parse(single_event_dict['start']['dateTime'])
                stop = parser.parse(single_event_dict['end']['dateTime'])
                date = str(date.astimezone(UTC))[:-6]
                stop = str(stop.astimezone(UTC))[:-6]
                allday = False
            else:
                date = (single_event_dict['start']['date'])
                stop = (single_event_dict['end']['date'])
                d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
                allday = True
                d_end = d_end + timedelta(days=-1)
                stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)

            update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
            result.update({
                'start': date,
                'stop': stop,
                'allday': allday
            })
            result.update({
                'attendee_ids': attendee_record,
                'partner_ids': list(set(partner_record)),
                'alarm_ids': [(6, 0, list(alarm_record))],

                'name': single_event_dict.get('summary', 'Event'),
                'description': single_event_dict.get('description', False),
                'location': single_event_dict.get('location', False),
                'class': single_event_dict.get('visibility', 'public'),
                'oe_update_date': update_date,
            })

            if single_event_dict.get("recurrence", False):
                rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
                result['rrule'] = rrule

        context = dict(context or {}, no_mail_to_attendees=True)
        if type == "write":
            res = calendar_event.write(cr, uid, event['id'], result, context=context)
        elif type == "copy":
            result['recurrence'] = True
            res = calendar_event.write(cr, uid, [event['id']], result, context=context)
        elif type == "create":
            res = calendar_event.create(cr, uid, result, context=context)

        if context['curr_attendee']:
            self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
        return res

    def remove_references(self, cr, uid, context=None):
        current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
        reset_data = {
            'google_calendar_rtoken': False,
            'google_calendar_token': False,
            'google_calendar_token_validity': False,
            'google_calendar_last_sync_date': False,
            'google_calendar_cal_id': False,
        }

        all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
        self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
        current_user.write(reset_data)
        return True

    def synchronize_events_cron(self, cr, uid, context=None):
        ids = self.pool['res.users'].search(cr, uid, [('google_calendar_last_sync_date', '!=', False)], context=context)
        _logger.info("Calendar Synchro - Started by cron")

        for user_to_sync in ids:
            _logger.info("Calendar Synchro - Starting synchronization for a new user [%s] " % user_to_sync)
            try:
                resp = self.synchronize_events(cr, user_to_sync, False, lastSync=True, context=None)
                if resp.get("status") == "need_reset":
                    _logger.info("[%s] Calendar Synchro - Failed - NEED RESET !" % user_to_sync)
                else:
                    _logger.info("[%s] Calendar Synchro - Done with status : %s !" % (user_to_sync, resp.get("status")))
            except Exception, e:
                _logger.info("[%s] Calendar Synchro - Exception : %s !" % (user_to_sync, exception_to_unicode(e)))
        _logger.info("Calendar Synchro - Ended by cron")

    def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
        if context is None:
            context = {}

        # def isValidSync(syncToken):
        #     gs_pool = self.pool['google.service']
        #     params = {
        #         'maxResults': 1,
        #         'fields': 'id',
        #         'access_token': self.get_token(cr, uid, context),
        #         'syncToken': syncToken,
        #     }
        #     url = "/calendar/v3/calendars/primary/events"
        #     status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
        #     return int(status) != 410

        user_to_sync = ids and ids[0] or uid
        current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_to_sync, context=context)

        st, current_google, ask_time = self.get_calendar_primary_id(cr, user_to_sync, context=context)

        if current_user.google_calendar_cal_id:
            if current_google != current_user.google_calendar_cal_id:
                return {
                    "status": "need_reset",
                    "info": {
                        "old_name": current_user.google_calendar_cal_id,
                        "new_name": current_google
                    },
                    "url": ''
                }

            if lastSync and self.get_last_sync_date(cr, user_to_sync, context=context) and not self.get_disable_since_synchro(cr, user_to_sync, context=context):
                lastSync = self.get_last_sync_date(cr, user_to_sync, context)
                _logger.info("[%s] Calendar Synchro - MODE SINCE_MODIFIED : %s !" % (user_to_sync, lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT)))
            else:
                lastSync = False
                _logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO FORCED" % user_to_sync)
        else:
            current_user.write({'google_calendar_cal_id': current_google})
            lastSync = False
            _logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID" % user_to_sync)

        new_ids = []
        new_ids += self.create_new_events(cr, user_to_sync, context=context)
        new_ids += self.bind_recurring_events_to_google(cr, user_to_sync, context)

        res = self.update_events(cr, user_to_sync, lastSync, context)

        current_user.write({'google_calendar_last_sync_date': ask_time})
        return {
            "status": res and "need_refresh" or "no_new_event_from_google",
            "url": ''
        }

    def create_new_events(self, cr, uid, context=None):
        if context is None:
            context = {}

        new_ids = []
        ev_obj = self.pool['calendar.event']
        att_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id

        context_norecurrent = context.copy()
        context_norecurrent['virtual_id'] = False
        my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
                                              ('google_internal_event_id', '=', False),
                                              '|',
                                              ('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ], context=context_norecurrent)
        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            other_google_ids = [other_att.google_internal_event_id for other_att in att.event_id.attendee_ids if other_att.google_internal_event_id and other_att.id != att.id]
            for other_google_id in other_google_ids:
                if self.get_one_event_synchro(cr, uid, other_google_id, context=context):
                    att_obj.write(cr, uid, [att.id], {'google_internal_event_id': other_google_id})
                    break
            else:
                if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
                    st, response, ask_time = self.create_an_event(cr, uid, att.event_id, context=context)
                    if status_response(st):
                        update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
                        ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
                        new_ids.append(response['id'])
                        att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
                        cr.commit()
                    else:
                        _logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
                        _logger.warning("Response : %s" % response)
        return new_ids
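Together with `SyncEvent.compute_OP` earlier in the file, `create_new_events` fixes the order of operations: Odoo-side creations are pushed to Google first, and everything else is resolved afterwards in `update_events`. The resolution rules reduce to roughly the following (a paraphrase of `compute_OP`, not code from the module):

# found on both sides, deleted on one      -> Delete on the surviving side
# found on both, both alive, dates differ  -> Update, the most recently edited side wins
# found in Odoo only, still active         -> it vanished from Google: remove the Odoo copy
# found in Google only                     -> Create in Odoo (or Exclude for a recurrence instance)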
[%s]\" % (att.event_id.id, st))\n _logger.warning(\"Response : %s\" % response)\n return new_ids\n\n def get_context_no_virtual(self, context):\n context_norecurrent = context.copy()\n context_norecurrent['virtual_id'] = False\n context_norecurrent['active_test'] = False\n return context_norecurrent\n\n def bind_recurring_events_to_google(self, cr, uid, context=None):\n if context is None:\n context = {}\n\n new_ids = []\n ev_obj = self.pool['calendar.event']\n att_obj = self.pool['calendar.attendee']\n user_obj = self.pool['res.users']\n myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id\n\n context_norecurrent = self.get_context_no_virtual(context)\n my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)\n\n for att in att_obj.browse(cr, uid, my_att_ids, context=context):\n if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:\n new_google_internal_event_id = False\n source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)\n source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)\n if not source_attendee_record_id:\n continue\n source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]\n\n if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:\n new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')\n elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:\n new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'\n\n if new_google_internal_event_id:\n #TODO WARNING, NEED TO CHECK THAT EVENT and ALL instance NOT DELETE IN GMAIL BEFORE !\n try:\n st, response, ask_time = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)\n if status_response(st):\n att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)\n new_ids.append(new_google_internal_event_id)\n cr.commit()\n else:\n _logger.warning(\"Impossible to create event %s. 
[%s]\" % (att.event_id.id, st))\n _logger.warning(\"Response : %s\" % response)\n except:\n pass\n return new_ids\n\n def update_events(self, cr, uid, lastSync=False, context=None):\n context = dict(context or {})\n\n calendar_event = self.pool['calendar.event']\n user_obj = self.pool['res.users']\n att_obj = self.pool['calendar.attendee']\n myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id\n context_novirtual = self.get_context_no_virtual(context)\n\n if lastSync:\n try:\n all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)\n except urllib2.HTTPError, e:\n if e.code == 410: # GONE, Google is lost.\n # we need to force the rollback from this cursor, because it locks my res_users but I need to write in this tuple before to raise.\n cr.rollback()\n registry = openerp.modules.registry.RegistryManager.get(request.session.db)\n with registry.cursor() as cur:\n self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_last_sync_date': False}, context=context)\n error_key = simplejson.loads(str(e))\n error_key = error_key.get('error', {}).get('message', 'nc')\n error_msg = \"Google is lost... the next synchro will be a full synchro. \\n\\n %s\" % error_key\n raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)\n\n my_google_att_ids = att_obj.search(cr, uid, [\n ('partner_id', '=', myPartnerID),\n ('google_internal_event_id', 'in', all_event_from_google.keys())\n ], context=context_novirtual)\n\n my_openerp_att_ids = att_obj.search(cr, uid, [\n ('partner_id', '=', myPartnerID),\n ('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('google_internal_event_id', '!=', False),\n ], context=context_novirtual)\n\n my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)\n\n if self.get_print_log(cr, uid, context=context):\n _logger.info(\"Calendar Synchro - \\n\\nUPDATE IN GOOGLE\\n%s\\n\\nRETRIEVE FROM OE\\n%s\\n\\nUPDATE IN OE\\n%s\\n\\nRETRIEVE FROM GG\\n%s\\n\\n\" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))\n\n for giid in my_openerp_googleinternal_ids:\n active = True # if not sure, we request google\n if giid.get('event_id'):\n active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active\n\n if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:\n one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)\n if one_event:\n all_event_from_google[one_event['id']] = one_event\n\n my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))\n\n else:\n domain = [\n ('partner_id', '=', myPartnerID),\n ('google_internal_event_id', '!=', False),\n '|',\n ('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ]\n\n # Select all events from OpenERP which have been already synchronized in gmail\n my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)\n all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)\n\n event_to_synchronize = {}\n for att in 
att_obj.browse(cr, uid, my_att_ids, context=context):\n event = att.event_id\n\n base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]\n\n if base_event_id not in event_to_synchronize:\n event_to_synchronize[base_event_id] = {}\n\n if att.google_internal_event_id not in event_to_synchronize[base_event_id]:\n event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()\n\n ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]\n\n ev_to_sync.OE.attendee_id = att.id\n ev_to_sync.OE.event = event\n ev_to_sync.OE.found = True\n ev_to_sync.OE.event_id = event.id\n ev_to_sync.OE.isRecurrence = event.recurrency\n ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)\n ev_to_sync.OE.update = event.oe_update_date\n ev_to_sync.OE.status = event.active\n ev_to_sync.OE.synchro = att.oe_synchro_date\n\n for event in all_event_from_google.values():\n event_id = event.get('id')\n base_event_id = event_id.rsplit('_', 1)[0]\n\n if base_event_id not in event_to_synchronize:\n event_to_synchronize[base_event_id] = {}\n\n if event_id not in event_to_synchronize[base_event_id]:\n event_to_synchronize[base_event_id][event_id] = SyncEvent()\n\n ev_to_sync = event_to_synchronize[base_event_id][event_id]\n\n ev_to_sync.GG.event = event\n ev_to_sync.GG.found = True\n ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))\n ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))\n ev_to_sync.GG.update = event.get('updated', None) # if deleted, no date without browse event\n if ev_to_sync.GG.update:\n ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')\n ev_to_sync.GG.status = (event.get('status') != 'cancelled')\n\n ######################\n # PRE-PROCESSING #\n ######################\n for base_event in event_to_synchronize:\n for current_event in event_to_synchronize[base_event]:\n event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)\n if self.get_print_log(cr, uid, context=context):\n if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):\n _logger.info(event_to_synchronize[base_event])\n\n ######################\n # DO ACTION #\n ######################\n for base_event in event_to_synchronize:\n event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))\n for current_event in event_to_synchronize[base_event]:\n cr.commit()\n event = current_event[1] # event is an Sync Event !\n actToDo = event.OP\n actSrc = event.OP.src\n\n context['curr_attendee'] = event.OE.attendee_id\n\n if isinstance(actToDo, NothingToDo):\n continue\n elif isinstance(actToDo, Create):\n context_tmp = context.copy()\n context_tmp['NewMeeting'] = True\n if actSrc == 'GG':\n res = self.update_from_google(cr, uid, False, event.GG.event, \"create\", context=context_tmp)\n event.OE.event_id = res\n meeting = calendar_event.browse(cr, uid, res, context=context)\n attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)\n self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)\n elif actSrc == 'OE':\n raise \"Should be never here, creation for OE is done before update !\"\n #TODO Add to batch\n elif isinstance(actToDo, Update):\n if actSrc == 'GG':\n self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', 
context)\n elif actSrc == 'OE':\n self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)\n elif isinstance(actToDo, Exclude):\n if actSrc == 'OE':\n self.delete_an_event(cr, uid, current_event[0], context=context)\n elif actSrc == 'GG':\n new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]\n if 'T' in new_google_event_id:\n new_google_event_id = new_google_event_id.replace('T', '')[:-1]\n else:\n new_google_event_id = new_google_event_id + \"000000\"\n\n if event.GG.status:\n parent_event = {}\n if not event_to_synchronize[base_event][0][1].OE.event_id:\n main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)\n event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]\n\n parent_event['id'] = \"%s-%s\" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)\n res = self.update_from_google(cr, uid, parent_event, event.GG.event, \"copy\", context)\n else:\n parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id\n if parent_oe_id:\n calendar_event.unlink(cr, uid, \"%s-%s\" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)\n\n elif isinstance(actToDo, Delete):\n if actSrc == 'GG':\n try:\n self.delete_an_event(cr, uid, current_event[0], context=context)\n except Exception, e:\n error = simplejson.loads(e.read())\n error_nr = error.get('error', {}).get('code')\n # if already deleted from gmail or never created\n if error_nr in (404, 410,):\n pass\n else:\n raise e\n elif actSrc == 'OE':\n calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)\n return True\n\n def check_and_sync(self, cr, uid, oe_event, google_event, context):\n if datetime.strptime(oe_event.oe_update_date, \"%Y-%m-%d %H:%M:%S.%f\") > datetime.strptime(google_event['updated'], \"%Y-%m-%dT%H:%M:%S.%fz\"):\n self.update_to_google(cr, uid, oe_event, google_event, context)\n elif datetime.strptime(oe_event.oe_update_date, \"%Y-%m-%d %H:%M:%S.%f\") < datetime.strptime(google_event['updated'], \"%Y-%m-%dT%H:%M:%S.%fz\"):\n self.update_from_google(cr, uid, oe_event, google_event, 'write', context)\n\n def get_sequence(self, cr, uid, instance_id, context=None):\n gs_pool = self.pool['google.service']\n\n params = {\n 'fields': 'sequence',\n 'access_token': self.get_token(cr, uid, context)\n }\n\n headers = {'Content-type': 'application/json'}\n\n url = \"/calendar/v3/calendars/%s/events/%s\" % ('primary', instance_id)\n\n st, content, ask_time = gs_pool._do_request(cr, uid, url, params, headers, type='GET', context=context)\n return content.get('sequence', 0)\n#################################\n## MANAGE CONNEXION TO GMAIL ##\n#################################\n\n def get_token(self, cr, uid, context=None):\n current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)\n if not current_user.google_calendar_token_validity or \\\n datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):\n self.do_refresh_token(cr, uid, context=context)\n current_user.refresh()\n return current_user.google_calendar_token\n\n def get_last_sync_date(self, cr, uid, context=None):\n current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)\n return current_user.google_calendar_last_sync_date and datetime.strptime(current_user.google_calendar_last_sync_date, 
DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes=0) or False\n\n def do_refresh_token(self, cr, uid, context=None):\n current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)\n gs_pool = self.pool['google.service']\n\n all_token = gs_pool._refresh_google_token_json(cr, uid, current_user.google_calendar_rtoken, self.STR_SERVICE, context=context)\n\n vals = {}\n vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))\n vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')\n\n self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)\n\n def need_authorize(self, cr, uid, context=None):\n current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)\n return current_user.google_calendar_rtoken is False\n\n def get_calendar_scope(self, RO=False):\n readonly = RO and '.readonly' or ''\n return 'https://www.googleapis.com/auth/calendar%s' % (readonly)\n\n def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None):\n url = self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=self.get_calendar_scope(), context=context)\n return url\n\n def can_authorize_google(self, cr, uid, context=None):\n return self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')\n\n def set_all_tokens(self, cr, uid, authorization_code, context=None):\n gs_pool = self.pool['google.service']\n all_token = gs_pool._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context)\n\n vals = {}\n vals['google_%s_rtoken' % self.STR_SERVICE] = all_token.get('refresh_token')\n vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))\n vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')\n self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)\n\n def get_minTime(self, cr, uid, context=None):\n number_of_week = int(self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13))\n return datetime.now() - timedelta(weeks=number_of_week)\n\n def get_need_synchro_attendee(self, cr, uid, context=None):\n return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True)\n\n def get_disable_since_synchro(self, cr, uid, context=None):\n return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False)\n\n def get_print_log(self, cr, uid, context=None):\n return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False)\n\n\nclass res_users(osv.Model):\n _inherit = 'res.users'\n\n _columns = {\n 'google_calendar_rtoken': fields.char('Refresh Token'),\n 'google_calendar_token': fields.char('User token'),\n 'google_calendar_token_validity': fields.datetime('Token Validity'),\n 'google_calendar_last_sync_date': fields.datetime('Last synchro date'),\n 'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID who has been synchronized. 
If it is changed, we remove \\\nall links between GoogleID and Odoo Google Internal ID')\n }\n\n\nclass calendar_event(osv.Model):\n _inherit = \"calendar.event\"\n\n def get_fields_need_update_google(self, cr, uid, context=None):\n return ['name', 'description', 'allday', 'start', 'date_end', 'stop',\n 'attendee_ids', 'alarm_ids', 'location', 'class', 'active',\n 'start_date', 'start_datetime', 'stop_date', 'stop_datetime']\n\n def write(self, cr, uid, ids, vals, context=None):\n if context is None:\n context = {}\n sync_fields = set(self.get_fields_need_update_google(cr, uid, context))\n if (set(vals.keys()) & sync_fields) and 'oe_update_date' not in vals.keys() and 'NewMeeting' not in context:\n vals['oe_update_date'] = datetime.now()\n\n return super(calendar_event, self).write(cr, uid, ids, vals, context=context)\n\n def copy(self, cr, uid, id, default=None, context=None):\n default = default or {}\n if default.get('write_type', False):\n del default['write_type']\n elif default.get('recurrent_id', False):\n default['oe_update_date'] = datetime.now()\n else:\n default['oe_update_date'] = False\n return super(calendar_event, self).copy(cr, uid, id, default, context)\n\n def unlink(self, cr, uid, ids, can_be_deleted=False, context=None):\n return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context)\n\n _columns = {\n 'oe_update_date': fields.datetime('Odoo Update Date'),\n }\n\n\nclass calendar_attendee(osv.Model):\n _inherit = 'calendar.attendee'\n\n _columns = {\n 'google_internal_event_id': fields.char('Google Calendar Event Id'),\n 'oe_synchro_date': fields.datetime('Odoo Synchro Date'),\n }\n _sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')]\n\n def write(self, cr, uid, ids, vals, context=None):\n if context is None:\n context = {}\n\n for id in ids:\n ref = vals.get('event_id', self.browse(cr, uid, id, context=context).event_id.id)\n\n # If attendees are updated, we need to specify that next synchro need an action\n # Except if it come from an update_from_google\n if not context.get('curr_attendee', False) and not context.get('NewMeeting', False):\n self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context)\n return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context)\n"}}},{"rowIdx":284674,"cells":{"repo_name":{"kind":"string","value":"dfalt974/SickRage"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"lib/html5lib/_trie/_base.py"},"copies":{"kind":"string","value":"79"},"content":{"kind":"string","value":"from __future__ import absolute_import, division, unicode_literals\n\nfrom collections import Mapping\n\n\nclass Trie(Mapping):\n \"\"\"Abstract base class for tries\"\"\"\n\n def keys(self, prefix=None):\n # pylint:disable=arguments-differ\n keys = super(Trie, self).keys()\n\n if prefix is None:\n return set(keys)\n\n return {x for x in keys if x.startswith(prefix)}\n\n def has_keys_with_prefix(self, prefix):\n for key in self.keys():\n if key.startswith(prefix):\n return True\n\n return False\n\n def longest_prefix(self, prefix):\n if prefix in self:\n return prefix\n\n for i in range(1, len(prefix) + 1):\n if prefix[:-i] in self:\n return prefix[:-i]\n\n raise KeyError(prefix)\n\n def longest_prefix_item(self, prefix):\n lprefix = self.longest_prefix(prefix)\n return (lprefix, 
self[lprefix])\n"}}},{"rowIdx":284675,"cells":{"repo_name":{"kind":"string","value":"edisonlz/fruit"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"web_project/base/site-packages/grappelli/__init__.py"},"copies":{"kind":"string","value":"1"},"content":{"kind":"string","value":"VERSION = '2.3.6'"}}},{"rowIdx":284676,"cells":{"repo_name":{"kind":"string","value":"ULHPC/modules"},"ref":{"kind":"string","value":"refs/heads/devel"},"path":{"kind":"string","value":"easybuild/easybuild-easyblocks/easybuild/easyblocks/m/metavelvet.py"},"copies":{"kind":"string","value":"12"},"content":{"kind":"string","value":"##\n# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild\n#\n# Copyright:: Copyright 2012-2015 Uni.Lu/LCSB, NTUA\n# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Fotis Georgatos <fotis@cern.ch>, Kenneth Hoste\n# License:: MIT/GPL\n# $Id$\n#\n# This work implements a part of the HPCBIOS project and is a component of the policy:\n# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html\n##\n\"\"\"\nEasyBuild support for building and installing MetaVelvet, implemented as an easyblock\n\n@author: Cedric Laczny (Uni.Lu)\n@author: Fotis Georgatos (Uni.Lu)\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\n\nimport os\nimport shutil\n\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.tools.build_log import EasyBuildError\n\n\nclass EB_MetaVelvet(ConfigureMake):\n \"\"\"\n Support for building MetaVelvet\n \"\"\"\n\n def configure_step(self):\n \"\"\"\n No configure\n \"\"\"\n pass\n\n def install_step(self):\n \"\"\"\n Install by copying files to install dir\n \"\"\"\n srcdir = self.cfg['start_dir']\n destdir = os.path.join(self.installdir, 'bin')\n srcfile = None\n # Get executable files: for i in $(find . -maxdepth 1 -type f -perm +111 -print | sed -e 's/\\.\\///g' | awk '{print \"\\\"\"$0\"\\\"\"}' | grep -vE \"\\.sh|\\.html\"); do echo -ne \"$i, \"; done && echo\n try:\n os.makedirs(destdir)\n for filename in [\"meta-velvetg\"]:\n srcfile = os.path.join(srcdir, filename)\n shutil.copy2(srcfile, destdir)\n except OSError, err:\n raise EasyBuildError(\"Copying %s to installation dir %s failed: %s\", srcfile, destdir, err)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for MetaVelvet.\"\"\"\n\n custom_paths = {\n 'files': ['bin/meta-velvetg'],\n 'dirs': []\n }\n\n super(EB_MetaVelvet, self).sanity_check_step(custom_paths=custom_paths)\n"}}},{"rowIdx":284677,"cells":{"repo_name":{"kind":"string","value":"igorg1312/googlepythonsskeleton"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"lib/jinja2/loaders.py"},"copies":{"kind":"string","value":"333"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\n jinja2.loaders\n ~~~~~~~~~~~~~~\n\n Jinja loader classes.\n\n :copyright: (c) 2010 by the Jinja Team.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport os\nimport sys\nimport weakref\nfrom types import ModuleType\nfrom os import path\nfrom hashlib import sha1\nfrom jinja2.exceptions import TemplateNotFound\nfrom jinja2.utils import open_if_exists, internalcode\nfrom jinja2._compat import string_types, iteritems\n\n\ndef split_template_path(template):\n \"\"\"Split a path into segments and perform a sanity check. If it detects\n '..' 
in the path it will raise a `TemplateNotFound` error.\n \"\"\"\n pieces = []\n for piece in template.split('/'):\n if path.sep in piece \\\n or (path.altsep and path.altsep in piece) or \\\n piece == path.pardir:\n raise TemplateNotFound(template)\n elif piece and piece != '.':\n pieces.append(piece)\n return pieces\n\n\nclass BaseLoader(object):\n \"\"\"Baseclass for all loaders. Subclass this and override `get_source` to\n implement a custom loading mechanism. The environment provides a\n `get_template` method that calls the loader's `load` method to get the\n :class:`Template` object.\n\n A very basic example for a loader that looks up templates on the file\n system could look like this::\n\n from jinja2 import BaseLoader, TemplateNotFound\n from os.path import join, exists, getmtime\n\n class MyLoader(BaseLoader):\n\n def __init__(self, path):\n self.path = path\n\n def get_source(self, environment, template):\n path = join(self.path, template)\n if not exists(path):\n raise TemplateNotFound(template)\n mtime = getmtime(path)\n with file(path) as f:\n source = f.read().decode('utf-8')\n return source, path, lambda: mtime == getmtime(path)\n \"\"\"\n\n #: if set to `False` it indicates that the loader cannot provide access\n #: to the source of templates.\n #:\n #: .. versionadded:: 2.4\n has_source_access = True\n\n def get_source(self, environment, template):\n \"\"\"Get the template source, filename and reload helper for a template.\n It's passed the environment and template name and has to return a\n tuple in the form ``(source, filename, uptodate)`` or raise a\n `TemplateNotFound` error if it can't locate the template.\n\n The source part of the returned tuple must be the source of the\n template as unicode string or a ASCII bytestring. The filename should\n be the name of the file on the filesystem if it was loaded from there,\n otherwise `None`. The filename is used by python for the tracebacks\n if no loader extension is used.\n\n The last item in the tuple is the `uptodate` function. If auto\n reloading is enabled it's always called to check if the template\n changed. No arguments are passed so the function must store the\n old state somewhere (for example in a closure). If it returns `False`\n the template will be reloaded.\n \"\"\"\n if not self.has_source_access:\n raise RuntimeError('%s cannot provide access to the source' %\n self.__class__.__name__)\n raise TemplateNotFound(template)\n\n def list_templates(self):\n \"\"\"Iterates over all templates. If the loader does not support that\n it should raise a :exc:`TypeError` which is the default behavior.\n \"\"\"\n raise TypeError('this loader cannot iterate over all templates')\n\n @internalcode\n def load(self, environment, name, globals=None):\n \"\"\"Loads a template. This method looks up the template in the cache\n or loads one by calling :meth:`get_source`. 
Subclasses should not\n override this method as loaders working on collections of other\n loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)\n will not call this method but `get_source` directly.\n \"\"\"\n code = None\n if globals is None:\n globals = {}\n\n # first we try to get the source for this template together\n # with the filename and the uptodate function.\n source, filename, uptodate = self.get_source(environment, name)\n\n # try to load the code from the bytecode cache if there is a\n # bytecode cache configured.\n bcc = environment.bytecode_cache\n if bcc is not None:\n bucket = bcc.get_bucket(environment, name, filename, source)\n code = bucket.code\n\n # if we don't have code so far (not cached, no longer up to\n # date) etc. we compile the template\n if code is None:\n code = environment.compile(source, name, filename)\n\n # if the bytecode cache is available and the bucket doesn't\n # have a code so far, we give the bucket the new code and put\n # it back to the bytecode cache.\n if bcc is not None and bucket.code is None:\n bucket.code = code\n bcc.set_bucket(bucket)\n\n return environment.template_class.from_code(environment, code,\n globals, uptodate)\n\n\nclass FileSystemLoader(BaseLoader):\n \"\"\"Loads templates from the file system. This loader can find templates\n in folders on the file system and is the preferred way to load them.\n\n The loader takes the path to the templates as string, or if multiple\n locations are wanted a list of them which is then looked up in the\n given order::\n\n >>> loader = FileSystemLoader('/path/to/templates')\n >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])\n\n Per default the template encoding is ``'utf-8'`` which can be changed\n by setting the `encoding` parameter to something else.\n\n To follow symbolic links, set the *followlinks* parameter to ``True``::\n\n >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)\n\n .. versionchanged:: 2.8+\n The *followlinks* parameter was added.\n \"\"\"\n\n def __init__(self, searchpath, encoding='utf-8', followlinks=False):\n if isinstance(searchpath, string_types):\n searchpath = [searchpath]\n self.searchpath = list(searchpath)\n self.encoding = encoding\n self.followlinks = followlinks\n\n def get_source(self, environment, template):\n pieces = split_template_path(template)\n for searchpath in self.searchpath:\n filename = path.join(searchpath, *pieces)\n f = open_if_exists(filename)\n if f is None:\n continue\n try:\n contents = f.read().decode(self.encoding)\n finally:\n f.close()\n\n mtime = path.getmtime(filename)\n\n def uptodate():\n try:\n return path.getmtime(filename) == mtime\n except OSError:\n return False\n return contents, filename, uptodate\n raise TemplateNotFound(template)\n\n def list_templates(self):\n found = set()\n for searchpath in self.searchpath:\n walk_dir = os.walk(searchpath, followlinks=self.followlinks)\n for dirpath, dirnames, filenames in walk_dir:\n for filename in filenames:\n template = os.path.join(dirpath, filename) \\\n [len(searchpath):].strip(os.path.sep) \\\n .replace(os.path.sep, '/')\n if template[:2] == './':\n template = template[2:]\n if template not in found:\n found.add(template)\n return sorted(found)\n\n\nclass PackageLoader(BaseLoader):\n \"\"\"Load templates from python eggs or packages. 
It is constructed with\n the name of the python package and the path to the templates in that\n package::\n\n loader = PackageLoader('mypackage', 'views')\n\n If the package path is not given, ``'templates'`` is assumed.\n\n Per default the template encoding is ``'utf-8'`` which can be changed\n by setting the `encoding` parameter to something else. Due to the nature\n of eggs it's only possible to reload templates if the package was loaded\n from the file system and not a zip file.\n \"\"\"\n\n def __init__(self, package_name, package_path='templates',\n encoding='utf-8'):\n from pkg_resources import DefaultProvider, ResourceManager, \\\n get_provider\n provider = get_provider(package_name)\n self.encoding = encoding\n self.manager = ResourceManager()\n self.filesystem_bound = isinstance(provider, DefaultProvider)\n self.provider = provider\n self.package_path = package_path\n\n def get_source(self, environment, template):\n pieces = split_template_path(template)\n p = '/'.join((self.package_path,) + tuple(pieces))\n if not self.provider.has_resource(p):\n raise TemplateNotFound(template)\n\n filename = uptodate = None\n if self.filesystem_bound:\n filename = self.provider.get_resource_filename(self.manager, p)\n mtime = path.getmtime(filename)\n def uptodate():\n try:\n return path.getmtime(filename) == mtime\n except OSError:\n return False\n\n source = self.provider.get_resource_string(self.manager, p)\n return source.decode(self.encoding), filename, uptodate\n\n def list_templates(self):\n path = self.package_path\n if path[:2] == './':\n path = path[2:]\n elif path == '.':\n path = ''\n offset = len(path)\n results = []\n def _walk(path):\n for filename in self.provider.resource_listdir(path):\n fullname = path + '/' + filename\n if self.provider.resource_isdir(fullname):\n _walk(fullname)\n else:\n results.append(fullname[offset:].lstrip('/'))\n _walk(path)\n results.sort()\n return results\n\n\nclass DictLoader(BaseLoader):\n \"\"\"Loads a template from a python dict. It's passed a dict of unicode\n strings bound to template names. This loader is useful for unittesting:\n\n >>> loader = DictLoader({'index.html': 'source here'})\n\n Because auto reloading is rarely useful this is disabled per default.\n \"\"\"\n\n def __init__(self, mapping):\n self.mapping = mapping\n\n def get_source(self, environment, template):\n if template in self.mapping:\n source = self.mapping[template]\n return source, None, lambda: source == self.mapping.get(template)\n raise TemplateNotFound(template)\n\n def list_templates(self):\n return sorted(self.mapping)\n\n\nclass FunctionLoader(BaseLoader):\n \"\"\"A loader that is passed a function which does the loading. The\n function receives the name of the template and has to return either\n an unicode string with the template source, a tuple in the form ``(source,\n filename, uptodatefunc)`` or `None` if the template does not exist.\n\n >>> def load_template(name):\n ... if name == 'index.html':\n ... return '...'\n ...\n >>> loader = FunctionLoader(load_template)\n\n The `uptodatefunc` is a function that is called if autoreload is enabled\n and has to return `True` if the template is still up to date. 
For more\n    details have a look at :meth:`BaseLoader.get_source` which has the same\n    return value.\n    \"\"\"\n\n    def __init__(self, load_func):\n        self.load_func = load_func\n\n    def get_source(self, environment, template):\n        rv = self.load_func(template)\n        if rv is None:\n            raise TemplateNotFound(template)\n        elif isinstance(rv, string_types):\n            return rv, None, None\n        return rv\n\n\nclass PrefixLoader(BaseLoader):\n    \"\"\"A loader that is passed a dict of loaders where each loader is bound\n    to a prefix.  The prefix is delimited from the template by a slash per\n    default, which can be changed by setting the `delimiter` argument to\n    something else::\n\n        loader = PrefixLoader({\n            'app1': PackageLoader('mypackage.app1'),\n            'app2': PackageLoader('mypackage.app2')\n        })\n\n    By loading ``'app1/index.html'`` the file from the app1 package is loaded,\n    by loading ``'app2/index.html'`` the file from the second.\n    \"\"\"\n\n    def __init__(self, mapping, delimiter='/'):\n        self.mapping = mapping\n        self.delimiter = delimiter\n\n    def get_loader(self, template):\n        try:\n            prefix, name = template.split(self.delimiter, 1)\n            loader = self.mapping[prefix]\n        except (ValueError, KeyError):\n            raise TemplateNotFound(template)\n        return loader, name\n\n    def get_source(self, environment, template):\n        loader, name = self.get_loader(template)\n        try:\n            return loader.get_source(environment, name)\n        except TemplateNotFound:\n            # re-raise the exception with the correct filename here.\n            # (the one that includes the prefix)\n            raise TemplateNotFound(template)\n\n    @internalcode\n    def load(self, environment, name, globals=None):\n        loader, local_name = self.get_loader(name)\n        try:\n            return loader.load(environment, local_name, globals)\n        except TemplateNotFound:\n            # re-raise the exception with the correct filename here.\n            # (the one that includes the prefix)\n            raise TemplateNotFound(name)\n\n    def list_templates(self):\n        result = []\n        for prefix, loader in iteritems(self.mapping):\n            for template in loader.list_templates():\n                result.append(prefix + self.delimiter + template)\n        return result\n\n\nclass ChoiceLoader(BaseLoader):\n    \"\"\"This loader works like the `PrefixLoader` just that no prefix is\n    specified.  If a template could not be found by one loader the next one\n    is tried.\n\n    >>> loader = ChoiceLoader([\n    ...     FileSystemLoader('/path/to/user/templates'),\n    ...     FileSystemLoader('/path/to/system/templates')\n    ... ])\n\n    This is useful if you want to allow users to override builtin templates\n    from a different location.\n    \"\"\"\n\n    def __init__(self, loaders):\n        self.loaders = loaders\n\n    def get_source(self, environment, template):\n        for loader in self.loaders:\n            try:\n                return loader.get_source(environment, template)\n            except TemplateNotFound:\n                pass\n        raise TemplateNotFound(template)\n\n    @internalcode\n    def load(self, environment, name, globals=None):\n        for loader in self.loaders:\n            try:\n                return loader.load(environment, name, globals)\n            except TemplateNotFound:\n                pass\n        raise TemplateNotFound(name)\n\n    def list_templates(self):\n        found = set()\n        for loader in self.loaders:\n            found.update(loader.list_templates())\n        return sorted(found)\n\n\nclass _TemplateModule(ModuleType):\n    \"\"\"Like a normal module but with support for weak references\"\"\"\n\n\nclass ModuleLoader(BaseLoader):\n    \"\"\"This loader loads templates from precompiled templates.\n\n    Example usage:\n\n    >>> loader = ChoiceLoader([\n    ...     ModuleLoader('/path/to/compiled/templates'),\n    ...     FileSystemLoader('/path/to/templates')\n    ... 
])\n\n Templates can be precompiled with :meth:`Environment.compile_templates`.\n \"\"\"\n\n has_source_access = False\n\n def __init__(self, path):\n package_name = '_jinja2_module_templates_%x' % id(self)\n\n # create a fake module that looks for the templates in the\n # path given.\n mod = _TemplateModule(package_name)\n if isinstance(path, string_types):\n path = [path]\n else:\n path = list(path)\n mod.__path__ = path\n\n sys.modules[package_name] = weakref.proxy(mod,\n lambda x: sys.modules.pop(package_name, None))\n\n # the only strong reference, the sys.modules entry is weak\n # so that the garbage collector can remove it once the\n # loader that created it goes out of business.\n self.module = mod\n self.package_name = package_name\n\n @staticmethod\n def get_template_key(name):\n return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()\n\n @staticmethod\n def get_module_filename(name):\n return ModuleLoader.get_template_key(name) + '.py'\n\n @internalcode\n def load(self, environment, name, globals=None):\n key = self.get_template_key(name)\n module = '%s.%s' % (self.package_name, key)\n mod = getattr(self.module, module, None)\n if mod is None:\n try:\n mod = __import__(module, None, None, ['root'])\n except ImportError:\n raise TemplateNotFound(name)\n\n # remove the entry from sys.modules, we only want the attribute\n # on the module object we have stored on the loader.\n sys.modules.pop(module, None)\n\n return environment.template_class.from_module_dict(\n environment, mod.__dict__, globals)\n"}}},{"rowIdx":284678,"cells":{"repo_name":{"kind":"string","value":"ArduPilot/MAVProxy"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"MAVProxy/modules/mavproxy_mode.py"},"copies":{"kind":"string","value":"3"},"content":{"kind":"string","value":"#!/usr/bin/env python\n'''mode command handling'''\n\nimport time, os\nfrom pymavlink import mavutil\n\nfrom MAVProxy.modules.lib import mp_module\n\nclass ModeModule(mp_module.MPModule):\n def __init__(self, mpstate):\n super(ModeModule, self).__init__(mpstate, \"mode\", public=True)\n self.add_command('mode', self.cmd_mode, \"mode change\", self.available_modes())\n self.add_command('guided', self.cmd_guided, \"fly to a clicked location on map\")\n\n def cmd_mode(self, args):\n '''set arbitrary mode'''\n mode_mapping = self.master.mode_mapping()\n if mode_mapping is None:\n print('No mode mapping available')\n return\n if len(args) != 1:\n print('Available modes: ', mode_mapping.keys())\n return\n if args[0].isdigit():\n modenum = int(args[0])\n else:\n mode = args[0].upper()\n if mode not in mode_mapping:\n print('Unknown mode %s: ' % mode)\n return\n modenum = mode_mapping[mode]\n self.master.set_mode(modenum)\n\n def available_modes(self):\n if self.master is None:\n print('No mode mapping available')\n return []\n mode_mapping = self.master.mode_mapping()\n if mode_mapping is None:\n print('No mode mapping available')\n return []\n return mode_mapping.keys()\n\n def unknown_command(self, args):\n '''handle mode switch by mode name as command'''\n mode_mapping = self.master.mode_mapping()\n mode = args[0].upper()\n if mode in mode_mapping:\n self.master.set_mode(mode_mapping[mode])\n return True\n return False\n\n def cmd_guided(self, args):\n '''set GUIDED target'''\n if len(args) != 1 and len(args) != 3:\n print(\"Usage: guided ALTITUDE | guided LAT LON ALTITUDE\")\n return\n\n if len(args) == 3:\n latitude = float(args[0])\n longitude = float(args[1])\n altitude = float(args[2])\n latlon = 
(latitude, longitude)\n else:\n latlon = self.mpstate.click_location\n if latlon is None:\n print(\"No map click position available\")\n return\n altitude = float(args[0])\n\n print(\"Guided %s %s\" % (str(latlon), str(altitude)))\n self.master.mav.mission_item_int_send (self.settings.target_system,\n self.settings.target_component,\n 0,\n self.module('wp').get_default_frame(),\n mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,\n 2, 0, 0, 0, 0, 0,\n int(latlon[0]*1.0e7),\n int(latlon[1]*1.0e7),\n altitude)\n\ndef init(mpstate):\n '''initialise module'''\n return ModeModule(mpstate)\n"}}},{"rowIdx":284679,"cells":{"repo_name":{"kind":"string","value":"drxos/python-social-auth"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"social/tests/test_utils.py"},"copies":{"kind":"string","value":"73"},"content":{"kind":"string","value":"import sys\nimport unittest2 as unittest\n\nfrom mock import Mock\n\nfrom social.utils import sanitize_redirect, user_is_authenticated, \\\n user_is_active, slugify, build_absolute_uri, \\\n partial_pipeline_data\n\n\nPY3 = sys.version_info[0] == 3\n\n\nclass SanitizeRedirectTest(unittest.TestCase):\n def test_none_redirect(self):\n self.assertEqual(sanitize_redirect('myapp.com', None), None)\n\n def test_empty_redirect(self):\n self.assertEqual(sanitize_redirect('myapp.com', ''), None)\n\n def test_dict_redirect(self):\n self.assertEqual(sanitize_redirect('myapp.com', {}), None)\n\n def test_invalid_redirect(self):\n self.assertEqual(sanitize_redirect('myapp.com', {'foo': 'bar'}), None)\n\n def test_wrong_path_redirect(self):\n self.assertEqual(\n sanitize_redirect('myapp.com', 'http://notmyapp.com/path/'),\n None\n )\n\n def test_valid_absolute_redirect(self):\n self.assertEqual(\n sanitize_redirect('myapp.com', 'http://myapp.com/path/'),\n 'http://myapp.com/path/'\n )\n\n def test_valid_relative_redirect(self):\n self.assertEqual(sanitize_redirect('myapp.com', '/path/'), '/path/')\n\n\nclass UserIsAuthenticatedTest(unittest.TestCase):\n def test_user_is_none(self):\n self.assertEqual(user_is_authenticated(None), False)\n\n def test_user_is_not_none(self):\n self.assertEqual(user_is_authenticated(object()), True)\n\n def test_user_has_is_authenticated(self):\n class User(object):\n is_authenticated = True\n self.assertEqual(user_is_authenticated(User()), True)\n\n def test_user_has_is_authenticated_callable(self):\n class User(object):\n def is_authenticated(self):\n return True\n self.assertEqual(user_is_authenticated(User()), True)\n\n\nclass UserIsActiveTest(unittest.TestCase):\n def test_user_is_none(self):\n self.assertEqual(user_is_active(None), False)\n\n def test_user_is_not_none(self):\n self.assertEqual(user_is_active(object()), True)\n\n def test_user_has_is_active(self):\n class User(object):\n is_active = True\n self.assertEqual(user_is_active(User()), True)\n\n def test_user_has_is_active_callable(self):\n class User(object):\n def is_active(self):\n return True\n self.assertEqual(user_is_active(User()), True)\n\n\nclass SlugifyTest(unittest.TestCase):\n def test_slugify_formats(self):\n if PY3:\n self.assertEqual(slugify('FooBar'), 'foobar')\n self.assertEqual(slugify('Foo Bar'), 'foo-bar')\n self.assertEqual(slugify('Foo (Bar)'), 'foo-bar')\n else:\n self.assertEqual(slugify('FooBar'.decode('utf-8')), 'foobar')\n self.assertEqual(slugify('Foo Bar'.decode('utf-8')), 'foo-bar')\n self.assertEqual(slugify('Foo (Bar)'.decode('utf-8')), 'foo-bar')\n\n\nclass BuildAbsoluteURITest(unittest.TestCase):\n def setUp(self):\n self.host 
= 'http://foobar.com'\n\n def tearDown(self):\n self.host = None\n\n def test_path_none(self):\n self.assertEqual(build_absolute_uri(self.host), self.host)\n\n def test_path_empty(self):\n self.assertEqual(build_absolute_uri(self.host, ''), self.host)\n\n def test_path_http(self):\n self.assertEqual(build_absolute_uri(self.host, 'http://barfoo.com'),\n 'http://barfoo.com')\n\n def test_path_https(self):\n self.assertEqual(build_absolute_uri(self.host, 'https://barfoo.com'),\n 'https://barfoo.com')\n\n def test_host_ends_with_slash_and_path_starts_with_slash(self):\n self.assertEqual(build_absolute_uri(self.host + '/', '/foo/bar'),\n 'http://foobar.com/foo/bar')\n\n def test_absolute_uri(self):\n self.assertEqual(build_absolute_uri(self.host, '/foo/bar'),\n 'http://foobar.com/foo/bar')\n\n\nclass PartialPipelineData(unittest.TestCase):\n def test_kwargs_included_in_result(self):\n backend = self._backend()\n key, val = ('foo', 'bar')\n _, xkwargs = partial_pipeline_data(backend, None,\n *(), **dict([(key, val)]))\n self.assertTrue(key in xkwargs)\n self.assertEqual(xkwargs[key], val)\n\n def test_update_user(self):\n user = object()\n backend = self._backend(session_kwargs={'user': None})\n _, xkwargs = partial_pipeline_data(backend, user)\n self.assertTrue('user' in xkwargs)\n self.assertEqual(xkwargs['user'], user)\n\n def _backend(self, session_kwargs=None):\n strategy = Mock()\n strategy.request = None\n strategy.session_get.return_value = object()\n strategy.partial_from_session.return_value = \\\n (0, 'mock-backend', [], session_kwargs or {})\n\n backend = Mock()\n backend.name = 'mock-backend'\n backend.strategy = strategy\n return backend\n"}}},{"rowIdx":284680,"cells":{"repo_name":{"kind":"string","value":"kstrauser/ansible"},"ref":{"kind":"string","value":"refs/heads/devel"},"path":{"kind":"string","value":"lib/ansible/plugins/shell/__init__.py"},"copies":{"kind":"string","value":"7690"},"content":{"kind":"string","value":"# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n"}}},{"rowIdx":284681,"cells":{"repo_name":{"kind":"string","value":"MungoRae/home-assistant"},"ref":{"kind":"string","value":"refs/heads/dev"},"path":{"kind":"string","value":"homeassistant/components/remote/itach.py"},"copies":{"kind":"string","value":"3"},"content":{"kind":"string","value":"\"\"\"\nSupport for iTach IR Devices.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/remote.itach/\n\"\"\"\n\nimport logging\n\nimport voluptuous as vol\n\nimport homeassistant.helpers.config_validation as cv\nimport homeassistant.components.remote as remote\nfrom homeassistant.const import (\n DEVICE_DEFAULT_NAME, CONF_NAME, CONF_MAC, CONF_HOST, CONF_PORT,\n CONF_DEVICES)\nfrom homeassistant.components.remote import PLATFORM_SCHEMA\n\nREQUIREMENTS = ['pyitachip2ir==0.0.6']\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_PORT = 4998\nCONNECT_TIMEOUT = 5000\n\nCONF_MODADDR = 'modaddr'\nCONF_CONNADDR = 'connaddr'\nCONF_COMMANDS = 'commands'\nCONF_DATA = 'data'\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Optional(CONF_MAC): cv.string,\n vol.Required(CONF_HOST): cv.string,\n vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,\n vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [{\n vol.Optional(CONF_NAME): cv.string,\n vol.Optional(CONF_MODADDR): vol.Coerce(int),\n vol.Required(CONF_CONNADDR): vol.Coerce(int),\n vol.Required(CONF_COMMANDS): vol.All(cv.ensure_list, [{\n vol.Required(CONF_NAME): cv.string,\n vol.Required(CONF_DATA): cv.string\n }])\n }])\n})\n\n\n# pylint: disable=unused-argument\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the ITach connection and devices.\"\"\"\n import pyitachip2ir\n itachip2ir = pyitachip2ir.ITachIP2IR(\n config.get(CONF_MAC), config.get(CONF_HOST),\n int(config.get(CONF_PORT)))\n\n if not itachip2ir.ready(CONNECT_TIMEOUT):\n _LOGGER.error(\"Unable to find iTach\")\n return False\n\n devices = []\n for data in config.get(CONF_DEVICES):\n name = data.get(CONF_NAME)\n modaddr = int(data.get(CONF_MODADDR, 1))\n connaddr = int(data.get(CONF_CONNADDR, 1))\n cmddatas = \"\"\n for cmd in data.get(CONF_COMMANDS):\n cmdname = cmd[CONF_NAME].strip()\n if not cmdname:\n cmdname = '\"\"'\n cmddata = cmd[CONF_DATA].strip()\n if not cmddata:\n cmddata = '\"\"'\n cmddatas += \"{}\\n{}\\n\".format(cmdname, cmddata)\n itachip2ir.addDevice(name, modaddr, connaddr, cmddatas)\n devices.append(ITachIP2IRRemote(itachip2ir, name))\n add_devices(devices, True)\n return True\n\n\nclass ITachIP2IRRemote(remote.RemoteDevice):\n \"\"\"Device that sends commands to an ITachIP2IR device.\"\"\"\n\n def __init__(self, itachip2ir, name):\n \"\"\"Initialize device.\"\"\"\n self.itachip2ir = itachip2ir\n self._power = False\n self._name = name or DEVICE_DEFAULT_NAME\n\n @property\n def name(self):\n \"\"\"Return the name of the device.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return true if device is on.\"\"\"\n return self._power\n\n def turn_on(self, **kwargs):\n \"\"\"Turn the device on.\"\"\"\n self._power = True\n self.itachip2ir.send(self._name, \"ON\", 1)\n self.schedule_update_ha_state()\n\n def turn_off(self, **kwargs):\n \"\"\"Turn the device off.\"\"\"\n self._power = False\n self.itachip2ir.send(self._name, \"OFF\", 1)\n self.schedule_update_ha_state()\n\n def 
send_command(self, command, **kwargs):\n \"\"\"Send a command to one device.\"\"\"\n for single_command in command:\n self.itachip2ir.send(self._name, single_command, 1)\n\n def update(self):\n \"\"\"Update the device.\"\"\"\n self.itachip2ir.update()\n"}}},{"rowIdx":284682,"cells":{"repo_name":{"kind":"string","value":"Yichuans/ccv"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"languages/hu.py"},"copies":{"kind":"string","value":"162"},"content":{"kind":"string","value":"# coding: utf8\n{\n'!langcode!': 'hu',\n'!langname!': 'Magyar',\n'\"update\" is an optional expression like \"field1=\\'newvalue\\'\". You cannot update or delete the results of a JOIN': '\"update\" is an optional expression like \"field1=\\'newvalue\\'\". You cannot update or delete the results of a JOIN',\n'%s %%{row} deleted': '%s sorok törlődtek',\n'%s %%{row} updated': '%s sorok frissítődtek',\n'%s selected': '%s kiválasztott',\n'%Y-%m-%d': '%Y.%m.%d.',\n'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S',\n'About': 'About',\n'Access Control': 'Access Control',\n'Administrative Interface': 'Administrative Interface',\n'Administrative interface': 'az adminisztrációs felületért kattints ide',\n'Ajax Recipes': 'Ajax Recipes',\n'appadmin is disabled because insecure channel': 'az appadmin a biztonságtalan csatorna miatt letiltva',\n'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',\n'Available Databases and Tables': 'Elérhető adatbázisok és táblák',\n'Buy this book': 'Buy this book',\n'cache': 'gyorsítótár',\n'Cache': 'Cache',\n'Cache Keys': 'Cache Keys',\n'Cannot be empty': 'Nem lehet üres',\n'change password': 'jelszó megváltoztatása',\n'Check to delete': 'Törléshez válaszd ki',\n'Clear CACHE?': 'Clear CACHE?',\n'Clear DISK': 'Clear DISK',\n'Clear RAM': 'Clear RAM',\n'Client IP': 'Client IP',\n'Community': 'Community',\n'Components and Plugins': 'Components and Plugins',\n'Controller': 'Controller',\n'Copyright': 'Copyright',\n'Current request': 'Jelenlegi lekérdezés',\n'Current response': 'Jelenlegi válasz',\n'Current session': 'Jelenlegi folyamat',\n'customize me!': 'változtass meg!',\n'data uploaded': 'adat feltöltve',\n'Database': 'adatbázis',\n'Database %s select': 'adatbázis %s kiválasztás',\n'db': 'db',\n'DB Model': 'DB Model',\n'Delete:': 'Töröl:',\n'Demo': 'Demo',\n'Deployment Recipes': 'Deployment Recipes',\n'Description': 'Description',\n'design': 'design',\n'DISK': 'DISK',\n'Disk Cache Keys': 'Disk Cache Keys',\n'Disk Cleared': 'Disk Cleared',\n'Documentation': 'Documentation',\n\"Don't know what to do?\": \"Don't know what to do?\",\n'done!': 'kész!',\n'Download': 'Download',\n'E-mail': 'E-mail',\n'Edit': 'Szerkeszt',\n'Edit current record': 'Aktuális bejegyzés szerkesztése',\n'edit profile': 'profil szerkesztése',\n'Edit This App': 'Alkalmazást szerkeszt',\n'Email and SMS': 'Email and SMS',\n'Errors': 'Errors',\n'export as csv file': 'exportál csv fájlba',\n'FAQ': 'FAQ',\n'First name': 'First name',\n'Forms and Validators': 'Forms and Validators',\n'Free Applications': 'Free Applications',\n'Group ID': 'Group ID',\n'Groups': 'Groups',\n'Hello World': 'Hello Világ',\n'Home': 'Home',\n'How did you get here?': 'How did you get here?',\n'import': 'import',\n'Import/Export': 'Import/Export',\n'Index': 'Index',\n'insert new': 'új beillesztése',\n'insert new %s': 'új beillesztése %s',\n'Internal State': 'Internal State',\n'Introduction': 'Introduction',\n'Invalid email': 'Invalid email',\n'Invalid Query': 'Hibás lekérdezés',\n'invalid 
request': 'hibás kérés',\n'Key': 'Key',\n'Last name': 'Last name',\n'Layout': 'Szerkezet',\n'Layout Plugins': 'Layout Plugins',\n'Layouts': 'Layouts',\n'Live Chat': 'Live Chat',\n'login': 'belép',\n'logout': 'kilép',\n'lost password': 'elveszett jelszó',\n'Lost Password': 'Lost Password',\n'Main Menu': 'Főmenü',\n'Manage Cache': 'Manage Cache',\n'Menu Model': 'Menü model',\n'My Sites': 'My Sites',\n'Name': 'Name',\n'New Record': 'Új bejegyzés',\n'new record inserted': 'új bejegyzés felvéve',\n'next 100 rows': 'következő 100 sor',\n'No databases in this application': 'Nincs adatbázis ebben az alkalmazásban',\n'Online examples': 'online példákért kattints ide',\n'or import from csv file': 'vagy betöltés csv fájlból',\n'Origin': 'Origin',\n'Other Plugins': 'Other Plugins',\n'Other Recipes': 'Other Recipes',\n'Overview': 'Overview',\n'Password': 'Password',\n'Plugins': 'Plugins',\n'Powered by': 'Powered by',\n'Preface': 'Preface',\n'previous 100 rows': 'előző 100 sor',\n'Python': 'Python',\n'Query:': 'Lekérdezés:',\n'Quick Examples': 'Quick Examples',\n'RAM': 'RAM',\n'RAM Cache Keys': 'RAM Cache Keys',\n'Ram Cleared': 'Ram Cleared',\n'Recipes': 'Recipes',\n'Record': 'bejegyzés',\n'record does not exist': 'bejegyzés nem létezik',\n'Record ID': 'Record ID',\n'Record id': 'bejegyzés id',\n'Register': 'Register',\n'register': 'regisztráció',\n'Registration key': 'Registration key',\n'Reset Password key': 'Reset Password key',\n'Role': 'Role',\n'Rows in Table': 'Sorok a táblában',\n'Rows selected': 'Kiválasztott sorok',\n'Semantic': 'Semantic',\n'Services': 'Services',\n'Size of cache:': 'Size of cache:',\n'state': 'állapot',\n'Statistics': 'Statistics',\n'Stylesheet': 'Stylesheet',\n'submit': 'submit',\n'Support': 'Support',\n'Sure you want to delete this object?': 'Biztos törli ezt az objektumot?',\n'Table': 'tábla',\n'Table name': 'Table name',\n'The \"query\" is a condition like \"db.table1.field1==\\'value\\'\". Something like \"db.table1.field1==db.table2.field2\" results in a SQL JOIN.': 'The \"query\" is a condition like \"db.table1.field1==\\'value\\'\". Something like \"db.table1.field1==db.table2.field2\" results in a SQL JOIN.',\n'The Core': 'The Core',\n'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',\n'The Views': 'The Views',\n'This App': 'This App',\n'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',\n'Timestamp': 'Timestamp',\n'Twitter': 'Twitter',\n'unable to parse csv file': 'nem lehet a csv fájlt beolvasni',\n'Update:': 'Frissít:',\n'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) 
for NOT to build more complex queries.',\n'User ID': 'User ID',\n'Videos': 'Videos',\n'View': 'Nézet',\n'Welcome %s': 'Welcome %s',\n'Welcome to web2py': 'Isten hozott a web2py-ban',\n'Welcome to web2py!': 'Welcome to web2py!',\n'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',\n'You are successfully running web2py': 'You are successfully running web2py',\n'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',\n'You visited the url %s': 'You visited the url %s',\n}\n"}}},{"rowIdx":284683,"cells":{"repo_name":{"kind":"string","value":"sjerdo/letsencrypt"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"letsencrypt/plugins/webroot_test.py"},"copies":{"kind":"string","value":"2"},"content":{"kind":"string","value":"\"\"\"Tests for letsencrypt.plugins.webroot.\"\"\"\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport mock\n\nfrom acme import challenges\nfrom acme import jose\n\nfrom letsencrypt import achallenges\nfrom letsencrypt import errors\n\nfrom letsencrypt.tests import acme_util\nfrom letsencrypt.tests import test_util\n\n\nKEY = jose.JWKRSA.load(test_util.load_vector(\"rsa512_key.pem\"))\n\n\nclass AuthenticatorTest(unittest.TestCase):\n \"\"\"Tests for letsencrypt.plugins.webroot.Authenticator.\"\"\"\n\n achall = achallenges.KeyAuthorizationAnnotatedChallenge(\n challb=acme_util.HTTP01_P, domain=None, account_key=KEY)\n\n def setUp(self):\n from letsencrypt.plugins.webroot import Authenticator\n self.path = tempfile.mkdtemp()\n self.validation_path = os.path.join(\n self.path, \".well-known\", \"acme-challenge\",\n \"ZXZhR3hmQURzNnBTUmIyTEF2OUlaZjE3RHQzanV4R0orUEN0OTJ3citvQQ\")\n self.config = mock.MagicMock(webroot_path=self.path)\n self.auth = Authenticator(self.config, \"webroot\")\n self.auth.prepare()\n\n def tearDown(self):\n shutil.rmtree(self.path)\n\n def test_more_info(self):\n more_info = self.auth.more_info()\n self.assertTrue(isinstance(more_info, str))\n self.assertTrue(self.path in more_info)\n\n def test_add_parser_arguments(self):\n add = mock.MagicMock()\n self.auth.add_parser_arguments(add)\n self.assertEqual(1, add.call_count)\n\n def test_prepare_bad_root(self):\n self.config.webroot_path = os.path.join(self.path, \"null\")\n self.assertRaises(errors.PluginError, self.auth.prepare)\n\n def test_prepare_missing_root(self):\n self.config.webroot_path = None\n self.assertRaises(errors.PluginError, self.auth.prepare)\n\n def test_prepare_full_root_exists(self):\n # prepare() has already been called once in setUp()\n self.auth.prepare() # shouldn't raise any exceptions\n\n def test_prepare_reraises_other_errors(self):\n self.auth.full_path = os.path.join(self.path, \"null\")\n os.chmod(self.path, 0o000)\n self.assertRaises(errors.PluginError, self.auth.prepare)\n os.chmod(self.path, 0o700)\n\n def test_perform_cleanup(self):\n responses = self.auth.perform([self.achall])\n self.assertEqual(1, len(responses))\n self.assertTrue(os.path.exists(self.validation_path))\n with open(self.validation_path) as validation_f:\n validation = validation_f.read()\n self.assertTrue(\n challenges.KeyAuthorizationChallengeResponse(\n key_authorization=validation).verify(\n self.achall.chall, KEY.public_key()))\n\n self.auth.cleanup([self.achall])\n self.assertFalse(os.path.exists(self.validation_path))\n\n\nif __name__ == \"__main__\":\n unittest.main() # pragma: no 
cover\n"}}},{"rowIdx":284684,"cells":{"repo_name":{"kind":"string","value":"ddepaoli3/magnum"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"magnum/__init__.py"},"copies":{"kind":"string","value":"19"},"content":{"kind":"string","value":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport threading\n\nimport pbr.version\n\n\n__version__ = pbr.version.VersionInfo(\n 'magnum').version_string()\n\n# Make a project global TLS trace storage repository\nTLS = threading.local()\n"}}},{"rowIdx":284685,"cells":{"repo_name":{"kind":"string","value":"chvalean/lis-test"},"ref":{"kind":"string","value":"refs/heads/master"},"path":{"kind":"string","value":"WS2012R2/lisa/tools/middleware_bench/utils/setup.py"},"copies":{"kind":"string","value":"7"},"content":{"kind":"string","value":"\"\"\"\nLinux on Hyper-V and Azure Test Code, ver. 1.0.0\nCopyright (c) Microsoft Corporation\n\nAll rights reserved\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nSee the Apache Version 2.0 License for specific language governing\npermissions and limitations under the License.\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport logging\n\nfrom utils import constants\nfrom utils.cmdshell import SSHClient\nfrom report.db_utils import upload_results\nfrom paramiko.ssh_exception import NoValidConnectionsError\n\nfrom providers.amazon_service import AWSConnector\nfrom providers.azure_service import AzureConnector\nfrom providers.gcp_service import GCPConnector\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)\nlog = logging.getLogger(__name__)\n\n\nclass SetupTestEnv:\n \"\"\"\n Setup test environment.\n \"\"\"\n def __init__(self, provider=None, vm_count=None, test_type=None, disk_size=None, raid=None,\n keyid=None, secret=None, token=None, subscriptionid=None, tenantid=None,\n projectid=None, imageid=None, instancetype=None, user=None, localpath=None,\n region=None, zone=None, sriov=False, kernel=None):\n \"\"\"\n Init AWS connector to create and configure AWS ec2 instances.\n :param provider Service provider to be used e.g. 
                          azure, aws, gce.
        :param vm_count: Number of VMs to prepare
        :param test_type: vm_disk > 1 VM with disk (Orion and Sysbench)
                          no_disk > No disk attached (Redis, Memcached, Apache_bench)
                          db_disk > Second VM with disk (MariaDB, MongoDB)
                          cluster_disk > All VMs have disks (Terasort)
        :param disk_size: size of the disk(s) to attach
        :param raid: Bool or Int (the number of disks), to specify if a RAID will be configured
        :param keyid: user key for executing remote connection
        :param secret: user secret for executing remote connection
        :param token: GCE refresh token obtained with gcloud sdk
        :param subscriptionid: Azure specific subscription id
        :param tenantid: Azure specific tenant id
        :param projectid: GCE specific project id
        :param imageid: AWS OS AMI image id or
                        Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS' or
                        GCE image family, e.g. 'ubuntu-1604-lts'
        :param instancetype: AWS instance resource type e.g. 'd2.4xlarge' or
                             Azure hardware profile vm size e.g. 'Standard_DS14_v2' or
                             GCE instance size e.g. 'n1-highmem-16'
        :param user: remote ssh user for the instance
        :param localpath: localpath where the logs should be downloaded, and the
                          default path for other necessary tools
        :param region: region to connect to
        :param zone: zone where other resources should be available
        :param sriov: bool for configuring SR-IOV or not
        :param kernel: kernel deb name to install provided in localpath
        :rtype: Tuple
        :return: connector <Connector>,
                 vm_ips <VM private IPs dict>,
                 device <attached disk devices>,
                 ssh_client <ssh clients dict>
        """
        self.provider = provider
        self.vm_count = vm_count
        self.test_type = test_type
        self.disk_size = disk_size
        self.raid = raid
        self.keyid = keyid
        self.secret = secret
        self.token = token
        self.subscriptionid = subscriptionid
        self.tenantid = tenantid
        self.projectid = projectid
        self.imageid = imageid
        self.instancetype = instancetype
        self.user = user
        self.localpath = localpath
        self.region = region
        self.zone = zone
        self.sriov = sriov
        self.kernel = kernel

        # create and generate setup details
        try:
            self.connector = self.create_connector()
            self.vms = self.create_instances()
            self.device = self.get_disk_devices()
            self.ssh_client, self.vm_ips = self.get_instance_details()
            self.perf_tuning()
            self.reconnect_sshclient()
        except Exception as e:
            log.exception(e)
            if self.connector:
                self.connector.teardown()
            raise

    def create_connector(self):
        """
        Create connector by provider.
        :return: connector
        """
        connector = None
        if self.provider == constants.AWS:
            connector = AWSConnector(keyid=self.keyid, secret=self.secret, imageid=self.imageid,
                                     instancetype=self.instancetype, user=self.user,
                                     localpath=self.localpath, region=self.region, zone=self.zone)
        elif self.provider == constants.AZURE:
            connector = AzureConnector(clientid=self.keyid, secret=self.secret,
                                       subscriptionid=self.subscriptionid, tenantid=self.tenantid,
                                       imageid=self.imageid, instancetype=self.instancetype,
                                       user=self.user, localpath=self.localpath,
                                       location=self.region, sriov=self.sriov)
        elif self.provider == constants.GCE:
            connector = GCPConnector(clientid=self.keyid, secret=self.secret, token=self.token,
                                     projectid=self.projectid, imageid=self.imageid,
                                     instancetype=self.instancetype, user=self.user,
                                     localpath=self.localpath, zone=self.zone)
        if connector:
            connector.connect()
            return connector
        else:
            raise Exception('Unsupported provider or connector failed.')

    def create_instances(self):
        """
        Create instances.
        :return: VM instances
        """
        open(self.connector.host_key_file, 'w').close()
        vms = {}
        for i in xrange(1, self.vm_count + 1):
            vms[i] = self.connector.create_vm()
        return vms

    def reconnect_sshclient(self):
        if self.provider == constants.AWS:
            log.info('The provider is AWS, reconnect sshclient')
            for i in xrange(1, self.vm_count + 1):
                self.ssh_client[i].connect()

    def get_instance_details(self):
        """
        Create ssh client and get vm IPs
        :return: ssh_client, vm_ips
        """
        ssh_client = {}
        vm_ips = {}
        for i in xrange(1, self.vm_count + 1):
            if self.provider == constants.AWS:
                ssh_client[i] = self.connector.wait_for_ping(self.vms[i])
                # SRIOV is enabled by default on AWS for the tested platforms
                # if sriov == constants.ENABLED:
                #     ssh_client[i] = connector.enable_sr_iov(vms[i], ssh_client[i])
                self.vms[i].update()
                vm_ips[i] = self.vms[i].private_ip_address
            elif self.provider == constants.AZURE:
                ssh_client[i] = SSHClient(server=self.vms[i].name + self.connector.dns_suffix,
                                          host_key_file=self.connector.host_key_file,
                                          user=self.connector.user,
                                          ssh_key_file=os.path.join(
                                              self.connector.localpath,
                                              self.connector.key_name + '.pem'))
                ip = ssh_client[i].run(
                    'ifconfig eth0 | grep "inet\ addr" | cut -d: -f2 | cut -d" " -f1')
                vm_ips[i] = ip[1].strip()
            elif self.provider == constants.GCE:
                ssh_client[i] = self.connector.wait_for_ping(self.vms[i])
                vm_ips[i] = self.vms[i]['networkInterfaces'][0]['networkIP']
        return ssh_client, vm_ips

    def attach_raid_disks(self, vm_tag, disk_args):
        device = []
        for i in xrange(self.raid):
            if self.provider == constants.AWS:
                disk_args['device'] = '/dev/sd{}'.format(chr(120 - i))
                device.append(disk_args['device'].replace('sd', 'xvd'))
            elif self.provider == constants.AZURE:
                disk_args['device'] = i
                device.append('/dev/sd{}'.format(chr(99 + i)))
            elif self.provider == constants.GCE:
                device.append('/dev/sd{}'.format(chr(98 + i)))
            self.connector.attach_disk(self.vms[vm_tag], disk_size=self.disk_size, **disk_args)
        return device

    def get_disk_devices(self):
        if not self.test_type:
            return None
        device = None
        disk_args = {}
        if self.provider == constants.AWS:
            device = constants.DEVICE_AWS.replace('sd', 'xvd')
            disk_args['iops'] = 5000
            disk_args['volume_type'] = self.connector.volume_type['ssd_io1']
            disk_args['device'] = constants.DEVICE_AWS
        elif self.provider == constants.AZURE:
            device = constants.DEVICE_AZURE
        elif self.provider == constants.GCE:
            # Note: using disk device order prediction, GCE API is not consistent
            # in the disk naming
            # device = constants.DEVICE_GCE + disk_name
            device = constants.TEMP_DEVICE_GCE

        if self.test_type == constants.CLUSTER_DISK:
            self.connector.attach_disk(self.vms[1], disk_size=self.disk_size + 200, **disk_args)
            for i in xrange(2, self.vm_count + 1):
                self.connector.attach_disk(self.vms[i], disk_size=self.disk_size, **disk_args)
                time.sleep(3)
            return device

        vm_tag = None
        if self.test_type == constants.VM_DISK:
            vm_tag = 1
        elif self.test_type == constants.DB_DISK:
            vm_tag = 2

        if self.raid and type(self.raid) is int:
            return self.attach_raid_disks(vm_tag, disk_args)
        else:
            self.connector.attach_disk(self.vms[vm_tag], disk_size=self.disk_size, **disk_args)

        return device

    def perf_tuning(self):
        current_path = os.path.dirname(sys.modules['__main__'].__file__)
        for i in range(1, self.vm_count + 1):
            log.info('Running perf tuning on {}'.format(self.vm_ips[i]))
            self.ssh_client[i].connect()
            self.ssh_client[i].put_file(os.path.join(current_path, 'tests', 'perf_tuning.sh'),
                                        '/tmp/perf_tuning.sh')
            self.ssh_client[i].run('chmod +x /tmp/perf_tuning.sh')
            self.ssh_client[i].run("sed -i 's/\r//' /tmp/perf_tuning.sh")
            params = [self.provider]
            if '.deb' in self.kernel:
                log.info('Uploading kernel {} on {}'.format(self.kernel, self.vm_ips[i]))
                self.ssh_client[i].put_file(os.path.join(self.localpath, self.kernel),
                                            '/tmp/{}'.format(self.kernel))
                params.append('/tmp/{}'.format(self.kernel))
            self.ssh_client[i].run('/tmp/perf_tuning.sh {}'.format(' '.join(params)))
            if self.provider in [constants.AWS, constants.GCE]:
                self.ssh_client[i] = self.connector.restart_vm(self.vms[i])
            elif self.provider == constants.AZURE:
                self.vms[i] = self.connector.restart_vm(self.vms[i].name)
                # TODO add custom kernel support for all providers - only azure support
                self.ssh_client[i] = SSHClient(server=self.vms[i].name + self.connector.dns_suffix,
                                               host_key_file=self.connector.host_key_file,
                                               user=self.connector.user,
                                               ssh_key_file=os.path.join(
                                                   self.connector.localpath,
                                                   self.connector.key_name + '.pem'))
                ip = self.ssh_client[i].run(
                    'ifconfig eth0 | grep "inet\ addr" | cut -d: -f2 | cut -d" " -f1')
                self.vm_ips[i] = ip[1].strip()

    def run_test(self, ssh_vm_conf=0, testname=None, test_cmd=None, results_path=None, raid=False,
                 ssh_raid=1, timeout=constants.TIMEOUT):
        try:
            if all(client is not None for client in self.ssh_client.values()):
                current_path = os.path.dirname(sys.modules['__main__'].__file__)
                # enable key auth between instances
                for i in xrange(1, ssh_vm_conf + 1):
                    self.ssh_client[i].put_file(os.path.join(self.localpath,
                                                             self.connector.key_name + '.pem'),
                                                '/home/{}/.ssh/id_rsa'.format(self.user))
                    self.ssh_client[i].run('chmod 0600 /home/{0}/.ssh/id_rsa'.format(self.user))
                if raid:
                    self.ssh_client[ssh_raid].put_file(os.path.join(
                        current_path, 'tests', 'raid.sh'), '/tmp/raid.sh')
                    self.ssh_client[ssh_raid].run('chmod +x /tmp/raid.sh')
                    self.ssh_client[ssh_raid].run("sed -i 's/\r//' /tmp/raid.sh")
                    self.ssh_client[ssh_raid].run('/tmp/raid.sh 0 {} {}'.format(raid, ' '.join(
                        self.device)))
                bash_testname = 'run_{}.sh'.format(testname)
                self.ssh_client[1].put_file(os.path.join(current_path, 'tests', bash_testname),
                                            '/tmp/{}'.format(bash_testname))
                self.ssh_client[1].run('chmod +x /tmp/{}'.format(bash_testname))
                self.ssh_client[1].run("sed -i 's/\r//' /tmp/{}".format(bash_testname))
                log.info('Starting background command {}'.format(test_cmd))
                channel = self.ssh_client[1].run_pty(test_cmd)
                _, pid, _ = self.ssh_client[1].run(
                    "ps aux | grep -v grep | grep {} | awk '{{print $2}}'".format(
                        bash_testname))
                self._wait_for_pid(self.ssh_client[1], bash_testname, pid, timeout=timeout)
                channel.close()
                self.ssh_client[1].get_file('/tmp/{}.zip'.format(testname), results_path)
        except Exception as e:
            log.exception(e)
            raise
        finally:
            if self.connector:
                self.connector.teardown()

    @staticmethod
    def _wait_for_pid(ssh_client, bash_testname, pid, timeout=constants.TIMEOUT):
        t = 0
        while t < timeout:
            try:
                _, new_pid, _ = ssh_client.run(
                    "ps aux | grep -v grep | grep {} | awk '{{print $2}}'".format(
                        bash_testname))
                if new_pid != pid:
                    return
            except NoValidConnectionsError:
                log.debug('NoValidConnectionsError, will retry in 60 seconds')
                time.sleep(60)
                t += 60
            time.sleep(60)
            t += 60
        else:
            raise Exception('Timeout ({}s) waiting for process to end.'.format(timeout))

    def run_test_nohup(self, ssh_vm_conf=0, test_cmd=None, timeout=constants.TIMEOUT, track=None):
        try:
            if all(client is not None for client in self.ssh_client.values()):
                current_path = os.path.dirname(sys.modules['__main__'].__file__)
                # enable key auth between instances
                for i in xrange(1, ssh_vm_conf + 1):
                    self.ssh_client[i].put_file(os.path.join(self.localpath,
                                                             self.connector.key_name + '.pem'),
                                                '/home/{}/.ssh/id_rsa'.format(self.user))
                    self.ssh_client[i].run('chmod 0600 /home/{0}/.ssh/id_rsa'.format(self.user))
                log.info('Starting nohup command {}'.format(test_cmd))
                self.ssh_client[1].run(test_cmd)
                self._wait_for_command(self.ssh_client[1], track, timeout=timeout)
        except Exception as e:
            log.exception(e)
            raise
        finally:
            log.info('Finished running nohup command {}'.format(test_cmd))

    @staticmethod
    def _wait_for_command(ssh_client, track, timeout=constants.TIMEOUT):
        t = 0
        while t < timeout:
            try:
                _, p_count, _ = ssh_client.run(
                    "ps aux | grep -v grep | grep {} | awk '{{print $2}}' | wc -l".format(
                        track))
                if int(p_count) == 0:
                    return
            except NoValidConnectionsError:
                log.debug('NoValidConnectionsError, will retry in 60 seconds')
                time.sleep(60)
                t += 60
            time.sleep(60)
            t += 60
        else:
            raise Exception('Timeout ({}s) waiting for process to end.'.format(timeout))
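The `_wait_for_pid` and `_wait_for_command` helpers above share one polling pattern: interrogate `ps` over SSH every 60 seconds until the tracked process disappears or the timeout budget is spent. A standalone sketch of that pattern, where the `run` callable standing in for an SSH client's `run` method is an assumption:

import time

def wait_for_process(run, track, timeout=3600, interval=60):
    # Poll remote `ps` output until no process matching `track` remains.
    waited = 0
    while waited < timeout:
        _, count, _ = run("ps aux | grep -v grep | grep {} | wc -l".format(track))
        if int(count) == 0:
            return  # tracked process has exited
        time.sleep(interval)
        waited += interval
    raise Exception('Timeout ({}s) waiting for process to end.'.format(timeout))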
================================================================================
repo_name: jalavik/inspire-next
ref: refs/heads/master
path: inspire/testsuite/test_export.py
copies: 2
================================================================================
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

import pkg_resources
import os

from dojson.contrib.marc21.utils import create_record

from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
from inspire.dojson.hep import hep
from invenio.base.wrappers import lazy_import
Export = lazy_import('inspire.utils.export.Export')


class ExportTests(InvenioTestCase):

    def setUp(self):
        self.marcxml = pkg_resources.resource_string('inspire.testsuite',
                                                     os.path.join(
                                                         'fixtures',
                                                         'test_hep_formats.xml')
                                                     )
        record = create_record(self.marcxml)

        self.hep_record = hep.do(record)

        self.export = Export(self.hep_record)

        self.sample_export_good = {
            'citation_key': 'Aad:2015wqa',
            'doi': '10.1140/epjc/s10052-015-3661-9, 10.1140/epjc/s10052-015-3518-2',
            'arxiv_field': {u'categories': [u'hep-ex'], u'value': u'arXiv:1503.03290'},
            'arxiv': 'arXiv:1503.03290 [hep-ex]',
            'reportNumber': 'CERN-PH-EP-2015-038',
            'SLACcitation': '%%CITATION = ARXIV:1503.03290;%%',
        }

    def test_citation_key(self):
        """Test if citation key is created correctly"""
        self.assertEqual(self.sample_export_good['citation_key'],
                         self.export._get_citation_key())

    def test_doi(self):
        """Test if doi is created correctly"""
        self.assertEqual(self.sample_export_good['doi'],
                         self.export._get_doi())

    def test_arxiv_field(self):
        """Test if arxiv_field is created correctly"""
        self.assertEqual(self.sample_export_good['arxiv_field'],
                         self.export.arxiv_field)

    def test_arxiv(self):
        """Test if arxiv is created correctly"""
        self.assertEqual(self.sample_export_good['arxiv'],
                         self.export._get_arxiv())

    def test_report_number(self):
        """Test if report number is created correctly"""
        self.assertEqual(self.sample_export_good['reportNumber'],
                         self.export._get_report_number())

    def test_slac_citations(self):
        """Test if slac citation is created correctly"""
        self.assertEqual(self.sample_export_good['SLACcitation'],
                         self.export._get_slac_citation())


TEST_SUITE = make_test_suite(ExportTests)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)

================================================================================
repo_name: p0psicles/SickRage
ref: refs/heads/master
path: lib/feedparser/util.py
copies: 36
================================================================================
# Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import, unicode_literals

import warnings


class FeedParserDict(dict):
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'description_detail': ['summary_detail', 'subtitle_detail'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        '''
        :return: A :class:`FeedParserDict`.
        '''
        if key == 'category':
            try:
                return dict.__getitem__(self, 'tags')[0]['term']
            except IndexError:
                raise KeyError("object doesn't have key 'category'")
        elif key == 'enclosures':
            norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name != 'rel'])
            return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel'] == 'enclosure']
        elif key == 'license':
            for link in dict.__getitem__(self, 'links'):
                if link['rel'] == 'license' and 'href' in link:
                    return link['href']
        elif key == 'updated':
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            if not dict.__contains__(self, 'updated') and \
                    dict.__contains__(self, 'published'):
                warnings.warn("To avoid breaking existing software while "
                              "fixing issue 310, a temporary mapping has been created "
                              "from `updated` to `published` if `updated` doesn't "
                              "exist. This fallback will be removed in a future version "
                              "of feedparser.", DeprecationWarning)
                return dict.__getitem__(self, 'published')
            return dict.__getitem__(self, 'updated')
        elif key == 'updated_parsed':
            if not dict.__contains__(self, 'updated_parsed') and \
                    dict.__contains__(self, 'published_parsed'):
                warnings.warn("To avoid breaking existing software while "
                              "fixing issue 310, a temporary mapping has been created "
                              "from `updated_parsed` to `published_parsed` if "
                              "`updated_parsed` doesn't exist. This fallback will be "
                              "removed in a future version of feedparser.",
                              DeprecationWarning)
                return dict.__getitem__(self, 'published_parsed')
            return dict.__getitem__(self, 'updated_parsed')
        else:
            realkey = self.keymap.get(key, key)
            if isinstance(realkey, list):
                for k in realkey:
                    if dict.__contains__(self, k):
                        return dict.__getitem__(self, k)
            elif dict.__contains__(self, realkey):
                return dict.__getitem__(self, realkey)
        return dict.__getitem__(self, key)

    def __contains__(self, key):
        if key in ('updated', 'updated_parsed'):
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            return dict.__contains__(self, key)
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        else:
            return True

    has_key = __contains__

    def get(self, key, default=None):
        '''
        :return: A :class:`FeedParserDict`.
        '''
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __setitem__(self, key, value):
        key = self.keymap.get(key, key)
        if isinstance(key, list):
            key = key[0]
        return dict.__setitem__(self, key, value)

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]

    def __getattr__(self, key):
        # __getattribute__() is called first; this will be called
        # only if an attribute was not already found
        try:
            return self.__getitem__(key)
        except KeyError:
            raise AttributeError("object has no attribute '%s'" % key)

    def __hash__(self):
        return id(self)
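A short interactive sketch of the key aliasing implemented above (illustrative, not part of the module):

d = FeedParserDict()
d['channel'] = {'title': 'Example feed'}  # __setitem__ maps 'channel' -> 'feed'
assert d['feed'] == d['channel']          # __getitem__ maps the legacy key back
assert d.feed is d['feed']                # __getattr__ falls back to item lookup
assert 'channel' in d                     # __contains__ goes through __getitem__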
================================================================================
repo_name: VielSoft/odoo
ref: refs/heads/8.0
path: addons/l10n_ar/__openerp__.py
copies: 260
================================================================================
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Argentina Localization Chart Account',
    'version': '1.0',
    'description': """
Argentinian accounting chart and tax localization.
==================================================

Plan contable argentino e impuestos de acuerdo a disposiciones vigentes
(Argentinian chart of accounts and taxes according to current regulations).

    """,
    'author': ['Cubic ERP'],
    'website': 'http://cubicERP.com',
    'category': 'Localization/Account Charts',
    'depends': ['account_chart'],
    'data': [
        'account_tax_code.xml',
        'l10n_ar_chart.xml',
        'account_tax.xml',
        'l10n_ar_wizard.xml',
    ],
    'demo': [],
    'active': False,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

================================================================================
repo_name: agiliq/django
ref: refs/heads/master
path: django/conf/locale/fr/formats.py
copies: 116
================================================================================
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y',  # Swiss (fr_CH), '25.10.2006', '25.10.06'
    # '%d %B %Y', '%d %b %Y',  # '25 octobre 2006', '25 oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',     # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y',              # Swiss (fr_CH), '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
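A sketch of how Django consumes such locale modules (assumes a configured Django project with localization enabled):

from django.utils import formats, translation

with translation.override('fr'):
    formats.get_format('DATE_FORMAT')         # 'j F Y'
    formats.get_format('DATE_INPUT_FORMATS')  # the tuple defined above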
================================================================================
repo_name: sonium0/pymatgen
ref: refs/heads/master
path: pymatgen/alchemy/materials.py
copies: 1
================================================================================
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from __future__ import unicode_literals

"""
This module provides various representations of transformed structures. A
TransformedStructure is a structure that has been modified by undergoing a
series of transformations.
"""


__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 2, 2012"

import os
import re
import json
import datetime
from copy import deepcopy

from monty.json import MontyDecoder

from pymatgen.core.structure import Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.matproj.snl import StructureNL

from warnings import warn

dec = MontyDecoder()


class TransformedStructure(PMGSONable):
    """
    Container object for new structures that include history of
    transformations.

    Each transformed structure is made up of a sequence of structures with
    associated transformation history.
    """

    def __init__(self, structure, transformations=None, history=None,
                 other_parameters=None):
        """
        Initializes a transformed structure from a structure.

        Args:
            structure (Structure): Input structure
            transformations ([Transformations]): List of transformations to
                apply.
            history (list): Previous history.
            other_parameters (dict): Additional parameters to be added.
        """
        self.final_structure = structure
        self.history = history or []
        self.other_parameters = other_parameters or {}
        self._undone = []

        transformations = transformations or []
        for t in transformations:
            self.append_transformation(t)

    def undo_last_change(self):
        """
        Undo the last change in the TransformedStructure.

        Raises:
            IndexError: If already at the oldest change.
        """
        if len(self.history) == 0:
            raise IndexError("Can't undo. Already at oldest change.")
        if 'input_structure' not in self.history[-1]:
            raise IndexError("Can't undo. Latest history has no "
                             "input_structure")
        h = self.history.pop()
        self._undone.append((h, self.final_structure))
        s = h["input_structure"]
        if isinstance(s, dict):
            s = Structure.from_dict(s)
        self.final_structure = s

    def redo_next_change(self):
        """
        Redo the last undone change in the TransformedStructure.

        Raises:
            IndexError: If already at the latest change.
        """
        if len(self._undone) == 0:
            raise IndexError("Can't redo. Already at latest change.")
        h, s = self._undone.pop()
        self.history.append(h)
        self.final_structure = s

    def __getattr__(self, name):
        s = object.__getattribute__(self, 'final_structure')
        return getattr(s, name)

    def __len__(self):
        return len(self.history)

    def append_transformation(self, transformation, return_alternatives=False,
                              clear_redo=True):
        """
        Appends a transformation to the TransformedStructure.

        Args:
            transformation: Transformation to append
            return_alternatives: Whether to return alternative
                TransformedStructures for one-to-many transformations.
                return_alternatives can be a number, which stipulates the
                total number of structures to return.
            clear_redo: Boolean indicating whether to clear the redo list.
                By default, this is True, meaning any appends clears the
                history of undoing. However, when using append_transformation
                to do a redo, the redo list should not be cleared to allow
                multiple redos.
        """
        if clear_redo:
            self._undone = []

        if return_alternatives and transformation.is_one_to_many:
            ranked_list = transformation.apply_transformation(
                self.final_structure, return_ranked_list=return_alternatives)

            input_structure = self.final_structure.as_dict()
            alts = []
            for x in ranked_list[1:]:
                s = x.pop("structure")
                actual_transformation = x.pop("transformation", transformation)
                hdict = actual_transformation.as_dict()
                hdict["input_structure"] = input_structure
                hdict["output_parameters"] = x
                self.final_structure = s
                d = self.as_dict()
                d['history'].append(hdict)
                d['final_structure'] = s.as_dict()
                alts.append(TransformedStructure.from_dict(d))

            x = ranked_list[0]
            s = x.pop("structure")
            actual_transformation = x.pop("transformation", transformation)
            hdict = actual_transformation.as_dict()
            hdict["input_structure"] = self.final_structure.as_dict()
            hdict["output_parameters"] = x
            self.history.append(hdict)
            self.final_structure = s
            return alts
        else:
            s = transformation.apply_transformation(self.final_structure)
            hdict = transformation.as_dict()
            hdict["input_structure"] = self.final_structure.as_dict()
            hdict["output_parameters"] = {}
            self.history.append(hdict)
            self.final_structure = s

    def append_filter(self, structure_filter):
        """
        Adds a filter.

        Args:
            structure_filter (StructureFilter): A filter implementing the
                AbstractStructureFilter API. Tells the transmuter what
                structures to retain.
        """
        hdict = structure_filter.as_dict()
        hdict["input_structure"] = self.final_structure.as_dict()
        self.history.append(hdict)

    def extend_transformations(self, transformations,
                               return_alternatives=False):
        """
        Extends a sequence of transformations to the TransformedStructure.

        Args:
            transformations: Sequence of Transformations
            return_alternatives: Whether to return alternative
                TransformedStructures for one-to-many transformations.
                return_alternatives can be a number, which stipulates the
                total number of structures to return.
        """
        for t in transformations:
            self.append_transformation(t,
                                       return_alternatives=return_alternatives)

    def get_vasp_input(self, vasp_input_set, generate_potcar=True):
        """
        Returns VASP input as a dict of vasp objects.

        Args:
            vasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set
                to create vasp input files from structures
            generate_potcar (bool): Set to False to generate a POTCAR.spec
                file instead of a POTCAR, which contains the POTCAR labels
                but not the actual POTCAR. Defaults to True.
        """
        d = vasp_input_set.get_all_vasp_input(self.final_structure,
                                              generate_potcar)
        d["transformations.json"] = json.dumps(self.as_dict())
        return d

    def write_vasp_input(self, vasp_input_set, output_dir,
                         create_directory=True):
        """
        Writes VASP input to an output_dir.

        Args:
            vasp_input_set:
                pymatgen.io.vaspio_set.VaspInputSet like object that creates
                vasp input files from structures
            output_dir:
                Directory to output files
            create_directory:
                Create the directory if not present. Defaults to True.
        """
        vasp_input_set.write_input(self.final_structure, output_dir,
                                   make_dir_if_not_present=create_directory)
        with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
            json.dump(self.as_dict(), fp)

    def __str__(self):
        output = ["Current structure", "------------",
                  str(self.final_structure),
                  "\nHistory",
                  "------------"]
        for h in self.history:
            h.pop('input_structure', None)
            output.append(str(h))
        output.append("\nOther parameters")
        output.append("------------")
        output.append(str(self.other_parameters))
        return "\n".join(output)

    def set_parameter(self, key, value):
        self.other_parameters[key] = value

    @property
    def was_modified(self):
        """
        Boolean describing whether the last transformation on the structure
        made any alterations to it. One example of when this would return
        False is when a substitution transformation is performed on a
        structure that does not contain the specie to be replaced.
        """
        return not self.final_structure == self.structures[-2]

    @property
    def structures(self):
        """
        Copy of all structures in the TransformedStructure. A
        structure is stored after every single transformation.
        """
        hstructs = [Structure.from_dict(s['input_structure'])
                    for s in self.history if 'input_structure' in s]
        return hstructs + [self.final_structure]

    @staticmethod
    def from_cif_string(cif_string, transformations=None, primitive=True,
                        occupancy_tolerance=1.):
        """
        Generates TransformedStructure from a cif string.

        Args:
            cif_string (str): Input cif string. Should contain only one
                structure. For cifs containing multiple structures, please use
                CifTransmuter.
            transformations ([Transformations]): Sequence of transformations
                to be applied to the input structure.
            primitive (bool): Option to set if the primitive cell should be
                extracted. Defaults to True. However, there are certain
                instances where you might want to use a non-primitive cell,
                e.g., if you are trying to generate all possible orderings of
                partial removals or order a disordered structure.
            occupancy_tolerance (float): If total occupancy of a site is
                between 1 and occupancy_tolerance, the occupancies will be
                scaled down to 1.

        Returns:
            TransformedStructure
        """
        parser = CifParser.from_string(cif_string, occupancy_tolerance)
        raw_string = re.sub("'", "\"", cif_string)
        cif_dict = parser.as_dict()
        cif_keys = list(cif_dict.keys())
        s = parser.get_structures(primitive)[0]
        partial_cif = cif_dict[cif_keys[0]]
        if "_database_code_ICSD" in partial_cif:
            source = partial_cif["_database_code_ICSD"] + "-ICSD"
        else:
            source = "uploaded cif"
        source_info = {"source": source,
                       "datetime": str(datetime.datetime.now()),
                       "original_file": raw_string,
                       "cif_data": cif_dict[cif_keys[0]]}
        return TransformedStructure(s, transformations, history=[source_info])

    @staticmethod
    def from_poscar_string(poscar_string, transformations=None):
        """
        Generates TransformedStructure from a poscar string.

        Args:
            poscar_string (str): Input POSCAR string.
            transformations ([Transformations]): Sequence of transformations
                to be applied to the input structure.
        """
        p = Poscar.from_string(poscar_string)
        if not p.true_names:
            raise ValueError("Transformation can be created only from POSCAR "
                             "strings with proper VASP5 element symbols.")
        raw_string = re.sub("'", "\"", poscar_string)
        s = p.structure
        source_info = {"source": "POSCAR",
                       "datetime": str(datetime.datetime.now()),
                       "original_file": raw_string}
        return TransformedStructure(s, transformations, history=[source_info])

    def as_dict(self):
        """
        Dict representation of the TransformedStructure.
        """
        d = self.final_structure.as_dict()
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["history"] = deepcopy(self.history)
        d["version"] = __version__
        d["last_modified"] = str(datetime.datetime.utcnow())
        d["other_parameters"] = deepcopy(self.other_parameters)
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Creates a TransformedStructure from a dict.
        """
        s = Structure.from_dict(d)
        return cls(s, history=d["history"],
                   other_parameters=d.get("other_parameters", None))

    def to_snl(self, authors, projects=None, references='', remarks=None,
               data=None, created_at=None):
        if self.other_parameters:
            warn('Data in TransformedStructure.other_parameters discarded '
                 'during type conversion to SNL')
        hist = []
        for h in self.history:
            snl_metadata = h.pop('_snl', {})
            hist.append({'name': snl_metadata.pop('name', 'pymatgen'),
                         'url': snl_metadata.pop('url',
                                                 'http://pypi.python.org/pypi/pymatgen'),
                         'description': h})
        return StructureNL(self.final_structure, authors, projects, references,
                           remarks, data, hist, created_at)

    @classmethod
    def from_snl(cls, snl):
        """
        Create TransformedStructure from SNL.

        Args:
            snl (StructureNL): Starting snl

        Returns:
            TransformedStructure
        """
        hist = []
        for h in snl.history:
            d = h.description
            d['_snl'] = {'url': h.url, 'name': h.name}
            hist.append(d)
        return cls(snl.structure, history=hist)
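A minimal usage sketch; the starting `structure` object and the transformation import are assumptions consistent with the pymatgen API of this era:

from pymatgen.transformations.standard_transformations import (
    SubstitutionTransformation)

ts = TransformedStructure(structure)  # `structure` is an assumed Structure
ts.append_transformation(SubstitutionTransformation({"Fe": "Mn"}))
ts.undo_last_change()   # restores the input structure of the last step
ts.redo_next_change()   # reapplies it
print(len(ts))          # number of history entries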
================================================================================
repo_name: openiitbombayx/edx-platform
ref: refs/heads/master
path: lms/djangoapps/lms_xblock/admin.py
copies: 173
================================================================================
"""
Django admin dashboard configuration for LMS XBlock infrastructure.
"""

from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig

admin.site.register(XBlockAsidesConfig, ConfigurationModelAdmin)

================================================================================
repo_name: cw0100/cwse
ref: refs/heads/master
path: nodejs/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
copies: 899
================================================================================
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys

generator_supports_multiple_toolsets = True

generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''


def CalculateVariables(default_variables, params):
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)


def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True


def GenerateOutput(target_list, target_dicts, data, params):
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  f = open(filename, 'w')
  json.dump(edges, f)
  f.close()
  print 'Wrote json to %s.' % filename
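The emitted dump.json is a flat adjacency map, so consuming it needs no gyp at all. An illustrative reader, in the same Python 2 vintage as the generator:

import json

with open('dump.json') as f:
    edges = json.load(f)

# edges maps fully qualified target names (roughly 'path/to/foo.gyp:target#toolset')
# to the list of targets they depend on
for target, deps in sorted(edges.items()):
    print '%s -> %d dependencies' % (target, len(deps))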
================================================================================
repo_name: lexyan/SickBeard
ref: refs/heads/master
path: lib/hachoir_core/field/timestamp.py
copies: 90
================================================================================
from lib.hachoir_core.tools import (humanDatetime, humanDuration,
    timestampUNIX, timestampMac32, timestampUUID60,
    timestampWin64, durationWin64)
from lib.hachoir_core.field import Bits, FieldSet
from datetime import datetime


class GenericTimestamp(Bits):
    def __init__(self, parent, name, size, description=None):
        Bits.__init__(self, parent, name, size, description)

    def createDisplay(self):
        return humanDatetime(self.value)

    def createRawDisplay(self):
        value = Bits.createValue(self)
        return unicode(value)

    def __nonzero__(self):
        return Bits.createValue(self) != 0


def timestampFactory(cls_name, handler, size):
    class Timestamp(GenericTimestamp):
        def __init__(self, parent, name, description=None):
            GenericTimestamp.__init__(self, parent, name, size, description)

        def createValue(self):
            value = Bits.createValue(self)
            return handler(value)
    cls = Timestamp
    cls.__name__ = cls_name
    return cls


TimestampUnix32 = timestampFactory("TimestampUnix32", timestampUNIX, 32)
TimestampUnix64 = timestampFactory("TimestampUnix64", timestampUNIX, 64)
TimestampMac32 = timestampFactory("TimestampMac32", timestampMac32, 32)
TimestampUUID60 = timestampFactory("TimestampUUID60", timestampUUID60, 60)
TimestampWin64 = timestampFactory("TimestampWin64", timestampWin64, 64)


class TimeDateMSDOS32(FieldSet):
    """
    32-bit MS-DOS timestamp (16-bit time, 16-bit date)
    """
    static_size = 32

    def createFields(self):
        # TODO: Create type "MSDOS_Second" : value*2
        yield Bits(self, "second", 5, "Second/2")
        yield Bits(self, "minute", 6)
        yield Bits(self, "hour", 5)

        yield Bits(self, "day", 5)
        yield Bits(self, "month", 4)
        # TODO: Create type "MSDOS_Year" : value+1980
        yield Bits(self, "year", 7, "Number of year after 1980")

    def createValue(self):
        return datetime(
            1980 + self["year"].value, self["month"].value, self["day"].value,
            self["hour"].value, self["minute"].value, 2 * self["second"].value)

    def createDisplay(self):
        return humanDatetime(self.value)


class DateTimeMSDOS32(TimeDateMSDOS32):
    """
    32-bit MS-DOS timestamp (16-bit date, 16-bit time)
    """
    def createFields(self):
        yield Bits(self, "day", 5)
        yield Bits(self, "month", 4)
        yield Bits(self, "year", 7, "Number of year after 1980")
        yield Bits(self, "second", 5, "Second/2")
        yield Bits(self, "minute", 6)
        yield Bits(self, "hour", 5)


class TimedeltaWin64(GenericTimestamp):
    def __init__(self, parent, name, description=None):
        GenericTimestamp.__init__(self, parent, name, 64, description)

    def createDisplay(self):
        return humanDuration(self.value)

    def createValue(self):
        value = Bits.createValue(self)
        return durationWin64(value)

================================================================================
repo_name: frouty/odoo_oph
ref: refs/heads/dev_70
path: addons/web_hello/__openerp__.py
copies: 69
================================================================================
{
    'name': 'Hello',
    'category': 'Hidden',
    'description': """
OpenERP Web example module.
===========================

""",
    'version': '2.0',
    'depends': [],
    'js': ['static/*/*.js', 'static/*/js/*.js'],
    'css': [],
    'auto_install': False,
    'web_preload': False,
}

================================================================================
repo_name: zdary/intellij-community
ref: refs/heads/master
path: python/testData/copyPaste/singleLine/Indent12.dst.py
copies: 664
================================================================================
class C:
    def foo(self):
        <caret> y = 2

================================================================================
repo_name: acabey/acabey.github.io
ref: refs/heads/master
path: projects/demos/engineering.purdue.edu/scriptingwithobjects/swocode/chap10/KeyError.py
copies: 1
================================================================================
#!/usr/local/bin/python

#  KeyError.py

x = {'a' : 1, 'b' : 2}                                 #(A)

print x['a']                  # 1                      #(B)
print x['b']                  # 2                      #(C)

print x['c']                  # KeyError: 'c'          #(D)
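The lookup at line (D) raises because 'c' is absent. The usual defensive variants, illustrative and in the same Python 2 style:

print x.get('c')          # None instead of raising
print x.get('c', 0)       # supply a default
if 'c' in x:              # test membership first
    print x['c']
try:
    print x['c']
except KeyError:
    print "no key 'c'"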
================================================================================
repo_name: psav/cfme_tests
ref: refs/heads/master
path: cfme/utils/net.py
copies: 2
================================================================================
from collections import defaultdict
import socket
import os
import re
from cfme.fixtures.pytest_store import store

from cfme.utils.log import logger

_ports = defaultdict(dict)
_dns_cache = {}
ip_address = re.compile(
    r"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"
    r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")


def random_port(tcp=True):
    """Get a random port number for making a socket

    Args:
        tcp: Return a TCP port number if True, UDP if False

    This may not be reliable at all due to an inherent race condition. This works
    by creating a socket on an ephemeral port, inspecting it to see what port was used,
    closing it, and returning that port number. In the time between closing the socket
    and opening a new one, it's possible for the OS to reopen that port for another purpose.

    In practical testing, this race condition did not result in a failure to (re)open the
    returned port number, making this solution squarely "good enough for now".
    """
    # Port 0 will allocate an ephemeral port
    socktype = socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM
    s = socket.socket(socket.AF_INET, socktype)
    s.bind(('', 0))
    addr, port = s.getsockname()
    s.close()
    return port


def my_ip_address(http=False):
    """Get the ip address of the host running tests using the service listed in cfme_data['ip_echo']

    The ip echo endpoint is expected to write the ip address to the socket and close the
    connection. See a working example of this in :py:func:`ip_echo_socket`.

    """
    # the pytest store does this work, it's included here for convenience
    return store.my_ip_address


def ip_echo_socket(port=32123):
    """A simple socket server, for use with :py:func:`my_ip_address`"""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('', port))
    s.listen(0)
    while True:
        conn, addr = s.accept()
        conn.sendall(addr[0])
        conn.close()


def net_check(port, addr=None, force=False):
    """Checks the availability of a port"""
    port = int(port)
    if not addr:
        addr = store.current_appliance.hostname
    if port not in _ports[addr] or force:
        # First try DNS resolution
        try:
            addr = socket.gethostbyname(addr)

            # Then try to connect to the port
            try:
                socket.create_connection((addr, port), timeout=10)
                _ports[addr][port] = True
            except socket.error:
                _ports[addr][port] = False
        except Exception:
            _ports[addr][port] = False
    return _ports[addr][port]


def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
    """Checks the availability of a port from outside using another machine (over SSH)"""
    from cfme.utils.ssh import SSHClient
    port = int(port)
    if not addr:
        addr = my_ip_address()
    if port not in _ports[addr] or force:
        if not machine_addr:
            machine_addr = store.current_appliance.hostname
        if not ssh_creds:
            ssh_client = store.current_appliance.ssh_client
        else:
            ssh_client = SSHClient(
                hostname=machine_addr,
                username=ssh_creds['username'],
                password=ssh_creds['password']
            )
        with ssh_client:
            # on exception => fails with return code 1
            cmd = '''python -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
            "''' % (addr, port)
            result = ssh_client.run_command(cmd)
            _ports[addr][port] = result.success
    return _ports[addr][port]


def resolve_hostname(hostname, force=False):
    """Cached DNS resolver. If the hostname does not resolve to an IP, returns None."""
    if hostname not in _dns_cache or force:
        try:
            _dns_cache[hostname] = socket.gethostbyname(hostname)
        except socket.gaierror:
            _dns_cache[hostname] = None
    return _dns_cache[hostname]


def resolve_ips(host_iterable, force_dns=False):
    """Takes a list of hostnames, IPs and other items. Any item that is not an IP is
    resolved if possible; on success both the original hostname and its IP are added to
    the result set, otherwise just the original hostname is added.
    """
    result = set([])
    for host in map(str, host_iterable):
        result.add(host)  # It is already an IP address
        if ip_address.match(host) is None:
            ip = resolve_hostname(host, force=force_dns)
            if ip is not None:
                result.add(ip)
    return result


def is_pingable(ip_addr):
    """Verifies whether the specified ip_addr is reachable.

    Args:
        ip_addr: ip address to verify with PING.
    Returns: True if ip_addr responds to ping, else False.
    """
    try:
        status = os.system("ping -c1 -w2 {}".format(ip_addr))
        if status == 0:
            logger.info('IP: %s is UP !', ip_addr)
            return True
        logger.info('IP: %s is DOWN !', ip_addr)
        return False
    except Exception as e:
        logger.exception(e)
        return False
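A usage sketch of the cached helpers above (the hostname is a placeholder):

# Results are memoized per (addr, port) in _ports; pass force=True to re-probe.
if net_check(443, addr='gateway.example.com'):
    logger.info('port 443 reachable')

ip = resolve_hostname('gateway.example.com')   # None if resolution fails
reachable = ip is not None and is_pingable(ip)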
================================================================================
repo_name: NewPresident1/kitsune
ref: refs/heads/master
path: kitsune/questions/tests/test_utils.py
copies: 16
================================================================================
from nose.tools import eq_

from kitsune.questions.models import Question, Answer
from kitsune.questions.tests import question, answer
from kitsune.questions.utils import (
    num_questions, num_answers, num_solutions, mark_content_as_spam)
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import user


class ContributionCountTestCase(TestCase):
    def test_num_questions(self):
        """Questions are counted correctly on a user."""
        u = user(save=True)
        eq_(num_questions(u), 0)

        q1 = question(creator=u, save=True)
        eq_(num_questions(u), 1)

        q2 = question(creator=u, save=True)
        eq_(num_questions(u), 2)

        q1.delete()
        eq_(num_questions(u), 1)

        q2.delete()
        eq_(num_questions(u), 0)

    def test_num_answers(self):
        u = user(save=True)
        q = question(save=True)
        eq_(num_answers(u), 0)

        a1 = answer(creator=u, question=q, save=True)
        eq_(num_answers(u), 1)

        a2 = answer(creator=u, question=q, save=True)
        eq_(num_answers(u), 2)

        a1.delete()
        eq_(num_answers(u), 1)

        a2.delete()
        eq_(num_answers(u), 0)

    def test_num_solutions(self):
        u = user(save=True)
        q1 = question(save=True)
        q2 = question(save=True)
        a1 = answer(creator=u, question=q1, save=True)
        a2 = answer(creator=u, question=q2, save=True)
        eq_(num_solutions(u), 0)

        q1.solution = a1
        q1.save()
        eq_(num_solutions(u), 1)

        q2.solution = a2
        q2.save()
        eq_(num_solutions(u), 2)

        q1.solution = None
        q1.save()
        eq_(num_solutions(u), 1)

        a2.delete()
        eq_(num_solutions(u), 0)


class FlagUserContentAsSpamTestCase(TestCase):

    def test_flag_content_as_spam(self):
        # Create some questions and answers by the user.
        u = user(save=True)
        question(creator=u, save=True)
        question(creator=u, save=True)
        answer(creator=u, save=True)
        answer(creator=u, save=True)
        answer(creator=u, save=True)

        # Verify they are not marked as spam yet.
        eq_(2, Question.objects.filter(is_spam=False, creator=u).count())
        eq_(0, Question.objects.filter(is_spam=True, creator=u).count())
        eq_(3, Answer.objects.filter(is_spam=False, creator=u).count())
        eq_(0, Answer.objects.filter(is_spam=True, creator=u).count())

        # Flag content as spam and verify it is updated.
        mark_content_as_spam(u, user(save=True))
        eq_(0, Question.objects.filter(is_spam=False, creator=u).count())
        eq_(2, Question.objects.filter(is_spam=True, creator=u).count())
        eq_(0, Answer.objects.filter(is_spam=False, creator=u).count())
        eq_(3, Answer.objects.filter(is_spam=True, creator=u).count())

================================================================================
repo_name: brainelectronics/towerdefense
ref: refs/heads/master
path: examples/pyglet/image/codecs/quicktime.py
copies: 43
================================================================================
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of pyglet nor the names of its
#    contributors may be used to endorse or promote products
#    derived from this software without specific prior written
#    permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------

'''
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id: pil.py 163 2006-11-13 04:15:46Z Alex.Holkner $'

import sys

from ctypes import *

from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *

from pyglet.window.carbon import carbon, quicktime, _oscheck
from pyglet.libs.darwin.constants import _name
from pyglet.libs.darwin.types import *

Handle = POINTER(POINTER(c_byte))

GWorldPtr = c_void_p
carbon.NewHandle.restype = Handle
HandleDataHandlerSubType = _name('hndl')
PointerDataHandlerSubType = _name('ptr ')
kDataHCanRead = 1
kDataRefExtensionFileName = _name('fnam')
kDataRefExtensionMIMEType = _name('mime')
ComponentInstance = c_void_p

k1MonochromePixelFormat = 0x00000001
k2IndexedPixelFormat = 0x00000002
k4IndexedPixelFormat = 0x00000004
k8IndexedPixelFormat = 0x00000008
k16BE555PixelFormat = 0x00000010
k24RGBPixelFormat = 0x00000018
k32ARGBPixelFormat = 0x00000020
k32BGRAPixelFormat = _name('BGRA')
k1IndexedGrayPixelFormat = 0x00000021
k2IndexedGrayPixelFormat = 0x00000022
k4IndexedGrayPixelFormat = 0x00000024
k8IndexedGrayPixelFormat = 0x00000028
kNativeEndianPixMap = 1 << 8

kGraphicsImporterDontDoGammaCorrection = 1 << 0
kGraphicsImporterDontUseColorMatching = 1 << 3

newMovieActive = 1
noErr = 0
movieTrackMediaType = 1 << 0
movieTrackCharacteristic = 1 << 1
movieTrackEnabledOnly = 1 << 2
VisualMediaCharacteristic = _name('eyes')
nextTimeMediaSample = 1


class PointerDataRefRecord(Structure):
    _fields_ = [
        ('data', c_void_p),
        ('dataLength', c_long)
    ]


def Str255(value):
    return create_string_buffer(chr(len(value)) + value)


class QuickTimeImageDecoder(ImageDecoder):
    def get_file_extensions(self):
        # Only most common ones shown here
        return ['.bmp', '.cur', '.gif', '.ico', '.jpg', '.jpeg', '.pcx', '.png',
                '.tga', '.tif', '.tiff', '.xbm', '.xpm']

    def get_animation_file_extensions(self):
        return ['.gif']

    def _get_data_ref(self, file, filename):
        self._data_hold = data = create_string_buffer(file.read())

        dataref = carbon.NewHandle(sizeof(PointerDataRefRecord))
        datarec = cast(dataref,
                       POINTER(POINTER(PointerDataRefRecord))).contents.contents
        datarec.data = addressof(data)
        datarec.dataLength = len(data)

        self._data_handler_holder = data_handler = ComponentInstance()
        r = quicktime.OpenADataHandler(dataref, PointerDataHandlerSubType,
                                       None, 0, None, kDataHCanRead,
                                       byref(data_handler))
        _oscheck(r)

        extension_handle = Handle()

        self._filename_hold = filename = Str255(filename)
        r = carbon.PtrToHand(filename, byref(extension_handle), len(filename))
        r = quicktime.DataHSetDataRefExtension(data_handler, extension_handle,
                                               kDataRefExtensionFileName)
        _oscheck(r)
        quicktime.DisposeHandle(extension_handle)

        quicktime.DisposeHandle(dataref)

        dataref = c_void_p()
        r = quicktime.DataHGetDataRef(data_handler, byref(dataref))
        _oscheck(r)

        quicktime.CloseComponent(data_handler)

        return dataref

    def _get_formats(self):
        # TODO choose 24 bit where appropriate.
        if sys.byteorder == 'big':
            format = 'ARGB'
            qtformat = k32ARGBPixelFormat
        else:
            format = 'BGRA'
            qtformat = k32BGRAPixelFormat
        return format, qtformat

    def decode(self, file, filename):
        dataref = self._get_data_ref(file, filename)
        importer = ComponentInstance()
        quicktime.GetGraphicsImporterForDataRef(dataref,
            PointerDataHandlerSubType, byref(importer))

        if not importer:
            raise ImageDecodeException(filename or file)

        rect = Rect()
        quicktime.GraphicsImportGetNaturalBounds(importer, byref(rect))
        width = rect.right
        height = rect.bottom

        format, qtformat = self._get_formats()

        buffer = (c_byte * (width * height * len(format)))()
        world = GWorldPtr()
        quicktime.QTNewGWorldFromPtr(byref(world), qtformat,
            byref(rect), c_void_p(), c_void_p(), 0, buffer,
            len(format) * width)

        flags = (kGraphicsImporterDontUseColorMatching |
                 kGraphicsImporterDontDoGammaCorrection)
        quicktime.GraphicsImportSetFlags(importer, flags)
        quicktime.GraphicsImportSetGWorld(importer, world, c_void_p())

        result = quicktime.GraphicsImportDraw(importer)
        quicktime.DisposeGWorld(world)
        quicktime.CloseComponent(importer)

        if result != 0:
            raise ImageDecodeException(filename or file)

        pitch = len(format) * width

        return ImageData(width, height, format, buffer, -pitch)

    def decode_animation(self, file, filename):
        # TODO: Stop playing chicken with the GC
        # TODO: Cleanup in errors

        quicktime.EnterMovies()

        data_ref = self._get_data_ref(file, filename)
        if not data_ref:
            raise ImageDecodeException(filename or file)

        movie = c_void_p()
        id = c_short()
        result = quicktime.NewMovieFromDataRef(byref(movie),
                                               newMovieActive,
                                               0,
                                               data_ref,
                                               PointerDataHandlerSubType)

        if not movie:
            # _oscheck(result)
            raise ImageDecodeException(filename or file)
        quicktime.GoToBeginningOfMovie(movie)
        time_scale = float(quicktime.GetMovieTimeScale(movie))

        format, qtformat = self._get_formats()

        # Get movie width and height
        rect = Rect()
        quicktime.GetMovieBox(movie, byref(rect))
        width = rect.right
        height = rect.bottom
        pitch = len(format) * width

        # Set gworld
        buffer = (c_byte * (width * height * len(format)))()
        world = GWorldPtr()
        quicktime.QTNewGWorldFromPtr(byref(world), qtformat,
            byref(rect), c_void_p(), c_void_p(), 0, buffer,
            len(format) * width)
        quicktime.SetGWorld(world, 0)
        quicktime.SetMovieGWorld(movie, world, 0)

        visual = quicktime.GetMovieIndTrackType(movie, 1,
                                                VisualMediaCharacteristic,
                                                movieTrackCharacteristic)
        if not visual:
            raise ImageDecodeException('No video track')

        time = 0

        interesting_time = c_int()
        quicktime.GetTrackNextInterestingTime(
            visual,
            nextTimeMediaSample,
            time,
            1,
            byref(interesting_time),
            None)
        duration = interesting_time.value / time_scale

        frames = []

        while time >= 0:
            result = quicktime.GetMoviesError()
            if result == noErr:
                # force redraw
                result = quicktime.UpdateMovie(movie)
            if result == noErr:
                # process movie
                quicktime.MoviesTask(movie, 0)
                result = quicktime.GetMoviesError()
            _oscheck(result)

            buffer_copy = (c_byte * len(buffer))()
            memmove(buffer_copy, buffer, len(buffer))
            image = ImageData(width, height, format, buffer_copy, -pitch)
            frames.append(AnimationFrame(image, duration))

            interesting_time = c_int()
            duration = c_int()
            quicktime.GetTrackNextInterestingTime(
                visual,
                nextTimeMediaSample,
                time,
                1,
                byref(interesting_time),
                byref(duration))

            quicktime.SetMovieTimeValue(movie, interesting_time)
            time = interesting_time.value
            duration = duration.value / time_scale
            if duration <= 0.01:
                duration = 0.1

        quicktime.DisposeMovie(movie)
        carbon.DisposeHandle(data_ref)

        quicktime.ExitMovies()

        return Animation(frames)


def get_decoders():
    return [QuickTimeImageDecoder()]


def get_encoders():
    return []
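The module-level get_decoders/get_encoders hooks are how pyglet's codec registry discovers this module. A sketch of querying the registry directly (the file name is hypothetical, and this decoder is only registered on Carbon/Mac builds):

from pyglet.image import codecs

for decoder in codecs.get_decoders('movie.gif'):
    print decoder  # QuickTimeImageDecoder would appear among these on Mac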
opacity="0.5"></path><path d="M12.5 7C16.6421 7 20 6.10457 20 5V11.5C20 12.6046 16.6421 13.5 12.5 13.5C8.35786 13.5 5 12.6046 5 11.5V5C5 6.10457 8.35786 7 12.5 7Z" fill="currentColor" opacity="0.5"></path><path d="M5.23628 12C5.08204 12.1598 5 12.8273 5 13C5 14.1046 8.35786 15 12.5 15C16.6421 15 20 14.1046 20 13C20 12.8273 19.918 12.1598 19.7637 12C18.9311 12.8626 15.9947 13.5 12.5 13.5C9.0053 13.5 6.06886 12.8626 5.23628 12Z" fill="currentColor"></path></svg> <span class="mr-2.5 font-semibold text-gray-400 group-hover:text-gray-500 max-sm:hidden">Datasets:</span></a> <hr class="mx-1.5 h-2 translate-y-px rounded-sm border-r dark:border-gray-600 sm:hidden"> <div class="group flex flex-none items-center"><div class="relative mr-1 flex items-center"> <span class="inline-block "><span class="contents"><a href="/thomwolf" class="text-gray-400 hover:text-blue-600"><img alt="" class="size-3.5 rounded-full flex-none" src="https://aifasthub.com/avatars/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg" crossorigin="anonymous"></a></span> </span></div> <span class="inline-block "><span class="contents"><a href="/thomwolf" class="text-gray-400 hover:text-blue-600">thomwolf</a></span> </span> <div class="mx-0.5 text-gray-300">/</div></div> <div class="max-w-full xl:flex xl:min-w-0 xl:flex-nowrap xl:items-center xl:gap-x-1"><a class="break-words font-mono font-semibold hover:text-blue-600 text-[1.07rem] xl:truncate" href="/datasets/thomwolf/github-python">github-python</a> <button class="text-xs mr-3 focus:outline-hidden inline-flex cursor-pointer items-center text-sm mx-0.5 text-gray-600 " title="Copy dataset name to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> </button></div> <div class="inline-flex items-center overflow-hidden whitespace-nowrap rounded-md border bg-white text-sm leading-none text-gray-500 mr-2"><button class="relative flex items-center overflow-hidden from-red-50 to-transparent dark:from-red-900 px-1.5 py-1 hover:bg-linear-to-t focus:outline-hidden" title="Like"><svg class="left-1.5 absolute" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" fill="currentColor"><path d="M22.45,6a5.47,5.47,0,0,1,3.91,1.64,5.7,5.7,0,0,1,0,8L16,26.13,5.64,15.64a5.7,5.7,0,0,1,0-8,5.48,5.48,0,0,1,7.82,0L16,10.24l2.53-2.58A5.44,5.44,0,0,1,22.45,6m0-2a7.47,7.47,0,0,0-5.34,2.24L16,7.36,14.89,6.24a7.49,7.49,0,0,0-10.68,0,7.72,7.72,0,0,0,0,10.82L16,29,27.79,17.06a7.72,7.72,0,0,0,0-10.82A7.49,7.49,0,0,0,22.45,4Z"></path></svg> <span class="ml-4 pl-0.5 ">like</span></button> <button class="focus:outline-hidden flex items-center border-l px-1.5 py-1 text-gray-400 hover:bg-gray-50 focus:bg-gray-100 dark:hover:bg-gray-900 dark:focus:bg-gray-800" title="See users who liked this repository">10</button></div> </h1> <div class="flex flex-col-reverse gap-x-2 sm:flex-row sm:items-center sm:justify-between xl:ml-auto"><div class="-mb-px flex h-12 items-center overflow-x-auto overflow-y-hidden "> <a 
class="tab-alternate" href="/datasets/thomwolf/github-python"><svg class="mr-1.5 text-gray-400 flex-none" style="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg> Dataset card </a><a class="tab-alternate active" href="/datasets/thomwolf/github-python/viewer/"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path fill="currentColor" d="M2.5 2h7a1 1 0 0 1 1 1v6a1 1 0 0 1-1 1h-7a1 1 0 0 1-1-1V3a1 1 0 0 1 1-1Zm0 2v2h3V4h-3Zm4 0v2h3V4h-3Zm-4 3v2h3V7h-3Zm4 0v2h3V7h-3Z"></path></svg> Data Studio </a><a class="tab-alternate" href="/datasets/thomwolf/github-python/tree/main"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-tertiary" d="M21 19h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0-4h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0-8h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0 4h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M9 19a1 1 0 0 1-1-1V6a1 1 0 0 1 2 0v12a1 1 0 0 1-1 1zm-6-4.333a1 1 0 0 1-.64-1.769L3.438 12l-1.078-.898a1 1 0 0 1 1.28-1.538l2 1.667a1 1 0 0 1 0 1.538l-2 1.667a.999.999 0 0 1-.64.231z" fill="currentColor"></path></svg> <span class="xl:hidden">Files</span> <span class="hidden xl:inline">Files and versions</span> <span class="inline-block "><span class="contents"><div slot="anchor" class="shadow-purple-500/10 ml-2 inline-flex -translate-y-px items-center gap-0.5 rounded-md border bg-white px-1 py-0.5 align-middle text-xs font-semibold leading-none text-gray-800 shadow-sm dark:border-gray-700 dark:bg-gradient-to-b dark:from-gray-925 dark:to-gray-925 dark:text-gray-300"><svg class="size-3 " xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path fill-rule="evenodd" clip-rule="evenodd" d="M6.14 3.64 5.1 4.92 2.98 2.28h2.06l1.1 1.36Zm0 4.72-1.1 1.36H2.98l2.13-2.64 1.03 1.28Zm4.9 1.36L8.03 6l3-3.72H8.96L5.97 6l3 3.72h2.06Z" fill="#7875FF"></path><path d="M4.24 6 2.6 8.03.97 6 2.6 3.97 4.24 6Z" fill="#FF7F41" opacity="1"></path></svg> <span>xet</span> </div></span> </span> </a><a class="tab-alternate" href="/datasets/thomwolf/github-python/discussions"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M20.6081 3C21.7684 3 22.8053 
3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path><path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path></svg> Community </a></div> </div></div></header> </div> <div class="flex flex-col w-full"> <div class="flex h-full flex-1"> <div class="flex flex-1 flex-col overflow-hidden " style="height: calc(100vh - 48px)"><div class="flex flex-col overflow-hidden h-full "> <div class="flex flex-1 flex-col overflow-hidden "><div class="flex flex-1 flex-col overflow-hidden"><div class="flex min-h-0 flex-1"><div class="flex 
flex-1 flex-col overflow-hidden"><div class="md:shadow-xs dark:border-gray-800 md:my-4 md:ml-4 md:rounded-lg md:border flex min-w-0 flex-wrap "><div class="flex min-w-0 flex-1 flex-wrap"><div class="grid flex-1 grid-cols-1 overflow-hidden text-sm md:grid-cols-2 md:place-content-center md:rounded-lg"><label class="relative block flex-1 px-3 py-2 hover:bg-gray-50 dark:border-gray-850 dark:hover:bg-gray-950 md:border-r md:border-r-0 hidden" title="default"><span class="text-gray-500">Subset (1)</span> <div class="flex items-center whitespace-nowrap"><span class="truncate">default</span> <span class="mx-2 text-gray-500">·</span> <span class="text-gray-500">285k rows</span> <svg class="ml-auto min-w-6 pl-2" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></div> <select class="absolute inset-0 z-10 w-full cursor-pointer border-0 bg-white text-base opacity-0"><optgroup label="Subset (1)"><option value="default" selected>default (285k rows)</option></optgroup></select></label> <label class="relative block flex-1 px-3 py-2 hover:bg-gray-50 dark:border-gray-850 dark:hover:bg-gray-900 md:border-r md:border-r" title="train"><div class="text-gray-500">Split (1)</div> <div class="flex items-center overflow-hidden whitespace-nowrap"><span class="truncate">train</span> <span class="mx-2 text-gray-500">·</span> <span class="text-gray-500">285k rows</span> <svg class="ml-auto min-w-6 pl-2" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></div> <select class="absolute inset-0 z-10 w-full cursor-pointer border-0 bg-white text-base opacity-0"><optgroup label="Split (1)"><option value="train" selected>train (285k rows)</option></optgroup></select></label></div></div> <div class="hidden flex-none flex-col items-center gap-0.5 border-l px-1 md:flex justify-end"> <span class="inline-block "><span class="contents"><div slot="anchor"><button class="group text-gray-500 hover:text-gray-700" aria-label="Hide sidepanel"><div class="rounded-xs flex size-4 items-center justify-center border border-gray-400 bg-gray-100 hover:border-gray-600 hover:bg-blue-50 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-700 dark:group-hover:border-gray-400"><div class="float-left h-full w-[65%]"></div> <div class="float-right h-full w-[35%] bg-gray-400 group-hover:bg-gray-600 dark:bg-gray-600 dark:group-hover:bg-gray-400"></div></div></button></div></span> </span> <div class="relative "> <button class="btn px-0.5 py-0.5 " type="button"> <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="p-0.5" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><circle cx="16" cy="7" r="3" fill="currentColor"></circle><circle cx="16" cy="16" r="3" fill="currentColor"></circle><circle cx="16" cy="25" r="3" fill="currentColor"></circle></svg> </button> </div></div></div> <div class="flex min-h-0 flex-1 flex-col border dark:border-gray-800 md:mb-4 md:ml-4 md:rounded-lg"> <div class="bg-linear-to-r text-smd relative flex items-center dark:border-gray-900 dark:bg-gray-950 false rounded-t-lg [&:has(:focus)]:from-gray-50 [&:has(:focus)]:to-transparent [&:has(:focus)]:to-20% dark:[&:has(:focus)]:from-gray-900"><form class="flex-1"><svg class="absolute left-3 top-1/2 transform -translate-y-1/2 pointer-events-none text-gray-400" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M30 28.59L22.45 21A11 11 0 1 0 21 22.45L28.59 30zM5 14a9 9 0 1 1 9 9a9 9 0 0 1-9-9z" fill="currentColor"></path></svg> <input disabled class="outline-hidden h-9 w-full border-none bg-transparent px-1 pl-9 pr-3 placeholder:text-gray-400 " placeholder="Search this dataset" dir="auto"></form> <div class="flex items-center gap-2 px-2 py-1"><button type="button" class="hover:bg-yellow-200/70 flex items-center gap-1 rounded-md border border-yellow-200 bg-yellow-100 pl-0.5 pr-1 text-[.8rem] leading-normal text-gray-700 dark:border-orange-500/25 dark:bg-orange-500/20 dark:text-gray-300 dark:hover:brightness-110 md:hidden"><div class="rounded-sm bg-yellow-300 px-1 font-mono text-[.7rem] font-bold text-black dark:bg-yellow-700 dark:text-gray-200">SQL </div> Console </button></div></div> <div class="flex flex-1 flex-col overflow-hidden min-h-64 flex w-full flex-col border-t md:rounded-b-lg md:shadow-lg"> <div class="flex-1 relative overflow-auto"><table class="w-full table-auto rounded-lg font-mono text-xs text-gray-900"><thead class="shadow-xs sticky left-0 right-0 top-0 z-1 bg-white align-top"><tr class="space-y-54 h-full min-w-fit divide-x border-b text-left"><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">repo_name <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="17.195149515867925" width="11.2" height="12.804850484132075" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="13.2" y="0" width="11.2" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="26.4" y="17.19149638046367" width="11.2" height="12.808503619536333" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="39.599999999999994" y="24.560188154800088" width="11.2" height="5.439811845199914" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="52.8" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" 
rx="2" x="79.19999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="92.39999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="105.6" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">5</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">100</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">ref <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 
before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="0" width="11.2" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="13.2" y="24.319983627122884" width="11.2" height="5.680016372877116" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="26.4" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="39.599999999999994" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="52.8" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="79.19999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="92.39999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="105.6" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">12</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">67</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">path <form class="flex 
flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="11.49854937236664" width="11.2" height="18.50145062763336" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="13.2" y="0" width="11.2" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="26.4" y="15.405505095420683" width="11.2" height="14.594494904579317" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="39.599999999999994" y="23.672012363138023" width="11.2" height="6.3279876368619785" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="52.8" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="79.19999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="92.39999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="105.6" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" 
fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">4</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">244</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">copies <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="0" width="14.5" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="16.5" y="6.296211071307166" width="14.5" height="23.703788928692834" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="33" y="13.893000026148576" width="14.5" height="16.106999973851423" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="49.5" y="24.352465374927" width="14.5" height="5.647534625072998" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="14.5" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="82.5" y="25" width="14.5" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="99" y="26" width="14.5" height="4" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="115.5" y="21.77170549730234" width="14.5" height="8.228294502697661" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="16.5" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 
cursor-pointer" x="15.5" y="0" width="16.5" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="32" y="0" width="16.5" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="48.5" y="0" width="16.5" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="16.5" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="81.5" y="0" width="16.5" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="98" y="0" width="16.5" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="114.5" y="0" width="16.5" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">1</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">8</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">content <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="0" width="10" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="12" y="25" width="10" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="24" y="25" width="10" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="36" y="25" width="10" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="48" y="25" width="10" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="60" y="25" width="10" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="72" y="25" width="10" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="84" y="25" width="10" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 
----------------------------------------------------------------------
Row 284600
repo_name: GunoH/intellij-community
ref: refs/heads/master
path: python/testData/formatter/indentAfterBackslash_after.py
copies: 79
content:

foo = bar \
    if bar is not None else None
class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284601"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">40223142/2015cad0623</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">static/Brython3.1.3-20150514-095342/Lib/multiprocessing/util.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">696</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import sys import functools import os import itertools import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from multiprocessing.process import current_process, active_children __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocessing' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocessing ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 logging.addLevelName(SUBDEBUG, 'SUBDEBUG') logging.addLevelName(SUBWARNING, 'SUBWARNING') # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # # Function returning a temp directory which will be removed on exit # def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up if current_process()._tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', 
tempdir) Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100) current_process()._tempdir = tempdir return current_process()._tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} _finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): assert exitpriority is None or type(exitpriority) is int if obj is not None: self._weakref = weakref.ref(obj, self) else: assert exitpriority is not None self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<Finalize object, dead>' x = '<Finalize object, callback=%s' % \ getattr(self._callback, '__name__', self._callback) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitprority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. 
return if minpriority is None: f = lambda p : p[0][0] is not None else: f = lambda p : p[0][0] is not None and p[0][0] >= minpriority items = [x for x in list(_finalizer_registry.items()) if f(x)] items.sort(reverse=True) for key, finalizer in items: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=active_children, current_process=current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p._daemonic: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._reset() register_after_fork(self, ForkAwareThreadLock._reset) def _reset(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284602"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">omtinez/micropython</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tools/gendoc.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">101</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Generate documentation for pyboard API from C files. 
""" import os import argparse import re import markdown # given a list of (name,regex) pairs, find the first one that matches the given line def re_match_first(regexs, line): for name, regex in regexs: match = re.match(regex, line) if match: return name, match return None, None def makedirs(d): if not os.path.isdir(d): os.makedirs(d) class Lexer: class LexerError(Exception): pass class EOF(Exception): pass class Break(Exception): pass def __init__(self, file): self.filename = file with open(file, 'rt') as f: line_num = 0 lines = [] for line in f: line_num += 1 line = line.strip() if line == '///': lines.append((line_num, '')) elif line.startswith('/// '): lines.append((line_num, line[4:])) elif len(lines) > 0 and lines[-1][1] is not None: lines.append((line_num, None)) if len(lines) > 0 and lines[-1][1] is not None: lines.append((line_num, None)) self.cur_line = 0 self.lines = lines def opt_break(self): if len(self.lines) > 0 and self.lines[0][1] is None: self.lines.pop(0) def next(self): if len(self.lines) == 0: raise Lexer.EOF else: l = self.lines.pop(0) self.cur_line = l[0] if l[1] is None: raise Lexer.Break else: return l[1] def error(self, msg): print('({}:{}) {}'.format(self.filename, self.cur_line, msg)) raise Lexer.LexerError class MarkdownWriter: def __init__(self): pass def start(self): self.lines = [] def end(self): return '\n'.join(self.lines) def heading(self, level, text): if len(self.lines) > 0: self.lines.append('') self.lines.append(level * '#' + ' ' + text) self.lines.append('') def para(self, text): if len(self.lines) > 0 and self.lines[-1] != '': self.lines.append('') if isinstance(text, list): self.lines.extend(text) elif isinstance(text, str): self.lines.append(text) else: assert False self.lines.append('') def single_line(self, text): self.lines.append(text) def module(self, name, short_descr, descr): self.heading(1, 'module {}'.format(name)) self.para(descr) def function(self, ctx, name, args, descr): proto = '{}.{}{}'.format(ctx, self.name, self.args) self.heading(3, '`' + proto + '`') self.para(descr) def method(self, ctx, name, args, descr): if name == '\\constructor': proto = '{}{}'.format(ctx, args) elif name == '\\call': proto = '{}{}'.format(ctx, args) else: proto = '{}.{}{}'.format(ctx, name, args) self.heading(3, '`' + proto + '`') self.para(descr) def constant(self, ctx, name, descr): self.single_line('`{}.{}` - {}'.format(ctx, name, descr)) class ReStructuredTextWriter: head_chars = {1:'=', 2:'-', 3:'.'} def __init__(self): pass def start(self): self.lines = [] def end(self): return '\n'.join(self.lines) def _convert(self, text): return text.replace('`', '``').replace('*', '\\*') def heading(self, level, text, convert=True): if len(self.lines) > 0: self.lines.append('') if convert: text = self._convert(text) self.lines.append(text) self.lines.append(len(text) * self.head_chars[level]) self.lines.append('') def para(self, text, indent=''): if len(self.lines) > 0 and self.lines[-1] != '': self.lines.append('') if isinstance(text, list): for t in text: self.lines.append(indent + self._convert(t)) elif isinstance(text, str): self.lines.append(indent + self._convert(text)) else: assert False self.lines.append('') def single_line(self, text): self.lines.append(self._convert(text)) def module(self, name, short_descr, descr): self.heading(1, ':mod:`{}` --- {}'.format(name, self._convert(short_descr)), convert=False) self.lines.append('.. 
module:: {}'.format(name)) self.lines.append(' :synopsis: {}'.format(short_descr)) self.para(descr) def function(self, ctx, name, args, descr): args = self._convert(args) self.lines.append('.. function:: ' + name + args) self.para(descr, indent=' ') def method(self, ctx, name, args, descr): args = self._convert(args) if name == '\\constructor': self.lines.append('.. class:: ' + ctx + args) elif name == '\\call': self.lines.append('.. method:: ' + ctx + args) else: self.lines.append('.. method:: ' + ctx + '.' + name + args) self.para(descr, indent=' ') def constant(self, ctx, name, descr): self.lines.append('.. data:: ' + name) self.para(descr, indent=' ') class DocValidateError(Exception): pass class DocItem: def __init__(self): self.doc = [] def add_doc(self, lex): try: while True: line = lex.next() if len(line) > 0 or len(self.doc) > 0: self.doc.append(line) except Lexer.Break: pass def dump(self, writer): writer.para(self.doc) class DocConstant(DocItem): def __init__(self, name, descr): super().__init__() self.name = name self.descr = descr def dump(self, ctx, writer): writer.constant(ctx, self.name, self.descr) class DocFunction(DocItem): def __init__(self, name, args): super().__init__() self.name = name self.args = args def dump(self, ctx, writer): writer.function(ctx, self.name, self.args, self.doc) class DocMethod(DocItem): def __init__(self, name, args): super().__init__() self.name = name self.args = args def dump(self, ctx, writer): writer.method(ctx, self.name, self.args, self.doc) class DocClass(DocItem): def __init__(self, name, descr): super().__init__() self.name = name self.descr = descr self.constructors = {} self.classmethods = {} self.methods = {} self.constants = {} def process_classmethod(self, lex, d): name = d['id'] if name == '\\constructor': dict_ = self.constructors else: dict_ = self.classmethods if name in dict_: lex.error("multiple definition of method '{}'".format(name)) method = dict_[name] = DocMethod(name, d['args']) method.add_doc(lex) def process_method(self, lex, d): name = d['id'] dict_ = self.methods if name in dict_: lex.error("multiple definition of method '{}'".format(name)) method = dict_[name] = DocMethod(name, d['args']) method.add_doc(lex) def process_constant(self, lex, d): name = d['id'] if name in self.constants: lex.error("multiple definition of constant '{}'".format(name)) self.constants[name] = DocConstant(name, d['descr']) lex.opt_break() def dump(self, writer): writer.heading(1, 'class {}'.format(self.name)) super().dump(writer) if len(self.constructors) > 0: writer.heading(2, 'Constructors') for f in sorted(self.constructors.values(), key=lambda x:x.name): f.dump(self.name, writer) if len(self.classmethods) > 0: writer.heading(2, 'Class methods') for f in sorted(self.classmethods.values(), key=lambda x:x.name): f.dump(self.name, writer) if len(self.methods) > 0: writer.heading(2, 'Methods') for f in sorted(self.methods.values(), key=lambda x:x.name): f.dump(self.name.lower(), writer) if len(self.constants) > 0: writer.heading(2, 'Constants') for c in sorted(self.constants.values(), key=lambda x:x.name): c.dump(self.name, writer) class DocModule(DocItem): def __init__(self, name, descr): super().__init__() self.name = name self.descr = descr self.functions = {} self.constants = {} self.classes = {} self.cur_class = None def new_file(self): self.cur_class = None def process_function(self, lex, d): name = d['id'] if name in self.functions: lex.error("multiple definition of function '{}'".format(name)) function = self.functions[name] = 
DocFunction(name, d['args']) function.add_doc(lex) #def process_classref(self, lex, d): # name = d['id'] # self.classes[name] = name # lex.opt_break() def process_class(self, lex, d): name = d['id'] if name in self.classes: lex.error("multiple definition of class '{}'".format(name)) self.cur_class = self.classes[name] = DocClass(name, d['descr']) self.cur_class.add_doc(lex) def process_classmethod(self, lex, d): self.cur_class.process_classmethod(lex, d) def process_method(self, lex, d): self.cur_class.process_method(lex, d) def process_constant(self, lex, d): if self.cur_class is None: # a module-level constant name = d['id'] if name in self.constants: lex.error("multiple definition of constant '{}'".format(name)) self.constants[name] = DocConstant(name, d['descr']) lex.opt_break() else: # a class-level constant self.cur_class.process_constant(lex, d) def validate(self): if self.descr is None: raise DocValidateError('module {} referenced but never defined'.format(self.name)) def dump(self, writer): writer.module(self.name, self.descr, self.doc) if self.functions: writer.heading(2, 'Functions') for f in sorted(self.functions.values(), key=lambda x:x.name): f.dump(self.name, writer) if self.constants: writer.heading(2, 'Constants') for c in sorted(self.constants.values(), key=lambda x:x.name): c.dump(self.name, writer) if self.classes: writer.heading(2, 'Classes') for c in sorted(self.classes.values(), key=lambda x:x.name): writer.para('[`{}.{}`]({}) - {}'.format(self.name, c.name, c.name, c.descr)) def write_html(self, dir): md_writer = MarkdownWriter() md_writer.start() self.dump(md_writer) with open(os.path.join(dir, 'index.html'), 'wt') as f: f.write(markdown.markdown(md_writer.end())) for c in self.classes.values(): class_dir = os.path.join(dir, c.name) makedirs(class_dir) md_writer.start() md_writer.para('part of the [{} module](./)'.format(self.name)) c.dump(md_writer) with open(os.path.join(class_dir, 'index.html'), 'wt') as f: f.write(markdown.markdown(md_writer.end())) def write_rst(self, dir): rst_writer = ReStructuredTextWriter() rst_writer.start() self.dump(rst_writer) with open(dir + '/' + self.name + '.rst', 'wt') as f: f.write(rst_writer.end()) for c in self.classes.values(): rst_writer.start() c.dump(rst_writer) with open(dir + '/' + self.name + '.' 
+ c.name + '.rst', 'wt') as f: f.write(rst_writer.end()) class Doc: def __init__(self): self.modules = {} self.cur_module = None def new_file(self): self.cur_module = None for m in self.modules.values(): m.new_file() def check_module(self, lex): if self.cur_module is None: lex.error('module not defined') def process_module(self, lex, d): name = d['id'] if name not in self.modules: self.modules[name] = DocModule(name, None) self.cur_module = self.modules[name] if self.cur_module.descr is not None: lex.error("multiple definition of module '{}'".format(name)) self.cur_module.descr = d['descr'] self.cur_module.add_doc(lex) def process_moduleref(self, lex, d): name = d['id'] if name not in self.modules: self.modules[name] = DocModule(name, None) self.cur_module = self.modules[name] lex.opt_break() def process_class(self, lex, d): self.check_module(lex) self.cur_module.process_class(lex, d) def process_function(self, lex, d): self.check_module(lex) self.cur_module.process_function(lex, d) def process_classmethod(self, lex, d): self.check_module(lex) self.cur_module.process_classmethod(lex, d) def process_method(self, lex, d): self.check_module(lex) self.cur_module.process_method(lex, d) def process_constant(self, lex, d): self.check_module(lex) self.cur_module.process_constant(lex, d) def validate(self): for m in self.modules.values(): m.validate() def dump(self, writer): writer.heading(1, 'Modules') writer.para('These are the Python modules that are implemented.') for m in sorted(self.modules.values(), key=lambda x:x.name): writer.para('[`{}`]({}/) - {}'.format(m.name, m.name, m.descr)) def write_html(self, dir): md_writer = MarkdownWriter() with open(os.path.join(dir, 'module', 'index.html'), 'wt') as f: md_writer.start() self.dump(md_writer) f.write(markdown.markdown(md_writer.end())) for m in self.modules.values(): mod_dir = os.path.join(dir, 'module', m.name) makedirs(mod_dir) m.write_html(mod_dir) def write_rst(self, dir): #with open(os.path.join(dir, 'module', 'index.html'), 'wt') as f: # f.write(markdown.markdown(self.dump())) for m in self.modules.values(): m.write_rst(dir) regex_descr = r'(?P<descr>.*)' doc_regexs = ( (Doc.process_module, re.compile(r'\\module (?P<id>[a-z][a-z0-9]*) - ' + regex_descr + r'$')), (Doc.process_moduleref, re.compile(r'\\moduleref (?P<id>[a-z]+)$')), (Doc.process_function, re.compile(r'\\function (?P<id>[a-z0-9_]+)(?P<args>\(.*\))$')), (Doc.process_classmethod, re.compile(r'\\classmethod (?P<id>\\?[a-z0-9_]+)(?P<args>\(.*\))$')), (Doc.process_method, re.compile(r'\\method (?P<id>\\?[a-z0-9_]+)(?P<args>\(.*\))$')), (Doc.process_constant, re.compile(r'\\constant (?P<id>[A-Za-z0-9_]+) - ' + regex_descr + r'$')), #(Doc.process_classref, re.compile(r'\\classref (?P<id>[A-Za-z0-9_]+)$')), (Doc.process_class, re.compile(r'\\class (?P<id>[A-Za-z0-9_]+) - ' + regex_descr + r'$')), ) def process_file(file, doc): lex = Lexer(file) doc.new_file() try: try: while True: line = lex.next() fun, match = re_match_first(doc_regexs, line) if fun == None: lex.error('unknown line format: {}'.format(line)) fun(doc, lex, match.groupdict()) except Lexer.Break: lex.error('unexpected break') except Lexer.EOF: pass except Lexer.LexerError: return False return True def main(): cmd_parser = argparse.ArgumentParser(description='Generate documentation for pyboard API from C files.') cmd_parser.add_argument('--outdir', metavar='<output dir>', default='gendoc-out', help='ouput directory') cmd_parser.add_argument('--format', default='html', help='output format: html or rst') 
    cmd_parser.add_argument('files', nargs='+', help='input files')
    args = cmd_parser.parse_args()

    doc = Doc()
    for file in args.files:
        print('processing', file)
        if not process_file(file, doc):
            return
    try:
        doc.validate()
    except DocValidateError as e:
        print(e)
    makedirs(args.outdir)
    if args.format == 'html':
        doc.write_html(args.outdir)
    elif args.format == 'rst':
        doc.write_rst(args.outdir)
    else:
        print('unknown format:', args.format)
        return
    print('written to', args.outdir)

if __name__ == "__main__":
    main()
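For reference, a hedged sketch of the markup these regexes recognise in the C sources; the `pyb` module name and description below are illustrative, not taken from this file:

import re

line = r'\module pyb - functions related to the pyboard'
regex = re.compile(r'\\module (?P<id>[a-z][a-z0-9]*) - (?P<descr>.*)$')
print(regex.match(line).groupdict())
# {'id': 'pyb', 'descr': 'functions related to the pyboard'}

Each matching line dispatches to the corresponding Doc.process_* method via re_match_first, which is what builds the module/class/function tree that dump() later renders.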
======================================================================
jayme-github/CouchPotatoServer @ refs/heads/master
libs/sqlalchemy/dialects/maxdb/base.py
======================================================================

# maxdb/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Support for the MaxDB database.

This dialect is *not* ported to SQLAlchemy 0.6 or 0.7.

This dialect is *not* tested on SQLAlchemy 0.6 or 0.7.

Overview
--------

The ``maxdb`` dialect is **experimental** and has only been tested on
7.6.03.007 and 7.6.00.037.  Of these, **only 7.6.03.007 will work** with
SQLAlchemy's ORM.  The earlier version has severe ``LEFT JOIN`` limitations
and will return incorrect results from even very simple ORM queries.

Only the native Python DB-API is currently supported.  ODBC driver support
is a future enhancement.

Connecting
----------

The username is case-sensitive.  If you usually connect to the
database with sqlcli and other tools in lower case, you likely need to
use upper case for DB-API.

Implementation Notes
--------------------

With the 7.6.00.37 driver and Python 2.5, it seems that all DB-API
generated exceptions are broken and can cause Python to crash.

For 'somecol.in_([])' to work, the IN operator's generation must be changed
to cast 'NULL' to a numeric, i.e. NUM(NULL).  The DB-API doesn't accept a
bind parameter there, so that particular generation must inline the NULL
value, which depends on [ticket:807].

The DB-API is very picky about where bind params may be used in queries.

Bind params for some functions (e.g. MOD) need type information supplied.
The dialect does not yet do this automatically.

Max will occasionally throw up 'bad sql, compile again' exceptions for
perfectly valid SQL.  The dialect does not currently handle these, more
research is needed.

MaxDB 7.5 and Sap DB <= 7.4 reportedly do not support schemas.  A very
slightly different version of this dialect would be required to support
those versions, and can easily be added if there is demand.  Some other
required components such as an Max-aware 'old oracle style' join compiler
(thetas with (+) outer indicators) are already done and available for
integration - email the devel list if you're interested in working on this.

Versions tested: 7.6.03.07 and 7.6.00.37, native Python DB-API

* MaxDB has severe limitations on OUTER JOINs, which are essential to ORM
  eager loading.  And rather than raise an error if a SELECT can't be
  serviced, the database simply returns incorrect results.

* Version 7.6.03.07 seems to JOIN properly, however the docs do not show
  the OUTER restrictions being lifted (as of this writing), and no
  changelog is available to confirm either.  If you are using a different
  server version and your tasks require the ORM or any semi-advanced SQL
  through the SQL layer, running the SQLAlchemy test suite against your
  database is HIGHLY recommended before you begin.

* Version 7.6.00.37 is LHS/RHS sensitive in `FROM lhs LEFT OUTER JOIN rhs
  ON lhs.col=rhs.col` vs `rhs.col=lhs.col`!

* Version 7.6.00.37 is confused by `SELECT DISTINCT col as alias FROM t
  ORDER BY col` - these aliased, DISTINCT, ordered queries need to be
  re-written to order by the alias name.

* Version 7.6.x supports creating a SAVEPOINT but not its RELEASE.

* MaxDB supports autoincrement-style columns (DEFAULT SERIAL) and
  independent sequences.  When including a DEFAULT SERIAL column in an
  insert, 0 needs to be inserted rather than NULL to generate a value.

* MaxDB supports ANSI and "old Oracle style" theta joins with (+) outer
  join indicators.

* The SQLAlchemy dialect is schema-aware and probably won't function
  correctly on server versions (pre-7.6?).  Support for schema-less server
  versions could be added if there's call.

* ORDER BY is not supported in subqueries.  LIMIT is not supported in
  subqueries.  In 7.6.00.37, TOP does work in subqueries, but without limit
  not so useful.  OFFSET does not work in 7.6 despite being in the docs.
  Row number tricks in WHERE via ROWNO may be possible but it only seems to
  allow less-than comparison!

* Version 7.6.03.07 can't LIMIT if a derived table is in FROM:
  `SELECT * FROM (SELECT * FROM a) LIMIT 2`

* MaxDB does not support sql's CAST and can only usefully cast two types.
  There isn't much implicit type conversion, so be precise when creating
  `PassiveDefaults` in DDL generation: `'3'` and `3` aren't the same.

sapdb.dbapi
^^^^^^^^^^^

* As of 2007-10-22 the Python 2.4 and 2.5 compatible versions of the
  DB-API are no longer available.  A forum posting at SAP states that the
  Python driver will be available again "in the future".  The last release
  from MySQL AB works if you can find it.

* sequence.NEXTVAL skips every other value!

* No rowcount for executemany()

* If an INSERT into a table with a DEFAULT SERIAL column inserts the
  results of a function `INSERT INTO t VALUES (LENGTH('foo'))`, the
  cursor won't have the serial id.  It needs to be manually yanked from
  tablename.CURRVAL.

* Super-duper picky about where bind params can be placed.  Not smart about
  converting Python types for some functions, such as `MOD(5, ?)`.

* LONG (text, binary) values in result sets are read-once.  The dialect
  uses a caching RowProxy when these types are present.

* Connection objects seem like they want to be either `close()`d or
  garbage collected, but not both.  There's a warning issued but it seems
  harmless.
"""
""" import datetime, itertools, re from sqlalchemy import exc, schema, sql, util, processors from sqlalchemy.sql import operators as sql_operators, expression as sql_expr from sqlalchemy.sql import compiler, visitors from sqlalchemy.engine import base as engine_base, default, reflection from sqlalchemy import types as sqltypes class _StringType(sqltypes.String): _type = None def __init__(self, length=None, encoding=None, **kw): super(_StringType, self).__init__(length=length, **kw) self.encoding = encoding def bind_processor(self, dialect): if self.encoding == 'unicode': return None else: def process(value): if isinstance(value, unicode): return value.encode(dialect.encoding) else: return value return process def result_processor(self, dialect, coltype): #XXX: this code is probably very slow and one should try (if at all # possible) to determine the correct code path on a per-connection # basis (ie, here in result_processor, instead of inside the processor # function itself) and probably also use a few generic # processors, or possibly per query (though there is no mechanism # for that yet). def process(value): while True: if value is None: return None elif isinstance(value, unicode): return value elif isinstance(value, str): if self.convert_unicode or dialect.convert_unicode: return value.decode(dialect.encoding) else: return value elif hasattr(value, 'read'): # some sort of LONG, snarf and retry value = value.read(value.remainingLength()) continue else: # unexpected type, return as-is return value return process class MaxString(_StringType): _type = 'VARCHAR' class MaxUnicode(_StringType): _type = 'VARCHAR' def __init__(self, length=None, **kw): kw['encoding'] = 'unicode' super(MaxUnicode, self).__init__(length=length, **kw) class MaxChar(_StringType): _type = 'CHAR' class MaxText(_StringType): _type = 'LONG' def __init__(self, length=None, **kw): super(MaxText, self).__init__(length, **kw) def get_col_spec(self): spec = 'LONG' if self.encoding is not None: spec = ' '.join((spec, self.encoding)) elif self.convert_unicode: spec = ' '.join((spec, 'UNICODE')) return spec class MaxNumeric(sqltypes.Numeric): """The FIXED (also NUMERIC, DECIMAL) data type.""" def __init__(self, precision=None, scale=None, **kw): kw.setdefault('asdecimal', True) super(MaxNumeric, self).__init__(scale=scale, precision=precision, **kw) def bind_processor(self, dialect): return None class MaxTimestamp(sqltypes.DateTime): def bind_processor(self, dialect): def process(value): if value is None: return None elif isinstance(value, basestring): return value elif dialect.datetimeformat == 'internal': ms = getattr(value, 'microsecond', 0) return value.strftime("%Y%m%d%H%M%S" + ("%06u" % ms)) elif dialect.datetimeformat == 'iso': ms = getattr(value, 'microsecond', 0) return value.strftime("%Y-%m-%d %H:%M:%S." + ("%06u" % ms)) else: raise exc.InvalidRequestError( "datetimeformat '%s' is not supported." % ( dialect.datetimeformat,)) return process def result_processor(self, dialect, coltype): if dialect.datetimeformat == 'internal': def process(value): if value is None: return None else: return datetime.datetime( *[int(v) for v in (value[0:4], value[4:6], value[6:8], value[8:10], value[10:12], value[12:14], value[14:])]) elif dialect.datetimeformat == 'iso': def process(value): if value is None: return None else: return datetime.datetime( *[int(v) for v in (value[0:4], value[5:7], value[8:10], value[11:13], value[14:16], value[17:19], value[20:])]) else: raise exc.InvalidRequestError( "datetimeformat '%s' is not supported." 
class MaxDate(sqltypes.Date):
    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            elif isinstance(value, basestring):
                return value
            elif dialect.datetimeformat == 'internal':
                return value.strftime("%Y%m%d")
            elif dialect.datetimeformat == 'iso':
                return value.strftime("%Y-%m-%d")
            else:
                raise exc.InvalidRequestError(
                    "datetimeformat '%s' is not supported." % (
                        dialect.datetimeformat,))
        return process

    def result_processor(self, dialect, coltype):
        if dialect.datetimeformat == 'internal':
            def process(value):
                if value is None:
                    return None
                else:
                    return datetime.date(int(value[0:4]), int(value[4:6]),
                                         int(value[6:8]))
        elif dialect.datetimeformat == 'iso':
            def process(value):
                if value is None:
                    return None
                else:
                    return datetime.date(int(value[0:4]), int(value[5:7]),
                                         int(value[8:10]))
        else:
            raise exc.InvalidRequestError(
                "datetimeformat '%s' is not supported." % dialect.datetimeformat)
        return process


class MaxTime(sqltypes.Time):
    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            elif isinstance(value, basestring):
                return value
            elif dialect.datetimeformat == 'internal':
                return value.strftime("%H%M%S")
            elif dialect.datetimeformat == 'iso':
                return value.strftime("%H-%M-%S")
            else:
                raise exc.InvalidRequestError(
                    "datetimeformat '%s' is not supported." % (
                        dialect.datetimeformat,))
        return process

    def result_processor(self, dialect, coltype):
        if dialect.datetimeformat == 'internal':
            def process(value):
                if value is None:
                    return None
                else:
                    return datetime.time(int(value[0:4]), int(value[4:6]),
                                         int(value[6:8]))
        elif dialect.datetimeformat == 'iso':
            def process(value):
                if value is None:
                    return None
                else:
                    return datetime.time(int(value[0:4]), int(value[5:7]),
                                         int(value[8:10]))
        else:
            raise exc.InvalidRequestError(
                "datetimeformat '%s' is not supported." % dialect.datetimeformat)
        return process
class MaxBlob(sqltypes.LargeBinary):
    def bind_processor(self, dialect):
        return processors.to_str

    def result_processor(self, dialect, coltype):
        def process(value):
            if value is None:
                return None
            else:
                return value.read(value.remainingLength())
        return process


class MaxDBTypeCompiler(compiler.GenericTypeCompiler):
    def _string_spec(self, string_spec, type_):
        if type_.length is None:
            spec = 'LONG'
        else:
            spec = '%s(%s)' % (string_spec, type_.length)

        if getattr(type_, 'encoding'):
            spec = ' '.join([spec, getattr(type_, 'encoding').upper()])
        return spec

    def visit_text(self, type_):
        spec = 'LONG'
        if getattr(type_, 'encoding', None):
            spec = ' '.join((spec, type_.encoding))
        elif type_.convert_unicode:
            spec = ' '.join((spec, 'UNICODE'))
        return spec

    def visit_char(self, type_):
        return self._string_spec("CHAR", type_)

    def visit_string(self, type_):
        return self._string_spec("VARCHAR", type_)

    def visit_large_binary(self, type_):
        return "LONG BYTE"

    def visit_numeric(self, type_):
        if type_.scale and type_.precision:
            return 'FIXED(%s, %s)' % (type_.precision, type_.scale)
        elif type_.precision:
            return 'FIXED(%s)' % type_.precision
        else:
            return 'INTEGER'

    def visit_BOOLEAN(self, type_):
        return "BOOLEAN"


colspecs = {
    sqltypes.Numeric: MaxNumeric,
    sqltypes.DateTime: MaxTimestamp,
    sqltypes.Date: MaxDate,
    sqltypes.Time: MaxTime,
    sqltypes.String: MaxString,
    sqltypes.Unicode: MaxUnicode,
    sqltypes.LargeBinary: MaxBlob,
    sqltypes.Text: MaxText,
    sqltypes.CHAR: MaxChar,
    sqltypes.TIMESTAMP: MaxTimestamp,
    sqltypes.BLOB: MaxBlob,
}

ischema_names = {
    'boolean': sqltypes.BOOLEAN,
    'char': sqltypes.CHAR,
    'character': sqltypes.CHAR,
    'date': sqltypes.DATE,
    'fixed': sqltypes.Numeric,
    'float': sqltypes.FLOAT,
    'int': sqltypes.INT,
    'integer': sqltypes.INT,
    'long binary': sqltypes.BLOB,
    'long unicode': sqltypes.Text,
    'long': sqltypes.Text,
    'smallint': sqltypes.SmallInteger,
    'time': sqltypes.Time,
    'timestamp': sqltypes.TIMESTAMP,
    'varchar': sqltypes.VARCHAR,
}


# TODO: migrate this to sapdb.py
class MaxDBExecutionContext(default.DefaultExecutionContext):
    def post_exec(self):
        # DB-API bug: if there were any functions as values,
        # then do another select and pull CURRVAL from the
        # autoincrement column's implicit sequence... ugh
        if self.compiled.isinsert and not self.executemany:
            table = self.compiled.statement.table
            index, serial_col = _autoserial_column(table)

            if serial_col and (not self.compiled._safeserial or
                               not self._last_inserted_ids or
                               self._last_inserted_ids[index] in (None, 0)):
                if table.schema:
                    sql = "SELECT %s.CURRVAL FROM DUAL" % (
                        self.compiled.preparer.format_table(table))
                else:
                    sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % (
                        self.compiled.preparer.format_table(table))

                rs = self.cursor.execute(sql)
                id = rs.fetchone()[0]

                if not self._last_inserted_ids:
                    # This shouldn't ever be > 1?  Right?
                    self._last_inserted_ids = \
                        [None] * len(table.primary_key.columns)
                self._last_inserted_ids[index] = id

        super(MaxDBExecutionContext, self).post_exec()

    def get_result_proxy(self):
        if self.cursor.description is not None:
            for column in self.cursor.description:
                if column[1] in ('Long Binary', 'Long', 'Long Unicode'):
                    return MaxDBResultProxy(self)
        return engine_base.ResultProxy(self)

    @property
    def rowcount(self):
        if hasattr(self, '_rowcount'):
            return self._rowcount
        else:
            return self.cursor.rowcount

    def fire_sequence(self, seq):
        if seq.optional:
            return None
        return self._execute_scalar("SELECT %s.NEXTVAL FROM DUAL" % (
            self.dialect.identifier_preparer.format_sequence(seq)))


class MaxDBCachedColumnRow(engine_base.RowProxy):
    """A RowProxy that only runs result_processors once per column."""

    def __init__(self, parent, row):
        super(MaxDBCachedColumnRow, self).__init__(parent, row)
        self.columns = {}
        self._row = row
        self._parent = parent

    def _get_col(self, key):
        if key not in self.columns:
            self.columns[key] = self._parent._get_col(self._row, key)
        return self.columns[key]

    def __iter__(self):
        for i in xrange(len(self._row)):
            yield self._get_col(i)

    def __repr__(self):
        return repr(list(self))

    def __eq__(self, other):
        return ((other is self) or
                (other == tuple([self._get_col(key)
                                 for key in xrange(len(self._row))])))

    def __getitem__(self, key):
        if isinstance(key, slice):
            indices = key.indices(len(self._row))
            return tuple([self._get_col(i) for i in xrange(*indices)])
        else:
            return self._get_col(key)

    def __getattr__(self, name):
        try:
            return self._get_col(name)
        except KeyError:
            raise AttributeError(name)


class MaxDBResultProxy(engine_base.ResultProxy):
    _process_row = MaxDBCachedColumnRow


class MaxDBCompiler(compiler.SQLCompiler):

    function_conversion = {
        'CURRENT_DATE': 'DATE',
        'CURRENT_TIME': 'TIME',
        'CURRENT_TIMESTAMP': 'TIMESTAMP',
    }

    # These functions must be written without parens when called with no
    # parameters.  e.g. 'SELECT DATE FROM DUAL' not 'SELECT DATE() FROM DUAL'
    bare_functions = set([
        'CURRENT_SCHEMA', 'DATE', 'FALSE', 'SYSDBA', 'TIME', 'TIMESTAMP',
        'TIMEZONE', 'TRANSACTION', 'TRUE', 'USER', 'UID', 'USERGROUP',
        'UTCDATE', 'UTCDIFF'])

    def visit_mod(self, binary, **kw):
        return "mod(%s, %s)" % \
            (self.process(binary.left), self.process(binary.right))

    def default_from(self):
        return ' FROM DUAL'

    def for_update_clause(self, select):
        clause = select.for_update
        if clause is True:
            return " WITH LOCK EXCLUSIVE"
        elif clause is None:
            return ""
        elif clause == "read":
            return " WITH LOCK"
        elif clause == "ignore":
            return " WITH LOCK (IGNORE) EXCLUSIVE"
        elif clause == "nowait":
            return " WITH LOCK (NOWAIT) EXCLUSIVE"
        elif isinstance(clause, basestring):
            return " WITH LOCK %s" % clause.upper()
        elif not clause:
            return ""
        else:
            return " WITH LOCK EXCLUSIVE"

    def function_argspec(self, fn, **kw):
        if fn.name.upper() in self.bare_functions:
            return ""
        elif len(fn.clauses) > 0:
            return compiler.SQLCompiler.function_argspec(self, fn, **kw)
        else:
            return ""

    def visit_function(self, fn, **kw):
        transform = self.function_conversion.get(fn.name.upper(), None)
        if transform:
            fn = fn._clone()
            fn.name = transform
        return super(MaxDBCompiler, self).visit_function(fn, **kw)

    def visit_cast(self, cast, **kwargs):
        # MaxDB only supports casts * to NUMERIC, * to VARCHAR or
        # date/time to VARCHAR.  Casts of LONGs will fail.
        if isinstance(cast.type, (sqltypes.Integer, sqltypes.Numeric)):
            return "NUM(%s)" % self.process(cast.clause)
        elif isinstance(cast.type, sqltypes.String):
            return "CHR(%s)" % self.process(cast.clause)
        else:
            return self.process(cast.clause)

    def visit_sequence(self, sequence):
        if sequence.optional:
            return None
        else:
            return (
                self.dialect.identifier_preparer.format_sequence(sequence) +
                ".NEXTVAL")

    class ColumnSnagger(visitors.ClauseVisitor):
        def __init__(self):
            self.count = 0
            self.column = None

        def visit_column(self, column):
            self.column = column
            self.count += 1

    def _find_labeled_columns(self, columns, use_labels=False):
        labels = {}
        for column in columns:
            if isinstance(column, basestring):
                continue
            snagger = self.ColumnSnagger()
            snagger.traverse(column)
            if snagger.count == 1:
                if isinstance(column, sql_expr._Label):
                    labels[unicode(snagger.column)] = column.name
                elif use_labels:
                    labels[unicode(snagger.column)] = column._label
        return labels

    def order_by_clause(self, select, **kw):
        order_by = self.process(select._order_by_clause, **kw)

        # ORDER BY clauses in DISTINCT queries must reference aliased
        # inner columns by alias name, not true column name.
        if order_by and getattr(select, '_distinct', False):
            labels = self._find_labeled_columns(select.inner_columns,
                                                select.use_labels)
            if labels:
                for needs_alias in labels.keys():
                    r = re.compile(r'(^| )(%s)(,| |$)' %
                                   re.escape(needs_alias))
                    order_by = r.sub((r'\1%s\3' % labels[needs_alias]),
                                     order_by)

        # No ORDER BY in subqueries.
        if order_by:
            if self.is_subquery():
                # It's safe to simply drop the ORDER BY if there is no
                # LIMIT.  Right?  Other dialects seem to get away with
                # dropping order.
                if select._limit:
                    raise exc.CompileError(
                        "MaxDB does not support ORDER BY in subqueries")
                else:
                    return ""
            return " ORDER BY " + order_by
        else:
            return ""

    def get_select_precolumns(self, select):
        # Convert a subquery's LIMIT to TOP
        sql = select._distinct and 'DISTINCT ' or ''
        if self.is_subquery() and select._limit:
            if select._offset:
                raise exc.InvalidRequestError(
                    'MaxDB does not support LIMIT with an offset.')
            sql += 'TOP %s ' % select._limit
        return sql

    def limit_clause(self, select):
        # The docs say offsets are supported with LIMIT.  But they're not.
        # TODO: maybe emulate by adding a ROWNO/ROWNUM predicate?
        # TODO: does MaxDB support bind params for LIMIT / TOP ?
        if self.is_subquery():
            # sub queries need TOP
            return ''
        elif select._offset:
            raise exc.InvalidRequestError(
                'MaxDB does not support LIMIT with an offset.')
        else:
            return ' \n LIMIT %s' % (select._limit,)

    def visit_insert(self, insert):
        self.isinsert = True
        self._safeserial = True

        colparams = self._get_colparams(insert)
        for value in (insert.parameters or {}).itervalues():
            if isinstance(value, sql_expr.Function):
                self._safeserial = False
                break

        return ''.join(('INSERT INTO ',
                        self.preparer.format_table(insert.table),
                        ' (',
                        ', '.join([self.preparer.format_column(c[0])
                                   for c in colparams]),
                        ') VALUES (',
                        ', '.join([c[1] for c in colparams]),
                        ')'))


class MaxDBIdentifierPreparer(compiler.IdentifierPreparer):
    reserved_words = set([
        'abs', 'absolute', 'acos', 'adddate', 'addtime', 'all', 'alpha', 'alter', 'any',
        'ascii', 'asin', 'atan', 'atan2', 'avg', 'binary', 'bit', 'boolean', 'byte',
        'case', 'ceil', 'ceiling', 'char', 'character', 'check', 'chr', 'column',
        'concat', 'constraint', 'cos', 'cosh', 'cot', 'count', 'cross', 'curdate',
        'current', 'curtime', 'database', 'date', 'datediff', 'day', 'dayname',
        'dayofmonth', 'dayofweek', 'dayofyear', 'dec', 'decimal', 'decode', 'default',
        'degrees', 'delete', 'digits', 'distinct', 'double', 'except', 'exists', 'exp',
        'expand', 'first', 'fixed', 'float', 'floor', 'for', 'from', 'full',
        'get_objectname', 'get_schema', 'graphic', 'greatest', 'group', 'having', 'hex',
        'hextoraw', 'hour', 'ifnull', 'ignore', 'index', 'initcap', 'inner', 'insert',
        'int', 'integer', 'internal', 'intersect', 'into', 'join', 'key', 'last',
        'lcase', 'least', 'left', 'length', 'lfill', 'list', 'ln', 'locate', 'log',
        'log10', 'long', 'longfile', 'lower', 'lpad', 'ltrim', 'makedate', 'maketime',
        'mapchar', 'max', 'mbcs', 'microsecond', 'min', 'minute', 'mod', 'month',
        'monthname', 'natural', 'nchar', 'next', 'no', 'noround', 'not', 'now', 'null',
        'num', 'numeric', 'object', 'of', 'on', 'order', 'packed', 'pi', 'power',
        'prev', 'primary', 'radians', 'real', 'reject', 'relative', 'replace', 'rfill',
        'right', 'round', 'rowid', 'rowno', 'rpad', 'rtrim', 'second', 'select',
        'selupd', 'serial', 'set', 'show', 'sign', 'sin', 'sinh', 'smallint', 'some',
        'soundex', 'space', 'sqrt', 'stamp', 'statistics', 'stddev', 'subdate',
        'substr', 'substring', 'subtime', 'sum', 'sysdba', 'table', 'tan', 'tanh',
        'time', 'timediff', 'timestamp', 'timezone', 'to', 'toidentifier',
        'transaction', 'translate', 'trim', 'trunc', 'truncate', 'ucase', 'uid',
        'unicode', 'union', 'update', 'upper', 'user', 'usergroup', 'using', 'utcdate',
        'utcdiff', 'value', 'values', 'varchar', 'vargraphic', 'variance', 'week',
        'weekofyear', 'when', 'where', 'with', 'year', 'zoned'
    ])

    def _normalize_name(self, name):
        if name is None:
            return None
        if name.isupper():
            lc_name = name.lower()
            if not self._requires_quotes(lc_name):
                return lc_name
        return name

    def _denormalize_name(self, name):
        if name is None:
            return None
        elif (name.islower() and
              not self._requires_quotes(name)):
            return name.upper()
        else:
            return name

    def _maybe_quote_identifier(self, name):
        if self._requires_quotes(name):
            return self.quote_identifier(name)
        else:
            return name


class MaxDBDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kw):
        colspec = [self.preparer.format_column(column),
                   self.dialect.type_compiler.process(column.type)]

        if not column.nullable:
            colspec.append('NOT NULL')

        default = column.default
        default_str = self.get_column_default_string(column)

        # No DDL default for columns specified with non-optional sequence-
        # this defaulting behavior is entirely client-side. (And as a
        # consequence, non-reflectable.)
        if (default and isinstance(default, schema.Sequence) and
                not default.optional):
            pass
        # Regular default
        elif default_str is not None:
            colspec.append('DEFAULT %s' % default_str)
        # Assign DEFAULT SERIAL heuristically
        elif column.primary_key and column.autoincrement:
            # For SERIAL on a non-primary key member, use
            # DefaultClause(text('SERIAL'))
            try:
                first = [c for c in column.table.primary_key.columns
                         if (c.autoincrement and
                             (isinstance(c.type, sqltypes.Integer) or
                              (isinstance(c.type, MaxNumeric) and
                               c.type.precision)) and
                             not c.foreign_keys)].pop(0)
                if column is first:
                    colspec.append('DEFAULT SERIAL')
            except IndexError:
                pass
        return ' '.join(colspec)

    def get_column_default_string(self, column):
        if isinstance(column.server_default, schema.DefaultClause):
            if isinstance(column.default.arg, basestring):
                if isinstance(column.type, sqltypes.Integer):
                    return str(column.default.arg)
                else:
                    return "'%s'" % column.default.arg
            else:
                return unicode(self._compile(column.default.arg, None))
        else:
            return None

    def visit_create_sequence(self, create):
        """Creates a SEQUENCE.

        TODO: move to module doc?

        start
          With an integer value, set the START WITH option.

        increment
          An integer value to increment by.  Default is the database default.

        maxdb_minvalue
        maxdb_maxvalue
          With an integer value, sets the corresponding sequence option.

        maxdb_no_minvalue
        maxdb_no_maxvalue
          Defaults to False.  If true, sets the corresponding sequence option.

        maxdb_cycle
          Defaults to False.  If true, sets the CYCLE option.

        maxdb_cache
          With an integer value, sets the CACHE option.

        maxdb_no_cache
          Defaults to False.  If true, sets NOCACHE.
        """
        sequence = create.element

        if (not sequence.optional and
                (not self.checkfirst or
                 not self.dialect.has_sequence(self.connection, sequence.name))):

            ddl = ['CREATE SEQUENCE',
                   self.preparer.format_sequence(sequence)]

            sequence.increment = 1

            if sequence.increment is not None:
                ddl.extend(('INCREMENT BY', str(sequence.increment)))

            if sequence.start is not None:
                ddl.extend(('START WITH', str(sequence.start)))

            opts = dict([(pair[0][6:].lower(), pair[1])
                         for pair in sequence.kwargs.items()
                         if pair[0].startswith('maxdb_')])

            if 'maxvalue' in opts:
                ddl.extend(('MAXVALUE', str(opts['maxvalue'])))
            elif opts.get('no_maxvalue', False):
                ddl.append('NOMAXVALUE')

            if 'minvalue' in opts:
                ddl.extend(('MINVALUE', str(opts['minvalue'])))
            elif opts.get('no_minvalue', False):
                ddl.append('NOMINVALUE')

            if opts.get('cycle', False):
                ddl.append('CYCLE')

            if 'cache' in opts:
                ddl.extend(('CACHE', str(opts['cache'])))
            elif opts.get('no_cache', False):
                ddl.append('NOCACHE')

            return ' '.join(ddl)
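A rough illustration of how the maxdb_* keyword options in the docstring above map to DDL. This is a sketch only: it assumes the Sequence construct of this SQLAlchemy era stashes unrecognised keyword arguments in .kwargs (which is what visit_create_sequence reads), and the sequence name and numbers are invented:

from sqlalchemy.schema import Sequence

seq = Sequence('invoice_seq', start=100,
               maxdb_minvalue=1, maxdb_cycle=True, maxdb_cache=20)

# Expected output of visit_create_sequence for this sequence (note the
# hard-coded `sequence.increment = 1` above, so INCREMENT BY is always 1):
#   CREATE SEQUENCE invoice_seq INCREMENT BY 1 START WITH 100 MINVALUE 1 CYCLE CACHE 20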
class MaxDBDialect(default.DefaultDialect):
    name = 'maxdb'
    supports_alter = True
    supports_unicode_statements = True
    max_identifier_length = 32
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False

    preparer = MaxDBIdentifierPreparer
    statement_compiler = MaxDBCompiler
    ddl_compiler = MaxDBDDLCompiler
    execution_ctx_cls = MaxDBExecutionContext

    ported_sqla_06 = False

    colspecs = colspecs
    ischema_names = ischema_names

    # MaxDB-specific
    datetimeformat = 'internal'

    def __init__(self, _raise_known_sql_errors=False, **kw):
        super(MaxDBDialect, self).__init__(**kw)
        self._raise_known = _raise_known_sql_errors

        if self.dbapi is None:
            self.dbapi_type_map = {}
        else:
            self.dbapi_type_map = {
                'Long Binary': MaxBlob(),
                'Long byte_t': MaxBlob(),
                'Long Unicode': MaxText(),
                'Timestamp': MaxTimestamp(),
                'Date': MaxDate(),
                'Time': MaxTime(),
                datetime.datetime: MaxTimestamp(),
                datetime.date: MaxDate(),
                datetime.time: MaxTime(),
            }

    def do_execute(self, cursor, statement, parameters, context=None):
        res = cursor.execute(statement, parameters)
        if isinstance(res, int) and context is not None:
            context._rowcount = res

    def do_release_savepoint(self, connection, name):
        # Does MaxDB truly support RELEASE SAVEPOINT <id>?  All my attempts
        # produce "SUBTRANS COMMIT/ROLLBACK not allowed without SUBTRANS
        # BEGIN SQLSTATE: I7065"
        #
        # Note that ROLLBACK TO works fine.  In theory, a RELEASE should
        # just free up some transactional resources early, before the overall
        # COMMIT/ROLLBACK so omitting it should be relatively ok.
        pass

    def _get_default_schema_name(self, connection):
        return self.identifier_preparer._normalize_name(
            connection.execute(
                'SELECT CURRENT_SCHEMA FROM DUAL').scalar())

    def has_table(self, connection, table_name, schema=None):
        denormalize = self.identifier_preparer._denormalize_name
        bind = [denormalize(table_name)]
        if schema is None:
            sql = ("SELECT tablename FROM TABLES "
                   "WHERE TABLES.TABLENAME=? AND"
                   " TABLES.SCHEMANAME=CURRENT_SCHEMA ")
        else:
            sql = ("SELECT tablename FROM TABLES "
                   "WHERE TABLES.TABLENAME = ? AND"
                   " TABLES.SCHEMANAME=? ")
            bind.append(denormalize(schema))

        rp = connection.execute(sql, bind)
        return bool(rp.first())

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        if schema is None:
            sql = (" SELECT TABLENAME FROM TABLES WHERE "
                   " SCHEMANAME=CURRENT_SCHEMA ")
            rs = connection.execute(sql)
        else:
            sql = (" SELECT TABLENAME FROM TABLES WHERE "
                   " SCHEMANAME=? ")
            matchname = self.identifier_preparer._denormalize_name(schema)
            rs = connection.execute(sql, matchname)
        normalize = self.identifier_preparer._normalize_name
        return [normalize(row[0]) for row in rs]

    def reflecttable(self, connection, table, include_columns):
        denormalize = self.identifier_preparer._denormalize_name
        normalize = self.identifier_preparer._normalize_name

        st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, '
              '  NULLABLE, "DEFAULT", DEFAULTFUNCTION '
              'FROM COLUMNS '
              'WHERE TABLENAME=? AND SCHEMANAME=%s '
              'ORDER BY POS')

        fk = ('SELECT COLUMNNAME, FKEYNAME, '
              '  REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, '
              '  (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA '
              '   THEN 1 ELSE 0 END) AS in_schema '
              'FROM FOREIGNKEYCOLUMNS '
              'WHERE TABLENAME=? AND SCHEMANAME=%s '
              'ORDER BY FKEYNAME ')

        params = [denormalize(table.name)]
        if not table.schema:
            st = st % 'CURRENT_SCHEMA'
            fk = fk % 'CURRENT_SCHEMA'
        else:
            st = st % '?'
            fk = fk % '?'
            params.append(denormalize(table.schema))

        rows = connection.execute(st, params).fetchall()
        if not rows:
            raise exc.NoSuchTableError(table.fullname)

        include_columns = set(include_columns or [])

        for row in rows:
            (name, mode, col_type, encoding, length, scale,
             nullable, constant_def, func_def) = row

            name = normalize(name)

            if include_columns and name not in include_columns:
                continue

            type_args, type_kw = [], {}
            if col_type == 'FIXED':
                type_args = length, scale
                # Convert FIXED(10) DEFAULT SERIAL to our Integer
                if (scale == 0 and func_def is not None and
                        func_def.startswith('SERIAL')):
                    col_type = 'INTEGER'
                    type_args = length,
            elif col_type in 'FLOAT':
                type_args = length,
            elif col_type in ('CHAR', 'VARCHAR'):
                type_args = length,
                type_kw['encoding'] = encoding
            elif col_type == 'LONG':
                type_kw['encoding'] = encoding

            try:
                type_cls = ischema_names[col_type.lower()]
                type_instance = type_cls(*type_args, **type_kw)
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (col_type, name))
                type_instance = sqltypes.NullType

            col_kw = {'autoincrement': False}
            col_kw['nullable'] = (nullable == 'YES')
            col_kw['primary_key'] = (mode == 'KEY')

            if func_def is not None:
                if func_def.startswith('SERIAL'):
                    if col_kw['primary_key']:
                        # No special default- let the standard autoincrement
                        # support handle SERIAL pk columns.
                        col_kw['autoincrement'] = True
                    else:
                        # strip current numbering
                        col_kw['server_default'] = schema.DefaultClause(
                            sql.text('SERIAL'))
                        col_kw['autoincrement'] = True
                else:
                    col_kw['server_default'] = schema.DefaultClause(
                        sql.text(func_def))
            elif constant_def is not None:
                col_kw['server_default'] = schema.DefaultClause(sql.text(
                    "'%s'" % constant_def.replace("'", "''")))

            table.append_column(schema.Column(name, type_instance, **col_kw))

        fk_sets = itertools.groupby(connection.execute(fk, params),
                                    lambda row: row.FKEYNAME)
        for fkeyname, fkey in fk_sets:
            fkey = list(fkey)
            if include_columns:
                key_cols = set([r.COLUMNNAME for r in fkey])
                if key_cols != include_columns:
                    continue

            columns, referants = [], []
            quote = self.identifier_preparer._maybe_quote_identifier

            for row in fkey:
                columns.append(normalize(row.COLUMNNAME))
                if table.schema or not row.in_schema:
                    referants.append('.'.join(
                        [quote(normalize(row[c]))
                         for c in ('REFSCHEMANAME', 'REFTABLENAME',
                                   'REFCOLUMNNAME')]))
                else:
                    referants.append('.'.join(
                        [quote(normalize(row[c]))
                         for c in ('REFTABLENAME', 'REFCOLUMNNAME')]))

            constraint_kw = {'name': fkeyname.lower()}
            if fkey[0].RULE is not None:
                rule = fkey[0].RULE
                if rule.startswith('DELETE '):
                    rule = rule[7:]
                constraint_kw['ondelete'] = rule

            table_kw = {}
            if table.schema or not row.in_schema:
                table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME)

            ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME),
                                            table_kw.get('schema'))
            if ref_key not in table.metadata.tables:
                schema.Table(normalize(fkey[0].REFTABLENAME),
                             table.metadata,
                             autoload=True, autoload_with=connection,
                             **table_kw)

            constraint = schema.ForeignKeyConstraint(
                columns, referants, link_to_name=True, **constraint_kw)
            table.append_constraint(constraint)
") rp = connection.execute(sql, denormalize(name)) return bool(rp.first()) def _autoserial_column(table): """Finds the effective DEFAULT SERIAL column of a Table, if any.""" for index, col in enumerate(table.primary_key.columns): if (isinstance(col.type, (sqltypes.Integer, sqltypes.Numeric)) and col.autoincrement): if isinstance(col.default, schema.Sequence): if col.default.optional: return index, col elif (col.default is None or (not isinstance(col.server_default, schema.DefaultClause))): return index, col return None, None </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284604"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cappatar/knesset-data-pipelines</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">datapackage_pipelines_knesset/committees/processors/parse_committee_meeting_protocols.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from datapackage_pipelines_knesset.common.processors.base_processor import BaseProcessor from knesset_data.protocols.committee import CommitteeMeetingProtocol from knesset_data.protocols.exceptions import AntiwordException import os, csv, json, subprocess, logging, shutil, tempfile from datapackage_pipelines_knesset.common import object_storage import xml.etree.ElementTree class ParseCommitteeMeetingProtocolsProcessor(BaseProcessor): def __init__(self, *args, **kwargs): super(ParseCommitteeMeetingProtocolsProcessor, self).__init__(*args, **kwargs) self._schema["fields"] = [ {"name": "kns_committee_id", "type": "integer", "description": "primary key from kns_committee table"}, {"name": "kns_session_id", "type": "integer", "description": "primary key from kns_committeesession table"}, {"name": "protocol_object_name", "type": "string", "description": "storage object name containing the downloaded protocol"}, {"name": "protocol_extension", "type": "string", "description": "file extension of the downloaded protocol"}, {"name": "text_object_name", "type": "string", "description": "storage object name containing the parsed protocol text"}, {"name": "parts_object_name", "type": "string", "description": "storage object name containing the parsed protocol csv"},] self._schema["primaryKey"] = ["kns_session_id"] self.s3 = object_storage.get_s3() def _process(self, datapackage, resources): return self._process_filter(datapackage, resources) def _filter_row(self, meeting_protocol, **kwargs): bucket = "committees" protocol_object_name = meeting_protocol["protocol_object_name"] protocol_extension = meeting_protocol["protocol_extension"] base_object_name = "protocols/parsed/{}/{}".format(meeting_protocol["kns_committee_id"], meeting_protocol["kns_session_id"]) parts_object_name = "{}.csv".format(base_object_name) text_object_name = "{}.txt".format(base_object_name) if 
======================================================================
cappatar/knesset-data-pipelines @ refs/heads/master
datapackage_pipelines_knesset/committees/processors/parse_committee_meeting_protocols.py
======================================================================

from datapackage_pipelines_knesset.common.processors.base_processor import BaseProcessor
from knesset_data.protocols.committee import CommitteeMeetingProtocol
from knesset_data.protocols.exceptions import AntiwordException
import os, csv, json, subprocess, logging, shutil, tempfile
from datapackage_pipelines_knesset.common import object_storage
import xml.etree.ElementTree


class ParseCommitteeMeetingProtocolsProcessor(BaseProcessor):

    def __init__(self, *args, **kwargs):
        super(ParseCommitteeMeetingProtocolsProcessor, self).__init__(*args, **kwargs)
        self._schema["fields"] = [
            {"name": "kns_committee_id", "type": "integer",
             "description": "primary key from kns_committee table"},
            {"name": "kns_session_id", "type": "integer",
             "description": "primary key from kns_committeesession table"},
            {"name": "protocol_object_name", "type": "string",
             "description": "storage object name containing the downloaded protocol"},
            {"name": "protocol_extension", "type": "string",
             "description": "file extension of the downloaded protocol"},
            {"name": "text_object_name", "type": "string",
             "description": "storage object name containing the parsed protocol text"},
            {"name": "parts_object_name", "type": "string",
             "description": "storage object name containing the parsed protocol csv"},
        ]
        self._schema["primaryKey"] = ["kns_session_id"]
        self.s3 = object_storage.get_s3()

    def _process(self, datapackage, resources):
        return self._process_filter(datapackage, resources)

    def _filter_row(self, meeting_protocol, **kwargs):
        bucket = "committees"
        protocol_object_name = meeting_protocol["protocol_object_name"]
        protocol_extension = meeting_protocol["protocol_extension"]
        base_object_name = "protocols/parsed/{}/{}".format(
            meeting_protocol["kns_committee_id"],
            meeting_protocol["kns_session_id"])
        parts_object_name = "{}.csv".format(base_object_name)
        text_object_name = "{}.txt".format(base_object_name)
        if not object_storage.exists(self.s3, bucket, parts_object_name, min_size=5):
            parse_args = (meeting_protocol["kns_committee_id"],
                          meeting_protocol["kns_session_id"],
                          bucket, protocol_object_name,
                          parts_object_name, text_object_name)
            if protocol_extension == "doc":
                parse_res = self._parse_doc_protocol(*parse_args)
            elif protocol_extension == "rtf":
                parse_res = self._parse_rtf_protocol(*parse_args)
            elif protocol_extension == "docx":
                parse_res = None
            else:
                raise Exception("unknown extension: {}".format(protocol_extension))
            if not parse_res:
                # in case parsing failed - we remove all parsed files, to ensure re-parse next time
                object_storage.delete(self.s3, bucket, text_object_name)
                object_storage.delete(self.s3, bucket, parts_object_name)
                text_object_name = None
                parts_object_name = None
        yield {"kns_committee_id": meeting_protocol["kns_committee_id"],
               "kns_session_id": meeting_protocol["kns_session_id"],
               "protocol_object_name": protocol_object_name,
               "protocol_extension": protocol_extension,
               "text_object_name": text_object_name,
               "parts_object_name": parts_object_name}

    def _parse_rtf_protocol(self, committee_id, meeting_id, bucket,
                            protocol_object_name, parts_object_name, text_object_name):
        # currently with the new API - we don't seem to get rtf files anymore
        # it looks like files which used to be rtf are actually doc
        # need to investigate further
        return False
        # rtf_extractor = os.environ.get("RTF_EXTRACTOR_BIN")
        # if rtf_extractor:
        #     with object_storage.temp_download(protocol_object_name) as protocol_filename:
        #         with tempfile.NamedTemporaryFile() as text_filename:
        #             cmd = rtf_extractor + ' ' + protocol_filename + ' ' + text_filename
        #             try:
        #                 subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        #                 protocol_text = fs.read(text_filename)
        #                 with CommitteeMeetingProtocol.get_from_text(protocol_text) as protocol:
        #                     self._parse_protocol_parts(parts_filename, protocol)
        #             except subprocess.SubprocessError:
        #                 logging.exception("committee {} meeting {}: failed to parse rtf file, skipping".format(committee_id, meeting_id))
        #                 return False
        #             return True
        # else:
        #     logging.warning("missing RTF_EXTRACTOR_BIN environment variable, skipping rtf parsing")
        #     return False

    def _parse_doc_protocol(self, committee_id, meeting_id, bucket,
                            protocol_object_name, parts_object_name, text_object_name):
        logging.info("parsing doc protocol {} --> {}, {}".format(
            protocol_object_name, parts_object_name, text_object_name))
        with object_storage.temp_download(self.s3, bucket, protocol_object_name) as protocol_filename:
            try:
                with CommitteeMeetingProtocol.get_from_filename(protocol_filename) as protocol:
                    object_storage.write(self.s3, bucket, text_object_name,
                                         protocol.text, public_bucket=True)
                    self._parse_protocol_parts(bucket, parts_object_name, protocol)
            except (
                AntiwordException,  # see https://github.com/hasadna/knesset-data-pipelines/issues/15
                subprocess.SubprocessError,
                xml.etree.ElementTree.ParseError  # see https://github.com/hasadna/knesset-data-pipelines/issues/32
            ):
                logging.exception("committee {} meeting {}: failed to parse doc file, skipping".format(
                    committee_id, meeting_id))
                return False
        return True

    def _parse_protocol_parts(self, bucket, parts_object_name, protocol):
        with object_storage.csv_writer(self.s3, bucket, parts_object_name,
                                       public_bucket=True) as csv_writer:
            csv_writer.writerow(["header", "body"])
            for part in protocol.parts:
                csv_writer.writerow([part.header, part.body])
        logging.info("parsed parts file -> {}".format(parts_object_name))


if __name__ == '__main__':
    ParseCommitteeMeetingProtocolsProcessor.main()
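The parsed artifacts land under a predictable storage key derived from the committee and session ids; for a hypothetical committee 287 and session 2009623 the names work out as:

base = "protocols/parsed/{}/{}".format(287, 2009623)
print(base + ".csv")  # protocols/parsed/287/2009623.csv  (header/body parts)
print(base + ".txt")  # protocols/parsed/287/2009623.txt  (full protocol text)

The exists() check on the .csv object is what makes the processor idempotent: a row is only re-parsed when its parts file is missing or under 5 bytes.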
======================================================================
fangxingli/hue @ refs/heads/master
desktop/core/ext-py/markdown/markdown/extensions/legacy.py
======================================================================

"""
Legacy Extension for Python-Markdown
====================================

Replaces the core parser with the old one.

"""

import markdown, re
from markdown import etree

"""Basic and reusable regular expressions."""

def wrapRe(raw_re):
    return re.compile("^%s$" % raw_re, re.DOTALL)

CORE_RE = {
    'header': wrapRe(r'(#{1,6})[ \t]*(.*?)[ \t]*(#*)'),  # # A title
    'reference-def': wrapRe(r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)'),  # [Google]: http://www.google.com/
    'containsline': wrapRe(r'([-]*)$|^([=]*)'),  # -----, =====, etc.
    'ol': wrapRe(r'[ ]{0,3}[\d]*\.\s+(.*)'),  # 1. text
    'ul': wrapRe(r'[ ]{0,3}[*+-]\s+(.*)'),  # "* text"
    'isline1': wrapRe(r'(\**)'),  # ***
    'isline2': wrapRe(r'(\-*)'),  # ---
    'isline3': wrapRe(r'(\_*)'),  # ___
    'tabbed': wrapRe(r'((\t)|(    ))(.*)'),  # an indented line
    'quoted': wrapRe(r'[ ]{0,2}> ?(.*)'),  # a quoted block ("> ...")
    'containsline': re.compile(r'^([-]*)$|^([=]*)$', re.M),
    'attr': re.compile("\{@([^\}]*)=([^\}]*)}")  # {@id=123}
}


class MarkdownParser:
    """Parse Markdown into an ElementTree."""

    def __init__(self):
        pass

    def parseDocument(self, lines):
        """Parse a markdown string into an ElementTree."""
        # Create an ElementTree from the lines
        root = etree.Element("div")
        buffer = []
        for line in lines:
            if line.startswith("#"):
                self.parseChunk(root, buffer)
                buffer = [line]
            else:
                buffer.append(line)
        self.parseChunk(root, buffer)
        return etree.ElementTree(root)

    def parseChunk(self, parent_elem, lines, inList=0, looseList=0):
        """Process a chunk of markdown-formatted text and attach the parse to
        an ElementTree node.

        Process a section of a source document, looking for high
        level structural elements like lists, block quotes, code
        segments, html blocks, etc.  Some of those then get stripped
        of their high level markup (e.g. get unindented) and the
        lower-level markup is processed recursively.

        Keyword arguments:

        * parent_elem: The ElementTree element to which the content will be
                       added.
        * lines: a list of lines
        * inList: a level

        Returns: None

        """
        # Loop through lines until none left.
        while lines:
            # Skipping empty line
            if not lines[0]:
                lines = lines[1:]
                continue

            # Check if this section starts with a list, a blockquote or
            # a code block.  If so, process them.
            processFn = {'ul': self.__processUList,
                         'ol': self.__processOList,
                         'quoted': self.__processQuote,
                         'tabbed': self.__processCodeBlock}
            for regexp in ['ul', 'ol', 'quoted', 'tabbed']:
                m = CORE_RE[regexp].match(lines[0])
                if m:
                    processFn[regexp](parent_elem, lines, inList)
                    return

            # We are NOT looking at one of the high-level structures like
            # lists or blockquotes.  So, it's just a regular paragraph
            # (though perhaps nested inside a list or something else).  If
            # we are NOT inside a list, we just need to look for a blank
            # line to find the end of the block.  If we ARE inside a
            # list, however, we need to consider that a sublist does not
            # need to be separated by a blank line.  Rather, the following
            # markup is legal:
            #
            # * The top level list item
            #
            #     Another paragraph of the list.  This is where we are now.
            #     * Underneath we might have a sublist.
            #

            if inList:
                start, lines = self.__linesUntil(lines, (lambda line:
                                                         CORE_RE['ul'].match(line) or
                                                         CORE_RE['ol'].match(line) or
                                                         not line.strip()))
                self.parseChunk(parent_elem, start, inList - 1,
                                looseList=looseList)
                inList = inList - 1
            else:  # Ok, so it's just a simple block
                test = lambda line: not line.strip() or line[0] == '>'
                paragraph, lines = self.__linesUntil(lines, test)
                if len(paragraph) and paragraph[0].startswith('#'):
                    self.__processHeader(parent_elem, paragraph)
                elif len(paragraph) and CORE_RE["isline3"].match(paragraph[0]):
                    self.__processHR(parent_elem)
                    lines = paragraph[1:] + lines
                elif paragraph:
                    self.__processParagraph(parent_elem, paragraph,
                                            inList, looseList)

            if lines and not lines[0].strip():
                lines = lines[1:]  # skip the first (blank) line

    def __processHR(self, parentElem):
        hr = etree.SubElement(parentElem, "hr")

    def __processHeader(self, parentElem, paragraph):
        m = CORE_RE['header'].match(paragraph[0])
        if m:
            level = len(m.group(1))
            h = etree.SubElement(parentElem, "h%d" % level)
            h.text = m.group(2).strip()
        else:
            message(CRITICAL, "We've got a problem header!")

    def __processParagraph(self, parentElem, paragraph, inList, looseList):
        if (parentElem.tag == 'li' and
                not (looseList or parentElem.getchildren())):
            # If this is the first paragraph inside "li", don't
            # put <p> around it - append the paragraph bits directly
            # onto parentElem
            el = parentElem
        else:
            # Otherwise make a "p" element
            el = etree.SubElement(parentElem, "p")

        dump = []

        # Searching for hr or header
        for line in paragraph:
            # it's hr
            if CORE_RE["isline3"].match(line):
                el.text = "\n".join(dump)
                self.__processHR(el)
                dump = []
            # it's header
            elif line.startswith("#"):
                el.text = "\n".join(dump)
                self.__processHeader(parentElem, [line])
                dump = []
            else:
                dump.append(line)
        if dump:
            text = "\n".join(dump)
            el.text = text

    def __processUList(self, parentElem, lines, inList):
        self.__processList(parentElem, lines, inList, listexpr='ul', tag='ul')

    def __processOList(self, parentElem, lines, inList):
        self.__processList(parentElem, lines, inList, listexpr='ol', tag='ol')

    def __processList(self, parentElem, lines, inList, listexpr, tag):
        """
        Given a list of document lines starting with a list item,
        finds the end of the list, breaks it up, and recursively
        processes each list item and the remainder of the text file.
        Keyword arguments:

        * parentElem: A ElementTree element to which the content will be added
        * lines: a list of lines
        * inList: a level

        Returns: None

        """
        ul = etree.SubElement(parentElem, tag)  # ul might actually be '<ol>'

        looseList = 0

        # Make a list of list items
        items = []
        item = -1
        i = 0  # a counter to keep track of where we are
        for line in lines:
            loose = 0
            if not line.strip():
                # If we see a blank line, this _might_ be the end of the list
                i += 1
                loose = 1

                # Find the next non-blank line
                for j in range(i, len(lines)):
                    if lines[j].strip():
                        next = lines[j]
                        break
                else:
                    # There is no more text => end of the list
                    break

                # Check if the next non-blank line is still a part of the list
                if (CORE_RE[listexpr].match(next) or
                        CORE_RE['tabbed'].match(next)):
                    # get rid of any white space in the line
                    items[item].append(line.strip())
                    looseList = loose or looseList
                    continue
                else:
                    break  # found end of the list

            # Now we need to detect list items (at the current level)
            # while also detabing child elements if necessary
            for expr in ['ul', 'ol', 'tabbed']:
                m = CORE_RE[expr].match(line)
                if m:
                    if expr in ['ul', 'ol']:  # We are looking at a new item
                        #if m.group(1):
                        # Removed the check to allow for a blank line
                        # at the beginning of the list item
                        items.append([m.group(1)])
                        item += 1
                    elif expr == 'tabbed':  # This line needs to be detabbed
                        items[item].append(m.group(4))  # after the 'tab'
                    i += 1
                    break
            else:
                items[item].append(line)  # Just regular continuation
                i += 1  # added on 2006.02.25
        else:
            i += 1

        # Add the ElementTree elements
        for item in items:
            li = etree.SubElement(ul, "li")
            self.parseChunk(li, item, inList + 1, looseList=looseList)

        # Process the remaining part of the section
        self.parseChunk(parentElem, lines[i:], inList)

    def __linesUntil(self, lines, condition):
        """
        A utility function to break a list of lines upon the
        first line that satisfies a condition.  The condition
        argument should be a predicate function.
        """
        i = -1
        for line in lines:
            i += 1
            if condition(line):
                break
        else:
            i += 1
        return lines[:i], lines[i:]

    def __processQuote(self, parentElem, lines, inList):
        """
        Given a list of document lines starting with a quote finds
        the end of the quote, unindents it and recursively
        processes the body of the quote and the remainder of the
        text file.

        Keyword arguments:

        * parentElem: ElementTree element to which the content will be added
        * lines: a list of lines
        * inList: a level

        Returns: None

        """
        dequoted = []
        i = 0
        blank_line = False  # allow one blank line between paragraphs
        for line in lines:
            m = CORE_RE['quoted'].match(line)
            if m:
                dequoted.append(m.group(1))
                i += 1
                blank_line = False
            elif not blank_line and line.strip() != '':
                dequoted.append(line)
                i += 1
            elif not blank_line and line.strip() == '':
                dequoted.append(line)
                i += 1
                blank_line = True
            else:
                break

        blockquote = etree.SubElement(parentElem, "blockquote")

        self.parseChunk(blockquote, dequoted, inList)
        self.parseChunk(parentElem, lines[i:], inList)

    def __processCodeBlock(self, parentElem, lines, inList):
        """
        Given a list of document lines starting with a code block
        finds the end of the block, puts it into the ElementTree
        verbatim wrapped in ("<pre><code>") and recursively
        processes the remainder of the text file.
        Keyword arguments:

        * parentElem: ElementTree element to which the content will be added
        * lines: a list of lines
        * inList: a level

        Returns: None

        """
        detabbed, theRest = self.detectTabbed(lines)

        pre = etree.SubElement(parentElem, "pre")
        code = etree.SubElement(pre, "code")

        text = "\n".join(detabbed).rstrip() + "\n"
        code.text = markdown.AtomicString(text)
        self.parseChunk(parentElem, theRest, inList)

    def detectTabbed(self, lines):
        """ Find indented text and remove indent before further processing.

        Keyword arguments:

        * lines: an array of strings

        Returns: a list of post processed items and the unused
        remainder of the original list

        """
        items = []
        item = -1
        i = 0  # to keep track of where we are

        def detab(line):
            match = CORE_RE['tabbed'].match(line)
            if match:
                return match.group(4)

        for line in lines:
            if line.strip():  # Non-blank line
                line = detab(line)
                if line:
                    items.append(line)
                    i += 1
                    continue
                else:
                    return items, lines[i:]

            else:  # Blank line: _maybe_ we are done.
                i += 1  # advance

                # Find the next non-blank line
                for j in range(i, len(lines)):
                    if lines[j].strip():
                        next_line = lines[j]
                        break
                else:
                    break  # There is no more text; we are done.

                # Check if the next non-blank line is tabbed
                if detab(next_line):  # Yes, more work to do.
                    items.append("")
                    continue
                else:
                    break  # No, we are done.
        else:
            i += 1

        return items, lines[i:]


class HeaderPreprocessor(markdown.Preprocessor):

    """Replace underlined headers with hashed headers.

    (To avoid the need for lookahead later.)

    """

    def run(self, lines):
        i = -1
        while i + 1 < len(lines):
            i = i + 1
            if not lines[i].strip():
                continue

            if lines[i].startswith("#"):
                lines.insert(i + 1, "\n")

            if (i + 1 <= len(lines) and lines[i + 1] and
                    lines[i + 1][0] in ['-', '=']):
                underline = lines[i + 1].strip()
                if underline == "=" * len(underline):
                    lines[i] = "# " + lines[i].strip()
                    lines[i + 1] = ""
                elif underline == "-" * len(underline):
                    lines[i] = "## " + lines[i].strip()
                    lines[i + 1] = ""

        return lines


class LinePreprocessor(markdown.Preprocessor):
    """Convert HR lines to "___" format."""

    blockquote_re = re.compile(r'^(> )+')

    def run(self, lines):
        for i in range(len(lines)):
            prefix = ''
            m = self.blockquote_re.search(lines[i])
            if m:
                prefix = m.group(0)
            if self._isLine(lines[i][len(prefix):]):
                lines[i] = prefix + "___"
        return lines

    def _isLine(self, block):
        """Determine if a block should be replaced with an <HR>."""
        if block.startswith("    "):
            return False  # a code block
        text = "".join([x for x in block if not x.isspace()])
        if len(text) <= 2:
            return False
        for pattern in ['isline1', 'isline2', 'isline3']:
            m = CORE_RE[pattern].match(text)
            if (m and m.group(1)):
                return True
        else:
            return False


class LegacyExtension(markdown.Extension):
    """ Replace Markdown's core parser. """

    def extendMarkdown(self, md, md_globals):
        """ Set the core parser to an instance of MarkdownParser. """
""" md.parser = MarkdownParser() md.preprocessors.add ("header", HeaderPreprocessor(self), "<reference") md.preprocessors.add("line", LinePreprocessor(self), "<reference") def makeExtension(configs={}): return LegacyExtension(configs=configs) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284606"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Nicolas570/chris_db</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">components/faker/faker/providers/company/it_IT/__init__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">19</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># coding=utf-8 from __future__ import unicode_literals from .. import Provider as CompanyProvider class Provider(CompanyProvider): formats = ( '{{last_name}} {{company_suffix}}', '{{last_name}}-{{last_name}} {{company_suffix}}', '{{last_name}}, {{last_name}} e {{last_name}} {{company_suffix}}' ) catch_phrase_words = ( ( 'Abilità', 'Access', 'Adattatore', 'Algoritmo', 'Alleanza', 'Analizzatore', 'Applicazione', 'Approccio', 'Architettura', 'Archivio', 'Intelligenza artificiale', 'Array', 'Attitudine', 'Benchmark', 'Capacità', 'Sfida', 'Circuito', 'Collaborazione', 'Complessità', 'Concetto', 'Conglomerato', 'Contingenza', 'Core', 'Database', 'Data-warehouse', 'Definizione', 'Emulazione', 'Codifica', 'Criptazione', 'Firmware', 'Flessibilità', 'Previsione', 'Frame', 'framework', 'Funzione', 'Funzionalità', 'Interfaccia grafica', 'Hardware', 'Help-desk', 'Gerarchia', 'Hub', 'Implementazione', 'Infrastruttura', 'Iniziativa', 'Installazione', 'Set di istruzioni', 'Interfaccia', 'Soluzione internet', 'Intranet', 'Conoscenza base', 'Matrici', 'Matrice', 'Metodologia', 'Middleware', 'Migrazione', 'Modello', 'Moderazione', 'Monitoraggio', 'Moratoria', 'Rete', 'Architettura aperta', 'Sistema aperto', 'Orchestrazione', 'Paradigma', 'Parallelismo', 'Policy', 'Portale', 'Struttura di prezzo', 'Prodotto', 'Produttività', 'Progetto', 'Proiezione', 'Protocollo', 'Servizio clienti', 'Software', 'Soluzione', 'Standardizzazione', 'Strategia', 'Struttura', 'Successo', 'Sovrastruttura', 'Supporto', 'Sinergia', 'Task-force', 'Finestra temporale', 'Strumenti', 'Utilizzazione', 'Sito web', 'Forza lavoro' ), ( 'adattiva', 'avanzata', 'migliorata', 'assimilata', 'automatizzata', 'bilanciata', 'centralizzata', 'compatibile', 'configurabile', 'cross-platform', 'decentralizzata', 'digitalizzata', 'distribuita', 'piccola', 'ergonomica', 'esclusiva', 'espansa', 'estesa', 'configurabile', 'fondamentale', 'orizzontale', 'implementata', 'innovativa', 'integrata', 'intuitiva', 'inversa', 'gestita', 'obbligatoria', 'monitorata', 'multi-canale', 'multi-laterale', 'open-source', 'operativa', 'ottimizzata', 'organica', 'persistente', 'polarizzata', 'proattiva', 'programmabile', 'progressiva', 'reattiva', 
======================================================================
Nicolas570/chris_db @ refs/heads/master
components/faker/faker/providers/company/it_IT/__init__.py
======================================================================

# coding=utf-8

from __future__ import unicode_literals
from .. import Provider as CompanyProvider


class Provider(CompanyProvider):
    formats = (
        '{{last_name}} {{company_suffix}}',
        '{{last_name}}-{{last_name}} {{company_suffix}}',
        '{{last_name}}, {{last_name}} e {{last_name}} {{company_suffix}}'
    )

    catch_phrase_words = (
        (
            'Abilità', 'Access', 'Adattatore', 'Algoritmo', 'Alleanza', 'Analizzatore',
            'Applicazione', 'Approccio', 'Architettura', 'Archivio',
            'Intelligenza artificiale', 'Array', 'Attitudine', 'Benchmark', 'Capacità',
            'Sfida', 'Circuito', 'Collaborazione', 'Complessità', 'Concetto',
            'Conglomerato', 'Contingenza', 'Core', 'Database', 'Data-warehouse',
            'Definizione', 'Emulazione', 'Codifica', 'Criptazione', 'Firmware',
            'Flessibilità', 'Previsione', 'Frame', 'framework', 'Funzione',
            'Funzionalità', 'Interfaccia grafica', 'Hardware', 'Help-desk', 'Gerarchia',
            'Hub', 'Implementazione', 'Infrastruttura', 'Iniziativa', 'Installazione',
            'Set di istruzioni', 'Interfaccia', 'Soluzione internet', 'Intranet',
            'Conoscenza base', 'Matrici', 'Matrice', 'Metodologia', 'Middleware',
            'Migrazione', 'Modello', 'Moderazione', 'Monitoraggio', 'Moratoria', 'Rete',
            'Architettura aperta', 'Sistema aperto', 'Orchestrazione', 'Paradigma',
            'Parallelismo', 'Policy', 'Portale', 'Struttura di prezzo', 'Prodotto',
            'Produttività', 'Progetto', 'Proiezione', 'Protocollo', 'Servizio clienti',
            'Software', 'Soluzione', 'Standardizzazione', 'Strategia', 'Struttura',
            'Successo', 'Sovrastruttura', 'Supporto', 'Sinergia', 'Task-force',
            'Finestra temporale', 'Strumenti', 'Utilizzazione', 'Sito web',
            'Forza lavoro'
        ),
        (
            'adattiva', 'avanzata', 'migliorata', 'assimilata', 'automatizzata',
            'bilanciata', 'centralizzata', 'compatibile', 'configurabile',
            'cross-platform', 'decentralizzata', 'digitalizzata', 'distribuita',
            'piccola', 'ergonomica', 'esclusiva', 'espansa', 'estesa', 'configurabile',
            'fondamentale', 'orizzontale', 'implementata', 'innovativa', 'integrata',
            'intuitiva', 'inversa', 'gestita', 'obbligatoria', 'monitorata',
            'multi-canale', 'multi-laterale', 'open-source', 'operativa', 'ottimizzata',
            'organica', 'persistente', 'polarizzata', 'proattiva', 'programmabile',
            'progressiva', 'reattiva', 'riallineata', 'ricontestualizzata', 'ridotta',
            'robusta', 'sicura', 'condivisibile', 'stand-alone', 'switchabile',
            'sincronizzata', 'sinergica', 'totale', 'universale', 'user-friendly',
            'versatile', 'virtuale', 'visionaria'
        ),
        (
            '24 ore', '24/7', 'terza generazione', 'quarta generazione',
            'quinta generazione', 'sesta generazione', 'asimmetrica', 'asincrona',
            'background', 'bi-direzionale', 'biforcata', 'bottom-line', 'coerente',
            'coesiva', 'composita', 'sensibile al contesto', 'basta sul contesto',
            'basata sul contenuto', 'dedicata', 'didattica', 'direzionale', 'discreta',
            'dinamica', 'eco-centrica', 'esecutiva', 'esplicita', 'full-range',
            'globale', 'euristica', 'alto livello', 'olistica', 'omogenea', 'ibrida',
            'impattante', 'incrementale', 'intangibile', 'interattiva', 'intermediaria',
            'locale', 'logistica', 'massimizzata', 'metodica', 'mission-critical',
            'mobile', 'modulare', 'motivazionale', 'multimedia', 'multi-tasking',
            'nazionale', 'neutrale', 'nextgeneration', 'non-volatile',
            'object-oriented', 'ottima', 'ottimizzante', 'radicale', 'real-time',
            'reciproca', 'regionale', 'responsiva', 'scalabile', 'secondaria',
            'stabile', 'statica', 'sistematica', 'sistemica', 'tangibile', 'terziaria',
            'uniforme', 'valore aggiunto'
        )
    )

    bsWords = (
        (
            'partnerships', 'comunità', 'ROI', 'soluzioni', 'e-services', 'nicchie',
            'tecnologie', 'contenuti', 'supply-chains', 'convergenze', 'relazioni',
            'architetture', 'interfacce', 'mercati', 'e-commerce', 'sistemi', 'modelli',
            'schemi', 'reti', 'applicazioni', 'metriche', 'e-business', 'funzionalità',
            'esperienze', 'webservices', 'metodologie'
        ),
        (
            'implementate', 'utilizzo', 'integrate', 'ottimali', 'evolutive',
            'abilitate', 'reinventate', 'aggregate', 'migliorate', 'incentivate',
            'monetizzate', 'sinergizzate', 'strategiche', 'deploy', 'marchi',
            'accrescitive', 'target', 'sintetizzate', 'spedizioni', 'massimizzate',
            'innovazione', 'guida', 'estensioni', 'generate', 'exploit',
            'transizionali', 'matrici', 'ricontestualizzate'
        ),
        (
            'valore aggiunto', 'verticalizzate', 'proattive', 'forti', 'rivoluzionari',
            'scalabili', 'innovativi', 'intuitivi', 'strategici', 'e-business',
            'mission-critical', '24/7', 'globali', 'B2B', 'B2C', 'granulari',
            'virtuali', 'virali', 'dinamiche', 'magnetiche', 'web', 'interattive',
            'sexy', 'back-end', 'real-time', 'efficienti', 'front-end', 'distributivi',
            'estensibili', 'mondiali', 'open-source', 'cross-platform', 'sinergiche',
            'out-of-the-box', 'enterprise', 'integrate', 'di impatto', 'wireless',
            'trasparenti', 'next-generation', 'cutting-edge', 'visionari',
            'plug-and-play', 'collaborative', 'olistiche', 'ricche'
        )
    )

    company_suffixes = ('SPA', 'e figli', 'Group', 's.r.l.')

    def catch_phrase(self):
        """
        :example 'Robust full-range hub'
        """
        result = []
        for word_list in self.catch_phrase_words:
            result.append(self.random_element(word_list))
        return " ".join(result)

    def bs(self):
        """
        :example 'integrate extensible convergence'
        """
        result = []
        for word_list in self.bsWords:
            result.append(self.random_element(word_list))
        return " ".join(result)
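A usage sketch for this locale provider via the public Faker API (on older faker releases, Factory.create('it_IT') may be needed instead of the Faker constructor; outputs are random, the comments show representative values):

from faker import Faker

fake = Faker('it_IT')
print(fake.company())       # e.g. 'Russo Group'
print(fake.catch_phrase())  # one word drawn from each of the three tuples above
print(fake.bs())

Both catch_phrase() and bs() simply join one random element per word list, so the output is always a three-part phrase.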
dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">scrapy/core/__init__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">216</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Scrapy core library classes and functions. """ </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284608"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">shaurz/ome</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ome/emit.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># ome - Object Message Expressions # Copyright (c) 2015-2016 Luke McCarthy <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="9cf0e9f7f9dcf5f3fbf3eceef3b2fff3b2e9f7">[email protected]</a>> import io from contextlib import contextmanager class CodeEmitter(object): def __init__(self, indent=' ' * 4, indent_level=0): self._output = [] self._indent = indent self._indent_level = indent_level self._indent_str = self._indent * indent_level def indent(self): self._indent_level += 1 self._indent_str = self._indent * self._indent_level def dedent(self): self._indent_level -= 1 self._indent_str = self._indent * self._indent_level @contextmanager def indented(self): self.indent() try: yield finally: self.dedent() def __call__(self, line): self._output.append(self._indent_str + line) def unindented(self, line): self._output.append(line) def write_to(self, buf): for line in self._output: buf.write(line) buf.write('\n') class ProcedureCodeEmitter(CodeEmitter): def __init__(self, indent=' ' * 4): super(ProcedureCodeEmitter, self).__init__(indent) self._end_output = [] self._tail_emitters = [] def tail_emitter(self, label): emitter = CodeEmitter(self._indent, self._indent_level) emitter.label(label) self._tail_emitters.append(emitter) return emitter def end(self, line): self._end_output.append(self._indent_str + line) def get_output(self): buf = io.StringIO() self.write_to(buf) for emitter in self._tail_emitters: emitter.write_to(buf) for line in self._end_output: buf.write(line) buf.write('\n') return buf.getvalue() class MethodCode(object): def __init__(self, instructions, num_args): self.instructions = instructions self.num_args = num_args def generate_target_code(self, label, target): emit = ProcedureCodeEmitter(indent=target.indent) codegen = target.ProcedureCodegen(emit) codegen.optimise(self) codegen.begin(label, self.num_args) for ins in 
self.instructions: codegen.pre_instruction(ins) ins.emit(codegen) codegen.end() return emit.get_output() class MethodCodeBuilder(object): def __init__(self, num_args, num_locals, program): self.num_args = num_args + 1 # self is arg 0 self.num_locals = num_args + num_locals + 1 self.program = program self.instructions = [] def add_temp(self): local = self.num_locals self.num_locals += 1 return local def add_instruction(self, instruction): self.instructions.append(instruction) def allocate_string(self, string): return self.program.data_table.allocate_string(string) def allocate_large_integer(self, string): return self.program.data_table.allocate_large_integer(string) def get_tag(self, tag_name): return self.program.ids.tags[tag_name] def get_constant(self, constant_name): return self.program.ids.constants[constant_name] def make_message_label(self, symbol): return self.program.target.make_message_label(symbol) def make_lookup_label(self, symbol): return self.program.target.make_lookup_label(symbol) def make_method_label(self, tag, symbol): return self.program.target.make_method_label(tag, symbol) def get_code(self): return MethodCode(self.instructions, self.num_args) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284609"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">goliveirab/odoo</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/8.0</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">addons/project_issue/res_config.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">441</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
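A minimal sketch of how the CodeEmitter class above is used on its own; the emitted text is illustrative, not from the ome compiler.

import io

emit = CodeEmitter()
emit('int main(void)')
emit('{')
with emit.indented():          # indent/dedent handled by the context manager
    emit('return 0;')
emit('}')

buf = io.StringIO()
emit.write_to(buf)
print(buf.getvalue())
# int main(void)
# {
#     return 0;
# }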
----
goliveirab/odoo | refs/heads/8.0 | addons/project_issue/res_config.py | 441

# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv


class project_issue_settings(osv.osv_memory):
    _name = 'project.config.settings'
    _inherit = ['project.config.settings', 'fetchmail.config.settings']

    _columns = {
        'fetchmail_issue': fields.boolean(
            "Create issues from an incoming email account ",
            fetchmail_model='project.issue', fetchmail_name='Incoming Issues',
            help="""Allows you to configure your incoming mail server, and create issues from incoming emails."""),
    }
----
FlavienCollomb/calligraphr-php | refs/heads/master | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577

# Copyright (c) 2010 Fabien Potencier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper


class configurationblock(nodes.General, nodes.Element):
    pass


class ConfigurationBlock(Directive):
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    formats = {
        'html':            'HTML',
        'xml':             'XML',
        'php':             'PHP',
        'yaml':            'YAML',
        'jinja':           'Twig',
        'html+jinja':      'Twig',
        'jinja+html':      'Twig',
        'php+html':        'PHP',
        'html+php':        'PHP',
        'ini':             'INI',
        'php-annotations': 'Annotations',
    }

    def run(self):
        env = self.state.document.settings.env

        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)

        entries = []
        for i, child in enumerate(node):
            if isinstance(child, nodes.literal_block):
                # add a title (the language name) before each block
                #targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
                #targetnode = nodes.target('', '', ids=[targetid])
                #targetnode.append(child)

                innernode = nodes.emphasis(self.formats[child['language']],
                                           self.formats[child['language']])

                para = nodes.paragraph()
                para += [innernode, child]

                entry = nodes.list_item('')
                entry.append(para)
                entries.append(entry)

        resultnode = configurationblock()
        resultnode.append(nodes.bullet_list('', *entries))

        return [resultnode]


def visit_configurationblock_html(self, node):
    self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))


def depart_configurationblock_html(self, node):
    self.body.append('</div>\n')


def visit_configurationblock_latex(self, node):
    pass


def depart_configurationblock_latex(self, node):
    pass


def setup(app):
    app.add_node(configurationblock,
                 html=(visit_configurationblock_html, depart_configurationblock_html),
                 latex=(visit_configurationblock_latex, depart_configurationblock_latex))
    app.add_directive('configuration-block', ConfigurationBlock)
----
Brocade-OpenSource/OpenStack-DNRM-Nova | refs/heads/master | nova/tests/cert/test_rpcapi.py | 6

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Unit Tests for nova.cert.rpcapi
"""

from oslo.config import cfg

from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova.openstack.common import rpc
from nova import test

CONF = cfg.CONF


class CertRpcAPITestCase(test.TestCase):
    def _test_cert_api(self, method, **kwargs):
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = cert_rpcapi.CertAPI()
        expected_retval = 'foo'
        expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
        expected_msg = rpcapi.make_msg(method, **kwargs)
        expected_msg['version'] = expected_version

        self.call_ctxt = None
        self.call_topic = None
        self.call_msg = None
        self.call_timeout = None

        def _fake_call(_ctxt, _topic, _msg, _timeout):
            self.call_ctxt = _ctxt
            self.call_topic = _topic
            self.call_msg = _msg
            self.call_timeout = _timeout
            return expected_retval

        self.stubs.Set(rpc, 'call', _fake_call)

        retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, expected_retval)
        self.assertEqual(self.call_ctxt, ctxt)
        self.assertEqual(self.call_topic, CONF.cert_topic)
        self.assertEqual(self.call_msg, expected_msg)
        self.assertEqual(self.call_timeout, None)

    def test_revoke_certs_by_user(self):
        self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')

    def test_revoke_certs_by_project(self):
        self._test_cert_api('revoke_certs_by_project',
                            project_id='fake_project_id')

    def test_revoke_certs_by_user_and_project(self):
        self._test_cert_api('revoke_certs_by_user_and_project',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_generate_x509_cert(self):
        self._test_cert_api('generate_x509_cert',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_fetch_ca(self):
        self._test_cert_api('fetch_ca', project_id='fake_project_id')

    def test_fetch_crl(self):
        self._test_cert_api('fetch_crl', project_id='fake_project_id')

    def test_decrypt_text(self):
        self._test_cert_api('decrypt_text',
                            project_id='fake_project_id', text='blah')
----
shubhamgupta123/erpnext | refs/heads/master | erpnext/support/doctype/issue/issue.py | 2

# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
import json

from frappe import _
from frappe.model.document import Document
from frappe.utils import now
from frappe.utils.user import is_website_user
from frappe.email.inbox import link_communication_to_document

sender_field = "raised_by"


class Issue(Document):
    def get_feed(self):
        return "{0}: {1}".format(_(self.status), self.subject)

    def validate(self):
        if (self.get("__islocal") and self.via_customer_portal):
            self.flags.create_communication = True

        if not self.raised_by:
            self.raised_by = frappe.session.user

        self.update_status()
        self.set_lead_contact(self.raised_by)

        if self.status == "Closed":
            from frappe.desk.form.assign_to import clear
            clear(self.doctype, self.name)

    def on_update(self):
        # create the communication email and remove the description
        if (self.flags.create_communication and self.via_customer_portal):
            self.create_communication()
            self.flags.communication_created = None

    def set_lead_contact(self, email_id):
        import email.utils
        email_id = email.utils.parseaddr(email_id)[1]
        if email_id:
            if not self.lead:
                self.lead = frappe.db.get_value("Lead", {"email_id": email_id})
            if not self.contact and not self.customer:
                self.contact = frappe.db.get_value("Contact", {"email_id": email_id})
                if self.contact:
                    contact = frappe.get_doc('Contact', self.contact)
                    self.customer = contact.get_link_for('Customer')
            if not self.company:
                self.company = frappe.db.get_value("Lead", self.lead, "company") or \
                    frappe.db.get_default("Company")

    def update_status(self):
        status = frappe.db.get_value("Issue", self.name, "status")

        if self.status != "Open" and status == "Open" and not self.first_responded_on:
            self.first_responded_on = now()

        if self.status == "Closed" and status != "Closed":
            self.resolution_date = now()

        if self.status == "Open" and status != "Open":
            # if no date, it should be set as None and not a blank string "",
            # as per mysql strict config
            self.resolution_date = None

    def create_communication(self):
        communication = frappe.new_doc("Communication")
        communication.update({
            "communication_type": "Communication",
            "communication_medium": "Email",
            "sent_or_received": "Received",
            "email_status": "Open",
            "subject": self.subject,
            "sender": self.raised_by,
            "content": self.description,
            "status": "Linked",
            "reference_doctype": "Issue",
            "reference_name": self.name
        })
        communication.ignore_permissions = True
        communication.ignore_mandatory = True
        communication.save()
        self.db_set("description", "")

    def split_issue(self, subject, communication_id):
        # Bug: Pressing enter doesn't send subject
        from copy import deepcopy
        replicated_issue = deepcopy(self)
        replicated_issue.subject = subject
        frappe.get_doc(replicated_issue).insert()
        # Replicate linked Communications
        # todo get all communications in timeline before this, and modify them
        # to append them to new doc
        comm_to_split_from = frappe.get_doc("Communication", communication_id)
        communications = frappe.get_all("Communication",
            filters={"reference_name": comm_to_split_from.reference_name,
                "reference_doctype": "Issue",
                "creation": ('>=', comm_to_split_from.creation)})
        for communication in communications:
            doc = frappe.get_doc("Communication", communication.name)
            doc.reference_name = replicated_issue.name
            doc.save(ignore_permissions=True)
        return replicated_issue.name


def get_list_context(context=None):
    return {
        "title": _("Issues"),
        "get_list": get_issue_list,
        "row_template": "templates/includes/issue_row.html",
        "show_sidebar": True,
        "show_search": True,
        'no_breadcrumbs': True
    }


def get_issue_list(doctype, txt, filters, limit_start, limit_page_length=20, order_by=None):
    from frappe.www.list import get_list
    user = frappe.session.user
    contact = frappe.db.get_value('Contact', {'user': user}, 'name')
    customer = None
    if contact:
        contact_doc = frappe.get_doc('Contact', contact)
        customer = contact_doc.get_link_for('Customer')

    ignore_permissions = False
    if is_website_user():
        if not filters:
            filters = []
        filters.append(("Issue", "customer", "=", customer)) if customer else filters.append(("Issue", "raised_by", "=", user))
        ignore_permissions = True

    return get_list(doctype, txt, filters, limit_start, limit_page_length,
        ignore_permissions=ignore_permissions)


@frappe.whitelist()
def set_status(name, status):
    st = frappe.get_doc("Issue", name)
    st.status = status
    st.save()


def auto_close_tickets():
    """ auto close the replied support tickets after 7 days """
    auto_close_after_days = frappe.db.get_value("Support Settings",
        "Support Settings", "close_issue_after_days") or 7

    issues = frappe.db.sql(""" select name from tabIssue where status='Replied' and
        modified<DATE_SUB(CURDATE(), INTERVAL %s DAY) """, (auto_close_after_days), as_dict=True)

    for issue in issues:
        doc = frappe.get_doc("Issue", issue.get("name"))
        doc.status = "Closed"
        doc.flags.ignore_permissions = True
        doc.flags.ignore_mandatory = True
        doc.save()


@frappe.whitelist()
def set_multiple_status(names, status):
    names = json.loads(names)
    for name in names:
        set_status(name, status)


def has_website_permission(doc, ptype, user, verbose=False):
    from erpnext.controllers.website_list_for_contact import has_website_permission
    permission_based_on_customer = has_website_permission(doc, ptype, user, verbose)
    return permission_based_on_customer or doc.raised_by == user


def update_issue(contact, method):
    """Called when Contact is deleted"""
    frappe.db.sql("""UPDATE `tabIssue` set contact='' where contact=%s""", contact.name)


@frappe.whitelist()
def make_issue_from_communication(communication, ignore_communication_links=False):
    """ raise a issue from email """
    doc = frappe.get_doc("Communication", communication)
    issue = frappe.get_doc({
        "doctype": "Issue",
        "subject": doc.subject,
        "communication_medium": doc.communication_medium,
        "raised_by": doc.sender or "",
        "raised_by_phone": doc.phone_no or ""
    }).insert(ignore_permissions=True)

    link_communication_to_document(doc, "Issue", issue.name, ignore_communication_links)

    return issue.name
----
majintao0131/yaml-cpp.core | refs/heads/master | test/create-emitter-tests.py | 87

import sys
import yaml
import hashlib

DEFINE = 'YAML_GEN_TESTS'
EVENT_COUNT = 5


def encode_stream(line):
    for c in line:
        if c == '\n':
            yield '\\n'
        elif c == '"':
            yield '\\"'
        elif c == '\t':
            yield '\\t'
        elif ord(c) < 0x20:
            yield '\\x' + hex(ord(c))
        else:
            yield c


def encode(line):
    return ''.join(encode_stream(line))


def doc_start(implicit=False):
    if implicit:
        return {'emit': '', 'handle': 'OnDocumentStart(_)'}
    else:
        return {'emit': 'BeginDoc', 'handle': 'OnDocumentStart(_)'}


def doc_end(implicit=False):
    if implicit:
        return {'emit': '', 'handle': 'OnDocumentEnd()'}
    else:
        return {'emit': 'EndDoc', 'handle': 'OnDocumentEnd()'}


def scalar(value, tag='', anchor='', anchor_id=0):
    emit = []
    if tag:
        emit += ['VerbatimTag("%s")' % encode(tag)]
    if anchor:
        emit += ['Anchor("%s")' % encode(anchor)]
    if tag:
        out_tag = encode(tag)
    else:
        if value == encode(value):
            out_tag = '?'
        else:
            out_tag = '!'
    emit += ['"%s"' % encode(value)]
    return {'emit': emit,
            'handle': 'OnScalar(_, "%s", %s, "%s")' % (out_tag, anchor_id, encode(value))}


def comment(value):
    return {'emit': 'Comment("%s")' % value, 'handle': ''}


def seq_start(tag='', anchor='', anchor_id=0, style='_'):
    emit = []
    if tag:
        emit += ['VerbatimTag("%s")' % encode(tag)]
    if anchor:
        emit += ['Anchor("%s")' % encode(anchor)]
    if tag:
        out_tag = encode(tag)
    else:
        out_tag = '?'
    emit += ['BeginSeq']
    return {'emit': emit,
            'handle': 'OnSequenceStart(_, "%s", %s, %s)' % (out_tag, anchor_id, style)}


def seq_end():
    return {'emit': 'EndSeq', 'handle': 'OnSequenceEnd()'}


def map_start(tag='', anchor='', anchor_id=0, style='_'):
    emit = []
    if tag:
        emit += ['VerbatimTag("%s")' % encode(tag)]
    if anchor:
        emit += ['Anchor("%s")' % encode(anchor)]
    if tag:
        out_tag = encode(tag)
    else:
        out_tag = '?'
    emit += ['BeginMap']
    return {'emit': emit,
            'handle': 'OnMapStart(_, "%s", %s, %s)' % (out_tag, anchor_id, style)}


def map_end():
    return {'emit': 'EndMap', 'handle': 'OnMapEnd()'}


def gen_templates():
    yield [[doc_start(), doc_start(True)],
           [scalar('foo'), scalar('foo\n'), scalar('foo', 'tag'), scalar('foo', '', 'anchor', 1)],
           [doc_end(), doc_end(True)]]
    yield [[doc_start(), doc_start(True)],
           [seq_start()],
           [[], [scalar('foo')], [scalar('foo', 'tag')], [scalar('foo', '', 'anchor', 1)],
            [scalar('foo', 'tag', 'anchor', 1)], [scalar('foo'), scalar('bar')],
            [scalar('foo', 'tag', 'anchor', 1), scalar('bar', 'tag', 'other', 2)]],
           [seq_end()],
           [doc_end(), doc_end(True)]]
    yield [[doc_start(), doc_start(True)],
           [map_start()],
           [[], [scalar('foo'), scalar('bar')],
            [scalar('foo', 'tag', 'anchor', 1), scalar('bar', 'tag', 'other', 2)]],
           [map_end()],
           [doc_end(), doc_end(True)]]
    yield [[doc_start(True)],
           [map_start()],
           [[scalar('foo')], [seq_start(), scalar('foo'), seq_end()],
            [map_start(), scalar('foo'), scalar('bar'), map_end()]],
           [[scalar('foo')], [seq_start(), scalar('foo'), seq_end()],
            [map_start(), scalar('foo'), scalar('bar'), map_end()]],
           [map_end()],
           [doc_end(True)]]
    yield [[doc_start(True)],
           [seq_start()],
           [[scalar('foo')], [seq_start(), scalar('foo'), seq_end()],
            [map_start(), scalar('foo'), scalar('bar'), map_end()]],
           [[scalar('foo')], [seq_start(), scalar('foo'), seq_end()],
            [map_start(), scalar('foo'), scalar('bar'), map_end()]],
           [seq_end()],
           [doc_end(True)]]


def expand(template):
    if len(template) == 0:
        pass
    elif len(template) == 1:
        for item in template[0]:
            if isinstance(item, list):
                yield item
            else:
                yield [item]
    else:
        for car in expand(template[:1]):
            for cdr in expand(template[1:]):
                yield car + cdr


def gen_events():
    for template in gen_templates():
        for events in expand(template):
            base = list(events)
            for i in range(0, len(base)+1):
                cpy = list(base)
                cpy.insert(i, comment('comment'))
                yield cpy


def gen_tests():
    for events in gen_events():
        name = 'test' + hashlib.sha1(''.join(yaml.dump(event) for event in events)).hexdigest()[:20]
        yield {'name': name, 'events': events}


class Writer(object):
    def __init__(self, out):
        self.out = out
        self.indent = 0

    def writeln(self, s):
        self.out.write('%s%s\n' % (' ' * self.indent, s))


class Scope(object):
    def __init__(self, writer, name, indent):
        self.writer = writer
        self.name = name
        self.indent = indent

    def __enter__(self):
        self.writer.writeln('%s {' % self.name)
        self.writer.indent += self.indent

    def __exit__(self, type, value, traceback):
        self.writer.indent -= self.indent
        self.writer.writeln('}')


def create_emitter_tests(out):
    out = Writer(out)

    includes = [
        'handler_test.h',
        'yaml-cpp/yaml.h',
        'gmock/gmock.h',
        'gtest/gtest.h',
    ]
    for include in includes:
        out.writeln('#include "%s"' % include)
    out.writeln('')

    usings = [
        '::testing::_',
    ]
    for using in usings:
        out.writeln('using %s;' % using)
    out.writeln('')

    with Scope(out, 'namespace YAML', 0) as _:
        with Scope(out, 'namespace', 0) as _:
            out.writeln('')
            out.writeln('typedef HandlerTest GenEmitterTest;')
            out.writeln('')
            tests = list(gen_tests())
            for test in tests:
                with Scope(out, 'TEST_F(%s, %s)' % ('GenEmitterTest', test['name']), 2) as _:
                    out.writeln('Emitter out;')
                    for event in test['events']:
                        emit = event['emit']
                        if isinstance(emit, list):
                            for e in emit:
                                out.writeln('out << %s;' % e)
                        elif emit:
                            out.writeln('out << %s;' % emit)
                    out.writeln('')
                    for event in test['events']:
                        handle = event['handle']
                        if handle:
                            out.writeln('EXPECT_CALL(handler, %s);' % handle)
                    out.writeln('Parse(out.c_str());')
                out.writeln('')


if __name__ == '__main__':
    create_emitter_tests(sys.stdout)
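To see what expand() does in isolation, here is a small hedged example; the template values are made up for illustration, not taken from gen_templates().

# expand() takes a list of alternative-lists and yields every combination,
# splicing nested lists in flat so each result is one event sequence.
template = [['a', 'b'], ['x', ['y', 'z']]]
print(list(expand(template)))
# [['a', 'x'], ['a', 'y', 'z'], ['b', 'x'], ['b', 'y', 'z']]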
----
xiechaopeng/tifi | refs/heads/master | dejavu/wavio.py | 20

# wavio.py
# Author: Warren Weckesser
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
# Synopsis: A Python module for reading and writing 24 bit WAV files.
# Github: github.com/WarrenWeckesser/wavio

import wave as _wave
import numpy as _np


def _wav2array(nchannels, sampwidth, data):
    """data must be the string containing the bytes from the wav file."""
    num_samples, remainder = divmod(len(data), sampwidth * nchannels)
    if remainder > 0:
        raise ValueError('The length of data is not a multiple of '
                         'sampwidth * num_channels.')
    if sampwidth > 4:
        raise ValueError("sampwidth must not be greater than 4.")

    if sampwidth == 3:
        a = _np.empty((num_samples, nchannels, 4), dtype=_np.uint8)
        raw_bytes = _np.fromstring(data, dtype=_np.uint8)
        a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
        a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
        result = a.view('<i4').reshape(a.shape[:-1])
    else:
        # 8 bit samples are stored as unsigned ints; others as signed ints.
        dt_char = 'u' if sampwidth == 1 else 'i'
        a = _np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
        result = a.reshape(-1, nchannels)
    return result


def readwav(file):
    """
    Read a WAV file.

    Parameters
    ----------
    file : string or file object
        Either the name of a file or an open file pointer.

    Return Values
    -------------
    rate : float
        The sampling frequency (i.e. frame rate)
    sampwidth : float
        The sample width, in bytes.  E.g. for a 24 bit WAV file,
        sampwidth is 3.
    data : numpy array
        The array containing the data.  The shape of the array is
        (num_samples, num_channels).  num_channels is the number of
        audio channels (1 for mono, 2 for stereo).

    Notes
    -----
    This function uses the `wave` module of the Python standard library
    to read the WAV file, so it has the same limitations as that library.
    In particular, the function does not read compressed WAV files.
    """
    wav = _wave.open(file)
    rate = wav.getframerate()
    nchannels = wav.getnchannels()
    sampwidth = wav.getsampwidth()
    nframes = wav.getnframes()
    data = wav.readframes(nframes)
    wav.close()
    array = _wav2array(nchannels, sampwidth, data)
    return rate, sampwidth, array


def writewav24(filename, rate, data):
    """
    Create a 24 bit wav file.

    Parameters
    ----------
    filename : string
        Name of the file to create.
    rate : float
        The sampling frequency (i.e. frame rate) of the data.
    data : array-like collection of integer or floating point values
        data must be "array-like", either 1- or 2-dimensional.  If it
        is 2-d, the rows are the frames (i.e. samples) and the columns
        are the channels.

    Notes
    -----
    The data is assumed to be signed, and the values are assumed to be
    within the range of a 24 bit integer.  Floating point values are
    converted to integers.  The data is not rescaled or normalized before
    writing it to the file.

    Example
    -------
    Create a 3 second 440 Hz sine wave.

    >>> rate = 22050  # samples per second
    >>> T = 3         # sample duration (seconds)
    >>> f = 440.0     # sound frequency (Hz)
    >>> t = np.linspace(0, T, T*rate, endpoint=False)
    >>> x = (2**23 - 1) * np.sin(2 * np.pi * f * t)
    >>> writewav24("sine24.wav", rate, x)
    """
    a32 = _np.asarray(data, dtype=_np.int32)
    if a32.ndim == 1:
        # Convert to a 2D array with a single column.
        a32.shape = a32.shape + (1,)
    # By shifting first 0 bits, then 8, then 16, the resulting output
    # is 24 bit little-endian.
    a8 = (a32.reshape(a32.shape + (1,)) >> _np.array([0, 8, 16])) & 255
    wavdata = a8.astype(_np.uint8).tostring()
    w = _wave.open(filename, 'wb')
    w.setnchannels(a32.shape[1])
    w.setsampwidth(3)
    w.setframerate(rate)
    w.writeframes(wavdata)
    w.close()
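A round trip with the two public functions above, sketched from the module's own docstring example; the file name is arbitrary.

import numpy as np

rate = 22050                                  # samples per second
t = np.linspace(0, 3, 3 * rate, endpoint=False)
x = (2**23 - 1) * np.sin(2 * np.pi * 440.0 * t)
writewav24("sine24.wav", rate, x)             # write 24-bit mono

rate_in, sampwidth, data = readwav("sine24.wav")
assert sampwidth == 3 and data.shape == (3 * rate, 1)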
""" from __future__ import absolute_import import unittest import apache_beam as beam from apache_beam.metrics.metric import Metrics from apache_beam.runners import DirectRunner from apache_beam.runners import create_runner class RunnerTest(unittest.TestCase): default_properties = [ '--dataflow_endpoint=ignored', '--job_name=test-job', '--project=test-project', '--staging_location=ignored', '--temp_location=/dev/null', '--no_auth'] def test_create_runner(self): self.assertTrue( isinstance(create_runner('DirectRunner'), DirectRunner)) self.assertRaises(ValueError, create_runner, 'xyz') def test_create_runner_shorthand(self): self.assertTrue( isinstance(create_runner('DiReCtRuNnEr'), DirectRunner)) self.assertTrue( isinstance(create_runner('directrunner'), DirectRunner)) self.assertTrue( isinstance(create_runner('direct'), DirectRunner)) self.assertTrue( isinstance(create_runner('DiReCt'), DirectRunner)) self.assertTrue( isinstance(create_runner('Direct'), DirectRunner)) def test_run_api(self): my_metric = Metrics.counter('namespace', 'my_metric') runner = DirectRunner() result = runner.run( beam.Create([1, 10, 100]) | beam.Map(lambda x: my_metric.inc(x))) result.wait_until_finish() # Use counters to assert the pipeline actually ran. my_metric_value = result.metrics().query()['counters'][0].committed self.assertEqual(my_metric_value, 111) def test_run_api_with_callable(self): my_metric = Metrics.counter('namespace', 'my_metric') def fn(start): return (start | beam.Create([1, 10, 100]) | beam.Map(lambda x: my_metric.inc(x))) runner = DirectRunner() result = runner.run(fn) result.wait_until_finish() # Use counters to assert the pipeline actually ran. my_metric_value = result.metrics().query()['counters'][0].committed self.assertEqual(my_metric_value, 111) if __name__ == '__main__': unittest.main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284616"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">vgonisanz/wpm</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">examples/buymenu_sample.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import sys, os sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src')) sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src/widgets')) import curses # Todo remove chaning own variables from curses import wrapper # Use my own wrapper from wpm import Wpm from menu import Menu from optionstruct import OptionStruct # Configuration buymenu_width = 30 buymenu_height = 15 buymenu_x0 = 5 buymenu_y0 = 5 buymenu_title = "Buy menu" buymenu_instructions = "Use arrows to move, ENTER to select, q to quit" buymenu_centered = True # Variables wpm = None screen = None buymenu = None def initialize(): global wpm global screen wpm = Wpm(True) 
----
vgonisanz/wpm | refs/heads/master | examples/buymenu_sample.py | 1

import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src/widgets'))

import curses                   # Todo remove changing own variables
from curses import wrapper      # Use my own wrapper

from wpm import Wpm
from menu import Menu
from optionstruct import OptionStruct

# Configuration
buymenu_width = 30
buymenu_height = 15
buymenu_x0 = 5
buymenu_y0 = 5
buymenu_title = "Buy menu"
buymenu_instructions = "Use arrows to move, ENTER to select, q to quit"
buymenu_centered = True

# Variables
wpm = None
screen = None
buymenu = None


def initialize():
    global wpm
    global screen
    wpm = Wpm(True)
    wpm.logger.info("Starting %s" % os.path.basename(__file__))
    screen = wpm.get_screen()
    return None


def create_buymenu():
    global buymenu
    # Create submenu 1
    submenu_1_list = {}
    submenu_1_list['title'] = "Choose a pistol"
    submenu_1_list['names'] = ["USP .45ACP Tactical", "Glock 18C Select Fire",
                               "Desert Eagle .50 AE", "SIG P228", "FN Five-Seven"]
    submenu_1_list['callbacks'] = [print_result, print_result, print_result,
                                   print_result, print_result]
    submenu_1_list['args'] = ["400$", "400$", "650$", "600$", "750$"]
    submenu_1 = create_menu(submenu_1_list['title'], submenu_1_list['names'],
                            submenu_1_list['callbacks'], submenu_1_list['args'])

    submenu_2_list = {}
    submenu_2_list['title'] = "Choose a shotgun"
    submenu_2_list['names'] = ["M3 Super 90 Combat", "XM1014"]
    submenu_2_list['callbacks'] = [print_result, print_result]
    submenu_2_list['args'] = ["1700$", "3000$"]
    submenu_2 = create_menu(submenu_2_list['title'], submenu_2_list['names'],
                            submenu_2_list['callbacks'], submenu_2_list['args'])

    submenu_3_list = {}
    submenu_3_list['title'] = "Choose a sub machine gun"
    submenu_3_list['names'] = ["MP5 Navy", "Streyr TMP", "FN P90", "MAC-10", "H&K UMP"]
    submenu_3_list['callbacks'] = [print_result, print_result, print_result,
                                   print_result, print_result]
    submenu_3_list['args'] = ["1500$", "1250$", "2350$", "1400$", "1700$"]
    submenu_3 = create_menu(submenu_3_list['title'], submenu_3_list['names'],
                            submenu_3_list['callbacks'], submenu_3_list['args'])

    submenu_4_list = {}
    submenu_4_list['title'] = "Choose a rifle"
    submenu_4_list['names'] = ["AK47", "M4A1 Carbine/COLT", "SG552 Commando",
                               "Steyr AUG", "Steyr Scout Sniper Rifle",
                               "Artic Warfare Magnum", "SIG SG-550",
                               "G3/SG1 Sniper Rifle"]
    submenu_4_list['callbacks'] = [print_result, print_result, print_result,
                                   print_result, print_result, print_result,
                                   print_result, print_result]
    submenu_4_list['args'] = ["2500$", "3100$", "3500$", "3500$", "2750$",
                              "4750$", "4200$", "5000$"]
    submenu_4 = create_menu(submenu_4_list['title'], submenu_4_list['names'],
                            submenu_4_list['callbacks'], submenu_4_list['args'])

    submenu_5_list = {}
    submenu_5_list['title'] = "Choose a machineGun"
    submenu_5_list['names'] = ["FN M249 PARA"]
    submenu_5_list['callbacks'] = [print_result]
    submenu_5_list['args'] = ["5750$"]
    submenu_5 = create_menu(submenu_5_list['title'], submenu_5_list['names'],
                            submenu_5_list['callbacks'], submenu_5_list['args'])

    submenu_8_list = {}
    submenu_8_list['title'] = "Choose a equipment"
    submenu_8_list['names'] = ["Armor", "Armor|Helmet", "Flash", "Grenade",
                               "Smoke", "Defuser", "NightVision", "Shield"]
    submenu_8_list['callbacks'] = [print_result, print_result, print_result,
                                   print_result, print_result, print_result,
                                   print_result, print_result]
    submenu_8_list['args'] = ["650$", "1000$", "200$", "300$", "300$", "200$",
                              "1250$", "1000$"]
    submenu_8 = create_menu(submenu_8_list['title'], submenu_8_list['names'],
                            submenu_8_list['callbacks'], submenu_8_list['args'])

    # Create test buymenu and run
    main_menu_list = {}
    main_menu_list['title'] = "Choose a weapon to buy"
    main_menu_list['names'] = ["Pistols", "Shotguns", "SMG", "Rifles",
                               "MachineGuns", "Primary ammo", "Secondary ammo",
                               "Equipment"]
    main_menu_list['callbacks'] = [launch_menu, launch_menu, launch_menu,
                                   launch_menu, launch_menu, print_result,
                                   print_result, launch_menu]
    main_menu_list['args'] = [submenu_1, submenu_2, submenu_3, submenu_4,
                              submenu_5, ["Primary ammo", "100$"],
                              ["Secondary ammo", "60$"], submenu_8]
    main_menu = create_menu(main_menu_list['title'], main_menu_list['names'],
                            main_menu_list['callbacks'], main_menu_list['args'])
    main_menu.run()
    screen.waitforkey()
    return None


def create_menu(title, name_list, callback_list=None, args_list=None):
    menu = Menu(buymenu_width, buymenu_height, buymenu_x0, buymenu_y0, title)
    # Create options
    for i in range(0, len(name_list)):
        name = name_list[i]
        callback = None
        args = None
        if callback_list:
            callback = callback_list[i]
        if args_list:
            args = args_list[i]
        option = OptionStruct(name, callback, args)
        menu.add_option(option)
    return menu


def launch_menu(menu):
    menu.run()
    return None


def print_result(money):
    """Maybe menu implement a callback with a pointer to itself to read info."""
    screen.print_message("You buy for: %s" % money, 0, 0)
    screen.waitforkey()
    screen.clear()
    return None


def main(stdscr):
    initialize()
    create_buymenu()
    return None


if __name__ == "__main__":
    wrapper(main)
    print("Thanks for use %s" % os.path.basename(__file__))
----
tiborsimon/projects | refs/heads/master | projects/gui/gui.py | 1

import pydoc

import urwid

from projects.gui import doc_generator
from projects.gui.project_selector import ProjectSelector


def select_project(project_list, path_callback):
    max_width = len(max(project_list, key=len))
    f = ProjectSelector(project_list, 'normal', 'highlighted', 'selected')

    def refresh_list(key=''):
        if key:
            if key in ('delete', 'backspace'):
                f.remove_key()
            else:
                if key in 'abcdefghijklmnopqrstuvwxyz- .0123456789':
                    f.add_key(key)
        s = f.render()
        txt.set_text(s)

    def exit_on_q(key):
        if key.__class__ is not str:
            return
        if key in ('Q',):
            raise urwid.ExitMainLoop()
        if key == 'up':
            f.up()
        if key == 'down':
            f.down()
        if key == 'enter':
            path_callback(f.select())
            raise urwid.ExitMainLoop()
        key = key.lower()
        refresh_list(key)

    palette = [
        ('normal', 'light gray', ''),
        ('selected', 'yellow, bold', ''),
        ('highlighted', 'black, bold', 'yellow'),
        ('quit button', 'light red, bold', ''),
        ('enter button', 'light green, bold', '')
    ]

    txt = urwid.Text('', align='left')
    fill = urwid.Filler(txt)
    pad = urwid.Padding(fill, align='center', width=max_width+4)
    box = urwid.LineBox(pad, title="Projects")
    footer = urwid.Text(['Start typing to search. Use arrow keys to navigate. Press (',
                         ('enter button', 'Enter'), ') to select project. ',
                         'Press (', ('quit button', 'Q'), ') to exit.'])
    frame = urwid.Frame(body=box, footer=footer)

    loop = urwid.MainLoop(frame, palette, unhandled_input=exit_on_q)
    refresh_list()
    loop.run()


def show_project_details(data, width):
    doc = doc_generator.generate_doc(data, width)
    doc = doc.format(
        head='\033[0;33m',
        project='\033[1;36m',
        command='\033[1;33m',
        reset='\033[0m'
    )
    pydoc.pipepager(doc, cmd='less -R')
    # pydoc.pager(doc)
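A minimal hedged driver for select_project above; the project names and the callback are illustrative, and this must run in a real terminal since urwid takes over the screen.

def on_select(project):
    # Called with the highlighted entry when the user presses Enter.
    print("Selected:", project)

select_project(['alpha', 'beta', 'gamma'], on_select)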
def __init__(self, parent, name, desc=None): Field.__init__(self, parent, name, description=desc) val = "" count = 1 off = 1 while not parent.eof: char = parent.stream.readBytes(self.absolute_address+8*off, 1) # Non-ASCII if not char.isalpha() or char == '\\': off += 1 continue if char == '(': count += 1 if char == ')': count -= 1 # Parenthesis block = 0 => end of string if count == 0: off += 1 break # Add it to the string val += char self._size = 8*off self.createValue = lambda: val class PDFName(Field): LIMITS = ['[', '/', '<', ']'] """ String starting with '/', where characters may be written using their ASCII code (exemple: '#20' would be ' ' ' ', ']' and '\0' are supposed not to be part of the name """ def __init__(self, parent, name, desc=None): Field.__init__(self, parent, name, description=desc) if parent.stream.readBytes(self.absolute_address, 1) != '/': raise ParserError("Unknown PDFName '%s'" % parent.stream.readBytes(self.absolute_address, 10)) size = getElementEnd(parent, offset=1) #other_size = getElementEnd(parent, '[')-1 #if size == None or (other_size != None and other_size < size): # size = other_size for limit in self.LIMITS: other_size = getElementEnd(parent, limit, 1) if other_size != None: other_size -= 1 if size == None or other_size < size: #self.info("New size: %u" % other_size) size = other_size self._size = 8*(size+1) # Value should be without the initial '/' and final ' ' self.createValue = lambda: parent.stream.readBytes(self.absolute_address+8, size).strip(' ') class PDFID(Field): """ Not described as an object, but let's do as it was. This ID has the shape <hexadecimal ASCII string> """ def __init__(self, parent, name, desc=None): Field.__init__(self, parent, name, description=desc) self._size = 8*getElementEnd(parent, '>') self.createValue = lambda: parent.stream.readBytes(self.absolute_address+8, (self._size//8)-1) class NotABool(Exception): pass class PDFBool(Field): """ "true" or "false" string standing for the boolean value """ def __init__(self, parent, name, desc=None): Field.__init__(self, parent, name, description=desc) if parent.stream.readBytes(self.absolute_address, 4) == "true": self._size = 4 self.createValue = lambda: True elif parent.stream.readBytes(self.absolute_address, 5) == "false": self._size = 5 self.createValue = lambda: False raise NotABool class LineEnd(FieldSet): """ Made of 0x0A, 0x0D (we may include several line ends) """ def createFields(self): while not self.eof: addr = self.absolute_address+self.current_size char = self.stream.readBytes(addr, 1) if char == '\x0A': yield UInt8(self, "lf", "Line feed") elif char == '\x0D': yield UInt8(self, "cr", "Line feed") else: self.info("Line ends at %u/%u, len %u" % (addr, self.stream._size, self.current_size)) break class PDFDictionaryPair(FieldSet): def createFields(self): yield PDFName(self, "name", getElementEnd(self)) for field in parsePDFType(self): yield field class PDFDictionary(FieldSet): def createFields(self): yield String(self, "dict_start", 2) while not self.eof: addr = self.absolute_address+self.current_size if self.stream.readBytes(addr, 2) != '>>': for field in parsePDFType(self): yield field else: break yield String(self, "dict_end", 2) class PDFArray(FieldSet): """ Array of possibly non-homogeneous elements, starting with '[' and ending with ']' """ def createFields(self): yield String(self, "array_start", 1) while self.stream.readBytes(self.absolute_address+self.current_size, 1) != ']': for field in parsePDFType(self): yield field yield String(self, "array_end", 1) def 
def parsePDFType(s):
    addr = s.absolute_address+s.current_size
    char = s.stream.readBytes(addr, 1)
    if char == '/':
        yield PDFName(s, "type[]", getElementEnd(s))
    elif char == '<':
        if s.stream.readBytes(addr+8, 1) == '<':
            yield PDFDictionary(s, "dict[]")
        else:
            yield PDFID(s, "id[]")
    elif char == '(':
        yield PDFString(s, "string[]")
    elif char == '[':
        yield PDFArray(s, "array[]")
    else:
        # First parse size
        size = getElementEnd(s)
        for limit in ['/', '>', '<']:
            other_size = getElementEnd(s, limit)
            if other_size != None:
                other_size -= 1
                if size == None or (other_size>0 and other_size < size):
                    size = other_size

        # Get element
        name = s.stream.readBytes(addr, size)
        char = s.stream.readBytes(addr+8*size+8, 1)
        if name.count(' ') > 1 and char == '<':
            # Probably a catalog
            yield Catalog(s, "catalog[]")
        elif name[0] in ('.','-','+', '0', '1', '2', '3',
                         '4', '5', '6', '7', '8', '9'):
            s.info("Not a catalog: %u spaces and end='%s'"
                   % (name.count(' '), char))
            yield PDFNumber(s, "integer[]")
        else:
            s.info("Trying to parse '%s': %u bytes" %
                   (s.stream.readBytes(s.absolute_address+s.current_size, 4), size))
            yield String(s, "unknown[]", size)

class Header(FieldSet):
    def createFields(self):
        yield String(self, "marker", 5, MAGIC)
        length = getLineEnd(self, 4)
        if length != None:
            #self.info("Found at position %08X" % len)
            yield String(self, "version", length-1)
            yield LineEnd(self, "line_end")
        else:
            self.warning("Can't determine version!")

    def createDescription(self):
        return "PDF version %s" % self["version"].display

class Body(FieldSet):
    def __init__(self, parent, name, desc=None):
        FieldSet.__init__(self, parent, name, desc)
        pos = self.stream.searchBytesLength(CrossReferenceTable.MAGIC, False)
        if pos == None:
            raise ParserError("Can't find xref starting at %u"
                              % (self.absolute_address//8))
        self._size = 8*pos-self.absolute_address

    def createFields(self):
        while self.stream.readBytes(self.absolute_address+self.current_size, 1) == '%':
            size = getLineEnd(self, 4)
            if size == 2:
                yield textHandler(UInt16(self, "crc32"), hexadecimal)
            elif size == 4:
                yield textHandler(UInt32(self, "crc32"), hexadecimal)
            elif self.stream.readBytes(self.absolute_address+self.current_size, size).isalpha():
                yield String(self, "comment[]", size)
            else:
                yield RawBytes(self, "unknown_data[]", size)
            yield LineEnd(self, "line_end[]")
        #abs_offset = self.current_size//8
        # TODO: yield objects that read offsets and deduce size from
        # "/cross_ref_table/sub_section[]/entries/item[]"
        offsets = []
        for subsection in self.array("/cross_ref_table/sub_section"):
            for obj in subsection.array("entries/item"):
                if "byte_offset" in obj:
                    # Could be inserted already sorted
                    offsets.append(obj["byte_offset"].value)
        offsets.append(self["/cross_ref_table"].absolute_address//8)
        offsets.sort()
        for index in xrange(len(offsets)-1):
            yield Catalog(self, "object[]", size=offsets[index+1]-offsets[index])

class Entry(FieldSet):
    static_size = 20*8

    def createFields(self):
        typ = self.stream.readBytes(self.absolute_address+17*8, 1)
        if typ == 'n':
            yield PDFNumber(self, "byte_offset")
        elif typ == 'f':
            yield PDFNumber(self, "next_free_object_number")
        else:
            yield PDFNumber(self, "unknown_string")
        yield PDFNumber(self, "generation_number")
        yield UInt8(self, "type")
        yield LineEnd(self, "line_end")

    def createDescription(self):
        if self["type"].value == 'n':
            return "In-use entry at offset %u" % int(self["byte_offset"].value)
        elif self["type"].value == 'f':
            return "Free entry before in-use object %u" % \
                   int(self["next_free_object_number"].value)
        else:
            return "unknown %s" % self["unknown_string"].value

class SubSection(FieldSet):
    def __init__(self, parent, name, desc=None):
        FieldSet.__init__(self, parent, name, desc)
        self.info("Got entry count: '%s'" % self["entry_count"].value)
        self._size = self.current_size + 8*20*int(self["entry_count"].value) \
                     + self["line_end"].size

    def createFields(self):
        yield PDFNumber(self, "start_number",
                        "Object number of first entry in subsection")
        self.info("start_number = %i" % self["start_number"].value)
        yield PDFNumber(self, "entry_count", "Number of entries in subsection")
        self.info("entry_count = %i" % self["entry_count"].value)
        yield LineEnd(self, "line_end")
        yield GenericVector(self, "entries", int(self["entry_count"].value), Entry)
        #yield LineEnd(self, "line_end[]")

    def createDescription(self):
        return "Subsection with %s elements, starting at %s" % \
               (self["entry_count"].value, self["start_number"])

class CrossReferenceTable(FieldSet):
    MAGIC = "xref"

    def __init__(self, parent, name, desc=None):
        FieldSet.__init__(self, parent, name, description=desc)
        pos = self.stream.searchBytesLength(Trailer.MAGIC, False)
        if pos == None:
            raise ParserError("Can't find '%s' starting at %u"
                              % (Trailer.MAGIC, self.absolute_address//8))
        self._size = 8*pos-self.absolute_address

    def createFields(self):
        yield RawBytes(self, "marker", len(self.MAGIC))
        yield LineEnd(self, "line_end[]")
        while not self.eof:
            yield SubSection(self, "sub_section[]")

class Catalog(FieldSet):
    END_NAME = ['<', '/', '[']

    def __init__(self, parent, name, size=None, desc=None):
        FieldSet.__init__(self, parent, name, description=desc)
        if size != None:
            self._size = 8*size
        # object catalogs are ended with "obj"
        elif self["object"].value == "obj":
            size = self.stream.searchBytesLength("endobj", False)
            if size != None:
                self._size = 8*(size+2)

    def createFields(self):
        yield PDFNumber(self, "index")
        yield PDFNumber(self, "unknown[]")
        length = getElementEnd(self)
        for limit in self.END_NAME:
            new_length = getElementEnd(self, limit)-len(limit)
            if length == None or (new_length != None and new_length < length):
                length = new_length
        yield String(self, "object", length, strip=' ')
        if self.stream.readBytes(self.absolute_address+self.current_size, 2) == '<<':
            yield PDFDictionary(self, "key_list")
        # End of catalog: this one has "endobj"
        if self["object"].value == "obj":
            yield LineEnd(self, "line_end[]")
            yield String(self, "end_object", len("endobj"))
            yield LineEnd(self, "line_end[]")

class Trailer(FieldSet):
    MAGIC = "trailer"

    def createFields(self):
        yield RawBytes(self, "marker", len(self.MAGIC))
        yield LineEnd(self, "line_end[]")
        yield String(self, "start_attribute_marker", 2)
        addr = self.absolute_address + self.current_size
        while self.stream.readBytes(addr, 2) != '>>':
            t = PDFName(self, "type[]")
            yield t
            name = t.value
            self.info("Parsing PDFName '%s'" % name)
            if name == "Size":
                yield PDFNumber(self, "size",
                                "Entries in the file cross-reference section")
            elif name == "Prev":
                yield PDFNumber(self, "offset")
            elif name == "Root":
                yield Catalog(self, "object_catalog")
            elif name == "Info":
                yield Catalog(self, "info")
            elif name == "ID":
                yield PDFArray(self, "id")
            elif name == "Encrypt":
                yield PDFDictionary(self, "decrypt")
            else:
                raise ParserError("Don't know trailer type '%s'" % name)
            addr = self.absolute_address + self.current_size
        yield String(self, "end_attribute_marker", 2)
        yield LineEnd(self, "line_end[]")
        yield String(self, "start_xref", 9)
        yield LineEnd(self, "line_end[]")
        yield PDFNumber(self, "cross_ref_table_start_address")
        yield LineEnd(self, "line_end[]")
        yield String(self, "end_marker", len(ENDMAGIC))
        yield LineEnd(self, "line_end[]")

class PDFDocument(Parser):
    endian = LITTLE_ENDIAN
    PARSER_TAGS = {
        "id": "pdf",
        "category": "misc",
        "file_ext": ("pdf",),
        "mime": (u"application/pdf",),
        "min_size": (5+4)*8,
        "magic": ((MAGIC, 5),),
        "description": "Portable Document Format (PDF) document"
    }

    def validate(self):
        if self.stream.readBytes(0, len(MAGIC)) != MAGIC:
            return "Invalid magic string"
        return True

    # Size is not always determined by position of "%%EOF":
    # - updated documents have several of those
    # - PDF files should be parsed from *end*
    # => TODO: find when a document has been updated

    def createFields(self):
        yield Header(self, "header")
        yield Body(self, "body")
        yield CrossReferenceTable(self, "cross_ref_table")
        yield Trailer(self, "trailer")
# ===== hpcloud/docker-registry | refs/heads/master | lib/storage/swift.py | 8 =====

import cache_lru

import swiftclient

from . import Storage


class SwiftStorage(Storage):

    def __init__(self, config):
        self._swift_connection = self._create_swift_connection(config)
        self._swift_container = config.swift_container
        self._root_path = config.get('storage_path', '/')

    def _create_swift_connection(self, config):
        return swiftclient.client.Connection(
            authurl=config.get('swift_authurl'),
            user=config.get('swift_user'),
            key=config.get('swift_password'),
            auth_version=config.get('swift_auth_version', 2),
            os_options={
                'tenant_name': config.get('swift_tenant_name'),
                'region_name': config.get('swift_region_name')
            })

    def _init_path(self, path=None):
        path = self._root_path + '/' + path if path else self._root_path
        # Openstack does not like paths starting with '/'
        if path and path.startswith('/'):
            path = path[1:]
        return path

    @cache_lru.get
    def get_content(self, path, chunk_size=None):
        path = self._init_path(path)
        try:
            _, obj = self._swift_connection.get_object(
                self._swift_container, path, resp_chunk_size=chunk_size)
            return obj
        except Exception:
            raise IOError("Could not get content: {}".format(path))

    @cache_lru.put
    def put_content(self, path, content, chunk=None):
        path = self._init_path(path)
        try:
            self._swift_connection.put_object(self._swift_container,
                                              path, content, chunk_size=chunk)
            return path
        except Exception:
            raise IOError("Could not put content: {}".format(path))

    def stream_read(self, path, bytes_range=None):
        try:
            for buf in self.get_content(path, self.buffer_size):
                yield buf
        except Exception:
            raise OSError(
                "Could not read content from stream: {}".format(path))

    def stream_write(self, path, fp):
        self.put_content(path, fp, self.buffer_size)

    def list_directory(self, path=None):
        try:
            path = self._init_path(path)
            if path and not path.endswith('/'):
                path += '/'
            _, directory = self._swift_connection.get_container(
                container=self._swift_container, path=path)
            if not directory:
                raise
            for inode in directory:
                # trim extra trailing slashes
                if inode['name'].endswith('/'):
                    inode['name'] = inode['name'][:-1]
                yield inode['name'].replace(self._root_path[1:] + '/', '', 1)
        except Exception:
            raise OSError("No such directory: {}".format(path))

    def exists(self, path):
        try:
            self.get_content(path)
            return True
        except Exception:
            return False

    @cache_lru.remove
    def remove(self, path):
        path = self._init_path(path)
        try:
            self._swift_connection.delete_object(self._swift_container, path)
        except Exception:
            pass

    def get_size(self, path):
        try:
            return len(self.get_content(path))
        except Exception:
            raise OSError("Could not get file size: {}".format(path))
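# A minimal sketch of wiring up SwiftStorage. The class above expects a config
# object with both attribute access (config.swift_container) and dict-style
# .get(); the _Config shim and all values below are hypothetical, and the
# surrounding docker-registry modules (cache_lru, Storage) must be importable.
class _Config(dict):
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

config = _Config(
    swift_container='registry',
    storage_path='/registry',
    swift_authurl='http://keystone.example.com:5000/v2.0',
    swift_user='demo',
    swift_password='secret',
)
store = SwiftStorage(config)
store.put_content('images/layer', 'payload')   # round-trip a small object
print(store.get_content('images/layer'))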
dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import os import tempfile from subprocess import Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): """ An indexer knows how to parse the content of some file. Typically, one indexer should be instantiated per file type. Override this class to add more functionality. Note that you should only override the Content or the File methods that give an optimal result. """ def _getMimeTypes(self): """ Return supported mimetypes """ return [] def _getExtensions(self): return [] def _getDefMime(self, ext): """ Return a mimetype for this document type, ideally the closest to the extension ext. """ mts = self._getMimeTypes(); if len (mts): return mts[0] return None def indexContent(self, content, filename=None, realfile=None): """ Use either content or the real file, to index. Some parsers will work better with the actual content, others parse a file easier. Try the optimal. 
""" res = '' try: if content != None: return self._doIndexContent(content) except NhException: pass if realfile != None: try: return self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb') try: content2 = fp.read() finally: fp.close() # The not-handled exception may be raised here return self._doIndexContent(content2) # last try, with a tmp file if content: try: fname,ext = filename and os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass raise NhException('No appropriate method to index file.') def _doIndexContent(self, content): raise NhException("Content cannot be handled here.") def _doIndexFile(self, fpath): raise NhException("Content cannot be handled here.") def __repr__(self): return "<indexer %s.%s>" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if '/' in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None) class contentIndex(object): def __init__(self): self.mimes = {} self.exts = {} def register(self, obj): f = False for mime in obj._getMimeTypes(): self.mimes[mime] = obj f = True for ext in obj._getExtensions(): self.exts[ext] = obj f = True if f: _logger.debug('Register content indexer: %r.', obj) if not f: raise Exception("Your indexer should at least support a mimetype or extension.") def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj = None fname = None mime = None if content_type and self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext) if content_type and not fobj: mime,fobj = mime_match(content_type, self.mimes) if not fobj: try: if realfname : fname = realfname else: try: bname,ext = os.path.splitext(filename or 'test.tmp') except Exception: bname, ext = filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us: %s', mime2) # Note that the temporary file still exists now. 
mime,fobj = mime_match(mime2, self.mimes) if not mime: mime = mime2 except Exception: _logger.exception('Cannot determine mime type.') try: if fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug("Have no object, return (%s, None).", mime) res = (mime, '') except Exception: _logger.exception("Cannot index file %s (%s).", filename, fname or realfname) res = (mime, '') # If we created a tmp file, unlink it now if not realfname and fname: try: os.unlink(fname) except Exception: _logger.exception("Cannot unlink %s.", fname) return res cntIndex = contentIndex() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284621"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">structRecomputation/computations</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">_modules/auxiliary_monte.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np def write_monte(spec, rslt): """ Write out results to table. """ num_evals = rslt['num_evals'] num_steps = rslt['num_steps'] points = rslt['points'] stat = rslt['rmse'] # Write out information. fname = 'table_4.' + str(spec + 1) + '.txt' with open(fname, 'w') as f: # Write out the heading of the table. args = list() args += ['Identifier', 'True Value', 'Mean', 'Bias', 't-Stat.', 'Std.'] fmt_ = ' {:>15}' + ' {:>15}' * 5 + '\n\n' line = fmt_.format(*args) f.write(line) fmt_ = ' {:>15}' + ' {:15.4f}' * 5 + '\n' for i in range(26): args = list() args += [str(i)] args += [points['true'][i]] args += [points['mean'][i]] args += [points['bias'][i]] args += [points['stat'][i]] args += [points['stan'][i]] line = fmt_.format(*args) f.write(line) string = '\n\n\n {0[0]:>15} {0[1]:>15} {0[2]:>15}\n\n' f.write(string.format(['RMSE', 'Evaluations', 'Steps'])) string = ' {:15.4f} {:>15} {:>15}' f.write(string.format(*[stat, int(num_evals), int(num_steps)])) def process_monte(x_iter, x_true): """ Process results from to bootstrap iterations to fill the table with the required information. """ # Initialize dictionary as the results container. rslt = dict() for key_ in ['mean', 'true', 'bias', 'stan', 'stat', 'msta']: rslt[key_] = [None] * 26 # Attach the results from each individual bootstrap run to the dictionary. rslt['x_iter'] = np.array(x_iter, ndmin=2) # Construct auxiliary objects rslt['mean'] = np.mean(rslt['x_iter'], axis=0) num_boots = len(rslt['x_iter']) # Construct the requested information. 
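# A sketch of how the registry above dispatches: a hypothetical plain-text
# indexer (not part of the original module) registered with the global
# cntIndex, then looked up by mimetype or extension via doIndex.
class txt_indexer(indexer):
    def _getMimeTypes(self):
        return ['text/plain']
    def _getExtensions(self):
        return ['.txt']
    def _doIndexContent(self, content):
        # plain text indexes as-is
        return content

cntIndex.register(txt_indexer())
mime, text = cntIndex.doIndex('hello world', filename='notes.txt',
                              content_type='text/plain')
# mime == 'text/plain', text == 'hello world'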
# ===== structRecomputation/computations | refs/heads/master | _modules/auxiliary_monte.py | 1 =====

import numpy as np


def write_monte(spec, rslt):
    """ Write out results to table.
    """
    num_evals = rslt['num_evals']
    num_steps = rslt['num_steps']
    points = rslt['points']
    stat = rslt['rmse']

    # Write out information.
    fname = 'table_4.' + str(spec + 1) + '.txt'
    with open(fname, 'w') as f:

        # Write out the heading of the table.
        args = list()
        args += ['Identifier', 'True Value', 'Mean', 'Bias', 't-Stat.', 'Std.']
        fmt_ = ' {:>15}' + ' {:>15}' * 5 + '\n\n'
        line = fmt_.format(*args)
        f.write(line)

        fmt_ = ' {:>15}' + ' {:15.4f}' * 5 + '\n'
        for i in range(26):
            args = list()
            args += [str(i)]
            args += [points['true'][i]]
            args += [points['mean'][i]]
            args += [points['bias'][i]]
            args += [points['stat'][i]]
            args += [points['stan'][i]]
            line = fmt_.format(*args)
            f.write(line)

        string = '\n\n\n {0[0]:>15} {0[1]:>15} {0[2]:>15}\n\n'
        f.write(string.format(['RMSE', 'Evaluations', 'Steps']))

        string = ' {:15.4f} {:>15} {:>15}'
        f.write(string.format(*[stat, int(num_evals), int(num_steps)]))


def process_monte(x_iter, x_true):
    """ Process the results from the bootstrap iterations to fill the table
        with the required information.
    """
    # Initialize dictionary as the results container.
    rslt = dict()
    for key_ in ['mean', 'true', 'bias', 'stan', 'stat', 'msta']:
        rslt[key_] = [None] * 26

    # Attach the results from each individual bootstrap run to the dictionary.
    rslt['x_iter'] = np.array(x_iter, ndmin=2)

    # Construct auxiliary objects
    rslt['mean'] = np.mean(rslt['x_iter'], axis=0)
    num_boots = len(rslt['x_iter'])

    # Construct the requested information.
    for i in range(26):
        # true parameter and bias
        rslt['true'][i] = x_true[i]
        rslt['bias'][i] = rslt['mean'][i] - rslt['true'][i]

        # standard deviation
        rslt['stan'][i] = 0.0
        for j in range(num_boots):
            rslt['stan'][i] += (rslt['x_iter'][j, i] - rslt['mean'][i]) ** 2
        try:
            rslt['stan'][i] = np.sqrt((1.0 / (num_boots - 1)) * rslt['stan'][i])
        except ZeroDivisionError:
            rslt['stan'][i] = 0.0

        # t-statistic
        if rslt['stan'][i] == 0.0:
            rslt['stat'][i] = 0.0
        else:
            rslt['stat'][i] = ((rslt['mean'][i] - x_true[i]) / rslt['stan'][i])
            rslt['stat'][i] = rslt['stat'][i] * np.sqrt(num_boots)

    return rslt
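# A quick synthetic check of process_monte. The 26-parameter layout is
# hard-coded in the table writer above; the driver below is a sketch that
# draws noisy bootstrap replicates around a known truth and inspects the
# resulting bias and standard deviations.
import numpy as np

x_true = np.linspace(0.0, 1.0, 26)
x_iter = [x_true + np.random.normal(scale=0.01, size=26) for _ in range(50)]
rslt = process_monte(x_iter, x_true)
print(rslt['bias'][:3], rslt['stan'][:3])   # bias near 0, stan near 0.01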
# ===== emadehsan/ieighteen | refs/heads/master | translate.py | 1 =====

'''
    @author Emad Ehsan

    1. Picks up a line from the sourceLang file, e.g. './en/strings.txt'
    2. Asks Google to translate the query
    3. Creates a file for targetLang, e.g. './ur/strings.txt'
    4. Places the translation at the exact line number in the targetLang file
'''
from html.parser import HTMLParser
import requests
from requests.utils import quote
import binascii

counter = 1

# Locale code of source and target languages
sourceLang = 'en'
targetLang = 'ur'

# Put file name here
filename = 'strings.txt'

'''
    For each line in the sourceLang file, call getTranslation and put the
    translated line into the targetLang file
'''
def translate(infile, outfile):
    with open(infile) as fin:
        with open(outfile, 'wb') as fout:
            for line in fin:
                outline = bytes(line, 'utf-8')
                # print(line)
                line = line.strip()
                if len(line) == 0:
                    continue

                # now line is prepared to be translated
                translation = getTranslation(line)

                # add new line at the end
                outline = translation + bytes('\n', 'utf-8')

                # save
                fout.write(outline)

'''
    Translates via Google Translate as described here:
    https://ctrlq.org/code/19909-google-translate-api
'''
def getTranslation(sentence):
    global counter, sourceLang, targetLang
    url = "https://translate.googleapis.com/translate_a/single?client=gtx&sl=" + sourceLang
    url = url + "&tl=" + targetLang + "&dt=t&q=" + quote(sentence)

    print('Request# ' + str(counter) + ': ' + url)
    counter += 1

    page = requests.get(url)

    # strip the response to extract the Urdu text along with quotes
    translation = page.content
    translation = translation[3:]
    removeLast = 16 + len(sentence)
    translation = translation[:-removeLast]

    # still has a trailing comma (page.content is bytes, so compare bytes)
    if translation[-1:] == b',':
        translation = translation[:-1]

    return translation

def main():
    global filename
    infile = './' + sourceLang + '/' + filename
    outfile = './' + targetLang + '/' + filename
    translate(infile, outfile)

main()
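# The slice-based cleanup in getTranslation is brittle. A sturdier sketch,
# assuming the endpoint returns a JSON array shaped like
# [[["<translation>", "<original>", ...], ...], ...] (the format the linked
# article describes; not a stable, documented API):
import json

def get_translation_json(sentence):
    url = ("https://translate.googleapis.com/translate_a/single"
           "?client=gtx&sl=" + sourceLang + "&tl=" + targetLang +
           "&dt=t&q=" + quote(sentence))
    data = json.loads(requests.get(url).content.decode('utf-8'))
    return data[0][0][0].encode('utf-8')   # bytes, as translate() expects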
# ===== bytor99999/vertx-web | refs/heads/master | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/setuptools/tests/test_resources.py | 345 =====

#!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: the shebang and encoding lines are for ScriptHeaderTests do not remove

import os
import sys
import tempfile
import shutil
from unittest import TestCase

import pkg_resources
from pkg_resources import (parse_requirements, VersionConflict, parse_version,
    Distribution, EntryPoint, Requirement, safe_version, safe_name,
    WorkingSet)

from setuptools.command.easy_install import (get_script_header, is_sh,
    nt_quote_arg)
from setuptools.compat import StringIO, iteritems

try:
    frozenset
except NameError:
    from sets import ImmutableSet as frozenset

def safe_repr(obj, short=False):
    """ copied from Python2.7"""
    try:
        result = repr(obj)
    except Exception:
        result = object.__repr__(obj)
    if not short or len(result) < pkg_resources._MAX_LENGTH:
        return result
    return result[:pkg_resources._MAX_LENGTH] + ' [truncated]...'

class Metadata(pkg_resources.EmptyProvider):
    """Mock object to return metadata as if from an on-disk distribution"""

    def __init__(self,*pairs):
        self.metadata = dict(pairs)

    def has_metadata(self,name):
        return name in self.metadata

    def get_metadata(self,name):
        return self.metadata[name]

    def get_metadata_lines(self,name):
        return pkg_resources.yield_lines(self.get_metadata(name))

dist_from_fn = pkg_resources.Distribution.from_filename

class DistroTests(TestCase):

    def testCollection(self):
        # empty path should produce no distributions
        ad = pkg_resources.Environment([], platform=None, python=None)
        self.assertEqual(list(ad), [])
        self.assertEqual(ad['FooPkg'],[])
        ad.add(dist_from_fn("FooPkg-1.3_1.egg"))
        ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg"))
        ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg"))

        # Name is in there now
        self.assertTrue(ad['FooPkg'])
        # But only 1 package
        self.assertEqual(list(ad), ['foopkg'])

        # Distributions sort by version
        self.assertEqual(
            [dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2']
        )
        # Removing a distribution leaves sequence alone
        ad.remove(ad['FooPkg'][1])
        self.assertEqual(
            [dist.version for dist in ad['FooPkg']], ['1.4','1.2']
        )
        # And inserting adds them in order
        ad.add(dist_from_fn("FooPkg-1.9.egg"))
        self.assertEqual(
            [dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2']
        )

        ws = WorkingSet([])
        foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg")
        foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg")
        req, = parse_requirements("FooPkg>=1.3")

        # Nominal case: no distros on path, should yield all applicable
        self.assertEqual(ad.best_match(req,ws).version, '1.9')
        # If a matching distro is already installed, should return only that
        ws.add(foo14)
        self.assertEqual(ad.best_match(req,ws).version, '1.4')

        # If the first matching distro is unsuitable, it's a version conflict
        ws = WorkingSet([])
        ws.add(foo12)
        ws.add(foo14)
        self.assertRaises(VersionConflict, ad.best_match, req, ws)

        # If more than one match on the path, the first one takes precedence
        ws = WorkingSet([])
        ws.add(foo14)
        ws.add(foo12)
        ws.add(foo14)
        self.assertEqual(ad.best_match(req,ws).version, '1.4')

    def checkFooPkg(self,d):
        self.assertEqual(d.project_name, "FooPkg")
        self.assertEqual(d.key, "foopkg")
        self.assertEqual(d.version, "1.3-1")
        self.assertEqual(d.py_version, "2.4")
        self.assertEqual(d.platform, "win32")
        self.assertEqual(d.parsed_version, parse_version("1.3-1"))

    def testDistroBasics(self):
        d = Distribution(
            "/some/path",
            project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32"
        )
        self.checkFooPkg(d)

        d = Distribution("/some/path")
        self.assertEqual(d.py_version, sys.version[:3])
        self.assertEqual(d.platform, None)

    def testDistroParse(self):
        d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg")
        self.checkFooPkg(d)
        d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg-info")
        self.checkFooPkg(d)

    def testDistroMetadata(self):
        d = Distribution(
            "/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
            metadata = Metadata(
                ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n")
            )
        )
        self.checkFooPkg(d)

    def distRequires(self, txt):
        return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))

    def checkRequires(self, dist, txt, extras=()):
        self.assertEqual(
            list(dist.requires(extras)),
            list(parse_requirements(txt))
        )

    def testDistroDependsSimple(self):
        for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
            self.checkRequires(self.distRequires(v), v)

    def testResolve(self):
        ad = pkg_resources.Environment([])
        ws = WorkingSet([])
        # Resolving no requirements -> nothing to install
        self.assertEqual(list(ws.resolve([],ad)), [])
        # Request something not in the collection -> DistributionNotFound
        self.assertRaises(
            pkg_resources.DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad
        )
        Foo = Distribution.from_filename(
            "/foo_dir/Foo-1.2.egg",
            metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
        )
        ad.add(Foo)
        ad.add(Distribution.from_filename("Foo-0.9.egg"))

        # Request thing(s) that are available -> list to activate
        for i in range(3):
            targets = list(ws.resolve(parse_requirements("Foo"), ad))
            self.assertEqual(targets, [Foo])
            list(map(ws.add,targets))
        self.assertRaises(VersionConflict, ws.resolve,
            parse_requirements("Foo==0.9"), ad)
        ws = WorkingSet([]) # reset

        # Request an extra that causes an unresolved dependency for "Baz"
        self.assertRaises(
            pkg_resources.DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad
        )
        Baz = Distribution.from_filename(
            "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
        )
        ad.add(Baz)

        # Activation list now includes resolved dependency
        self.assertEqual(
            list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz]
        )
        # Requests for conflicting versions produce VersionConflict
        self.assertRaises(VersionConflict,
            ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad)

    def testDistroDependsOptions(self):
        d = self.distRequires("""
            Twisted>=1.5
            [docgen]
            ZConfig>=2.0
            docutils>=0.3
            [fastcgi]
            fcgiapp>=0.1""")
        self.checkRequires(d,"Twisted>=1.5")
        self.checkRequires(
            d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
        )
        self.checkRequires(
            d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
        )
        self.checkRequires(
            d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
            ["docgen","fastcgi"]
        )
        self.checkRequires(
            d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
            ["fastcgi", "docgen"]
        )
        self.assertRaises(pkg_resources.UnknownExtra, d.requires, ["foo"])

class EntryPointTests(TestCase):

    def assertfields(self, ep):
        self.assertEqual(ep.name,"foo")
        self.assertEqual(ep.module_name,"setuptools.tests.test_resources")
        self.assertEqual(ep.attrs, ("EntryPointTests",))
        self.assertEqual(ep.extras, ("x",))
        self.assertTrue(ep.load() is EntryPointTests)
        self.assertEqual(
            str(ep),
            "foo = setuptools.tests.test_resources:EntryPointTests [x]"
        )

    def setUp(self):
        self.dist = Distribution.from_filename(
            "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]')))

    def testBasics(self):
        ep = EntryPoint(
            "foo", "setuptools.tests.test_resources", ["EntryPointTests"],
            ["x"], self.dist
        )
        self.assertfields(ep)

    def testParse(self):
        s = "foo = setuptools.tests.test_resources:EntryPointTests [x]"
        ep = EntryPoint.parse(s, self.dist)
        self.assertfields(ep)

        ep = EntryPoint.parse("bar baz= spammity[PING]")
        self.assertEqual(ep.name,"bar baz")
        self.assertEqual(ep.module_name,"spammity")
        self.assertEqual(ep.attrs, ())
        self.assertEqual(ep.extras, ("ping",))

        ep = EntryPoint.parse(" fizzly = wocka:foo")
        self.assertEqual(ep.name,"fizzly")
        self.assertEqual(ep.module_name,"wocka")
        self.assertEqual(ep.attrs, ("foo",))
        self.assertEqual(ep.extras, ())

    def testRejects(self):
        for ep in [
            "foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2",
        ]:
            try:
                EntryPoint.parse(ep)
            except ValueError:
                pass
            else:
                raise AssertionError("Should've been bad", ep)

    def checkSubMap(self, m):
        self.assertEqual(len(m), len(self.submap_expect))
        for key, ep in iteritems(self.submap_expect):
            self.assertEqual(repr(m.get(key)), repr(ep))

    submap_expect = dict(
        feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
        feature2=EntryPoint('feature2', 'another.module', ['SomeClass'],
                            ['extra1','extra2']),
        feature3=EntryPoint('feature3', 'this.module', extras=['something'])
    )
    submap_str = """
            # define features for blah blah
            feature1 = somemodule:somefunction
            feature2 = another.module:SomeClass [extra1,extra2]
            feature3 = this.module [something]
    """

    def testParseList(self):
        self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
        self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar")
        self.assertRaises(ValueError, EntryPoint.parse_group, "x",
            ["foo=baz", "foo=bar"])

    def testParseMap(self):
        m = EntryPoint.parse_map({'xyz':self.submap_str})
        self.checkSubMap(m['xyz'])
        self.assertEqual(list(m.keys()),['xyz'])
        m = EntryPoint.parse_map("[xyz]\n"+self.submap_str)
        self.checkSubMap(m['xyz'])
        self.assertEqual(list(m.keys()),['xyz'])
        self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"])
        self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str)

class RequirementsTests(TestCase):

    def testBasics(self):
        r = Requirement.parse("Twisted>=1.2")
        self.assertEqual(str(r),"Twisted>=1.2")
        self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')")
        self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ()))
        self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ()))
        self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ()))
        self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ()))
        self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ()))
        self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))

    def testOrdering(self):
        r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ())
        r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ())
        self.assertEqual(r1,r2)
        self.assertEqual(str(r1),str(r2))
        self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2")

    def testBasicContains(self):
        r = Requirement("Twisted", [('>=','1.2')], ())
        foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
        twist11 = Distribution.from_filename("Twisted-1.1.egg")
        twist12 = Distribution.from_filename("Twisted-1.2.egg")
        self.assertTrue(parse_version('1.2') in r)
        self.assertTrue(parse_version('1.1') not in r)
        self.assertTrue('1.2' in r)
        self.assertTrue('1.1' not in r)
        self.assertTrue(foo_dist not in r)
        self.assertTrue(twist11 not in r)
        self.assertTrue(twist12 in r)

    def testAdvancedContains(self):
        r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5")
        for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'):
            self.assertTrue(v in r, (v,r))
        for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'):
            self.assertTrue(v not in r, (v,r))

    def testOptionsAndHashing(self):
        r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
        r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
        r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
        self.assertEqual(r1,r2)
        self.assertEqual(r1,r3)
        self.assertEqual(r1.extras, ("foo","bar"))
        self.assertEqual(r2.extras, ("bar","foo"))   # extras are normalized
        self.assertEqual(hash(r1), hash(r2))
        self.assertEqual(
            hash(r1), hash(("twisted", ((">=",parse_version("1.2")),),
                            frozenset(["foo","bar"])))
        )

    def testVersionEquality(self):
        r1 = Requirement.parse("foo==0.3a2")
        r2 = Requirement.parse("foo!=0.3a4")
        d = Distribution.from_filename

        self.assertTrue(d("foo-0.3a4.egg") not in r1)
        self.assertTrue(d("foo-0.3a1.egg") not in r1)
        self.assertTrue(d("foo-0.3a4.egg") not in r2)

        self.assertTrue(d("foo-0.3a2.egg") in r1)
        self.assertTrue(d("foo-0.3a2.egg") in r2)
        self.assertTrue(d("foo-0.3a3.egg") in r2)
        self.assertTrue(d("foo-0.3a5.egg") in r2)

    def testSetuptoolsProjectName(self):
        """
        The setuptools project should implement the setuptools package.
        """
        self.assertEqual(
            Requirement.parse('setuptools').project_name, 'setuptools')
        # setuptools 0.7 and higher means setuptools.
        self.assertEqual(
            Requirement.parse('setuptools == 0.7').project_name, 'setuptools')
        self.assertEqual(
            Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools')
        self.assertEqual(
            Requirement.parse('setuptools >= 0.7').project_name, 'setuptools')


class ParseTests(TestCase):

    def testEmptyParse(self):
        self.assertEqual(list(parse_requirements('')), [])

    def testYielding(self):
        for inp,out in [
            ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']),
            (['x\n\n','y'], ['x','y']),
        ]:
            self.assertEqual(list(pkg_resources.yield_lines(inp)),out)

    def testSplitting(self):
        sample = """
                    x
                    [Y]
                    z

                    a
                    [b ]
                    # foo
                    c
                    [ d]
                    [q]
                    v
                    """
        self.assertEqual(list(pkg_resources.split_sections(sample)),
            [(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])]
        )
        self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo"))

    def testSafeName(self):
        self.assertEqual(safe_name("adns-python"), "adns-python")
        self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
        self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
        self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker")
        self.assertNotEqual(safe_name("peak.web"), "peak-web")

    def testSafeVersion(self):
        self.assertEqual(safe_version("1.2-1"), "1.2-1")
        self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha")
        self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521")
        self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker")
        self.assertEqual(safe_version("peak.web"), "peak.web")

    def testSimpleRequirements(self):
        self.assertEqual(
            list(parse_requirements('Twis-Ted>=1.2-1')),
            [Requirement('Twis-Ted',[('>=','1.2-1')], ())]
        )
        self.assertEqual(
            list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
            [Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
        )
        self.assertEqual(
            Requirement.parse("FooBar==1.99a3"),
            Requirement("FooBar", [('==','1.99a3')], ())
        )
        self.assertRaises(ValueError,Requirement.parse,">=2.3")
        self.assertRaises(ValueError,Requirement.parse,"x\\")
        self.assertRaises(ValueError,Requirement.parse,"x==2 q")
        self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2")
        self.assertRaises(ValueError,Requirement.parse,"#")

    def testVersionEquality(self):
        def c(s1,s2):
            p1, p2 = parse_version(s1),parse_version(s2)
            self.assertEqual(p1,p2, (s1,s2,p1,p2))

        c('1.2-rc1', '1.2rc1')
        c('0.4', '0.4.0')
        c('0.4.0.0', '0.4.0')
        c('0.4.0-0', '0.4-0')
        c('0pl1', '0.0pl1')
        c('0pre1', '0.0c1')
        c('0.0.0preview1', '0c1')
        c('0.0c1', '0-rc1')
        c('1.2a1', '1.2.a.1')
        c('1.2...a', '1.2a')

    def testVersionOrdering(self):
        def c(s1,s2):
            p1, p2 = parse_version(s1),parse_version(s2)
            self.assertTrue(p1<p2, (s1,s2,p1,p2))

        c('2.1','2.1.1')
        c('2a1','2b0')
        c('2a1','2.1')
        c('2.3a1', '2.3')
        c('2.1-1', '2.1-2')
        c('2.1-1', '2.1.1')
        c('2.1', '2.1pl4')
        c('2.1a0-20040501', '2.1')
        c('1.1', '02.1')
        c('A56','B27')
        c('3.2', '3.2.pl0')
        c('3.2-1', '3.2pl1')
        c('3.2pl1', '3.2pl1-1')
        c('0.4', '4.0')
        c('0.0.4', '0.4.0')
        c('0pl1', '0.4pl1')
        c('2.1.0-rc1','2.1.0')
        c('2.1dev','2.1a0')

        torture = """
        0.80.1-3 0.80.1-2 0.80.1-1
        0.79.9999+0.80.0pre4-1
        0.79.9999+0.80.0pre2-3
        0.79.9999+0.80.0pre2-2
        0.77.2-1 0.77.1-1 0.77.0-1
        """.split()

        for p,v1 in enumerate(torture):
            for v2 in torture[p+1:]:
                c(v2,v1)


class ScriptHeaderTests(TestCase):
    non_ascii_exe = '/Users/José/bin/python'
    exe_with_spaces = r'C:\Program Files\Python33\python.exe'

    def test_get_script_header(self):
        if not sys.platform.startswith('java') or not is_sh(sys.executable):
            # This test is for non-Jython platforms
            expected = '#!%s\n' % nt_quote_arg(os.path.normpath(sys.executable))
            self.assertEqual(get_script_header('#!/usr/local/bin/python'),
                expected)
            expected = '#!%s -x\n' % nt_quote_arg(os.path.normpath(sys.executable))
            self.assertEqual(get_script_header('#!/usr/bin/python -x'),
                expected)
            self.assertEqual(get_script_header('#!/usr/bin/python',
                executable=self.non_ascii_exe),
                '#!%s -x\n' % self.non_ascii_exe)
            candidate = get_script_header('#!/usr/bin/python',
                executable=self.exe_with_spaces)
            self.assertEqual(candidate, '#!"%s"\n' % self.exe_with_spaces)

    def test_get_script_header_jython_workaround(self):
        # This test doesn't work with Python 3 in some locales
        if (sys.version_info >= (3,) and os.environ.get("LC_CTYPE")
                in (None, "C", "POSIX")):
            return

        class java:
            class lang:
                class System:
                    @staticmethod
                    def getProperty(property):
                        return ""
        sys.modules["java"] = java

        platform = sys.platform
        sys.platform = 'java1.5.0_13'
        stdout, stderr = sys.stdout, sys.stderr
        try:
            # A mock sys.executable that uses a shebang line (this file)
            exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py')
            self.assertEqual(
                get_script_header('#!/usr/local/bin/python', executable=exe),
                '#!/usr/bin/env %s\n' % exe)

            # Ensure we generate what is basically a broken shebang line
            # when there's options, with a warning emitted
            sys.stdout = sys.stderr = StringIO()
            self.assertEqual(get_script_header('#!/usr/bin/python -x',
                executable=exe),
                '#!%s -x\n' % exe)
            self.assertTrue('Unable to adapt shebang line' in
                sys.stdout.getvalue())
            sys.stdout = sys.stderr = StringIO()
            self.assertEqual(get_script_header('#!/usr/bin/python',
                executable=self.non_ascii_exe),
                '#!%s -x\n' % self.non_ascii_exe)
            self.assertTrue('Unable to adapt shebang line' in
                sys.stdout.getvalue())
        finally:
            del sys.modules["java"]
            sys.platform = platform
            sys.stdout, sys.stderr = stdout, stderr


class NamespaceTests(TestCase):

    def setUp(self):
        self._ns_pkgs = pkg_resources._namespace_packages.copy()
        self._tmpdir = tempfile.mkdtemp(prefix="tests-setuptools-")
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
        self._prev_sys_path = sys.path[:]
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))

    def tearDown(self):
        shutil.rmtree(self._tmpdir)
        pkg_resources._namespace_packages = self._ns_pkgs.copy()
        sys.path = self._prev_sys_path[:]

    def _assertIn(self, member, container):
        """ assertIn and assertTrue does not exist in Python2.3"""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(standardMsg)

    def test_two_levels_deep(self):
        """
        Test nested namespace packages
        Create namespace packages in the following tree :
            site-packages-1/pkg1/pkg2
            site-packages-2/pkg1/pkg2
        Check both are in the _namespace_packages dict and that their __path__
        is correct
        """
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
        ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
        for site in ["site-pkgs", "site-pkgs2"]:
            pkg1_init = open(os.path.join(self._tmpdir, site,
                             "pkg1", "__init__.py"), "w")
            pkg1_init.write(ns_str)
            pkg1_init.close()
            pkg2_init = open(os.path.join(self._tmpdir, site,
                             "pkg1", "pkg2", "__init__.py"), "w")
            pkg2_init.write(ns_str)
            pkg2_init.close()
        import pkg1
        self._assertIn("pkg1", pkg_resources._namespace_packages.keys())
        try:
            import pkg1.pkg2
        except ImportError:
            self.fail("Setuptools tried to import the parent namespace package")
        # check the _namespace_packages dict
        self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys())
        self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"])
        # check the __path__ attribute contains both paths
        self.assertEqual(pkg1.pkg2.__path__, [
            os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"),
            os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")])
cursor.execute(""" SELECT name FROM sqlite_master WHERE type='table' AND NOT name='sqlite_sequence' ORDER BY name""") return [row[0] for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): "Returns a description of the table, with the DB-API cursor.description interface." return [FieldInfo(info['name'], info['type'], None, info['size'], None, None, info['null_ok']) for info in self._table_info(cursor, table_name)] def get_relations(self, cursor, table_name): """ Returns a dictionary of {field_index: (field_index_other_table, other_table)} representing all relationships to the given table. Indexes are 0-based. """ # Dictionary of relations to return relations = {} # Schema for this table cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(')+1:results.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. for field_index, field_desc in enumerate(results.split(',')): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I) if not m: continue table, column = [s.strip('"') for s in m.groups()] cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table]) result = cursor.fetchall()[0] other_table_results = result[0].strip() li, ri = other_table_results.index('('), other_table_results.rindex(')') other_table_results = other_table_results[li+1:ri] for other_index, other_desc in enumerate(other_table_results.split(',')): other_desc = other_desc.strip() if other_desc.startswith('UNIQUE'): continue name = other_desc.split(' ', 1)[0].strip('"') if name == column: relations[field_index] = (other_index, table) break return relations def get_key_columns(self, cursor, table_name): """ Returns a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in given table. """ key_columns = [] # Schema for this table cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(')+1:results.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. 
for field_index, field_desc in enumerate(results.split(',')): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I) if not m: continue # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns key_columns.append(tuple([s.strip('"') for s in m.groups()])) return key_columns def get_indexes(self, cursor, table_name): indexes = {} for info in self._table_info(cursor, table_name): if info['pk'] != 0: indexes[info['name']] = {'primary_key': True, 'unique': False} cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name)) # seq, name, unique for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]: cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index)) info = cursor.fetchall() # Skip indexes across multiple fields if len(info) != 1: continue name = info[0][2] # seqno, cid, name indexes[name] = {'primary_key': False, 'unique': unique} return indexes def get_primary_key_column(self, cursor, table_name): """ Get the column name of the primary key for the given table. """ # Don't use PRAGMA because that causes issues with some transactions cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(')+1:results.rindex(')')] for field_desc in results.split(','): field_desc = field_desc.strip() m = re.search('"(.*)".*PRIMARY KEY$', field_desc) if m: return m.groups()[0] return None def _table_info(self, cursor, name): cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name)) # cid, name, type, notnull, dflt_value, pk return [{'name': field[1], 'type': field[2], 'size': get_field_size(field[2]), 'null_ok': not field[3], 'pk': field[5] # undocumented } for field in cursor.fetchall()] </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284625"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Edu-Glez/Bank_sentiment_analysis</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">env/lib/python3.6/site-packages/pip/_vendor/webencodings/x_user_defined.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">341</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># coding: utf8 """ webencodings.x_user_defined ~~~~~~~~~~~~~~~~~~~~~~~~~~~ An implementation of the x-user-defined encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. 
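# A quick standalone illustration of the reverse type mapping above: exact
# names hit the static dict, parameterized char types fall through to the
# regex-based size extraction.
lookup = FlexibleFieldLookupDict()
print(lookup['integer'])               # 'IntegerField'
print(lookup['varchar(30)'])           # ('CharField', {'max_length': 30})
print(get_field_size('varchar(11)'))   # 11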
""" from __future__ import unicode_literals import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self, input, errors='strict'): return codecs.charmap_encode(input, errors, encoding_table) def decode(self, input, errors='strict'): return codecs.charmap_decode(input, errors, decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input, self.errors, encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input, self.errors, decoding_table)[0] class StreamWriter(Codec, codecs.StreamWriter): pass class StreamReader(Codec, codecs.StreamReader): pass ### encodings module API codec_info = codecs.CodecInfo( name='x-user-defined', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table # Python 3: # for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700)) decoding_table = ( '\x00' '\x01' '\x02' '\x03' '\x04' '\x05' '\x06' '\x07' '\x08' '\t' '\n' '\x0b' '\x0c' '\r' '\x0e' '\x0f' '\x10' '\x11' '\x12' '\x13' '\x14' '\x15' '\x16' '\x17' '\x18' '\x19' '\x1a' '\x1b' '\x1c' '\x1d' '\x1e' '\x1f' ' ' '!' '"' '#' '$' '%' '&' "'" '(' ')' '*' '+' ',' '-' '.' '/' '0' '1' '2' '3' '4' '5' '6' '7' '8' '9' ':' ';' '<' '=' '>' '?' '@' 'A' 'B' 'C' 'D' 'E' 'F' 'G' 'H' 'I' 'J' 'K' 'L' 'M' 'N' 'O' 'P' 'Q' 'R' 'S' 'T' 'U' 'V' 'W' 'X' 'Y' 'Z' '[' '\\' ']' '^' '_' '`' 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 'u' 'v' 'w' 'x' 'y' 'z' '{' '|' '}' '~' '\x7f' '\uf780' '\uf781' '\uf782' '\uf783' '\uf784' '\uf785' '\uf786' '\uf787' '\uf788' '\uf789' '\uf78a' '\uf78b' '\uf78c' '\uf78d' '\uf78e' '\uf78f' '\uf790' '\uf791' '\uf792' '\uf793' '\uf794' '\uf795' '\uf796' '\uf797' '\uf798' '\uf799' '\uf79a' '\uf79b' '\uf79c' '\uf79d' '\uf79e' '\uf79f' '\uf7a0' '\uf7a1' '\uf7a2' '\uf7a3' '\uf7a4' '\uf7a5' '\uf7a6' '\uf7a7' '\uf7a8' '\uf7a9' '\uf7aa' '\uf7ab' '\uf7ac' '\uf7ad' '\uf7ae' '\uf7af' '\uf7b0' '\uf7b1' '\uf7b2' '\uf7b3' '\uf7b4' '\uf7b5' '\uf7b6' '\uf7b7' '\uf7b8' '\uf7b9' '\uf7ba' '\uf7bb' '\uf7bc' '\uf7bd' '\uf7be' '\uf7bf' '\uf7c0' '\uf7c1' '\uf7c2' '\uf7c3' '\uf7c4' '\uf7c5' '\uf7c6' '\uf7c7' '\uf7c8' '\uf7c9' '\uf7ca' '\uf7cb' '\uf7cc' '\uf7cd' '\uf7ce' '\uf7cf' '\uf7d0' '\uf7d1' '\uf7d2' '\uf7d3' '\uf7d4' '\uf7d5' '\uf7d6' '\uf7d7' '\uf7d8' '\uf7d9' '\uf7da' '\uf7db' '\uf7dc' '\uf7dd' '\uf7de' '\uf7df' '\uf7e0' '\uf7e1' '\uf7e2' '\uf7e3' '\uf7e4' '\uf7e5' '\uf7e6' '\uf7e7' '\uf7e8' '\uf7e9' '\uf7ea' '\uf7eb' '\uf7ec' '\uf7ed' '\uf7ee' '\uf7ef' '\uf7f0' '\uf7f1' '\uf7f2' '\uf7f3' '\uf7f4' '\uf7f5' '\uf7f6' '\uf7f7' '\uf7f8' '\uf7f9' '\uf7fa' '\uf7fb' '\uf7fc' '\uf7fd' '\uf7fe' '\uf7ff' ) ### Encoding table encoding_table = codecs.charmap_build(decoding_table) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284626"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">shhui/nova</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm 
break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">nova/tests/scheduler/test_filter_scheduler.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Filter Scheduler. """ import contextlib import mock import uuid import mox from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova import context from nova import db from nova import exception from nova.objects import instance_group as instance_group_obj from nova.pci import pci_request from nova.scheduler import driver from nova.scheduler import filter_scheduler from nova.scheduler import host_manager from nova.scheduler import utils as scheduler_utils from nova.scheduler import weights from nova.tests import fake_instance from nova.tests.scheduler import fakes from nova.tests.scheduler import test_scheduler def fake_get_filtered_hosts(hosts, filter_properties, index): return list(hosts) def fake_get_group_filtered_hosts(hosts, filter_properties, index): group_hosts = filter_properties.get('group_hosts') or [] if group_hosts: hosts = list(hosts) hosts.pop(0) return hosts else: return list(hosts) def fake_get_group_filtered_affinity_hosts(hosts, filter_properties, index): group_hosts = filter_properties.get('group_hosts') or [] if group_hosts: affinity_host = hosts.pop(0) return [affinity_host] else: return list(hosts) class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Test case for Filter Scheduler.""" driver_cls = filter_scheduler.FilterScheduler def test_run_instance_no_hosts(self): def _fake_empty_call_zone_method(*args, **kwargs): return [] sched = fakes.FakeFilterScheduler() uuid = 'fake-uuid1' fake_context = context.RequestContext('user', 'project') instance_properties = {'project_id': 1, 'os_type': 'Linux'} request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0}, 'instance_properties': instance_properties, 'instance_uuids': [uuid]} self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') old_ref, new_ref = db.instance_update_and_get_original(fake_context, uuid, {'vm_state': vm_states.ERROR, 'task_state': None}).AndReturn(({}, {})) compute_utils.add_instance_fault_from_exc(fake_context, mox.IsA(conductor_api.LocalAPI), new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.StubOutWithMock(db, 'compute_node_get_all') db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) self.mox.ReplayAll() sched.schedule_run_instance( fake_context, request_spec, None, None, None, None, {}, False) def 
test_run_instance_non_admin(self): self.was_admin = False def fake_get(context, *args, **kwargs): # make sure this is called with admin context, even though # we're using user context below self.was_admin = context.is_admin return {} sched = fakes.FakeFilterScheduler() self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get) fake_context = context.RequestContext('user', 'project') uuid = 'fake-uuid1' instance_properties = {'project_id': 1, 'os_type': 'Linux'} request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1}, 'instance_properties': instance_properties, 'instance_uuids': [uuid]} self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') old_ref, new_ref = db.instance_update_and_get_original(fake_context, uuid, {'vm_state': vm_states.ERROR, 'task_state': None}).AndReturn(({}, {})) compute_utils.add_instance_fault_from_exc(fake_context, mox.IsA(conductor_api.LocalAPI), new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() sched.schedule_run_instance( fake_context, request_spec, None, None, None, None, {}, False) self.assertTrue(self.was_admin) def test_scheduler_includes_launch_index(self): fake_context = context.RequestContext('user', 'project') instance_opts = {'fake_opt1': 'meow'} request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'], 'instance_properties': instance_opts} instance1 = {'uuid': 'fake-uuid1'} instance2 = {'uuid': 'fake-uuid2'} def _has_launch_index(expected_index): """Return a function that verifies the expected index.""" def _check_launch_index(value): if 'instance_properties' in value: if 'launch_index' in value['instance_properties']: index = value['instance_properties']['launch_index'] if index == expected_index: return True return False return _check_launch_index self.mox.StubOutWithMock(self.driver, '_schedule') self.mox.StubOutWithMock(self.driver, '_provision_resource') self.driver._schedule(fake_context, request_spec, {}, ['fake-uuid1', 'fake-uuid2']).AndReturn(['host1', 'host2']) # instance 1 self.driver._provision_resource( fake_context, 'host1', mox.Func(_has_launch_index(0)), {}, None, None, None, None, instance_uuid='fake-uuid1', legacy_bdm_in_spec=False).AndReturn(instance1) # instance 2 self.driver._provision_resource( fake_context, 'host2', mox.Func(_has_launch_index(1)), {}, None, None, None, None, instance_uuid='fake-uuid2', legacy_bdm_in_spec=False).AndReturn(instance2) self.mox.ReplayAll() self.driver.schedule_run_instance(fake_context, request_spec, None, None, None, None, {}, False) def test_schedule_happy_day(self): """Make sure there's nothing glaringly wrong with _schedule() by doing a happy day pass through. 
""" self.next_weight = 1.0 def _fake_weigh_objects(_self, functions, hosts, options): self.next_weight += 2.0 host_state = hosts[0] return [weights.WeighedHost(host_state, self.next_weight)] sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project', is_admin=True) self.stubs.Set(sched.host_manager, 'get_filtered_hosts', fake_get_filtered_hosts) self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects', _fake_weigh_objects) fakes.mox_host_manager_db_calls(self.mox, fake_context) request_spec = {'num_instances': 10, 'instance_type': {'memory_mb': 512, 'root_gb': 512, 'ephemeral_gb': 0, 'vcpus': 1}, 'instance_properties': {'project_id': 1, 'root_gb': 512, 'memory_mb': 512, 'ephemeral_gb': 0, 'vcpus': 1, 'os_type': 'Linux'}} self.mox.ReplayAll() weighed_hosts = sched._schedule(fake_context, request_spec, {}) self.assertEqual(len(weighed_hosts), 10) for weighed_host in weighed_hosts: self.assertIsNotNone(weighed_host.obj) def test_max_attempts(self): self.flags(scheduler_max_attempts=4) sched = fakes.FakeFilterScheduler() self.assertEqual(4, sched._max_attempts()) def test_invalid_max_attempts(self): self.flags(scheduler_max_attempts=0) sched = fakes.FakeFilterScheduler() self.assertRaises(exception.NovaException, sched._max_attempts) def test_retry_disabled(self): # Retry info should not get populated when re-scheduling is off. self.flags(scheduler_max_attempts=1) sched = fakes.FakeFilterScheduler() instance_properties = {'project_id': '12345', 'os_type': 'Linux'} request_spec = dict(instance_properties=instance_properties, instance_type={}) filter_properties = {} self.mox.StubOutWithMock(db, 'compute_node_get_all') db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) self.mox.ReplayAll() sched._schedule(self.context, request_spec, filter_properties=filter_properties) # should not have retry info in the populated filter properties: self.assertNotIn("retry", filter_properties) def test_retry_force_hosts(self): # Retry info should not get populated when re-scheduling is off. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() instance_properties = {'project_id': '12345', 'os_type': 'Linux'} request_spec = dict(instance_properties=instance_properties) filter_properties = dict(force_hosts=['force_host']) self.mox.StubOutWithMock(db, 'compute_node_get_all') db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) self.mox.ReplayAll() sched._schedule(self.context, request_spec, filter_properties=filter_properties) # should not have retry info in the populated filter properties: self.assertNotIn("retry", filter_properties) def test_retry_force_nodes(self): # Retry info should not get populated when re-scheduling is off. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() instance_properties = {'project_id': '12345', 'os_type': 'Linux'} request_spec = dict(instance_properties=instance_properties) filter_properties = dict(force_nodes=['force_node']) self.mox.StubOutWithMock(db, 'compute_node_get_all') db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) self.mox.ReplayAll() sched._schedule(self.context, request_spec, filter_properties=filter_properties) # should not have retry info in the populated filter properties: self.assertNotIn("retry", filter_properties) def test_retry_attempt_one(self): # Test retry logic on initial scheduling attempt. 
    def test_retry_attempt_one(self):
        # Test retry logic on initial scheduling attempt.
        self.flags(scheduler_max_attempts=2)
        sched = fakes.FakeFilterScheduler()

        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties,
                            instance_type={})
        filter_properties = {}

        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.ReplayAll()

        sched._schedule(self.context, request_spec,
                        filter_properties=filter_properties)

        num_attempts = filter_properties['retry']['num_attempts']
        self.assertEqual(1, num_attempts)

    def test_retry_attempt_two(self):
        # Test retry logic when re-scheduling.
        self.flags(scheduler_max_attempts=2)
        sched = fakes.FakeFilterScheduler()

        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties,
                            instance_type={})

        retry = dict(num_attempts=1)
        filter_properties = dict(retry=retry)

        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.ReplayAll()

        sched._schedule(self.context, request_spec,
                        filter_properties=filter_properties)

        num_attempts = filter_properties['retry']['num_attempts']
        self.assertEqual(2, num_attempts)

    def test_retry_exceeded_max_attempts(self):
        # Test for necessary explosion when max retries is exceeded and that
        # the information needed in request_spec is still present for error
        # handling
        self.flags(scheduler_max_attempts=2)
        sched = fakes.FakeFilterScheduler()

        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
        instance_uuids = ['fake-id']
        request_spec = dict(instance_properties=instance_properties,
                            instance_uuids=instance_uuids)

        retry = dict(num_attempts=2)
        filter_properties = dict(retry=retry)

        self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
                          self.context, request_spec, admin_password=None,
                          injected_files=None, requested_networks=None,
                          is_first_time=False,
                          filter_properties=filter_properties,
                          legacy_bdm_in_spec=False)
        uuids = request_spec.get('instance_uuids')
        self.assertEqual(uuids, instance_uuids)

    def test_add_retry_host(self):
        retry = dict(num_attempts=1, hosts=[])
        filter_properties = dict(retry=retry)
        host = "fakehost"
        node = "fakenode"

        scheduler_utils._add_retry_host(filter_properties, host, node)

        hosts = filter_properties['retry']['hosts']
        self.assertEqual(1, len(hosts))
        self.assertEqual([host, node], hosts[0])
    def test_post_select_populate(self):
        # Test addition of certain filter props after a node is selected.
        retry = {'hosts': [], 'num_attempts': 1}
        filter_properties = {'retry': retry}

        host_state = host_manager.HostState('host', 'node')
        host_state.limits['vcpus'] = 5
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)

        self.assertEqual(['host', 'node'],
                         filter_properties['retry']['hosts'][0])
        self.assertEqual({'vcpus': 5}, host_state.limits)

    def test_group_details_in_filter_properties(self):
        sched = fakes.FakeFilterScheduler()

        instance = fake_instance.fake_instance_obj(self.context,
                params={'host': 'hostA'})

        group = instance_group_obj.InstanceGroup()
        group.uuid = str(uuid.uuid4())
        group.members = [instance.uuid]
        group.policies = ['anti-affinity']

        filter_properties = {
            'scheduler_hints': {
                'group': group.uuid,
            },
            'group_hosts': ['hostB'],
        }

        with contextlib.nested(
            mock.patch.object(instance_group_obj.InstanceGroup, 'get_by_uuid',
                              return_value=group),
            mock.patch.object(instance_group_obj.InstanceGroup, 'get_hosts',
                              return_value=['hostA']),
        ) as (get_group, get_hosts):
            update_group_hosts = sched._setup_instance_group(self.context,
                    filter_properties)
            self.assertTrue(update_group_hosts)
            self.assertEqual(set(['hostA', 'hostB']),
                             filter_properties['group_hosts'])
            self.assertEqual(['anti-affinity'],
                             filter_properties['group_policies'])

    def test_schedule_host_pool(self):
        """Make sure the scheduler_host_subset_size property works properly."""
        self.flags(scheduler_host_subset_size=2)
        sched = fakes.FakeFilterScheduler()

        fake_context = context.RequestContext('user', 'project',
                is_admin=True)
        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                fake_get_filtered_hosts)
        fakes.mox_host_manager_db_calls(self.mox, fake_context)

        instance_properties = {'project_id': 1,
                               'root_gb': 512,
                               'memory_mb': 512,
                               'ephemeral_gb': 0,
                               'vcpus': 1,
                               'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties,
                            instance_type={})
        filter_properties = {}
        self.mox.ReplayAll()
        hosts = sched._schedule(self.context, request_spec,
                filter_properties=filter_properties)

        # one host should be chosen
        self.assertEqual(len(hosts), 1)

    def test_schedule_large_host_pool(self):
        """Hosts should still be chosen if the subset size is larger than
        the number of filtered hosts.
        """
        sched = fakes.FakeFilterScheduler()

        fake_context = context.RequestContext('user', 'project',
                is_admin=True)
        self.flags(scheduler_host_subset_size=20)
        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                fake_get_filtered_hosts)
        fakes.mox_host_manager_db_calls(self.mox, fake_context)

        instance_properties = {'project_id': 1,
                               'root_gb': 512,
                               'memory_mb': 512,
                               'ephemeral_gb': 0,
                               'vcpus': 1,
                               'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties,
                            instance_type={})
        filter_properties = {}
        self.mox.ReplayAll()
        hosts = sched._schedule(self.context, request_spec,
                filter_properties=filter_properties)

        # one host should still be chosen
        self.assertEqual(len(hosts), 1)
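    # NOTE: The scheduler_host_subset_size tests around here pin down the
    # same invariant from different angles: the option only widens the pool
    # of top-weighted candidates the scheduler may pick from, so exactly one
    # host is still returned per instance, and a subset size of 1 makes the
    # pick deterministic (the single best-weighted host), as the next test
    # verifies.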
""" self.flags(scheduler_host_subset_size=1) sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project', is_admin=True) self.stubs.Set(sched.host_manager, 'get_filtered_hosts', fake_get_filtered_hosts) fakes.mox_host_manager_db_calls(self.mox, fake_context) self.next_weight = 50 def _fake_weigh_objects(_self, functions, hosts, options): this_weight = self.next_weight self.next_weight = 0 host_state = hosts[0] return [weights.WeighedHost(host_state, this_weight)] instance_properties = {'project_id': 1, 'root_gb': 512, 'memory_mb': 512, 'ephemeral_gb': 0, 'vcpus': 1, 'os_type': 'Linux'} request_spec = dict(instance_properties=instance_properties, instance_type={}) self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects', _fake_weigh_objects) filter_properties = {} self.mox.ReplayAll() hosts = sched._schedule(self.context, request_spec, filter_properties=filter_properties) # one host should be chosen self.assertEqual(1, len(hosts)) self.assertEqual(50, hosts[0].weight) def test_select_destinations(self): """select_destinations is basically a wrapper around _schedule(). Similar to the _schedule tests, this just does a happy path test to ensure there is nothing glaringly wrong. """ self.next_weight = 1.0 selected_hosts = [] selected_nodes = [] def _fake_weigh_objects(_self, functions, hosts, options): self.next_weight += 2.0 host_state = hosts[0] selected_hosts.append(host_state.host) selected_nodes.append(host_state.nodename) return [weights.WeighedHost(host_state, self.next_weight)] sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project', is_admin=True) self.stubs.Set(sched.host_manager, 'get_filtered_hosts', fake_get_filtered_hosts) self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects', _fake_weigh_objects) fakes.mox_host_manager_db_calls(self.mox, fake_context) request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512, 'ephemeral_gb': 0, 'vcpus': 1}, 'instance_properties': {'project_id': 1, 'root_gb': 512, 'memory_mb': 512, 'ephemeral_gb': 0, 'vcpus': 1, 'os_type': 'Linux'}, 'num_instances': 1} self.mox.ReplayAll() dests = sched.select_destinations(fake_context, request_spec, {}) (host, node) = (dests[0]['host'], dests[0]['nodename']) self.assertEqual(host, selected_hosts[0]) self.assertEqual(node, selected_nodes[0]) def test_select_destinations_no_valid_host(self): def _return_no_host(*args, **kwargs): return [] self.stubs.Set(self.driver, '_schedule', _return_no_host) self.assertRaises(exception.NoValidHost, self.driver.select_destinations, self.context, {'num_instances': 1}, {}) def test_handles_deleted_instance(self): """Test instance deletion while being scheduled.""" def _raise_instance_not_found(*args, **kwargs): raise exception.InstanceNotFound(instance_id='123') self.stubs.Set(driver, 'instance_update_db', _raise_instance_not_found) sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') host_state = host_manager.HostState('host2', 'node2') weighted_host = weights.WeighedHost(host_state, 1.42) filter_properties = {} uuid = 'fake-uuid1' instance_properties = {'project_id': 1, 'os_type': 'Linux'} request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1}, 'instance_properties': instance_properties, 'instance_uuids': [uuid]} sched._provision_resource(fake_context, weighted_host, request_spec, filter_properties, None, None, None, None) def test_pci_request_in_filter_properties(self): instance_type = {} request_spec = {'instance_type': instance_type, 
                        'instance_properties': {'project_id': 1,
                                                'os_type': 'Linux'}}
        filter_properties = {}
        requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
        self.mox.StubOutWithMock(pci_request, 'get_pci_requests_from_flavor')
        pci_request.get_pci_requests_from_flavor(
            instance_type).AndReturn(requests)
        self.mox.ReplayAll()
        self.driver.populate_filter_properties(
            request_spec, filter_properties)
        self.assertEqual(filter_properties.get('pci_requests'),
                         requests)
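A note on the test above: it only pins down that populate_filter_properties() copies the flavor's PCI requests into the filter properties. A minimal sketch of the behavior being asserted (names taken from the test itself; this is not the actual nova implementation):

    def populate_filter_properties(request_spec, filter_properties):
        # Derive PCI device requests from the flavor and expose them to
        # the host filters via filter_properties.
        instance_type = request_spec.get('instance_type')
        pci_requests = pci_request.get_pci_requests_from_flavor(instance_type)
        if pci_requests:
            filter_properties['pci_requests'] = pci_requests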
# ===== feigames/Odoo | refs/heads/master | addons/web/tests/__init__.py | 54 =====

# -*- coding: utf-8 -*-
import test_js
# import test_menu
import test_serving_base

# ===== DepthDeluxe/ansible | refs/heads/devel | lib/ansible/modules/web_infrastructure/jboss.py | 21 =====

#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
  - Deploy applications to JBoss standalone using the filesystem
options:
  deployment:
    required: true
    description:
      - The name of the deployment
  src:
    required: false
    description:
      - The remote path of the application ear or war to deploy
  deploy_path:
    required: false
    default: /var/lib/jbossas/standalone/deployments
    description:
      - The location in the filesystem where the deployment scanner listens
  state:
    required: false
    choices: [ present, absent ]
    default: "present"
    description:
      - Whether the application should be deployed or undeployed
notes:
  - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
  - "Ensure no identically named application is deployed through the JBoss CLI"
author: "Jeroen Hoekx (@jhoekx)"
"""

EXAMPLES = """
# Deploy a hello world application
- jboss:
    src: /tmp/hello-1.0-SNAPSHOT.war
    deployment: hello.war
    state: present

# Update the hello world application
- jboss:
    src: /tmp/hello-1.1-SNAPSHOT.war
    deployment: hello.war
    state: present

# Undeploy the hello world application
- jboss:
    deployment: hello.war
    state: absent
"""

import os
import shutil
import time

from ansible.module_utils.basic import AnsibleModule


def is_deployed(deploy_path, deployment):
    return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))


def is_undeployed(deploy_path, deployment):
    return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))


def is_failed(deploy_path, deployment):
    return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))


def main():
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(type='path'),
            deployment=dict(required=True),
            deploy_path=dict(type='path',
                             default='/var/lib/jbossas/standalone/deployments'),
            state=dict(choices=['absent', 'present'], default='present'),
        ),
        required_if=[('state', 'present', ('src',))]
    )

    result = dict(changed=False)

    src = module.params['src']
    deployment = module.params['deployment']
    deploy_path = module.params['deploy_path']
    state = module.params['state']

    if not os.path.exists(deploy_path):
        module.fail_json(msg="deploy_path does not exist.")

    deployed = is_deployed(deploy_path, deployment)

    if state == 'present' and not deployed:
        if not os.path.exists(src):
            module.fail_json(msg='Source file %s does not exist.' % src)
        if is_failed(deploy_path, deployment):
            # Clean up old failed deployment
            os.remove(os.path.join(deploy_path, "%s.failed" % deployment))

        shutil.copyfile(src, os.path.join(deploy_path, deployment))
        while not deployed:
            deployed = is_deployed(deploy_path, deployment)
            if is_failed(deploy_path, deployment):
                module.fail_json(msg='Deploying %s failed.' % deployment)
            time.sleep(1)
        result['changed'] = True

    if state == 'present' and deployed:
        if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
            os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
            shutil.copyfile(src, os.path.join(deploy_path, deployment))
            deployed = False
            while not deployed:
                deployed = is_deployed(deploy_path, deployment)
                if is_failed(deploy_path, deployment):
                    module.fail_json(msg='Deploying %s failed.'
                                     % deployment)
                time.sleep(1)
            result['changed'] = True

    if state == 'absent' and deployed:
        os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
        while deployed:
            deployed = not is_undeployed(deploy_path, deployment)
            if is_failed(deploy_path, deployment):
                module.fail_json(msg='Undeploying %s failed.' % deployment)
            time.sleep(1)
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
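The module never talks to JBoss directly; it drives the standalone deployment scanner through marker files. Copying app.war into the scanned directory requests a deploy, and the scanner answers by creating app.war.deployed or app.war.failed. A self-contained sketch of that handshake (same protocol as the helpers above, but with a timeout the module itself does not have):

    import os
    import shutil
    import time

    def deploy_and_wait(src, deploy_path, deployment, timeout=60):
        # Copy the artifact in, then poll the scanner's marker files.
        shutil.copyfile(src, os.path.join(deploy_path, deployment))
        deadline = time.time() + timeout
        while time.time() < deadline:
            if os.path.exists(os.path.join(deploy_path, deployment + '.deployed')):
                return True   # scanner accepted the deployment
            if os.path.exists(os.path.join(deploy_path, deployment + '.failed')):
                return False  # scanner rejected it
            time.sleep(1)
        raise RuntimeError('deployment scanner never reported a result')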
data-row-idx="284630"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gregdek/ansible</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/devel</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lib/ansible/modules/system/mksysb.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">52</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Kairo Araujo <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="167d777f6479567d777f64793873627f387464">[email protected]</a>> # GNU General Public License v3.0+ (see COPYING or # https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- author: Kairo Araujo (@kairoaraujo) module: mksysb short_description: Generates AIX mksysb rootvg backups. description: - This module manages a basic AIX mksysb (image) of rootvg. version_added: "2.5" options: backup_crypt_files: description: - Backup encrypted files. type: bool default: "yes" backup_dmapi_fs: description: - Back up DMAPI filesystem files. type: bool default: "yes" create_map_files: description: - Creates a new MAP files. type: bool default: "no" exclude_files: description: - Excludes files using C(/etc/rootvg.exclude). type: bool default: "no" exclude_wpar_files: description: - Excludes WPAR files. type: bool default: "no" extended_attrs: description: - Backup extended attributes. type: bool default: "yes" name: description: - Backup name required: true new_image_data: description: - Creates a new file data. type: bool default: "yes" software_packing: description: - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg). type: bool default: "no" storage_path: description: - Storage path where the mksysb will stored. required: true use_snapshot: description: - Creates backup using snapshots. type: bool default: "no" ''' EXAMPLES = ''' - name: Running a backup image mksysb mksysb: name: myserver storage_path: /repository/images exclude_files: yes exclude_wpar_files: yes ''' RETURN = ''' changed: description: Return changed for mksysb actions as true or false. returned: always type: bool version_added: 2.5 msg: description: Return message regarding the action. 
from ansible.module_utils.basic import AnsibleModule
import os


def main():
    module = AnsibleModule(
        argument_spec=dict(
            backup_crypt_files=dict(type='bool', default=True),
            backup_dmapi_fs=dict(type='bool', default=True),
            create_map_files=dict(type='bool', default=False),
            exclude_files=dict(type='bool', default=False),
            exclude_wpar_files=dict(type='bool', default=False),
            extended_attrs=dict(type='bool', default=True),
            name=dict(required=True),
            new_image_data=dict(type='bool', default=True),
            software_packing=dict(type='bool', default=False),
            storage_path=dict(required=True),
            use_snapshot=dict(type='bool', default=False)
        ),
        supports_check_mode=True,
    )

    # Command options.
    map_file_opt = {True: '-m', False: ''}
    use_snapshot_opt = {True: '-T', False: ''}
    exclude_files_opt = {True: '-e', False: ''}
    exclude_wpar_opt = {True: '-G', False: ''}
    new_image_data_opt = {True: '-i', False: ''}
    soft_packing_opt = {True: '', False: '-p'}
    extend_attr_opt = {True: '', False: '-a'}
    crypt_files_opt = {True: '', False: '-Z'}
    dmapi_fs_opt = {True: '-a', False: ''}

    backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
    backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
    create_map_files = map_file_opt[module.params['create_map_files']]
    exclude_files = exclude_files_opt[module.params['exclude_files']]
    exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
    extended_attrs = extend_attr_opt[module.params['extended_attrs']]
    name = module.params['name']
    new_image_data = new_image_data_opt[module.params['new_image_data']]
    software_packing = soft_packing_opt[module.params['software_packing']]
    storage_path = module.params['storage_path']
    use_snapshot = use_snapshot_opt[module.params['use_snapshot']]

    # Validate if storage_path is a valid directory.
    if os.path.isdir(storage_path):
        if not module.check_mode:
            # Generates the mksysb image backup.
            mksysb_cmd = module.get_bin_path('mksysb', True)
            rc, mksysb_output, err = module.run_command(
                "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
                    mksysb_cmd, create_map_files, use_snapshot, exclude_files,
                    exclude_wpar_files, software_packing, extended_attrs,
                    backup_crypt_files, backup_dmapi_fs, new_image_data,
                    storage_path, name))
            if rc == 0:
                module.exit_json(changed=True, msg=mksysb_output)
            else:
                module.fail_json(msg="mksysb failed.", rc=rc, err=err)

        module.exit_json(changed=True)
    else:
        module.fail_json(msg="Storage path %s is not valid."
                         % storage_path)


if __name__ == '__main__':
    main()
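The option handling in main() is a pure table lookup: every boolean parameter maps to either an mksysb switch or an empty string, and the command line is assembled by string interpolation. The pattern in isolation (a condensed, hypothetical two-option sketch, not the module itself):

    def build_flags(params):
        # Map boolean parameters to mksysb command-line switches; an empty
        # string means "omit the flag".
        flag_table = {
            'create_map_files': {True: '-m', False: ''},
            'use_snapshot': {True: '-T', False: ''},
        }
        return ' '.join(flag_table[name][params[name]] for name in flag_table)

    # build_flags({'create_map_files': True, 'use_snapshot': False}) == '-m '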
# ===== Lilywei123/tempest | refs/heads/master | tempest/tests/test_wrappers.py | 11 =====

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import shutil
import StringIO
import subprocess
import tempfile

from tempest.tests import base

DEVNULL = open(os.devnull, 'wb')


class TestWrappers(base.TestCase):
    def setUp(self):
        super(TestWrappers, self).setUp()
        # Setup test dirs
        self.directory = tempfile.mkdtemp(prefix='tempest-unit')
        self.addCleanup(shutil.rmtree, self.directory)
        self.test_dir = os.path.join(self.directory, 'tests')
        os.mkdir(self.test_dir)

        # Setup Test files
        self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
        self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
        self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
        self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
        self.init_file = os.path.join(self.test_dir, '__init__.py')
        self.setup_py = os.path.join(self.directory, 'setup.py')
        shutil.copy('tempest/tests/files/testr-conf', self.testr_conf_file)
        shutil.copy('tempest/tests/files/passing-tests', self.passing_file)
        shutil.copy('tempest/tests/files/failing-tests', self.failing_file)
        shutil.copy('setup.py', self.setup_py)
        shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
        shutil.copy('tempest/tests/files/__init__.py', self.init_file)
        # copy over the pretty_tox scripts
        shutil.copy('tools/pretty_tox.sh',
                    os.path.join(self.directory, 'pretty_tox.sh'))
        shutil.copy('tools/pretty_tox_serial.sh',
                    os.path.join(self.directory, 'pretty_tox_serial.sh'))

        self.stdout = StringIO.StringIO()
        self.stderr = StringIO.StringIO()
        # Change directory, run wrapper and check result
        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
        os.chdir(self.directory)

    def assertRunExit(self, cmd, expected):
        p = subprocess.Popen(
            "bash %s" % cmd, shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        self.assertEqual(
            p.returncode, expected,
            "Stdout: %s; Stderr: %s" % (out, err))

    def test_pretty_tox(self):
        # Git init is required for the pbr testr command. pbr requires a git
        # version or an sdist to work. so make the test directory a git repo
        # too.
        subprocess.call(['git', 'init'], stderr=DEVNULL)
        self.assertRunExit('pretty_tox.sh passing', 0)

    def test_pretty_tox_fails(self):
        # Git init is required for the pbr testr command. pbr requires a git
        # version or an sdist to work. so make the test directory a git repo
        # too.
        subprocess.call(['git', 'init'], stderr=DEVNULL)
        self.assertRunExit('pretty_tox.sh', 1)

    def test_pretty_tox_serial(self):
        self.assertRunExit('pretty_tox_serial.sh passing', 0)

    def test_pretty_tox_serial_fails(self):
        self.assertRunExit('pretty_tox_serial.sh', 1)

# ===== danilito19/django | refs/heads/master | tests/test_utils/views.py | 481 =====

from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template import Context, Template

from .models import Person


def get_person(request, pk):
    person = get_object_or_404(Person, pk=pk)
    return HttpResponse(person.name)


def no_template_used(request):
    template = Template("This is a string-based template")
    return HttpResponse(template.render(Context({})))


def empty_response(request):
    return HttpResponse('')
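These views are deliberately minimal; they exist as targets for the assertion helpers under test. Outside a full test suite, the quickest way to poke one is Django's RequestFactory, which builds a request without going through URL routing (a sketch; it assumes a configured Django settings module):

    from django.test import RequestFactory

    factory = RequestFactory()
    request = factory.get('/no-template/')   # the path is arbitrary here
    response = no_template_used(request)
    assert response.content == b'This is a string-based template'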
# ===== shubhdev/edx-platform | refs/heads/master | common/test/acceptance/pages/studio/video/video.py | 58 =====

"""
CMS Video
"""

import time
import os
import requests
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.javascript import wait_for_js, js_defined
from ....tests.helpers import YouTubeStubConfig
from ...lms.video.video import VideoPage
from selenium.webdriver.common.keys import Keys
from ..utils import wait_for_notification

CLASS_SELECTORS = {
    'video_container': 'div.video',
    'video_init': '.is-initialized',
    'video_xmodule': '.xmodule_VideoModule',
    'video_spinner': '.video-wrapper .spinner',
    'video_controls': 'section.video-controls',
    'attach_asset': '.upload-dialog > input[type="file"]',
    'upload_dialog': '.wrapper-modal-window-assetupload',
    'xblock': '.add-xblock-component',
    'slider_range': '.slider-range',
    'error': '.transcripts-error-message',
    'url_inputs': '.videolist-settings-item input.input',
    'collapse_bar': '.videolist-extra-videos',
    'status': '.transcripts-message-status',
    'attach_transcript': '.file-chooser > input[type="file"]',
}

BUTTON_SELECTORS = {
    'create_video': 'a[data-category="video"]',
    'handout_download': '.video-handout.video-download-button a',
    'handout_download_editor': '.wrapper-comp-setting.file-uploader .download-action',
    'upload_asset': '.upload-action',
    'asset_submit': '.action-upload',
    'handout_clear': '.wrapper-comp-setting.file-uploader .setting-clear',
    'translations_clear': '.metadata-video-translations .setting-clear',
    'translation_add': '.wrapper-translations-settings > a',
    'import': '.setting-import',
    'download_to_edit': '.setting-download',
    'disabled_download_to_edit': '.setting-download.is-disabled',
    'upload_new_timed_transcripts': '.setting-upload',
    'replace': '.setting-replace',
    'choose': '.setting-choose',
    'use_existing': '.setting-use-existing',
    'collapse_link': '.collapse-action.collapse-setting',
}

DISPLAY_NAME = "Component Display Name"

DEFAULT_SETTINGS = [
    # basic
    [DISPLAY_NAME, 'Video', False],
    ['Default Video URL', 'http://youtu.be/3_yD_cEKoCk, , ', False],

    # advanced
    [DISPLAY_NAME, 'Video', False],
    ['Default Timed Transcript', '', False],
    ['Download Transcript Allowed', 'False', False],
    ['Downloadable Transcript URL', '', False],
    ['Show Transcript', 'True', False],
    ['Transcript Languages', '', False],
    ['Upload Handout', '', False],
    ['Video Available on Web Only', 'False', False],
    ['Video Download Allowed', 'False', False],
    ['Video File URLs', '', False],
    ['Video ID', '', False],
    ['Video Start Time', '00:00:00', False],
    ['Video Stop Time', '00:00:00', False],
    ['YouTube ID', '3_yD_cEKoCk', False],
    ['YouTube ID for .75x speed', '', False],
    ['YouTube ID for 1.25x speed', '', False],
    ['YouTube ID for 1.5x speed', '', False]
]

# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5


@js_defined('window.Video', 'window.RequireJS.require', 'window.jQuery',
            'window.XModule', 'window.XBlock', 'window.MathJax',
            'window.MathJax.isReady')
class VideoComponentPage(VideoPage):
    """
    CMS Video Component Page
    """

    url = None

    @wait_for_js
    def is_browser_on_page(self):
        return self.q(css='div{0}'.format(CLASS_SELECTORS['video_xmodule'])).present or \
            self.q(css='div{0}'.format(CLASS_SELECTORS['xblock'])).present

    def get_element_selector(self, class_name, vertical=False):
        return super(VideoComponentPage, self).get_element_selector(class_name, vertical=vertical)

    def _wait_for(self, check_func, desc, result=False, timeout=30):
        """
        Calls the method provided as an argument until the Promise is satisfied or BrokenPromise

        Arguments:
            check_func (callable): Promise function to be fulfilled.
            desc (str): Description of the Promise, used in log messages.
            result (bool): Indicates whether we need the result from the Promise or not.
            timeout (float): Maximum number of seconds to wait for the Promise to be
                satisfied before timing out.
""" if result: return Promise(check_func, desc, timeout=timeout).fulfill() else: return EmptyPromise(check_func, desc, timeout=timeout).fulfill() def wait_for_video_component_render(self): """ Wait until video component rendered completely """ if not YouTubeStubConfig.get_configuration().get('youtube_api_blocked'): self._wait_for(lambda: self.q(css=CLASS_SELECTORS['video_init']).present, 'Video Player Initialized') self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['video_spinner']).visible, 'Video Buffering Completed') self._wait_for(self.is_controls_visible, 'Player Controls are Visible') @wait_for_js def is_controls_visible(self): """ Get current visibility sate of all video controls. Returns: bool: True means video controls are visible for all videos, False means video controls are not visible for one or more videos """ return self.q(css=CLASS_SELECTORS['video_controls']).visible def click_button(self, button_name, index=0, require_notification=False): """ Click on a button as specified by `button_name` Arguments: button_name (str): button name index (int): query index """ self.q(css=BUTTON_SELECTORS[button_name]).nth(index).click() if require_notification: wait_for_notification(self) self.wait_for_ajax() @staticmethod def file_path(filename): """ Construct file path to be uploaded to assets. Arguments: filename (str): asset filename """ return os.sep.join(__file__.split(os.sep)[:-5]) + '/data/uploads/' + filename def upload_handout(self, handout_filename): """ Upload a handout file to assets Arguments: handout_filename (str): handout file name """ self.upload_asset(handout_filename) def upload_asset(self, asset_filename, asset_type='handout', index=0): """ Upload a asset file to assets Arguments: asset_filename (str): asset file name asset_type (str): one of `handout`, `transcript` index (int): query index """ asset_file_path = self.file_path(asset_filename) self.click_button('upload_asset', index) self.q(css=CLASS_SELECTORS['attach_asset']).results[0].send_keys(asset_file_path) self.click_button('asset_submit') # Only srt format transcript files can be uploaded, If an error # occurs due to incorrect transcript file we will return from here if asset_type == 'transcript' and self.q(css='#upload_error').present: return # confirm upload completion self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['upload_dialog']).present, 'Upload Completed') def clear_handout(self): """ Clear handout from settings """ self.click_button('handout_clear') def _get_handout(self, url): """ Download handout at `url` """ kwargs = dict() session_id = [{i['name']: i['value']} for i in self.browser.get_cookies() if i['name'] == u'sessionid'] if session_id: kwargs.update({ 'cookies': session_id[0] }) response = requests.get(url, **kwargs) return response.status_code < 400, response.headers def download_handout(self, mime_type, is_editor=False): """ Download handout with mime type specified by `mime_type` Arguments: mime_type (str): mime type of handout file Returns: tuple: Handout download result. 
""" selector = BUTTON_SELECTORS['handout_download_editor'] if is_editor else BUTTON_SELECTORS['handout_download'] handout_url = self.q(css=selector).attrs('href')[0] result, headers = self._get_handout(handout_url) return result, headers['content-type'] == mime_type @property def is_handout_button_visible(self): """ Check if handout download button is visible """ return self.q(css=BUTTON_SELECTORS['handout_download']).visible def create_video(self): """ Create a Video Component by clicking on Video button and wait for rendering completion. """ # Create video self.click_button('create_video', require_notification=True) self.wait_for_video_component_render() def xblocks(self): """ Tells the total number of video xblocks present on current unit page. Returns: (int): total video xblocks """ return len(self.q(css='.xblock-header').filter( lambda el: 'xblock-header-video' in el.get_attribute('class')).results) def focus_caption_line(self, line_number): """ Focus a caption line as specified by `line_number` Arguments: line_number (int): caption line number """ caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1) self.q(css=caption_line_selector).results[0].send_keys(Keys.ENTER) def is_caption_line_focused(self, line_number): """ Check if a caption line focused Arguments: line_number (int): caption line number """ caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1) attributes = self.q(css=caption_line_selector).attrs('class') return 'focused' in attributes @property def is_slider_range_visible(self): """ Return True if slider range is visible. """ return self.q(css=CLASS_SELECTORS['slider_range']).visible def verify_settings(self): """ Verify that video component has correct default settings. """ query = '.wrapper-comp-setting' settings = self.q(css=query).results if len(DEFAULT_SETTINGS) != len(settings): return False for counter, setting in enumerate(settings): is_verified = self._verify_setting_entry(setting, DEFAULT_SETTINGS[counter][0], DEFAULT_SETTINGS[counter][1]) if not is_verified: return is_verified return True @staticmethod def _verify_setting_entry(setting, field_name, field_value): """ Verify a `setting` entry. Arguments: setting (WebElement): Selenium WebElement field_name (str): Name of field field_value (str): Value of field Returns: bool: Does `setting` have correct value. 
""" if field_name != setting.find_element_by_class_name('setting-label').get_attribute('innerHTML'): return False # Get class attribute values classes = setting.get_attribute('class').split() list_type_classes = ['metadata-list-enum', 'metadata-dict', 'metadata-video-translations'] is_list_type = any(list_type in classes for list_type in list_type_classes) if is_list_type: current_value = ', '.join( ele.get_attribute('value') for ele in setting.find_elements_by_class_name('list-settings-item')) elif 'metadata-videolist-enum' in setting.get_attribute('class'): current_value = ', '.join(item.find_element_by_tag_name('input').get_attribute('value') for item in setting.find_elements_by_class_name('videolist-settings-item')) else: current_value = setting.find_element_by_class_name('setting-input').get_attribute('value') if field_value != current_value: return False # Clear button should be visible(active class is present) for # every setting that don't have 'metadata-videolist-enum' class if 'metadata-videolist-enum' not in setting.get_attribute('class'): setting_clear_button = setting.find_elements_by_class_name('setting-clear')[0] if 'active' not in setting_clear_button.get_attribute('class'): return False return True def set_field_value(self, field_name, field_value, field_type='input'): """ Set settings input `field` with `value` Arguments: field_name (str): Name of field field_value (str): Name of value field_type (str): `input`, `select` etc(more to be added later) """ query = '.wrapper-comp-setting > label:nth-child(1)' field_id = '' if field_type == 'input': for index, _ in enumerate(self.q(css=query)): if field_name in self.q(css=query).nth(index).text[0]: field_id = self.q(css=query).nth(index).attrs('for')[0] break self.q(css='#{}'.format(field_id)).fill(field_value) elif field_type == 'select': self.q(css='select[name="{0}"] option[value="{1}"]'.format(field_name, field_value)).first.click() def verify_field_value(self, field_name, field_value): """ Get settings value of `field_name` Arguments: field_name (str): Name of field field_value (str): Name of value Returns: bool: If `field_name` has `field_value` """ _, setting = self._get_setting_entry(field_name) return self._verify_setting_entry(setting, field_name, field_value) def _get_setting_entry(self, field_name): """ Get setting entry of `field_name` Arguments: field_name (str): Name of field Returns: setting (WebElement): Selenium WebElement """ for index, setting in enumerate(self.q(css='.wrapper-comp-setting').results): if setting.find_element_by_class_name('setting-label').get_attribute('innerHTML') == field_name: return index, setting def translations_count(self): """ Get count of translations. """ return len(self.q(css='.wrapper-translations-settings .list-settings-item').results) def select_translation_language(self, language_code, index=0): """ Select translation language as specified by `language_code` Arguments: language_code (str): index (int): query index """ translations_items = '.wrapper-translations-settings .list-settings-item' language_selector = translations_items + ' select option[value="{}"]'.format(language_code) self.q(css=language_selector).nth(index).click() def upload_translation(self, transcript_name, language_code): """ Upload a translation file. 
        Arguments:
            transcript_name (str):
            language_code (str):
        """
        self.click_button('translation_add')
        translations_count = self.translations_count()
        self.select_translation_language(language_code, translations_count - 1)
        self.upload_asset(transcript_name, asset_type='transcript',
                          index=translations_count - 1)

    def replace_translation(self, old_lang_code, new_lang_code, transcript_name):
        """
        Replace a translation.

        Arguments:
            old_lang_code (str):
            new_lang_code (str):
            transcript_name (str):
        """
        language_codes = self.translations()
        index = language_codes.index(old_lang_code)
        self.select_translation_language(new_lang_code, index)
        self.upload_asset(transcript_name, asset_type='transcript', index=index)

    def translations(self):
        """
        Extract translations

        Returns:
            list: list of translation language codes
        """
        translations_selector = '.metadata-video-translations .remove-setting'
        return self.q(css=translations_selector).attrs('data-lang')

    def download_translation(self, language_code, text_to_search):
        """
        Download a translation having `language_code` and containing `text_to_search`

        Arguments:
            language_code (str): language code
            text_to_search (str): text to search in translation

        Returns:
            bool: whether download was successful
        """
        mime_type = 'application/x-subrip'
        lang_code = '/{}?'.format(language_code)
        link = [link for link in self.q(css='.download-action').attrs('href')
                if lang_code in link]
        result, headers, content = self._get_transcript(link[0])

        return result is True and mime_type in headers['content-type'] \
            and text_to_search in content.decode('utf-8')

    def remove_translation(self, language_code):
        """
        Remove a translation having `language_code`

        Arguments:
            language_code (str): language code
        """
        self.q(css='.remove-action').filter(
            lambda el: language_code == el.get_attribute('data-lang')).click()

    @property
    def upload_status_message(self):
        """
        Get asset upload status message
        """
        return self.q(css='#upload_error').text[0]

    def captions_lines(self):
        """
        Extract partial caption lines.

        As all the caption lines are exactly the same, getting partial lines
        will work.
        """
        self.wait_for_captions()
        selector = '.subtitles > li:nth-child({})'
        return ' '.join([self.q(css=selector.format(i)).text[0] for i in range(1, 6)])

    def set_url_field(self, url, field_number):
        """
        Set video url field in basic settings tab.

        Arguments:
            url (str): video url
            field_number (int): video url field number
        """
        if self.q(css=CLASS_SELECTORS['collapse_bar']).visible is False:
            self.click_button('collapse_link')
        self.q(css=CLASS_SELECTORS['url_inputs']).nth(field_number - 1).fill(url)
        time.sleep(DELAY)
        self.wait_for_ajax()

    def message(self, message_type):
        """
        Get video url field status/error message.

        Arguments:
            message_type (str): type (status, error) of message

        Returns:
            str: status/error message
        """
        if message_type == 'status':
            self.wait_for_element_visibility(
                CLASS_SELECTORS[message_type],
                '{} message is Visible'.format(message_type.title()))
        return self.q(css=CLASS_SELECTORS[message_type]).text[0]

    def url_field_status(self, *field_numbers):
        """
        Get video url field status (enabled/disabled).

        Arguments:
            field_numbers (tuple or None): field numbers to check status for,
                None means get status for all.
                Tuple items will be integers and must start from 1.

        Returns:
            dict: field numbers as keys and field status (bool) as values,
                False means a field is disabled
        """
        if field_numbers:
            index_list = [number - 1 for number in field_numbers]
        else:
            index_list = range(3)  # maximum three fields

        statuses = {}
        for index in index_list:
            status = 'is-disabled' not in self.q(
                css=CLASS_SELECTORS['url_inputs']).nth(index).attrs('class')[0]
            statuses[index + 1] = status
        return statuses

    def clear_field(self, index):
        """
        Clear a video url field at index specified by `index`.
        """
        self.q(css=CLASS_SELECTORS['url_inputs']).nth(index - 1).fill('')

        # Trigger an 'input' event after filling the field with an empty value.
        self.browser.execute_script(
            "$('{}:eq({})').trigger('{}')".format(CLASS_SELECTORS['url_inputs'], index, 'input'))
        time.sleep(DELAY)
        self.wait_for_ajax()

    def clear_fields(self):
        """
        Clear video url fields.
        """
        script = """
        $('{selector}')
            .prop('disabled', false)
            .removeClass('is-disabled')
            .val('')
            .trigger('input');
        """.format(selector=CLASS_SELECTORS['url_inputs'])
        self.browser.execute_script(script)
        time.sleep(DELAY)
        self.wait_for_ajax()

    def revert_field(self, field_name):
        """
        Revert a field.
        """
        _, setting = self._get_setting_entry(field_name)
        setting.find_element_by_class_name('setting-clear').click()

    def is_transcript_button_visible(self, button_name, index=0, button_text=None):
        """
        Check if a transcript related button is visible.

        Arguments:
            button_name (str): name of button
            index (int): query index
            button_text (str or None): text to match with text on a button,
                if None then don't match texts

        Returns:
            bool: is button visible
        """
        is_visible = self.q(css=BUTTON_SELECTORS[button_name]).nth(index).visible
        is_text_matched = True
        if button_text and button_text != self.q(css=BUTTON_SELECTORS[button_name]).nth(index).text[0]:
            is_text_matched = False
        return is_visible and is_text_matched

    def upload_transcript(self, transcript_filename):
        """
        Upload a Transcript

        Arguments:
            transcript_filename (str): name of transcript file
        """
        # Show the Browse Button
        self.browser.execute_script("$('form.file-chooser').show()")
        asset_file_path = self.file_path(transcript_filename)
        self.q(css=CLASS_SELECTORS['attach_transcript']).results[0].send_keys(asset_file_path)
        # confirm upload completion
        self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['attach_transcript']).visible,
                       'Upload Completed')
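Nearly every wait in this page object funnels through _wait_for(), which wraps bok_choy's Promise/EmptyPromise. For reference, the idiom in isolation (a sketch reusing the same bok_choy API; `page` stands in for any page object instance):

    from bok_choy.promise import EmptyPromise

    # Poll the predicate until it returns True, raising BrokenPromise if the
    # 30-second timeout expires first.
    EmptyPromise(lambda: page.q(css='.is-initialized').present,
                 'Video player initialized', timeout=30).fulfill()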
# ===== Shao-Feng/crosswalk-test-suite | refs/heads/master | webapi/tct-csp-w3c-tests/csp-py/csp_script-src_unsafe-inline_unsafe-eval.py | 30 =====

def main(request, response):
    import simplejson as json
    f = file('config.json')
    source = f.read()
    s = json.JSONDecoder().decode(source)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
    _CSP = "script-src 'self' 'unsafe-inline' 'unsafe-eval'"
    response.headers.set("Content-Security-Policy", _CSP)
    response.headers.set("X-Content-Security-Policy", _CSP)
    response.headers.set("X-WebKit-CSP", _CSP)
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of works must retain the original copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
  may be used to endorse or promote products derived from this work without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
        Hao, Yunfei <yunfeix.hao@intel.com>

-->
<html>
  <head>
    <title>CSP Test: csp_script-src_unsafe-inline_unsafe-eval</title>
    <link rel="author" title="Intel" href="http://www.intel.com/"/>
    <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#script-src"/>
    <meta name="flags" content=""/>
    <meta name="assert" content="script-src 'self' 'unsafe-inline' 'unsafe-eval'"/>
    <meta charset="utf-8"/>
    <script src="../resources/testharness.js"></script>
    <script src="../resources/testharnessreport.js"></script>
  </head>
  <body>
    <div id="log"></div>
    <script src="support/csp.js"></script>
    <script>
        test(function() {
            assert_equals(X, 10, "X is 10");
            assert_equals(Y, 27, "Y is X+17");
        }, document.title + "_allowed_int");
        test(function() {
            assert_equals(eval(X + Y), 37, "eval(X + Y) should be 37");
        }, document.title + "_allowed_inline");
    </script>
  </body>
</html>
"""
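For reference, the policy served above relaxes the two restrictions CSP normally places on script: 'unsafe-inline' re-allows inline <script> blocks and 'unsafe-eval' re-allows eval(), while 'self' still restricts external script files to the page's own origin. That is why both testharness.js cases in the page (the inline variable checks and the eval(X + Y) call) are expected to pass. The header as sent:

    Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval'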
# ===== ContinuumIO/odo | refs/heads/master | odo/backends/tests/test_ssh.py | 4 =====

from __future__ import absolute_import, division, print_function

import pytest
paramiko = pytest.importorskip('paramiko')

import pandas as pd
import numpy as np
import re
import os
import sys

from odo.utils import tmpfile, filetext
from odo.directory import _Directory, Directory
from odo.backends.ssh import SSH, resource, ssh_pattern, sftp, drop, connect
from odo.backends.csv import CSV
from odo import into, discover, CSV, JSONLines, JSON, convert
from odo.temp import _Temp, Temp
from odo.compatibility import ON_TRAVIS_CI

import socket

skipif = pytest.mark.skipif

# NOTE: this is a workaround for paramiko on Py2; connect() hangs without
# raising an exception. Shows up on paramiko 1.16.0 and 2.0.2 with Py 2.7.
# KWS: 2016-08-10
# JJ: Still happens as of 2016-10-20
try_to_connect = sys.version_info[0] >= 3
pytestmark = skipif(not try_to_connect, reason='could not connect')

if try_to_connect:
    try:
        ssh = connect(hostname='localhost')
        ssh.close()
    except socket.error:
        pytestmark = pytest.mark.skip('Could not connect')
    except paramiko.PasswordRequiredException as e:
        pytestmark = pytest.mark.skip(str(e))
    except paramiko.SSHException as e:
        pytestmark = pytest.mark.skip(str(e))
    except TypeError:
        # NOTE: This is a workaround for paramiko version 1.16.0 on Python
        # 3.4, which raises a TypeError due to improper indexing internally
        # into dict_keys when a ConnectionRefused error is raised.
        # KWS 2016-04-21.
        pytestmark = pytest.mark.skip('Could not connect')


def test_resource():
    r = resource('ssh://joe@localhost:/path/to/myfile.csv')
    assert isinstance(r, SSH(CSV))
    assert r.path == '/path/to/myfile.csv'
    assert r.auth['hostname'] == 'localhost'
    assert r.auth['username'] == 'joe'


def test_connect():
    a = connect(hostname='localhost')
    b = connect(hostname='localhost')
    assert a is b

    a.close()

    c = connect(hostname='localhost')
    assert a is c
    assert c.get_transport() and c.get_transport().is_active()


def test_resource_directory():
    r = resource('ssh://joe@localhost:/path/to/')
    assert issubclass(r.subtype, _Directory)

    r = resource('ssh://joe@localhost:/path/to/*.csv')
    assert r.subtype == Directory(CSV)
    assert r.path == '/path/to/'


def test_discover():
    with filetext('name,balance\nAlice,100\nBob,200') as fn:
        local = CSV(fn)
        remote = SSH(CSV)(fn, hostname='localhost')
        assert discover(local) == discover(remote)


def test_discover_from_resource():
    with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
        local = CSV(fn)
        remote = resource('ssh://localhost:' + fn)
        assert discover(local) == discover(remote)


def test_ssh_pattern():
    uris = ['localhost:myfile.csv',
            '127.0.0.1:/myfile.csv',
            'user@127.0.0.1:/myfile.csv',
            'user@127.0.0.1:/*.csv',
            'user@127.0.0.1:/my-dir/my-file3.csv']
    for uri in uris:
        assert re.match(ssh_pattern, uri)


def test_copy_remote_csv():
    with tmpfile('csv') as target:
        with filetext('name,balance\nAlice,100\nBob,200',
                      extension='csv') as fn:
            csv = resource(fn)
            uri = 'ssh://localhost:%s.csv' % target
            scsv = into(uri, csv)
            assert isinstance(scsv, SSH(CSV))
            assert discover(scsv) == discover(csv)

            # Round trip
            csv2 = into(target, scsv)
            assert into(list, csv) == into(list, csv2)


def test_drop():
    with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
        with tmpfile('csv') as target:
            scsv = SSH(CSV)(target, hostname='localhost')

            assert not os.path.exists(target)

            conn = sftp(**scsv.auth)
            conn.put(fn, target)

            assert os.path.exists(target)

            drop(scsv)
            drop(scsv)

            assert not os.path.exists(target)


def test_drop_of_csv_json_lines_use_ssh_version():
    from odo.backends.ssh import drop_ssh
    for typ in [CSV, JSON, JSONLines]:
        assert drop.dispatch(SSH(typ)) == drop_ssh


def test_convert_local_file_to_temp_ssh_file():
    with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
        csv = CSV(fn)
        scsv = convert(Temp(SSH(CSV)), csv, hostname='localhost')
        assert into(list, csv) == into(list, scsv)


@skipif(ON_TRAVIS_CI, reason="Don't know")
def test_temp_ssh_files():
    with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
        csv = CSV(fn)
        scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')
        assert discover(csv) == discover(scsv)
        assert isinstance(scsv, _Temp)
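# NOTE: The remaining tests round-trip data through Temp(SSH(...)) targets:
# odo stages the data in a temporary local file, pushes it over SFTP, and
# the Temp wrapper is cleaned up automatically once it goes out of scope.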
Repository: anfedorov/atom-script (refs/heads/master)
File: examples/version_info.py (count: 18)

```python
#!/usr/bin/env python2.7
import sys

print(sys.version_info)
```

Repository: damonkohler/sl4a (refs/heads/master)
File: python/gdata/src/gdata/tlslite/utils/OpenSSL_TripleDES.py (count: 359)

```python
"""OpenSSL/M2Crypto 3DES implementation."""

from cryptomath import *
from TripleDES import *

if m2cryptoLoaded:

    def new(key, mode, IV):
        return OpenSSL_TripleDES(key, mode, IV)

    class OpenSSL_TripleDES(TripleDES):

        def __init__(self, key, mode, IV):
            TripleDES.__init__(self, key, mode, IV, "openssl")
            self.key = key
            self.IV = IV

        def _createContext(self, encrypt):
            context = m2.cipher_ctx_new()
            cipherType = m2.des_ede3_cbc()
            m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
            return context

        def encrypt(self, plaintext):
            TripleDES.encrypt(self, plaintext)
            context = self._createContext(1)
            ciphertext = m2.cipher_update(context, plaintext)
            m2.cipher_ctx_free(context)
            self.IV = ciphertext[-self.block_size:]
            return ciphertext

        def decrypt(self, ciphertext):
            TripleDES.decrypt(self, ciphertext)
            context = self._createContext(0)
            # I think M2Crypto has a bug - it fails to decrypt and return the
            # last block passed in. To work around this, we append sixteen
            # zeros to the string, below:
            plaintext = m2.cipher_update(context, ciphertext + ('\0' * 16))

            # If this bug is ever fixed, then plaintext will end up having a
            # garbage plaintext block on the end. That's okay - the below code
            # will ignore it.
            plaintext = plaintext[:len(ciphertext)]
            m2.cipher_ctx_free(context)
            self.IV = ciphertext[-self.block_size:]
            return plaintext
```
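A note on the `self.IV = ciphertext[-self.block_size:]` lines above: in CBC mode the last ciphertext block is exactly the chaining state, so carrying it forward lets a message be encrypted across several `encrypt()` calls. A sketch of that property, assuming the pyca/cryptography package (where `TripleDES` may be deprecated in recent releases); none of these names come from the file above:

```python
# CBC chaining: encrypting 16 bytes at once equals encrypting two 8-byte
# halves where the second call is seeded with the first call's last block.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key, iv = b'0' * 24, b'1' * 8  # toy key/IV, 3DES block size is 8 bytes

def enc(iv_, data):
    c = Cipher(algorithms.TripleDES(key), modes.CBC(iv_), default_backend())
    return c.encryptor().update(data)

whole = enc(iv, b'A' * 16)
first = enc(iv, b'A' * 8)
second = enc(first[-8:], b'A' * 8)   # re-seed IV with last ciphertext block
assert whole == first + second
```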
Repository: bigswitch/neutron (refs/heads/master)
File: neutron/tests/unit/agent/metadata/test_agent.py (count: 1)

```python
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib import constants as n_const
import testtools
import webob

from neutron.agent.linux import utils as agent_utils
from neutron.agent.metadata import agent
from neutron.agent.metadata import config
from neutron.agent import metadata_agent
from neutron.common import utils
from neutron.tests import base


class FakeConf(object):
    auth_ca_cert = None
    nova_metadata_ip = '9.9.9.9'
    nova_metadata_port = 8775
    metadata_proxy_shared_secret = 'secret'
    nova_metadata_protocol = 'http'
    nova_metadata_insecure = True
    nova_client_cert = 'nova_cert'
    nova_client_priv_key = 'nova_priv_key'
    cache_url = ''


class FakeConfCache(FakeConf):
    cache_url = 'memory://?default_ttl=5'


class TestMetadataProxyHandlerBase(base.BaseTestCase):
    fake_conf = FakeConf

    def setUp(self):
        super(TestMetadataProxyHandlerBase, self).setUp()
        self.log_p = mock.patch.object(agent, 'LOG')
        self.log = self.log_p.start()
        self.handler = agent.MetadataProxyHandler(self.fake_conf)
        self.handler.plugin_rpc = mock.Mock()
        self.handler.context = mock.Mock()

class TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase):
    def test_get_port_filters(self):
        router_id = 'test_router_id'
        ip = '1.2.3.4'
        networks = ('net_id1', 'net_id2')
        expected = {'device_id': [router_id],
                    'device_owner': n_const.ROUTER_INTERFACE_OWNERS,
                    'network_id': networks,
                    'fixed_ips': {'ip_address': [ip]}}
        actual = self.handler._get_port_filters(router_id, ip, networks)
        self.assertEqual(expected, actual)

    def test_get_router_networks(self):
        router_id = 'router-id'
        expected = ('network_id1', 'network_id2')
        ports = [{'network_id': 'network_id1', 'something': 42},
                 {'network_id': 'network_id2', 'something_else': 32}]
        self.handler.plugin_rpc.get_ports.return_value = ports
        networks = self.handler._get_router_networks(router_id)
        self.assertEqual(expected, networks)

    def test_get_ports_for_remote_address(self):
        ip = '1.1.1.1'
        networks = ('network_id1', 'network_id2')
        expected = [{'port_id': 'port_id1'},
                    {'port_id': 'port_id2'}]
        self.handler.plugin_rpc.get_ports.return_value = expected
        ports = self.handler._get_ports_for_remote_address(ip, networks)
        self.assertEqual(expected, ports)


class TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase):
    fake_conf = FakeConfCache

    def test_call(self):
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_tenant_id') as get_ids:
            get_ids.return_value = ('instance_id', 'tenant_id')
            with mock.patch.object(self.handler, '_proxy_request') as proxy:
                proxy.return_value = 'value'

                retval = self.handler(req)
                self.assertEqual(retval, 'value')

    def test_call_no_instance_match(self):
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_tenant_id') as get_ids:
            get_ids.return_value = None, None
            retval = self.handler(req)
            self.assertIsInstance(retval, webob.exc.HTTPNotFound)

    def test_call_internal_server_error(self):
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_tenant_id') as get_ids:
            get_ids.side_effect = Exception
            retval = self.handler(req)
            self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
            self.assertEqual(len(self.log.mock_calls), 2)

    def test_get_router_networks(self):
        router_id = 'router-id'
        expected = ('network_id1', 'network_id2')
        ports = [{'network_id': 'network_id1', 'something': 42},
                 {'network_id': 'network_id2', 'something_else': 32}]
        mock_get_ports = self.handler.plugin_rpc.get_ports
        mock_get_ports.return_value = ports
        networks = self.handler._get_router_networks(router_id)
        mock_get_ports.assert_called_once_with(
            mock.ANY, {'device_id': [router_id],
                       'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
        self.assertEqual(expected, networks)
    def _test_get_router_networks_twice_helper(self):
        router_id = 'router-id'
        ports = [{'network_id': 'network_id1', 'something': 42}]
        expected_networks = ('network_id1',)
        with mock.patch('oslo_utils.timeutils.utcnow_ts', return_value=0):
            mock_get_ports = self.handler.plugin_rpc.get_ports
            mock_get_ports.return_value = ports
            networks = self.handler._get_router_networks(router_id)
            mock_get_ports.assert_called_once_with(
                mock.ANY, {'device_id': [router_id],
                           'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
            self.assertEqual(expected_networks, networks)
            networks = self.handler._get_router_networks(router_id)

    def test_get_router_networks_twice(self):
        self._test_get_router_networks_twice_helper()
        self.assertEqual(
            1, self.handler.plugin_rpc.get_ports.call_count)

    def _get_ports_for_remote_address_cache_hit_helper(self):
        remote_address = 'remote_address'
        networks = ('net1', 'net2')
        mock_get_ports = self.handler.plugin_rpc.get_ports
        mock_get_ports.return_value = [{'network_id': 'net1',
                                        'something': 42}]
        self.handler._get_ports_for_remote_address(remote_address, networks)
        mock_get_ports.assert_called_once_with(
            mock.ANY, {'network_id': networks,
                       'fixed_ips': {'ip_address': [remote_address]}}
        )
        self.assertEqual(1, mock_get_ports.call_count)
        self.handler._get_ports_for_remote_address(remote_address,
                                                   networks)

    def test_get_ports_for_remote_address_cache_hit(self):
        self._get_ports_for_remote_address_cache_hit_helper()
        self.assertEqual(
            1, self.handler.plugin_rpc.get_ports.call_count)

    def test_get_ports_network_id(self):
        network_id = 'network-id'
        router_id = 'router-id'
        remote_address = 'remote-address'
        expected = ['port1']
        networks = (network_id,)
        with mock.patch.object(self.handler,
                               '_get_ports_for_remote_address'
                               ) as mock_get_ip_addr,\
                mock.patch.object(self.handler,
                                  '_get_router_networks'
                                  ) as mock_get_router_networks:
            mock_get_ip_addr.return_value = expected
            ports = self.handler._get_ports(remote_address, network_id,
                                            router_id)
            mock_get_ip_addr.assert_called_once_with(remote_address,
                                                     networks)
            self.assertFalse(mock_get_router_networks.called)
        self.assertEqual(expected, ports)

    def test_get_ports_router_id(self):
        router_id = 'router-id'
        remote_address = 'remote-address'
        expected = ['port1']
        networks = ('network1', 'network2')
        with mock.patch.object(self.handler,
                               '_get_ports_for_remote_address',
                               return_value=expected
                               ) as mock_get_ip_addr,\
                mock.patch.object(self.handler,
                                  '_get_router_networks',
                                  return_value=networks
                                  ) as mock_get_router_networks:
            ports = self.handler._get_ports(remote_address,
                                            router_id=router_id)
            mock_get_router_networks.assert_called_once_with(router_id)
            mock_get_ip_addr.assert_called_once_with(remote_address, networks)
        self.assertEqual(expected, ports)

    def test_get_ports_no_id(self):
        self.assertRaises(TypeError, self.handler._get_ports,
                          'remote_address')

    def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
                                           networks=None, router_id=None):
        remote_address = '192.168.1.1'
        headers['X-Forwarded-For'] = remote_address
        req = mock.Mock(headers=headers)

        def mock_get_ports(*args, **kwargs):
            return list_ports_retval.pop(0)

        self.handler.plugin_rpc.get_ports.side_effect = mock_get_ports
        instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)

        expected = []

        if router_id:
            expected.append(
                mock.call(
                    mock.ANY,
                    {'device_id': [router_id],
                     'device_owner': n_const.ROUTER_INTERFACE_OWNERS}
                )
            )

        expected.append(
            mock.call(
                mock.ANY,
                {'network_id': networks,
                 'fixed_ips': {'ip_address': ['192.168.1.1']}}
            )
        )

        self.handler.plugin_rpc.get_ports.assert_has_calls(expected)

        return (instance_id, tenant_id)
    def test_get_instance_id_router_id(self):
        router_id = 'the_id'
        headers = {
            'X-Neutron-Router-ID': router_id
        }

        networks = ('net1', 'net2')
        ports = [
            [{'network_id': 'net1'}, {'network_id': 'net2'}],
            [{'device_id': 'device_id', 'tenant_id': 'tenant_id',
              'network_id': 'net1'}]
        ]

        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=networks,
                                                    router_id=router_id),
            ('device_id', 'tenant_id')
        )

    def test_get_instance_id_router_id_no_match(self):
        router_id = 'the_id'
        headers = {
            'X-Neutron-Router-ID': router_id
        }
        networks = ('net1', 'net2')
        ports = [
            [{'network_id': 'net1'}, {'network_id': 'net2'}],
            []
        ]
        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=networks,
                                                    router_id=router_id),
            (None, None)
        )

    def test_get_instance_id_network_id(self):
        network_id = 'the_id'
        headers = {
            'X-Neutron-Network-ID': network_id
        }

        ports = [
            [{'device_id': 'device_id',
              'tenant_id': 'tenant_id',
              'network_id': 'the_id'}]
        ]

        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=('the_id',)),
            ('device_id', 'tenant_id')
        )

    def test_get_instance_id_network_id_no_match(self):
        network_id = 'the_id'
        headers = {
            'X-Neutron-Network-ID': network_id
        }

        ports = [[]]

        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=('the_id',)),
            (None, None)
        )

    def _proxy_request_test_helper(self, response_code=200, method='GET'):
        hdrs = {'X-Forwarded-For': '8.8.8.8'}
        body = 'body'

        req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
                        method=method, body=body)
        resp = mock.MagicMock(status=response_code)
        req.response = resp
        with mock.patch.object(self.handler, '_sign_instance_id') as sign:
            sign.return_value = 'signed'
            with mock.patch('httplib2.Http') as mock_http:
                resp.__getitem__.return_value = "text/plain"
                mock_http.return_value.request.return_value = (resp, 'content')

                retval = self.handler._proxy_request('the_id', 'tenant_id',
                                                     req)
                mock_http.assert_called_once_with(
                    ca_certs=None, disable_ssl_certificate_validation=True)
                mock_http.assert_has_calls([
                    mock.call().add_certificate(
                        FakeConf.nova_client_priv_key,
                        FakeConf.nova_client_cert,
                        "%s:%s" % (FakeConf.nova_metadata_ip,
                                   FakeConf.nova_metadata_port)
                    ),
                    mock.call().request(
                        'http://9.9.9.9:8775/the_path',
                        method=method,
                        headers={
                            'X-Forwarded-For': '8.8.8.8',
                            'X-Instance-ID-Signature': 'signed',
                            'X-Instance-ID': 'the_id',
                            'X-Tenant-ID': 'tenant_id'
                        },
                        body=body
                    )]
                )

                return retval

    def test_proxy_request_post(self):
        response = self._proxy_request_test_helper(method='POST')
        self.assertEqual(response.content_type, "text/plain")
        self.assertEqual(response.body, 'content')

    def test_proxy_request_200(self):
        response = self._proxy_request_test_helper(200)
        self.assertEqual(response.content_type, "text/plain")
        self.assertEqual(response.body, 'content')

    def test_proxy_request_400(self):
        self.assertIsInstance(self._proxy_request_test_helper(400),
                              webob.exc.HTTPBadRequest)

    def test_proxy_request_403(self):
        self.assertIsInstance(self._proxy_request_test_helper(403),
                              webob.exc.HTTPForbidden)

    def test_proxy_request_404(self):
        self.assertIsInstance(self._proxy_request_test_helper(404),
                              webob.exc.HTTPNotFound)

    def test_proxy_request_409(self):
        self.assertIsInstance(self._proxy_request_test_helper(409),
                              webob.exc.HTTPConflict)

    def test_proxy_request_500(self):
        self.assertIsInstance(self._proxy_request_test_helper(500),
                              webob.exc.HTTPInternalServerError)
    def test_proxy_request_other_code(self):
        with testtools.ExpectedException(Exception):
            self._proxy_request_test_helper(302)

    def test_sign_instance_id(self):
        self.assertEqual(
            self.handler._sign_instance_id('foo'),
            '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
        )


class TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache):
    fake_conf = FakeConf

    def test_get_router_networks_twice(self):
        self._test_get_router_networks_twice_helper()
        self.assertEqual(
            2, self.handler.plugin_rpc.get_ports.call_count)

    def test_get_ports_for_remote_address_cache_hit(self):
        self._get_ports_for_remote_address_cache_hit_helper()
        self.assertEqual(
            2, self.handler.plugin_rpc.get_ports.call_count)


class TestUnixDomainMetadataProxy(base.BaseTestCase):
    def setUp(self):
        super(TestUnixDomainMetadataProxy, self).setUp()
        self.cfg_p = mock.patch.object(agent, 'cfg')
        self.cfg = self.cfg_p.start()
        looping_call_p = mock.patch(
            'oslo_service.loopingcall.FixedIntervalLoopingCall')
        self.looping_mock = looping_call_p.start()
        self.cfg.CONF.metadata_proxy_socket = '/the/path'
        self.cfg.CONF.metadata_workers = 0
        self.cfg.CONF.metadata_backlog = 128
        self.cfg.CONF.metadata_proxy_socket_mode = config.USER_MODE

    @mock.patch.object(utils, 'ensure_dir')
    def test_init_doesnot_exists(self, ensure_dir):
        agent.UnixDomainMetadataProxy(mock.Mock())
        ensure_dir.assert_called_once_with('/the')

    def test_init_exists(self):
        with mock.patch('os.path.isdir') as isdir:
            with mock.patch('os.unlink') as unlink:
                isdir.return_value = True
                agent.UnixDomainMetadataProxy(mock.Mock())
                unlink.assert_called_once_with('/the/path')

    def test_init_exists_unlink_no_file(self):
        with mock.patch('os.path.isdir') as isdir:
            with mock.patch('os.unlink') as unlink:
                with mock.patch('os.path.exists') as exists:
                    isdir.return_value = True
                    exists.return_value = False
                    unlink.side_effect = OSError

                    agent.UnixDomainMetadataProxy(mock.Mock())
                    unlink.assert_called_once_with('/the/path')

    def test_init_exists_unlink_fails_file_still_exists(self):
        with mock.patch('os.path.isdir') as isdir:
            with mock.patch('os.unlink') as unlink:
                with mock.patch('os.path.exists') as exists:
                    isdir.return_value = True
                    exists.return_value = True
                    unlink.side_effect = OSError

                    with testtools.ExpectedException(OSError):
                        agent.UnixDomainMetadataProxy(mock.Mock())
                    unlink.assert_called_once_with('/the/path')

    @mock.patch.object(agent, 'MetadataProxyHandler')
    @mock.patch.object(agent_utils, 'UnixDomainWSGIServer')
    @mock.patch.object(utils, 'ensure_dir')
    def test_run(self, ensure_dir, server, handler):
        p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
        p.run()

        ensure_dir.assert_called_once_with('/the')
        server.assert_has_calls([
            mock.call('neutron-metadata-agent'),
            mock.call().start(handler.return_value,
                              '/the/path', workers=0,
                              backlog=128, mode=0o644),
            mock.call().wait()]
        )

    def test_main(self):
        with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:
            with mock.patch.object(metadata_agent, 'config') as config:
                with mock.patch.object(metadata_agent, 'cfg') as cfg:
                    with mock.patch.object(utils, 'cfg'):
                        metadata_agent.main()

                        self.assertTrue(config.setup_logging.called)
                        proxy.assert_has_calls([
                            mock.call(cfg.CONF),
                            mock.call().run()]
                        )

    def test_init_state_reporting(self):
        with mock.patch('os.makedirs'):
            proxy = agent.UnixDomainMetadataProxy(mock.Mock())
            self.looping_mock.assert_called_once_with(proxy._report_state)
            self.looping_mock.return_value.start.assert_called_once_with(
                interval=mock.ANY)

    def test_report_state(self):
        with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:
            with mock.patch('os.makedirs'):
                proxy = agent.UnixDomainMetadataProxy(mock.Mock())
                self.assertTrue(proxy.agent_state['start_flag'])
                proxy._report_state()
                self.assertNotIn('start_flag', proxy.agent_state)
                state_api_inst = state_api.return_value
                state_api_inst.report_state.assert_called_once_with(
                    proxy.context, proxy.agent_state, use_call=True)
```
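A note on `test_sign_instance_id` above: the expected hex digest is plain HMAC-SHA256 over the instance id, keyed with the shared secret (`'secret'` in `FakeConf`). A minimal sketch reproducing the test vector with the standard library:

```python
# HMAC-SHA256(key='secret', msg='foo') yields the digest the test asserts.
import hashlib
import hmac

sig = hmac.new(b'secret', b'foo', hashlib.sha256).hexdigest()
assert sig == '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
```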

Repository: pixelrebel/st2 (refs/heads/master)
File: st2common/tests/unit/test_util_mistral_dsl_transform.py (count: 7)

```python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import six
import yaml

from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
from st2common.exceptions.workflow import WorkflowDefinitionException
from st2common.models.api.action import ActionAPI, RunnerTypeAPI
from st2common.persistence.action import Action
from st2common.persistence.runner import RunnerType
from st2common.util.workflow import mistral as utils

WB_PRE_XFORM_FILE = 'wb_pre_xform.yaml'
WB_POST_XFORM_FILE = 'wb_post_xform.yaml'
WF_PRE_XFORM_FILE = 'wf_pre_xform.yaml'
WF_POST_XFORM_FILE = 'wf_post_xform.yaml'
WF_NO_REQ_PARAM_FILE = 'wf_missing_required_param.yaml'
WF_UNEXP_PARAM_FILE = 'wf_has_unexpected_param.yaml'

TEST_FIXTURES = {
    'workflows': [
        WB_PRE_XFORM_FILE,
        WB_POST_XFORM_FILE,
        WF_PRE_XFORM_FILE,
        WF_POST_XFORM_FILE,
        WF_NO_REQ_PARAM_FILE,
        WF_UNEXP_PARAM_FILE
    ],
    'actions': [
        'local.yaml',
        'a1.yaml',
        'a2.yaml',
        'action1.yaml'
    ],
    'runners': [
        'run-local.yaml',
        'testrunner1.yaml',
        'testrunner2.yaml'
    ]
}

PACK = 'generic'
LOADER = FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK,
                                fixtures_dict=TEST_FIXTURES)

WB_PRE_XFORM_PATH = LOADER.get_fixture_file_path_abs(
    PACK, 'workflows', WB_PRE_XFORM_FILE)
WB_PRE_XFORM_DEF = FIXTURES['workflows'][WB_PRE_XFORM_FILE]
WB_POST_XFORM_PATH = LOADER.get_fixture_file_path_abs(
    PACK, 'workflows', WB_POST_XFORM_FILE)
WB_POST_XFORM_DEF = FIXTURES['workflows'][WB_POST_XFORM_FILE]
WF_PRE_XFORM_PATH = LOADER.get_fixture_file_path_abs(
    PACK, 'workflows', WF_PRE_XFORM_FILE)
WF_PRE_XFORM_DEF = FIXTURES['workflows'][WF_PRE_XFORM_FILE]
WF_POST_XFORM_PATH = LOADER.get_fixture_file_path_abs(
    PACK, 'workflows', WF_POST_XFORM_FILE)
WF_POST_XFORM_DEF = FIXTURES['workflows'][WF_POST_XFORM_FILE]
WF_NO_REQ_PARAM_PATH = LOADER.get_fixture_file_path_abs(
    PACK, 'workflows', WF_NO_REQ_PARAM_FILE)
WF_NO_REQ_PARAM_DEF = FIXTURES['workflows'][WF_NO_REQ_PARAM_FILE]
WF_UNEXP_PARAM_PATH = LOADER.get_fixture_file_path_abs(
    PACK, 'workflows', WF_UNEXP_PARAM_FILE)
WF_UNEXP_PARAM_DEF = FIXTURES['workflows'][WF_UNEXP_PARAM_FILE]


class DSLTransformTestCase(DbTestCase):

    @classmethod
    def setUpClass(cls):
        super(DSLTransformTestCase, cls).setUpClass()

        for _, fixture in six.iteritems(FIXTURES['runners']):
            instance = RunnerTypeAPI(**fixture)
            RunnerType.add_or_update(RunnerTypeAPI.to_model(instance))

        for _, fixture in six.iteritems(FIXTURES['actions']):
            instance = ActionAPI(**fixture)
            Action.add_or_update(ActionAPI.to_model(instance))

    def _read_file_content(self, path):
        with open(path, 'r') as f:
            return f.read()

    def _read_yaml_file_as_json(self, path):
        def_yaml = self._read_file_content(path)
        return yaml.safe_load(def_yaml)

    def test_transform_workbook_dsl_yaml(self):
        def_yaml = self._read_file_content(WB_PRE_XFORM_PATH)
        new_def = utils.transform_definition(def_yaml)
        actual = yaml.safe_load(new_def)
        expected = copy.deepcopy(WB_POST_XFORM_DEF)
        self.assertDictEqual(actual, expected)

    def test_transform_workbook_dsl_dict(self):
        def_dict = self._read_yaml_file_as_json(WB_PRE_XFORM_PATH)
        actual = utils.transform_definition(def_dict)
        expected = copy.deepcopy(WB_POST_XFORM_DEF)
        self.assertDictEqual(actual, expected)

    def test_transform_workflow_dsl_yaml(self):
        def_yaml = self._read_file_content(WF_PRE_XFORM_PATH)
        new_def = utils.transform_definition(def_yaml)
        actual = yaml.safe_load(new_def)
        expected = copy.deepcopy(WF_POST_XFORM_DEF)
        self.assertDictEqual(actual, expected)

    def test_transform_workflow_dsl_dict(self):
        def_dict = self._read_yaml_file_as_json(WF_PRE_XFORM_PATH)
        actual = utils.transform_definition(def_dict)
        expected = copy.deepcopy(WF_POST_XFORM_DEF)
        self.assertDictEqual(actual, expected)

    def test_required_action_params_failure(self):
        def_dict = self._read_yaml_file_as_json(WF_NO_REQ_PARAM_PATH)
        with self.assertRaises(WorkflowDefinitionException) as cm:
            utils.transform_definition(def_dict)
        self.assertIn('Missing required parameters', cm.exception.message)

    def test_unexpected_action_params_failure(self):
        def_dict = self._read_yaml_file_as_json(WF_UNEXP_PARAM_PATH)
        with self.assertRaises(WorkflowDefinitionException) as cm:
            utils.transform_definition(def_dict)
        self.assertIn('Unexpected parameters', cm.exception.message)

    def test_deprecated_callback_action(self):
        def_dict = self._read_yaml_file_as_json(WB_PRE_XFORM_PATH)
        def_dict['workflows']['main']['tasks']['callback'] = {
            'action': 'st2.callback'}
        def_yaml = yaml.safe_dump(def_dict)
        self.assertRaises(WorkflowDefinitionException,
                          utils.transform_definition, def_yaml)
```
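A usage note (not part of the repository file): as the tests above show, `transform_definition()` accepts either a YAML string or an already-parsed dict and returns the same kind it was given. A minimal sketch, assuming an st2 environment and a hypothetical local fixture file:

```python
# str in -> transformed YAML str out; dict in -> transformed dict out.
import yaml
from st2common.util.workflow import mistral as utils

with open('wb_pre_xform.yaml') as f:          # hypothetical fixture path
    def_yaml = f.read()

new_yaml = utils.transform_definition(def_yaml)
new_dict = utils.transform_definition(yaml.safe_load(def_yaml))
assert yaml.safe_load(new_yaml) == new_dict
```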

Repository: Davideddu/kivy-forkedtouch (refs/heads/master)
File: kivy/core/video/video_null.py (count: 81)

```python
'''
VideoNull: empty implementation of VideoBase for the no provider case
'''

from kivy.core.video import VideoBase


class VideoNull(VideoBase):
    '''VideoBase implementation when there is no provider.
    '''
    pass
```

Repository: LUTAN/tensorflow (refs/heads/master)
File: tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py (count: 81)

```python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for creating Stochastic Tensors.

See the @{$python/contrib.bayesflow.stochastic_tensor} guide.

@@BaseStochasticTensor
@@StochasticTensor
@@MeanValue
@@SampleValue
@@value_type
@@get_current_value_type
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.stochastic_tensor_impl import *
# pylint: enable=wildcard-import

from tensorflow.python.util.all_util import remove_undocumented

_allowed_symbols = [
    "BaseStochasticTensor",
    "StochasticTensor",
    "ObservedStochasticTensor",
    "MeanValue",
    "SampleValue",
    "value_type",
    "get_current_value_type",
]

remove_undocumented(__name__, _allowed_symbols)
```
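The file above follows a common pattern: wildcard-import an implementation module, then prune the public namespace down to an explicit allowlist. A generic sketch of the idea; the `prune` helper here is illustrative and is not TensorFlow's `remove_undocumented`:

```python
# Keep only allowlisted names visible on a module after a wildcard import.
import sys

def prune(module_name, allowed):
    module = sys.modules[module_name]
    for name in list(vars(module)):
        if not name.startswith('__') and name not in allowed:
            delattr(module, name)
    module.__all__ = list(allowed)  # also constrains `from mod import *`
```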

Repository: jsilhan/dnf (refs/heads/master)
File: dnf/cli/commands/upgrade.py (count: 1)

```python
# upgrade.py
# Upgrade CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.  You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#

from __future__ import absolute_import
from __future__ import unicode_literals

from dnf.cli import commands
from dnf.i18n import _
from dnf.cli.option_parser import OptionParser

import dnf.exceptions
import logging

logger = logging.getLogger('dnf')


class UpgradeCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    update command.
    """
    aliases = ('upgrade', 'update', 'upgrade-to', 'update-to')
    summary = _('upgrade a package or packages on your system')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('packages', nargs='*',
                            help=_('Package to upgrade'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            metavar=_('PACKAGE'))

    def configure(self):
        """Verify that conditions are met so that this command can run.

        These include that there are enabled repositories with gpg
        keys, and that this command is being run by the root user.
        """
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        commands._checkGPGKey(self.base, self.cli)
        commands._checkEnabledRepo(self.base, self.opts.filenames)
        self.upgrade_minimal = None
        self.all_security = None

    def run(self):
        self.cli._populate_update_security_filter(
            self.opts, minimal=self.upgrade_minimal, all=self.all_security)

        done = False

        if self.opts.filenames or self.opts.pkg_specs or self.opts.grp_specs:
            # Update files.
            if self.opts.filenames:
                for pkg in self.base.add_remote_rpms(self.opts.filenames,
                                                     strict=False):
                    try:
                        self.base.package_upgrade(pkg)
                    except dnf.exceptions.MarkingError as e:
                        logger.info(_('No match for argument: %s'),
                                    self.base.output.term.bold(pkg.location))
                    else:
                        done = True

            # Update packages.
            for pkg_spec in self.opts.pkg_specs:
                try:
                    self.base.upgrade(pkg_spec)
                except dnf.exceptions.MarkingError as e:
                    logger.info(_('No match for argument: %s'),
                                self.base.output.term.bold(pkg_spec))
                else:
                    done = True

            # Update groups.
            if self.opts.grp_specs:
                self.base.read_comps(arch_filter=True)
                self.base.env_group_upgrade(self.opts.grp_specs)
                done = True
        else:
            # Update all packages.
            self.base.upgrade_all()
            done = True

        if not done:
            raise dnf.exceptions.Error(_('No packages marked for upgrade.'))
```

Repository: willprice/weboob (refs/heads/master)
File: modules/ilmatieteenlaitos/pages.py (count: 5)

```python
# -*- coding: utf-8 -*-

# Copyright(C) 2015 Matthieu Weber
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from datetime import date
from itertools import imap, ifilter

from weboob.browser.pages import JsonPage, HTMLPage
from weboob.browser.elements import ItemElement, ListElement, DictElement, method
from weboob.capabilities.weather import Forecast, Current, City, Temperature
from weboob.browser.filters.json import Dict
from weboob.browser.filters.standard import Filter, CleanText, CleanDecimal, Regexp, Format, Date


class Id(Filter):
    def filter(self, txt):
        return txt.split(", ")[0]


class SearchCitiesPage(JsonPage):
    @method
    class iter_cities(DictElement):
        item_xpath = '.'
        ignore_duplicate = True

        class item(ItemElement):
            klass = City
            obj_id = Id(Dict('id'))
            obj_name = Dict('value')


class WeatherPage(HTMLPage):
    @method
    class iter_forecast(ListElement):
        item_xpath = ('//div[contains(@class, "mid") and contains(@class, "local-weather-forecast")]//'
                      'tr[@class="meteogram-dates"]/td')

        class item(ItemElement):
            klass = Forecast
            obj_id = CleanText('.//span/@title')

            def obj_date(self):
                months = [u'tammikuuta', u'helmikuuta', u'maaliskuuta', u'huhtikuuta',
                          u'toukokuuta', u'kesäkuuta', u'heinäkuuta', u'elokuuta',
                          u'syyskuuta', u'lokakuuta', u'marraskuuta', u'joulukuuta']
                d = CleanText('.//span/@title')(self).split()
                return date(int(d[2]), months.index(d[1])+1, int(d[0].strip(".")))

            def temperatures(self):
                offset = int(CleanText('string(sum(./preceding-sibling::td/@colspan))')(self))
                length = int(CleanText('@colspan')(self))
                temps = CleanText('../../../tbody/tr[@class="meteogram-temperatures"]/td[position() > %d '
                                  'and position() <= %d]/span' % (offset, offset+length))(self)
                return [float(_.strip(u'\xb0')) for _ in temps.split()]

            def obj_low(self):
                return Temperature(min(self.temperatures()), u'C')

            def obj_high(self):
                return Temperature(max(self.temperatures()), u'C')

            def obj_text(self):
                offset = int(CleanText('string(sum(./preceding-sibling::td/@colspan))')(self))
                length = int(CleanText('@colspan')(self))
                hour_test = ('../../tr[@class="meteogram-times"]/td[position() > %d and position() <= %d '
                             'and .//text() = "%%s"]' % (offset, offset+length))
                hour_offset = 'string(count(%s/preceding-sibling::td)+1)' % (hour_test)
                values = [
                    '../../../tbody/tr[@class="meteogram-weather-symbols"]/td[position() = %d]/div/@title',
                    '../../../tbody/tr[@class="meteogram-apparent-temperatures"]/td[position() = %d]/div/@title',
                    '../../../tbody/tr[@class="meteogram-wind-symbols"]/td[position() = %d]/div/@title',
                    '../../../tbody/tr[@class="meteogram-probabilities-of-precipitation"]/td[position() = %d]' +
                    '/div/@title',
                    '../../../tbody/tr[@class="meteogram-hourly-precipitation-values"]/td[position() = %d]/span/@title',
                ]

                def descriptive_text_for_hour(hour):
                    hour_exists = CleanText(hour_test % hour)(self) == hour
                    if hour_exists:
                        offset = int(CleanText(hour_offset % hour)(self))

                        def info_for_value(value):
                            return CleanText(value % offset)(self).replace(u'edeltävän tunnin ', u'')

                        return ("klo %s: " % hour) + ", ".join(ifilter(bool, imap(info_for_value, values)))

                return u'\n' + u'\n'.join(ifilter(bool, imap(descriptive_text_for_hour, ["02", "14"])))

    @method
    class get_current(ItemElement):
        klass = Current
        obj_id = date.today()
        obj_date = Date(Regexp(CleanText('//table[@class="observation-text"]//span[@class="time-stamp"]'),
                               r'^(\d+\.\d+.\d+)'))
        obj_text = Format(u'%s, %s, %s',
                          CleanText(u'(//table[@class="observation-text"])//tr[2]/td[2]'),
                          CleanText(u'(//table[@class="observation-text"])//tr[5]/td[1]'),
                          CleanText(u'(//table[@class="observation-text"])//tr[4]/td[2]'))

        def obj_temp(self):
            path = u'//table[@class="observation-text"]//span[@class="parameter-name" and text() = "Lämpötila"]' + \
                   u'/../span[@class="parameter-value"]'
            temp = CleanDecimal(Regexp(CleanText(path), r'^([^ \xa0]+)'), replace_dots=True)(self)
            unit = Regexp(CleanText(path), r'\xb0(\w)')(self)
            return Temperature(float(temp), unit)
```

Repository: dkodnik/Ant (refs/heads/master)
File: addons/account_analytic_analysis/res_config.py (count: 426)

```python
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv


class sale_configuration(osv.osv_memory):
    _inherit = 'sale.config.settings'

    _columns = {
        'group_template_required': fields.boolean(
            "Mandatory use of templates.",
            implied_group='account_analytic_analysis.group_template_required',
            help="Allows you to set the template field as required when "
                 "creating an analytic account or a contract."),
    }
```

Repository: Sodki/ansible-modules-extras (refs/heads/devel)
File: monitoring/pagerduty.py (count: 132)

```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
    - This module will let you create PagerDuty maintenance windows
version_added: "1.2"
author:
    - "Andrew Newdigate (@suprememoocow)"
    - "Dylan Silva (@thaumos)"
    - "Justin Johns"
    - "Bruce Pennypacker"
requirements:
    - PagerDuty API access
options:
    state:
        description:
            - Create a maintenance window or get a list of ongoing windows.
        required: true
        default: null
        choices: [ "running", "started", "ongoing", "absent" ]
        aliases: []
    name:
        description:
            - PagerDuty unique subdomain.
        required: true
        default: null
        choices: []
        aliases: []
    user:
        description:
            - PagerDuty user ID.
        required: true
        default: null
        choices: []
        aliases: []
    passwd:
        description:
            - PagerDuty user password.
        required: true
        default: null
        choices: []
        aliases: []
    token:
        description:
            - A pagerduty token, generated on the pagerduty site. Can be used
              instead of user/passwd combination.
        required: true
        default: null
        choices: []
        aliases: []
        version_added: '1.8'
    requester_id:
        description:
            - ID of user making the request. Only needed when using a token
              and creating a maintenance_window.
        required: true
        default: null
        choices: []
        aliases: []
        version_added: '1.8'
    service:
        description:
            - A comma separated list of PagerDuty service IDs.
        required: false
        default: null
        choices: []
        aliases: [ services ]
    hours:
        description:
            - Length of maintenance window in hours.
        required: false
        default: 1
        choices: []
        aliases: []
    minutes:
        description:
            - Maintenance window in minutes (this is added to the hours).
        required: false
        default: 0
        choices: []
        aliases: []
        version_added: '1.8'
    desc:
        description:
            - Short description of maintenance window.
        required: false
        default: Created by Ansible
        choices: []
        aliases: []
    validate_certs:
        description:
            - If C(no), SSL certificates will not be validated. This should
              only be used on personally controlled sites using self-signed
              certificates.
        required: false
        default: 'yes'
        choices: ['yes', 'no']
        version_added: 1.5.1
'''

EXAMPLES = '''
# List ongoing maintenance windows using a user/passwd
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing

# List ongoing maintenance windows using a token
- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing

# Create a 1 hour maintenance window for service FOO123, using a user/passwd
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=running service=FOO123

# Create a 5 minute maintenance window for service FOO123, using a token
- pagerduty: name=companyabc token=xxxxxxxxxxxxxx hours=0 minutes=5 state=running service=FOO123

# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=running service=FOO123 hours=4 desc=deployment
  register: pd_window

# Delete the previous maintenance window
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=absent service={{ pd_window.result.maintenance_window.id }}
'''

import datetime
import base64


def auth_header(user, passwd, token):
    if token:
        return "Token token=%s" % token

    auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
    return "Basic %s" % auth


def ongoing(module, name, user, passwd, token):
    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
    headers = {"Authorization": auth_header(user, passwd, token)}

    response, info = fetch_url(module, url, headers=headers)
    if info['status'] != 200:
        module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])

    try:
        json_out = json.loads(response.read())
    except:
        json_out = ""

    return False, json_out, False


def create(module, name, user, passwd, token, requester_id, service, hours,
           minutes, desc):
    now = datetime.datetime.utcnow()
    later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
    start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
    end = later.strftime("%Y-%m-%dT%H:%M:%SZ")

    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
    headers = {
        'Authorization': auth_header(user, passwd, token),
        'Content-Type': 'application/json',
    }
    request_data = {'maintenance_window': {'start_time': start,
                                           'end_time': end,
                                           'description': desc,
                                           'service_ids': service}}

    if requester_id:
        request_data['requester_id'] = requester_id
    else:
        if token:
            module.fail_json(msg="requester_id is required when using a token")

    data = json.dumps(request_data)
    response, info = fetch_url(module, url, data=data, headers=headers,
                               method='POST')
    if info['status'] != 200:
        module.fail_json(msg="failed to create the window: %s" % info['msg'])

    try:
        json_out = json.loads(response.read())
    except:
        json_out = ""

    return False, json_out, True


def absent(module, name, user, passwd, token, requester_id, service):
    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
    headers = {
        'Authorization': auth_header(user, passwd, token),
        'Content-Type': 'application/json',
    }
    request_data = {}

    if requester_id:
        request_data['requester_id'] = requester_id
    else:
        if token:
            module.fail_json(msg="requester_id is required when using a token")

    data = json.dumps(request_data)
    response, info = fetch_url(module, url, data=data, headers=headers,
                               method='DELETE')
    if info['status'] != 200:
        module.fail_json(msg="failed to delete the window: %s" % info['msg'])

    try:
        json_out = json.loads(response.read())
    except:
        json_out = ""

    return False, json_out, True


def main():

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True,
                       choices=['running', 'started', 'ongoing', 'absent']),
            name=dict(required=True),
            user=dict(required=False),
            passwd=dict(required=False),
            token=dict(required=False),
            service=dict(required=False, type='list', aliases=["services"]),
            requester_id=dict(required=False),
            hours=dict(default='1', required=False),
            minutes=dict(default='0', required=False),
            desc=dict(default='Created by Ansible', required=False),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    state = module.params['state']
    name = module.params['name']
    user = module.params['user']
    passwd = module.params['passwd']
    token = module.params['token']
    service = module.params['service']
    hours = module.params['hours']
    minutes = module.params['minutes']
    token = module.params['token']
    desc = module.params['desc']
    requester_id = module.params['requester_id']

    if not token and not (user or passwd):
        module.fail_json(msg="neither user and passwd nor token specified")

    if state == "running" or state == "started":
        if not service:
            module.fail_json(msg="service not specified")
        (rc, out, changed) = create(module, name, user, passwd, token,
                                    requester_id, service, hours, minutes,
                                    desc)
        if rc == 0:
            changed = True

    if state == "ongoing":
        (rc, out, changed) = ongoing(module, name, user, passwd, token)

    if state == "absent":
        (rc, out, changed) = absent(module, name, user, passwd, token,
                                    requester_id, service)

    if rc != 0:
        module.fail_json(msg="failed", result=out)

    module.exit_json(msg="success", result=out, changed=changed)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

main()
```
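A note on `auth_header()` above: it returns one of two Authorization header shapes depending on whether a token is supplied. A small sketch of both, using `base64.b64encode` for clarity (the module's Python 2 era `base64.encodestring` plus newline stripping is equivalent for short strings):

```python
import base64

user, passwd = 'example@example.com', 'password123'
basic = "Basic " + base64.b64encode(('%s:%s' % (user, passwd)).encode()).decode()
# -> 'Basic ZXhhbXBsZUBleGFtcGxlLmNvbTpwYXNzd29yZDEyMw=='

token_hdr = "Token token=%s" % 'xxxxxxxxxxxxxx'
# -> 'Token token=xxxxxxxxxxxxxx'
```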

Repository: margaritis/iTerm2 (refs/heads/master)
File: tools/ply/ply-3.4/test/lex_rule2.py (count: 174)

```python
# lex_rule2.py
#
# Rule function with incorrect number of arguments

import sys

if ".." not in sys.path:
    sys.path.insert(0, "..")

import ply.lex as lex

tokens = [
    "PLUS",
    "MINUS",
    "NUMBER",
]

t_PLUS = r'\+'
t_MINUS = r'-'


def t_NUMBER():
    r'\d+'
    return t


def t_error(t):
    pass


lex.lex()
```
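As the file's own header comment says, this is a deliberate error case in PLY's test suite: `t_NUMBER()` takes no argument, so `lex.lex()` should reject it. For reference, a correct PLY token rule takes the token object as its single parameter:

```python
# A well-formed PLY rule: one `t` argument, docstring holds the regex.
def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)  # convert the matched text to an int
    return t
```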
</td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">174</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># lex_rule2.py # # Rule function with incorrect number of arguments import sys if ".." not in sys.path: sys.path.insert(0,"..") import ply.lex as lex tokens = [ "PLUS", "MINUS", "NUMBER", ] t_PLUS = r'\+' t_MINUS = r'-' def t_NUMBER(): r'\d+' return t def t_error(t): pass lex.lex() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284647"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">JamesClough/networkx</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/inverse_line_graph</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">7</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python from nose.tools import * from nose import SkipTest import networkx as nx from nose.plugins.attrib import attr from networkx import edge_current_flow_betweenness_centrality \ as edge_current_flow from networkx import edge_current_flow_betweenness_centrality_subset \ as edge_current_flow_subset class TestFlowBetweennessCentrality(object): numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test @classmethod def setupClass(cls): global np try: import numpy as np import scipy except ImportError: raise SkipTest('NumPy not available.') def test_K4_normalized(self): """Betweenness centrality: K4""" G=nx.complete_graph(4) b=nx.current_flow_betweenness_centrality_subset(G, list(G), list(G), normalized=True) b_answer=nx.current_flow_betweenness_centrality(G,normalized=True) for n in sorted(G): assert_almost_equal(b[n],b_answer[n]) def test_K4(self): """Betweenness centrality: K4""" G=nx.complete_graph(4) b=nx.current_flow_betweenness_centrality_subset(G, list(G), list(G), normalized=True) b_answer=nx.current_flow_betweenness_centrality(G,normalized=True) for n in sorted(G): assert_almost_equal(b[n],b_answer[n]) # test weighted network G.add_edge(0,1,weight=0.5,other=0.3) b=nx.current_flow_betweenness_centrality_subset(G, list(G), list(G), normalized=True, weight=None) for n in sorted(G): assert_almost_equal(b[n],b_answer[n]) b=nx.current_flow_betweenness_centrality_subset(G, list(G), list(G), normalized=True) b_answer=nx.current_flow_betweenness_centrality(G,normalized=True) for n in sorted(G): assert_almost_equal(b[n],b_answer[n]) b=nx.current_flow_betweenness_centrality_subset(G, list(G), list(G), normalized=True, weight='other') b_answer=nx.current_flow_betweenness_centrality(G,normalized=True,weight='other') for n in sorted(G): assert_almost_equal(b[n],b_answer[n]) def 
[JamesClough/networkx @ refs/heads/inverse_line_graph] networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py (7)

#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
from nose.plugins.attrib import attr
from networkx import edge_current_flow_betweenness_centrality \
    as edge_current_flow
from networkx import edge_current_flow_betweenness_centrality_subset \
    as edge_current_flow_subset


class TestFlowBetweennessCentrality(object):
    numpy = 1  # nosetests attribute, use nosetests -a 'not numpy' to skip test

    @classmethod
    def setupClass(cls):
        global np
        try:
            import numpy as np
            import scipy
        except ImportError:
            raise SkipTest('NumPy not available.')

    def test_K4_normalized(self):
        """Betweenness centrality: K4"""
        G = nx.complete_graph(4)
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True)
        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])

    def test_K4(self):
        """Betweenness centrality: K4"""
        G = nx.complete_graph(4)
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True)
        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])
        # test weighted network
        G.add_edge(0, 1, weight=0.5, other=0.3)
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True,
                                                          weight=None)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True)
        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True,
                                                          weight='other')
        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True,
                                                          weight='other')
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])

    def test_P4_normalized(self):
        """Betweenness centrality: P4 normalized"""
        G = nx.path_graph(4)
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True)
        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])

    def test_P4(self):
        """Betweenness centrality: P4"""
        G = nx.path_graph(4)
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True)
        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])

    def test_star(self):
        """Betweenness centrality: star """
        G = nx.Graph()
        nx.add_star(G, ['a', 'b', 'c', 'd'])
        b = nx.current_flow_betweenness_centrality_subset(G, list(G), list(G),
                                                          normalized=True)
        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])


# class TestWeightedFlowBetweennessCentrality():
#     pass


class TestEdgeFlowBetweennessCentrality(object):
    numpy = 1  # nosetests attribute, use nosetests -a 'not numpy' to skip test

    @classmethod
    def setupClass(cls):
        global np
        try:
            import numpy as np
            import scipy
        except ImportError:
            raise SkipTest('NumPy not available.')

    def test_K4_normalized(self):
        """Betweenness centrality: K4"""
        G = nx.complete_graph(4)
        b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
        b_answer = edge_current_flow(G, normalized=True)
        for (s, t), v1 in b_answer.items():
            v2 = b.get((s, t), b.get((t, s)))
            assert_almost_equal(v1, v2)

    def test_K4(self):
        """Betweenness centrality: K4"""
        G = nx.complete_graph(4)
        b = edge_current_flow_subset(G, list(G), list(G), normalized=False)
        b_answer = edge_current_flow(G, normalized=False)
        for (s, t), v1 in b_answer.items():
            v2 = b.get((s, t), b.get((t, s)))
            assert_almost_equal(v1, v2)
        # test weighted network
        G.add_edge(0, 1, weight=0.5, other=0.3)
        b = edge_current_flow_subset(G, list(G), list(G), normalized=False,
                                     weight=None)
        # weight is None => same as unweighted network
        for (s, t), v1 in b_answer.items():
            v2 = b.get((s, t), b.get((t, s)))
            assert_almost_equal(v1, v2)
        b = edge_current_flow_subset(G, list(G), list(G), normalized=False)
        b_answer = edge_current_flow(G, normalized=False)
        for (s, t), v1 in b_answer.items():
            v2 = b.get((s, t), b.get((t, s)))
            assert_almost_equal(v1, v2)
        b = edge_current_flow_subset(G, list(G), list(G), normalized=False,
                                     weight='other')
        b_answer = edge_current_flow(G, normalized=False, weight='other')
        for (s, t), v1 in b_answer.items():
            v2 = b.get((s, t), b.get((t, s)))
            assert_almost_equal(v1, v2)

    def test_C4(self):
        """Edge betweenness centrality: C4"""
        G = nx.cycle_graph(4)
        b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
        b_answer = edge_current_flow(G, normalized=True)
        for (s, t), v1 in b_answer.items():
            v2 = b.get((s, t), b.get((t, s)))
            assert_almost_equal(v1, v2)

    def test_P4(self):
        """Edge betweenness centrality: P4"""
        G = nx.path_graph(4)
        b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
        b_answer = edge_current_flow(G, normalized=True)
        for (s, t), v1 in b_answer.items():
            v2 = b.get((s, t), b.get((t, s)))
            assert_almost_equal(v1, v2)
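The tests always pass `list(G)` for both node sets, so subset and full centrality agree. The subset variant restricts which source/target pairs contribute current; a quick illustrative run on a small graph (requires scipy, like the tests above):

```python
# Minimal sketch: subset centrality accumulated over a single (0, 4) pair.
import networkx as nx

G = nx.path_graph(5)
full = nx.current_flow_betweenness_centrality(G, normalized=True)
partial = nx.current_flow_betweenness_centrality_subset(
    G, sources=[0], targets=[4], normalized=True)
for n in G:
    print(n, round(full[n], 3), round(partial[n], 3))
```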
class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">numba/misc/help/inspector.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" This file contains `__main__` so that it can be run as a commandline tool. This file contains functions to inspect Numba's support for a given Python module or a Python package. """ import argparse import pkgutil import warnings import types as pytypes from numba.core import errors from numba._version import get_versions from numba.core.registry import cpu_target from numba.tests.support import captured_stdout def _get_commit(): full = get_versions()['full'].split('.')[0] if not full: warnings.warn( "Cannot find git commit hash. Source links could be inaccurate.", category=errors.NumbaWarning, ) return 'master' return full commit = _get_commit() github_url = 'https://github.com/numba/numba/blob/{commit}/{path}#L{firstline}-L{lastline}' # noqa: E501 def inspect_function(function, target=None): """Return information about the support of a function. Returns ------- info : dict Defined keys: - "numba_type": str or None The numba type object of the function if supported. - "explained": str A textual description of the support. - "source_infos": dict A dictionary containing the source location of each definition. """ target = target or cpu_target tyct = target.typing_context # Make sure we have loaded all extensions tyct.refresh() target.target_context.refresh() info = {} # Try getting the function type source_infos = {} try: nbty = tyct.resolve_value_type(function) except ValueError: nbty = None explained = 'not supported' else: # Make a longer explanation of the type explained = tyct.explain_function_type(nbty) for temp in nbty.templates: try: source_infos[temp] = temp.get_source_info() except AttributeError: source_infos[temp] = None info['numba_type'] = nbty info['explained'] = explained info['source_infos'] = source_infos return info def inspect_module(module, target=None, alias=None): """Inspect a module object and yielding results from `inspect_function()` for each function object in the module. 
""" alias = {} if alias is None else alias # Walk the module for name in dir(module): if name.startswith('_'): # Skip continue obj = getattr(module, name) supported_types = (pytypes.FunctionType, pytypes.BuiltinFunctionType) if not isinstance(obj, supported_types): # Skip if it's not a function continue info = dict(module=module, name=name, obj=obj) if obj in alias: info['alias'] = alias[obj] else: alias[obj] = "{module}.{name}".format(module=module.__name__, name=name) info.update(inspect_function(obj, target=target)) yield info class _Stat(object): """For gathering simple statistic of (un)supported functions""" def __init__(self): self.supported = 0 self.unsupported = 0 @property def total(self): total = self.supported + self.unsupported return total @property def ratio(self): ratio = self.supported / self.total * 100 return ratio def describe(self): if self.total == 0: return "empty" return "supported = {supported} / {total} = {ratio:.2f}%".format( supported=self.supported, total=self.total, ratio=self.ratio, ) def __repr__(self): return "{clsname}({describe})".format( clsname=self.__class__.__name__, describe=self.describe(), ) def filter_private_module(module_components): return not any(x.startswith('_') for x in module_components) def filter_tests_module(module_components): return not any(x == 'tests' for x in module_components) _default_module_filters = ( filter_private_module, filter_tests_module, ) def list_modules_in_package(package, module_filters=_default_module_filters): """Yield all modules in a given package. Recursively walks the package tree. """ onerror_ignore = lambda _: None prefix = package.__name__ + "." package_walker = pkgutil.walk_packages( package.__path__, prefix, onerror=onerror_ignore, ) def check_filter(modname): module_components = modname.split('.') return any(not filter_fn(module_components) for filter_fn in module_filters) modname = package.__name__ if not check_filter(modname): yield package for pkginfo in package_walker: modname = pkginfo[1] if check_filter(modname): continue # In case importing of the module print to stdout with captured_stdout(): try: # Import the module mod = __import__(modname) except Exception: continue # Extract the module for part in modname.split('.')[1:]: try: mod = getattr(mod, part) except AttributeError: # Suppress error in getting the attribute mod = None break # Ignore if mod is not a module if not isinstance(mod, pytypes.ModuleType): # Skip non-module continue yield mod class Formatter(object): """Base class for formatters. 
""" def __init__(self, fileobj): self._fileobj = fileobj def print(self, *args, **kwargs): kwargs.setdefault('file', self._fileobj) print(*args, **kwargs) class HTMLFormatter(Formatter): """Formatter that outputs HTML """ def escape(self, text): import html return html.escape(text) def title(self, text): self.print('<h1>', text, '</h2>') def begin_module_section(self, modname): self.print('<h2>', modname, '</h2>') self.print('<ul>') def end_module_section(self): self.print('</ul>') def write_supported_item(self, modname, itemname, typename, explained, sources, alias): self.print('<li>') self.print('{}.<b>{}</b>'.format( modname, itemname, )) self.print(': <b>{}</b>'.format(typename)) self.print('<div><pre>', explained, '</pre></div>') self.print("<ul>") for tcls, source in sources.items(): if source: self.print("<li>") impl = source['name'] sig = source['sig'] filename = source['filename'] lines = source['lines'] self.print( "<p>defined by <b>{}</b>{} at {}:{}-{}</p>".format( self.escape(impl), self.escape(sig), self.escape(filename), lines[0], lines[1], ), ) self.print('<p>{}</p>'.format( self.escape(source['docstring'] or '') )) else: self.print("<li>{}".format(self.escape(str(tcls)))) self.print("</li>") self.print("</ul>") self.print('</li>') def write_unsupported_item(self, modname, itemname): self.print('<li>') self.print('{}.<b>{}</b>: UNSUPPORTED'.format( modname, itemname, )) self.print('</li>') def write_statistic(self, stats): self.print('<p>{}</p>'.format(stats.describe())) class ReSTFormatter(Formatter): """Formatter that output ReSTructured text format for Sphinx docs. """ def escape(self, text): return text def title(self, text): self.print(text) self.print('=' * len(text)) self.print() def begin_module_section(self, modname): self.print(modname) self.print('-' * len(modname)) self.print() def end_module_section(self): self.print() def write_supported_item(self, modname, itemname, typename, explained, sources, alias): self.print('.. function:: {}.{}'.format(modname, itemname)) self.print(' :noindex:') self.print() if alias: self.print(" Alias to: ``{}``".format(alias)) self.print() for tcls, source in sources.items(): if source: impl = source['name'] sig = source['sig'] filename = source['filename'] lines = source['lines'] source_link = github_url.format( commit=commit, path=filename, firstline=lines[0], lastline=lines[1], ) self.print( " - defined by ``{}{}`` at `{}:{}-{} <{}>`_".format( impl, sig, filename, lines[0], lines[1], source_link, ), ) else: self.print(" - defined by ``{}``".format(str(tcls))) self.print() def write_unsupported_item(self, modname, itemname): pass def write_statistic(self, stat): if stat.supported == 0: self.print("This module is not supported.") else: msg = "Not showing {} unsupported functions." self.print(msg.format(stat.unsupported)) self.print() self.print(stat.describe()) self.print() def _format_module_infos(formatter, package_name, mod_sequence, target=None): """Format modules. 
""" formatter.title('Listings for {}'.format(package_name)) alias_map = {} # remember object seen to track alias for mod in mod_sequence: stat = _Stat() modname = mod.__name__ formatter.begin_module_section(formatter.escape(modname)) for info in inspect_module(mod, target=target, alias=alias_map): nbtype = info['numba_type'] if nbtype is not None: stat.supported += 1 formatter.write_supported_item( modname=formatter.escape(info['module'].__name__), itemname=formatter.escape(info['name']), typename=formatter.escape(str(nbtype)), explained=formatter.escape(info['explained']), sources=info['source_infos'], alias=info.get('alias'), ) else: stat.unsupported += 1 formatter.write_unsupported_item( modname=formatter.escape(info['module'].__name__), itemname=formatter.escape(info['name']), ) formatter.write_statistic(stat) formatter.end_module_section() def write_listings(package_name, filename, output_format): """Write listing information into a file. Parameters ---------- package_name : str Name of the package to inspect. filename : str Output filename. Always overwrite. output_format : str Support formats are "html" and "rst". """ package = __import__(package_name) if hasattr(package, '__path__'): mods = list_modules_in_package(package) else: mods = [package] if output_format == 'html': with open(filename + '.html', 'w') as fout: fmtr = HTMLFormatter(fileobj=fout) _format_module_infos(fmtr, package_name, mods) elif output_format == 'rst': with open(filename + '.rst', 'w') as fout: fmtr = ReSTFormatter(fileobj=fout) _format_module_infos(fmtr, package_name, mods) else: raise ValueError( "Output format '{}' is not supported".format(output_format)) program_description = """ Inspect Numba support for a given top-level package. """.strip() def main(): parser = argparse.ArgumentParser(description=program_description) parser.add_argument( 'package', metavar='package', type=str, help='Package to inspect', ) parser.add_argument( '--format', dest='format', default='html', help='Output format; i.e. "html", "rst"', ) parser.add_argument( '--file', dest='file', default='inspector_output', help='Output filename. 
Defaults to "inspector_output.<format>"', ) args = parser.parse_args() package_name = args.package output_format = args.format filename = args.file write_listings(package_name, filename, output_format) if __name__ == '__main__': main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284649"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">hydralabs/pyamf</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">doc/tutorials/examples/gateways/appengine/demo/simplejson/__init__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">10</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">r""" A simple, fast, extensible JSON encoder and decoder JSON (JavaScript Object Notation) <http://json.org> is a subset of JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data interchange format. simplejson exposes an API familiar to uses of the standard library marshal and pickle modules. Encoding basic Python object hierarchies:: >>> import simplejson >>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) '["foo", {"bar": ["baz", null, 1.0, 2]}]' >>> print simplejson.dumps("\"foo\bar") "\"foo\bar" >>> print simplejson.dumps(u'\u1234') "\u1234" >>> print simplejson.dumps('\\') "\\" >>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) {"a": 0, "b": 0, "c": 0} >>> from StringIO import StringIO >>> io = StringIO() >>> simplejson.dump(['streaming API'], io) >>> io.getvalue() '["streaming API"]' Compact encoding:: >>> import simplejson >>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) '[1,2,3,{"4":5,"6":7}]' Pretty printing:: >>> import simplejson >>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4) { "4": 5, "6": 7 } Decoding JSON:: >>> import simplejson >>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] >>> simplejson.loads('"\\"foo\\bar"') u'"foo\x08ar' >>> from StringIO import StringIO >>> io = StringIO('["streaming API"]') >>> simplejson.load(io) [u'streaming API'] Specializing JSON object decoding:: >>> import simplejson >>> def as_complex(dct): ... if '__complex__' in dct: ... return complex(dct['real'], dct['imag']) ... return dct ... >>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}', ... object_hook=as_complex) (1+2j) >>> import decimal >>> simplejson.loads('1.1', parse_float=decimal.Decimal) decimal.Decimal(1.1) Extending JSONEncoder:: >>> import simplejson >>> class ComplexEncoder(simplejson.JSONEncoder): ... def default(self, obj): ... if isinstance(obj, complex): ... return [obj.real, obj.imag] ... return simplejson.JSONEncoder.default(self, obj) ... 
>>> dumps(2 + 1j, cls=ComplexEncoder) '[2.0, 1.0]' >>> ComplexEncoder().encode(2 + 1j) '[2.0, 1.0]' >>> list(ComplexEncoder().iterencode(2 + 1j)) ['[', '2.0', ', ', '1.0', ']'] Using simplejson from the shell to validate and pretty-print:: $ echo '{"json":"obj"}' | python -msimplejson { "json": "obj" } $ echo '{ 1.2:3.4}' | python -msimplejson Expecting property name: line 1 column 2 (char 2) Note that the JSON produced by this module's default settings is a subset of YAML, so it may be used as a serializer for that as well. """ __version__ = '1.8.1' __all__ = [ 'dump', 'dumps', 'load', 'loads', 'JSONDecoder', 'JSONEncoder', ] if __name__ == '__main__': from simplejson.decoder import JSONDecoder from simplejson.encoder import JSONEncoder else: from decoder import JSONDecoder from encoder import JSONEncoder _default_encoder = JSONEncoder( skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, indent=None, separators=None, encoding='utf-8', default=None, ) def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """ Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. 
""" # cached encoder if (skipkeys is False and ensure_ascii is True and check_circular is True and allow_nan is True and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): iterable = _default_encoder.iterencode(obj) else: if cls is None: cls = JSONEncoder iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).iterencode(obj) # could accelerate with writelines in some versions of Python, at # a debuggability cost for chunk in iterable: fp.write(chunk) def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """ Serialize ``obj`` to a JSON formatted ``str``. If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then the return value will be a ``unicode`` instance subject to normal Python ``str`` to ``unicode`` coercion rules instead of being escaped to an ASCII ``str``. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. """ # cached encoder if (skipkeys is False and ensure_ascii is True and check_circular is True and allow_nan is True and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): return _default_encoder.encode(obj) if cls is None: cls = JSONEncoder return cls( skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).encode(obj) _default_decoder = JSONDecoder(encoding=None, object_hook=None) def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, **kw): """ Deserialize ``fp`` (a ``.read()``-supporting file-like object containing a JSON document) to a Python object. If the contents of ``fp`` is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must be specified. 
Encodings that are not ASCII based (such as UCS-2) are not allowed, and should be wrapped with ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` object and passed to ``loads()`` ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. """ return loads(fp.read(), encoding=encoding, cls=cls, object_hook=object_hook, parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant, **kw) def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, **kw): """ Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). ``parse_float``, if specified, will be called with the string of every JSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for JSON floats (e.g. decimal.Decimal). ``parse_int``, if specified, will be called with the string of every JSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for JSON integers (e.g. float). ``parse_constant``, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN, null, true, false. This can be used to raise an exception if invalid JSON numbers are encountered. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. """ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and not kw): return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: kw['object_hook'] = object_hook if parse_float is not None: kw['parse_float'] = parse_float if parse_int is not None: kw['parse_int'] = parse_int if parse_constant is not None: kw['parse_constant'] = parse_constant return cls(encoding=encoding, **kw).decode(s) # # Compatibility cruft from other libraries # def decode(s): """ demjson, python-cjson API compatibility hook. Use loads(s) instead. """ import warnings warnings.warn("simplejson.loads(s) should be used instead of decode(s)", DeprecationWarning) return loads(s) def encode(obj): """ demjson, python-cjson compatibility hook. Use dumps(s) instead. """ import warnings warnings.warn("simplejson.dumps(s) should be used instead of encode(s)", DeprecationWarning) return dumps(obj) def read(s): """ jsonlib, JsonUtils, python-json, json-py API compatibility hook. Use loads(s) instead. """ import warnings warnings.warn("simplejson.loads(s) should be used instead of read(s)", DeprecationWarning) return loads(s) def write(obj): """ jsonlib, JsonUtils, python-json, json-py API compatibility hook. 
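A round trip combining the two docstring hooks above (custom encoder out, `object_hook` back in); Python 2 syntax, to match the module:

```python
# Sketch: encode complex numbers with a JSONEncoder subclass, decode them
# back with an object_hook, as the docstring examples suggest.
import simplejson

class ComplexEncoder(simplejson.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, complex):
            return {'__complex__': True, 'real': obj.real, 'imag': obj.imag}
        return simplejson.JSONEncoder.default(self, obj)

def as_complex(dct):
    if '__complex__' in dct:
        return complex(dct['real'], dct['imag'])
    return dct

text = simplejson.dumps(2 + 1j, cls=ComplexEncoder)
assert simplejson.loads(text, object_hook=as_complex) == 2 + 1j
```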
[z2care/sample-code-in-the-cloud @ refs/heads/master] secure-chat/tchat.py (1)

import webapp2
from google.appengine.api import users
from google.appengine.ext import db
import cgi
import datetime
import os
from google.appengine.ext.webapp import template

#START: ChatMessage
class ChatMessage(db.Model):
    user = db.StringProperty(required=True)
    timestamp = db.DateTimeProperty(auto_now_add=True)
    message = db.TextProperty(required=True)
    chat = db.StringProperty(required=True)

CHATS = ['main', 'book', 'flame']
#END: ChatMessage

#START: UserRoles
class UserRole(db.Model):
    name = db.StringProperty(required=True)
    role = db.StringProperty(choices=["User", "admin", "privileged"],
                             default="User")

    @staticmethod
    def GetUserRole(name):
        user_record = db.GqlQuery("SELECT * from UserRole WHERE " +
                                  "name = :1", name).get()
        if user_record != None:
            return user_record.role
        else:
            return "User"
#END: UserRoles

#START: ValidateRole
def ValidateUserRole(actual, required):
    if required == "admin":  #<callout id="co.admin-check"/>
        return actual == "admin"
    elif required == "privileged":  #<callout id="co.priv-check"/>
        return actual == "admin" or actual == "privileged"
    elif required == "User":
        return True
    else:  #<callout id="co.priv-default"/>
        return False
#END: ValidateRole

#START: NewChatRoom
class NewChatRoomHandler(webapp2.RequestHandler):
    '''@login_required'''  #<callout id="co.login-decorator"/>
    #http://djangosnippets.org/snippets/691/
    #http://flask.pocoo.org/docs/patterns/viewdecorators/
    def get(self):
        user = users.get_current_user()
        role = UserRole.GetUserRole(user.nickname())  #<callout id="co.role-check"/>
        if not ValidateUserRole(role, "privileged"):
            self.response.headers["Content-Type"] = "text/html"
            self.response.write(
                "<html><head>\n" +
                "<title>Insufficient Privileges</title>\n" +
                "</head>\n" +
                "<body><h1>Insufficient Privileges</h1>\n" +
                "<p> I'm sorry but you aren't allowed to " +
                "access this page</p>\n" +
                "</body></html>\n")
        else:
            self.response.headers["Content-Type"] = "text/html"
            template_values = {
                'title': "MarkCC's AppEngine Chat Room",
            }
            path = os.path.join(os.path.dirname(__file__), 'new-chat.html')
            page = template.render(path, template_values)
            self.response.write(page)
#END: NewChatRoom

#START: NewChatRoomPost
class NewChatRoomPostHandler(webapp2.RequestHandler):
    '''@login_required'''
    def post(self):
        user = users.get_current_user()
        role = UserRole.GetUserRole(user.nickname())
        if not ValidateUserRole(role, "privileged"):
            self.response.headers["Content-Type"] = "text/html"
            self.response.write(
                "<html><head><title>Insufficient Privileges</title></head>\n" +
                "<body><h1>Insufficient Privileges</h1>\n" +
                "<p> I'm sorry but you aren't allowed to access this page</p>\n" +
                "</body></html>\n")
        else:
            newchat = cgi.escape(self.request.get("newchat"))
            CreateChat(user, newchat)
            self.response.write(
                "<html><head><title>Chat Room Created</title></head>\n" +
                "<body><h1>Chat Room Created</h1>\n" +
                "<p> New chat room %s created.</p>\n"
                "</body></html>\n" % newchat)
#END: NewChatRoomPost

#START: GenericChat
class GenericChatPage(webapp2.RequestHandler):
    def get(self):
        requested_chat = self.request.get("chat", default_value="none")
        if requested_chat == "none" or requested_chat not in CHATS:
            template_params = {
                'title': "Error! Requested chat not found!",
                'chatname': requested_chat,
            }
            error_template = os.path.join(os.path.dirname(__file__),
                                          'error.html')
            page = template.render(error_template, template_params)
            self.response.write(page)
        else:
            messages = db.GqlQuery("SELECT * from ChatMessage WHERE chat = :1 "
                                   "ORDER BY timestamp", requested_chat)
            template_params = {
                'title': "MarkCC's AppEngine Chat Room",
                'msg_list': messages,
                'chat': requested_chat
            }
            path = os.path.join(os.path.dirname(__file__), 'multichat.html')
            page = template.render(path, template_params)
            self.response.write(page)
#END: GenericChat

#START: ChatRoomCounted
class ChatRoomCountedHandler(webapp2.RequestHandler):
    def get(self):
        user = users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            self.response.headers["Content-Type"] = "text/html"
            messages = db.GqlQuery("SELECT * From ChatMessage ORDER BY "
                                   "timestamp DESC LIMIT 20")
            # list.reverse() is in-place and returns None, so reverse after
            # materializing the query results.
            msglist = list(messages)
            msglist.reverse()
            for msg in msglist:
                msg.deltatime = datetime.datetime.now() - msg.timestamp
            template_values = {
                'title': "MarkCC's AppEngine Chat Room",
                'msg_list': msglist,
            }
            path = os.path.join(os.path.dirname(__file__), 'count.html')
            page = template.render(path, template_values)
            self.response.write(page)
#END: ChatRoomCounted

#START: LandingPage
class ChatRoomLandingPage(webapp2.RequestHandler):
    def get(self):
        user = users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            self.response.headers["Content-Type"] = "text/html"
            messages = db.GqlQuery("SELECT * From ChatMessage ORDER BY "
                                   "timestamp DESC LIMIT 20")
            # msglist = list(messages).reverse()
            template_values = {
                'title': "MarkCC's AppEngine Chat Room",
                'msg_list': messages,
            }
            path = os.path.join(os.path.dirname(__file__), 'landing.html')
            page = template.render(path, template_values)
            self.response.write(page)
#END: LandingPage

#START: ChatRoomPoster
class ChatRoomPoster(webapp2.RequestHandler):
    def post(self):
        user = users.get_current_user()
        msgtext = self.request.get("message")
        chat = self.request.get("chat")
        msg = ChatMessage(user=user.nickname(), message=msgtext, chat=chat)
        msg.put()
        # Now that we've added the message to the chat, we'll redirect
        # to the root page.
        self.redirect('/enterchat?chat=%s' % chat)
#END: ChatRoomPoster

# START: Frame
chatapp = webapp2.WSGIApplication([('/', ChatRoomLandingPage),
                                   ('/talk', ChatRoomPoster),
                                   ('/enterchat', GenericChatPage)])
# END: Frame
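The role check is a simple hierarchy (admin > privileged > User); a few spot checks of `ValidateUserRole` as written above:

```python
# Quick sanity checks for the role hierarchy implemented above.
assert ValidateUserRole("admin", "admin") is True
assert ValidateUserRole("privileged", "admin") is False
assert ValidateUserRole("admin", "privileged") is True
assert ValidateUserRole("User", "privileged") is False
assert ValidateUserRole("User", "User") is True
```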
[ericpre/hyperspy @ refs/heads/RELEASE_next_minor] hyperspy/tests/io/test_bruker.py (2)

import json
import os

import numpy as np
import pytest

from hyperspy import signals
from hyperspy.io import load
from hyperspy.misc.test_utils import assert_deep_almost_equal

test_files = ['30x30_instructively_packed_16bit_compressed.bcf',
              '16x16_12bit_packed_8bit.bcf',
              'P45_the_default_job.bcf',
              'test_TEM.bcf',
              'Hitachi_TM3030Plus.bcf',
              'over16bit.bcf',
              'bcf_v2_50x50px.bcf',
              'bcf-edx-ebsd.bcf']
np_file = ['30x30_16bit.npy', '30x30_16bit_ds.npy']
spx_files = ['extracted_from_bcf.spx', 'bruker_nano.spx']

my_path = os.path.dirname(__file__)


def test_load_16bit():
    # test bcf from hyperspy load function level
    # some of functions can be not covered
    # it cant use cython parsing implementation, as it is not compiled
    filename = os.path.join(my_path, 'bruker_data', test_files[0])
    print('testing bcf instructively packed 16bit...')
    s = load(filename)
    bse, hype = s
    # Bruker saves all images in true 16bit:
    assert bse.data.dtype == np.uint16
    assert bse.data.shape == (30, 30)
    np_filename = os.path.join(my_path, 'bruker_data', np_file[0])
    np.testing.assert_array_equal(hype.data[:, :, 222:224],
                                  np.load(np_filename))
    assert hype.data.shape == (30, 30, 2048)


def test_load_16bit_reduced():
    filename = os.path.join(my_path, 'bruker_data', test_files[0])
    print('testing downsampled 16bit bcf...')
    s = load(filename, downsample=4, cutoff_at_kV=10)
    bse, hype = s
    # sem images are never downsampled
    assert bse.data.shape == (30, 30)
    np_filename = os.path.join(my_path, 'bruker_data', np_file[1])
    np.testing.assert_array_equal(hype.data[:, :, 222:224],
                                  np.load(np_filename))
    assert hype.data.shape == (8, 8, 1047)
    # Bruker saves all images in true 16bit:
    assert bse.data.dtype == np.uint16
    # hypermaps should always return unsigned integers:
    assert str(hype.data.dtype)[0] == 'u'


def test_load_8bit():
    for bcffile in test_files[1:3]:
        filename = os.path.join(my_path, 'bruker_data', bcffile)
        print('testing simple 8bit bcf...')
        s = load(filename)
        bse, hype = s[0], s[-1]
        # Bruker saves all images in true 16bit:
        assert bse.data.dtype == np.uint16
        # hypermaps should always return unsigned integers:
        assert str(hype.data.dtype)[0] == 'u'


def test_hyperspy_wrap():
    filename = os.path.join(my_path, 'bruker_data', test_files[0])
    print('testing bcf wrap to hyperspy signal...')
    from hyperspy.exceptions import VisibleDeprecationWarning
    with pytest.warns(VisibleDeprecationWarning):
        hype = load(filename, select_type='spectrum')
    hype = load(filename, select_type='spectrum_image')
    np.testing.assert_allclose(
        hype.axes_manager[0].scale, 1.66740910949362, atol=1E-12)
    np.testing.assert_allclose(
        hype.axes_manager[1].scale, 1.66740910949362, atol=1E-12)
    assert hype.axes_manager[1].units == 'µm'
    np.testing.assert_allclose(hype.axes_manager[2].scale, 0.009999)
    np.testing.assert_allclose(hype.axes_manager[2].offset, -0.47225277)
    assert hype.axes_manager[2].units == 'keV'
    assert hype.axes_manager[2].is_binned == True

    md_ref = {
        'Acquisition_instrument': {
            'SEM': {
                'beam_energy': 20,
                'magnification': 1819.22595,
                'Detector': {
                    'EDS': {
                        'elevation_angle': 35.0,
                        'detector_type': 'XFlash 6|10',
                        'azimuth_angle': 90.0,
                        'real_time': 70.07298,
                        'energy_resolution_MnKa': 130.0}},
                'Stage': {
                    'tilt_alpha': 0.0,
                    'rotation': 326.10089,
                    'x': 66940.81,
                    'y': 54233.16,
                    'z': 39194.77}}},
        'General': {
            'original_filename':
                '30x30_instructively_packed_16bit_compressed.bcf',
            'title': 'EDX',
            'date': '2018-10-04',
            'time': '13:02:07'},
        'Sample': {
            'name': 'chevkinite',
            'elements': ['Al', 'C', 'Ca', 'Ce', 'Fe', 'Gd', 'K', 'Mg', 'Na',
                         'Nd', 'O', 'P', 'Si', 'Sm', 'Th', 'Ti'],
            'xray_lines': ['Al_Ka', 'C_Ka', 'Ca_Ka', 'Ce_La', 'Fe_Ka',
                           'Gd_La', 'K_Ka', 'Mg_Ka', 'Na_Ka', 'Nd_La',
                           'O_Ka', 'P_Ka', 'Si_Ka', 'Sm_La', 'Th_Ma',
                           'Ti_Ka']},
        'Signal': {
            'quantity': 'X-rays (Counts)',
            'signal_type': 'EDS_SEM'},
        '_HyperSpy': {
            'Folding': {'original_axes_manager': None,
                        'original_shape': None,
                        'signal_unfolded': False,
                        'unfolded': False}}}

    filename_omd = os.path.join(my_path, 'bruker_data',
                                '30x30_original_metadata.json')
    with open(filename_omd) as fn:
        # original_metadata:
        omd_ref = json.load(fn)
    assert_deep_almost_equal(hype.metadata.as_dictionary(), md_ref)
    assert_deep_almost_equal(hype.original_metadata.as_dictionary(), omd_ref)
    assert hype.metadata.General.date == "2018-10-04"
    assert hype.metadata.General.time == "13:02:07"
    assert hype.metadata.Signal.quantity == "X-rays (Counts)"


def test_hyperspy_wrap_downsampled():
    filename = os.path.join(my_path, 'bruker_data', test_files[0])
    print('testing bcf wrap to hyperspy signal...')
    hype = load(filename, select_type='spectrum_image', downsample=5)
    np.testing.assert_allclose(
        hype.axes_manager[0].scale, 8.337045547468101, atol=1E-12)
    np.testing.assert_allclose(
        hype.axes_manager[1].scale, 8.337045547468101, atol=1E-12)
    assert hype.axes_manager[1].units == 'µm'


def test_get_mode():
    filename = os.path.join(my_path, 'bruker_data', test_files[0])
    s = load(filename, select_type='spectrum_image', instrument='SEM')
    assert s.metadata.Signal.signal_type == "EDS_SEM"
    assert isinstance(s, signals.EDSSEMSpectrum)

    filename = os.path.join(my_path, 'bruker_data', test_files[0])
    s = load(filename, select_type='spectrum_image', instrument='TEM')
    assert s.metadata.Signal.signal_type == "EDS_TEM"
    assert isinstance(s, signals.EDSTEMSpectrum)

    filename = os.path.join(my_path, 'bruker_data', test_files[0])
    s = load(filename, select_type='spectrum_image')
    assert s.metadata.Signal.signal_type == "EDS_SEM"
    assert isinstance(s, signals.EDSSEMSpectrum)

    filename = os.path.join(my_path, 'bruker_data', test_files[3])
    s = load(filename, select_type='spectrum_image')
    assert s.metadata.Signal.signal_type == "EDS_TEM"
    assert isinstance(s, signals.EDSTEMSpectrum)


def test_wrong_file():
    filename = os.path.join(my_path, 'bruker_data', 'Nope.bcf')
    with pytest.raises(TypeError):
        load(filename)


def test_fast_bcf():
    thingy = pytest.importorskip("hyperspy.io_plugins.unbcf_fast")
    from hyperspy.io_plugins import bruker

    for bcffile in test_files:
        filename = os.path.join(my_path, 'bruker_data', bcffile)
        thingy = bruker.BCF_reader(filename)
        for j in range(2, 5, 1):
            print('downsampling:', j)
            bruker.fast_unbcf = True   # manually enabling fast parsing
            hmap1 = thingy.parse_hypermap(downsample=j)   # using cython
            bruker.fast_unbcf = False  # manually disabling fast parsing
            hmap2 = thingy.parse_hypermap(downsample=j)   # py implementation
            np.testing.assert_array_equal(hmap1, hmap2)


def test_decimal_regex():
    from hyperspy.io_plugins.bruker import fix_dec_patterns
    dummy_xml_positive = [b'<dummy_tag>85,658</dummy_tag>',
                          b'<dummy_tag>85,658E-8</dummy_tag>',
                          b'<dummy_tag>-85,658E-8</dummy_tag>',
                          b'<dum_tag>-85.658</dum_tag>',  # negative check
                          b'<dum_tag>85.658E-8</dum_tag>']  # negative check
    dummy_xml_negative = [b'<dum_tag>12,25,23,45,56,12,45</dum_tag>',
                          b'<dum_tag>12e1,23,-24E-5</dum_tag>']
    for i in dummy_xml_positive:
        assert b'85.658' in fix_dec_patterns.sub(b'\\1.\\2', i)
    for j in dummy_xml_negative:
        assert b'.' not in fix_dec_patterns.sub(b'\\1.\\2', j)


def test_all_spx_loads():
    for spxfile in spx_files:
        filename = os.path.join(my_path, 'bruker_data', spxfile)
        s = load(filename)
        assert s.data.dtype == np.uint64
        assert s.metadata.Signal.signal_type == 'EDS_SEM'


def test_stand_alone_spx():
    filename = os.path.join(my_path, 'bruker_data', 'bruker_nano.spx')
    s = load(filename)
    assert s.metadata.Sample.elements == ['Fe', 'S', 'Cu']
    assert s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == 7.385


def test_bruker_XRF():
    # See https://github.com/hyperspy/hyperspy/issues/2689
    # Bruker M6 Jetstream SPX
    filename = os.path.join(my_path, 'bruker_data',
                            'bruker_m6_jetstream_file_example.spx')
    s = load(filename)
    assert s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == 28.046
    assert s.metadata.Acquisition_instrument.TEM.beam_energy == 50
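As the tests show, loading a `.bcf` returns a pair (survey image, spectrum image), and `downsample`/`cutoff_at_kV` shrink only the hypermap. A hedged sketch of the same call on a real acquisition file (the path is a placeholder):

```python
# Sketch: load a Bruker .bcf the way the tests above do.
from hyperspy.io import load

bse, eds = load('my_map.bcf', downsample=4, cutoff_at_kV=10)
print(bse.data.shape)    # survey image is never downsampled
print(eds.data.shape)    # hypermap: reduced navigation axes, cut energy axis
```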
[ksrajkumar/openerp-6.1 @ refs/heads/master] openerp/pychart/afm/Courier_Oblique.py (15)

# -*- coding: utf-8 -*-
# AFM font Courier-Oblique (path: /usr/share/fonts/afms/adobe/pcrro8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostscript source code.
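These pychart AFM tables map character codes to advance widths, conventionally expressed per 1000 units of font size. A hedged sketch of how such a table could be used to estimate a rendered string width; the `string_width` helper is illustrative, not part of pychart:

```python
# Hypothetical helper: estimate the width in points of `text` set in
# Courier-Oblique at `size` points, using the 256-entry width table below.
def string_width(widths, text, size):
    # AFM advance widths are assumed to be in 1/1000ths of the font size.
    return sum(widths[ord(ch)] for ch in text) * size / 1000.0

courier_oblique = dir.afm["Courier-Oblique"]
print(string_width(courier_oblique, "Hello", 12))  # 5 chars * 600/1000 * 12pt
```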
import dir

dir.afm["Courier-Oblique"] = (
    500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500,
    500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500,
    600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
    600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
    600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
    600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
    600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
    600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 500,
    500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500,
    500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500,
    500, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
    500, 600, 600, 600, 600, 500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600,
    500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 600, 500, 600, 600, 600,
    600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500,
    500, 600, 500, 600, 500, 500, 500, 500, 600, 600, 600, 600, 500, 500, 500, 500,
    500, 600, 500, 500, 500, 600, 500, 500, 600, 600, 600, 600, )

[40223246/0622W17test2 @ refs/heads/master] static/Brython3.1.1-20150328-091302/Lib/browser/indexed_db.py (632)

class EventListener:
    def __init__(self, events=[]):
        self._events = events

    def append(self, event):
        self._events.append(event)

    def fire(self, e):
        for _event in self._events:
            _event(e)


class IndexedDB:
    def __init__(self):
        if not __BRYTHON__.has_indexedDB:
            raise NotImplementedError("Your browser doesn't support indexedDB")
        self._indexedDB = __BRYTHON__.indexedDB()
        self._db = None
        self._version = None

    def _onsuccess(self, event):
        self._db = event.target.result

    def open(self, name, onsuccess, version=1.0, onerror=None,
             onupgradeneeded=None):
        self._version = version
        _result = self._indexedDB.open(name, version)
        _success = EventListener([self._onsuccess, onsuccess])
        _result.onsuccess = _success.fire
        _result.onupgradeneeded = onupgradeneeded

        #if onerror is None:
        def onerror(e):
            print("onerror: %s:%s" % (e.type, e.target.result))

        def onblocked(e):
            print("blocked: %s:%s" % (e.type, e.result))

        _result.onerror = onerror
        _result.onblocked = onblocked

    def transaction(self, entities, mode='read'):
        return Transaction(self._db.transaction(entities, mode))


class Transaction:
    def __init__(self, transaction):
        self._transaction = transaction

    def objectStore(self, name):
        return ObjectStore(self._transaction.objectStore(name))


class ObjectStore:
    def __init__(self, objectStore):
        self._objectStore = objectStore
        self._data = []

    def clear(self, onsuccess=None, onerror=None):
        _result = self._objectStore.clear()
        if onsuccess is not None:
            _result.onsuccess = onsuccess
        if onerror is not None:
            _result.onerror = onerror

    def _helper(self, func, object, onsuccess=None, onerror=None):
        _result = func(object)
        if onsuccess is not None:
            _result.onsuccess = onsuccess
        if onerror is not None:
            _result.onerror = onerror

    def put(self, obj, key=None, onsuccess=None, onerror=None):
        _r = self._objectStore.put(obj, key)
        _r.onsuccess = onsuccess
        _r.onerror = onerror

    def add(self, obj, key, onsuccess=None, onerror=None):
        _r = self._objectStore.add(obj, key)
        _r.onsuccess = onsuccess
        _r.onerror = onerror
        #self._helper(self._objectStore.add, object, onsuccess, onerror)

    def delete(self, index, onsuccess=None, onerror=None):
        self._helper(self._objectStore.delete, index, onsuccess, onerror)

    def query(self, *args):
        self._data = []

        def onsuccess(event):
            cursor = event.target.result
            if cursor is not None:
                self._data.append(cursor.value)
                getattr(cursor, "continue")()  # cursor.continue() is illegal
        self._objectStore.openCursor(args).onsuccess = onsuccess

    def fetchall(self):
        yield self._data

    def get(self, key, onsuccess=None, onerror=None):
        self._helper(self._objectStore.get, key, onsuccess, onerror)
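In a Brython page the wrapper above is driven entirely by callbacks; a hedged usage sketch (database and store names are placeholders, and `createObjectStore`/`'readwrite'` come from the browser's native IndexedDB API that the wrapper passes through to):

```python
# Hypothetical usage in the browser: open a database, then store a record.
db = IndexedDB()

def on_upgrade(event):
    # First run: create the object store before using it.
    event.target.result.createObjectStore('notes')

def on_open(event):
    tx = db.transaction(['notes'], mode='readwrite')
    store = tx.objectStore('notes')
    store.put({'text': 'hello'}, key=1,
              onsuccess=lambda e: print('saved'))

db.open('demo-db', on_open, version=1.0, onupgradeneeded=on_upgrade)
```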
[jruiperezv/ANALYSE @ refs/heads/master] common/djangoapps/enrollment/urls.py (8)

"""
URLs for the Enrollment API
"""
from django.conf import settings
from django.conf.urls import patterns, url

from .views import get_course_enrollment, list_student_enrollments

urlpatterns = []

if settings.FEATURES.get('ENABLE_COMBINED_LOGIN_REGISTRATION'):
    urlpatterns += patterns(
        'enrollment.views',
        url(r'^student$', list_student_enrollments, name='courseenrollments'),
        url(
            r'^course/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),
            get_course_enrollment,
            name='courseenrollment'
        ),
    )
[NobleNoob/buildpack @ refs/heads/master] lib/build_pack_utils/cloudfoundry.py (12)

import os
import sys
import json
import tempfile
import shutil
import utils
import logging

from urlparse import urlparse
from zips import UnzipUtil
from hashes import HashUtil
from cache import DirectoryCacheManager
from downloads import Downloader
from downloads import CurlDownloader
from utils import safe_makedirs


_log = logging.getLogger('cloudfoundry')


class CloudFoundryUtil(object):
    @staticmethod
    def initialize():
        # Open stdout unbuffered
        if hasattr(sys.stdout, 'fileno'):
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
        ctx = utils.FormattedDict()
        # Add environment variables
        ctx.update(os.environ)
        # Convert JSON env variables
        ctx['VCAP_APPLICATION'] = json.loads(ctx.get('VCAP_APPLICATION',
                                                     '{}', format=False))
        ctx['VCAP_SERVICES'] = json.loads(ctx.get('VCAP_SERVICES',
                                                  '{}', format=False))
        # Build Pack Location
        ctx['BP_DIR'] = os.path.dirname(os.path.dirname(sys.argv[0]))
        # User's Application Files, build droplet here
        ctx['BUILD_DIR'] = sys.argv[1]
        # Cache space for the build pack
        ctx['CACHE_DIR'] = (len(sys.argv) == 3) and sys.argv[2] or None
        # Temp space
        if 'TMPDIR' not in ctx.keys():
            ctx['TMPDIR'] = tempfile.gettempdir()
        # Make sure cache & build directories exist
        if not os.path.exists(ctx['BUILD_DIR']):
            os.makedirs(ctx['BUILD_DIR'])
        if ctx['CACHE_DIR'] and not os.path.exists(ctx['CACHE_DIR']):
            os.makedirs(ctx['CACHE_DIR'])
        # Add place holder for extensions
        ctx['EXTENSIONS'] = []
        # Init Logging
        CloudFoundryUtil.init_logging(ctx)
        _log.info('CloudFoundry Initialized.')
        _log.debug("CloudFoundry Context Setup [%s]", ctx)
        return ctx

    @staticmethod
    def init_logging(ctx):
        logFmt = '%(asctime)s [%(levelname)s] %(name)s - %(message)s'
        if ctx.get('BP_DEBUG', False):
            logging.basicConfig(level=logging.DEBUG, format=logFmt)
        else:
            logLevelStr = ctx.get('BP_LOG_LEVEL', 'INFO')
            logLevel = getattr(logging, logLevelStr, logging.INFO)
            logDir = os.path.join(ctx['BUILD_DIR'], '.bp', 'logs')
            safe_makedirs(logDir)
            logging.basicConfig(level=logLevel, format=logFmt,
                                filename=os.path.join(logDir, 'bp.log'))

    @staticmethod
    def load_json_config_file_from(folder, cfgFile):
        return CloudFoundryUtil.load_json_config_file(
            os.path.join(folder, cfgFile))

    @staticmethod
    def load_json_config_file(cfgPath):
        if os.path.exists(cfgPath):
            _log.debug("Loading config from [%s]", cfgPath)
            with open(cfgPath, 'rt') as cfgFile:
                return json.load(cfgFile)
        return {}


class CloudFoundryInstaller(object):
    def __init__(self, ctx):
        self._log = _log
        self._ctx = ctx
        self._unzipUtil = UnzipUtil(ctx)
        self._hashUtil = HashUtil(ctx)
        self._dcm = DirectoryCacheManager(ctx)
        self._dwn = self._get_downloader(ctx)(ctx)

    def _get_downloader(self, ctx):
        method = ctx.get('DOWNLOAD_METHOD', 'python')
        if method == 'python':
            self._log.debug('Using python downloader.')
            return Downloader
        elif method == 'curl':
            self._log.debug('Using cURL downloader.')
            return CurlDownloader
        elif method == 'custom':
            fullClsName = ctx['DOWNLOAD_CLASS']
            self._log.debug('Using custom downloader [%s].', fullClsName)
            dotLoc = fullClsName.rfind('.')
            if dotLoc >= 0:
                clsName = fullClsName[dotLoc + 1: len(fullClsName)]
                modName = fullClsName[0:dotLoc]
                m = __import__(modName, globals(), locals(), [clsName])
                try:
                    return getattr(m, clsName)
                except AttributeError:
                    self._log.exception(
                        'WARNING: DOWNLOAD_CLASS not found!')
            else:
                self._log.error(
                    'WARNING: DOWNLOAD_CLASS invalid, must include '
                    'package name!')
        return Downloader

    def _is_url(self, val):
        return urlparse(val).scheme != ''

    def install_binary_direct(self, url, hsh, installDir,
                              fileName=None, strip=False):
        self._log.debug("Installing direct [%s]", url)
        if not fileName:
            fileName = url.split('/')[-1]
        if self._is_url(hsh):
            digest = self._dwn.download_direct(hsh)
        else:
            digest = hsh
        self._log.debug(
            "Installing [%s] with digest [%s] into [%s] with "
            "name [%s] stripping [%s]",
            url, digest, installDir, fileName, strip)
        fileToInstall = self._dcm.get(fileName, digest)
        if fileToInstall is None:
            self._log.debug('File [%s] not in cache.', fileName)
            fileToInstall = os.path.join(self._ctx['TMPDIR'], fileName)
            self._dwn.download(url, fileToInstall)
            digest = self._hashUtil.calculate_hash(fileToInstall)
            fileToInstall = self._dcm.put(fileName, fileToInstall, digest)
        return self._unzipUtil.extract(fileToInstall, installDir, strip)

    def install_binary(self, installKey):
        self._log.debug('Installing [%s]', installKey)
        url = self._ctx['%s_DOWNLOAD_URL' % installKey]
        hashUrl = self._ctx.get(
            '%s_HASH_DOWNLOAD_URL' % installKey,
            "%s.%s" % (url, self._ctx['CACHE_HASH_ALGORITHM']))
        installDir = os.path.join(self._ctx['BUILD_DIR'],
                                  self._ctx.get(
                                      '%s_PACKAGE_INSTALL_DIR' % installKey,
                                      installKey.lower()))
        strip = self._ctx.get('%s_STRIP' % installKey, False)
        return self.install_binary_direct(url, hashUrl, installDir,
                                          strip=strip)

    def _install_from(self, fromPath, fromLoc, toLocation=None, ignore=None):
        """Copy file or directory from a location to the droplet

        Copies a file or directory from a location to the application
        droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore
        parameter.

            fromPath   -> file to copy, relative build pack
            fromLoc    -> root of the from path.  Full path to file or
                          directory to be copied is fromLoc + fromPath
            toLocation -> optional location where to copy the file
                          relative to app droplet.  If not specified
                          uses fromPath.
            ignore     -> an optional callable that is passed to
                          the ignore argument of shutil.copytree.
        """
        self._log.debug("Install file [%s] from [%s]", fromPath, fromLoc)
        fullPathFrom = os.path.join(fromLoc, fromPath)
        if os.path.exists(fullPathFrom):
            fullPathTo = os.path.join(
                self._ctx['BUILD_DIR'],
                ((toLocation is None) and fromPath or toLocation))
            safe_makedirs(os.path.dirname(fullPathTo))
            self._log.debug("Copying [%s] to [%s]", fullPathFrom, fullPathTo)
            if os.path.isfile(fullPathFrom):
                shutil.copy(fullPathFrom, fullPathTo)
            else:
                utils.copytree(fullPathFrom, fullPathTo, ignore=ignore)

    def install_from_build_pack(self, fromPath, toLocation=None, ignore=None):
        """Copy file or directory from the build pack to the droplet

        Copies a file or directory from the build pack to the application
        droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore
        parameter.

            fromPath   -> file to copy, relative build pack
            toLocation -> optional location where to copy the file
                          relative to app droplet.  If not specified
                          uses fromPath.
            ignore     -> an optional callable that is passed to
                          the ignore argument of shutil.copytree.
        """
        self._install_from(
            fromPath,
            self._ctx['BP_DIR'],
            toLocation,
            ignore)

    def install_from_application(self, fromPath, toLocation, ignore=None):
        """Copy file or directory from one place to another in the application

        Copies a file or directory from one place to another place within
        the application droplet.

            fromPath   -> file or directory to copy, relative
                          to application droplet.
            toLocation -> location where to copy the file,
                          relative to app droplet.
            ignore     -> optional callable that is passed to the
                          ignore argument of shutil.copytree.
        """
        self._install_from(
            fromPath,
            self._ctx['BUILD_DIR'],
            toLocation,
            ignore)
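The installer is driven by context keys of the form `<KEY>_DOWNLOAD_URL`, `<KEY>_HASH_DOWNLOAD_URL`, and `<KEY>_PACKAGE_INSTALL_DIR`. A hedged sketch of a build-pack step using them; the URL, key, and hash algorithm are placeholders:

```python
# Hypothetical build step: download, verify, and unpack a component "NODE".
ctx = CloudFoundryUtil.initialize()          # needs build-pack argv in place
ctx['CACHE_HASH_ALGORITHM'] = 'sha256'                     # placeholder
ctx['NODE_DOWNLOAD_URL'] = 'https://example.com/node.tgz'  # placeholder

installer = CloudFoundryInstaller(ctx)
install_dir = installer.install_binary('NODE')  # unpacks under BUILD_DIR/node
print(install_dir)
```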
""" self._install_from( fromPath, self._ctx['BP_DIR'], toLocation, ignore) def install_from_application(self, fromPath, toLocation, ignore=None): """Copy file or directory from one place to another in the application Copies a file or directory from one place to another place within the application droplet. fromPath -> file or directory to copy, relative to application droplet. toLocation -> location where to copy the file, relative to app droplet. ignore -> optional callable that is passed to the ignore argument of shutil.copytree """ self._install_from( fromPath, self._ctx['BUILD_DIR'], toLocation, ignore) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284656"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">brendangregg/bcc</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tools/tcpsubnet.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/python # @lint-avoid-python-3-compatibility-imports # # tcpsubnet Summarize TCP bytes sent to different subnets. # For Linux, uses BCC, eBPF. Embedded C. # # USAGE: tcpsubnet [-h] [-v] [-J] [-f FORMAT] [-i INTERVAL] [subnets] # # This uses dynamic tracing of kernel functions, and will need to be updated # to match kernel changes. # # This is an adaptation of tcptop from written by Brendan Gregg. # # WARNING: This traces all send at the TCP level, and while it # summarizes data in-kernel to reduce overhead, there may still be some # overhead at high TCP send/receive rates (eg, ~13% of one CPU at 100k TCP # events/sec. This is not the same as packet rate: funccount can be used to # count the kprobes below to find out the TCP rate). Test in a lab environment # first. If your send rate is low (eg, <1k/sec) then the overhead is # expected to be negligible. # # Copyright 2017 Rodrigo Manyari # Licensed under the Apache License, Version 2.0 (the "License") # # 03-Oct-2017 Rodrigo Manyari Created this based on tcptop. # 13-Feb-2018 Rodrigo Manyari Fix pep8 errors, some refactoring. # 05-Mar-2018 Rodrigo Manyari Add date time to output. import argparse import json import logging import struct import socket from bcc import BPF from datetime import datetime as dt from time import sleep # arguments examples = """examples: ./tcpsubnet # Trace TCP sent to the default subnets: # 127.0.0.1/32,10.0.0.0/8,172.16.0.0/12, # 192.168.0.0/16,0.0.0.0/0 ./tcpsubnet -f K # Trace TCP sent to the default subnets # aggregated in KBytes. ./tcpsubnet 10.80.0.0/24 # Trace TCP sent to 10.80.0.0/24 only ./tcpsubnet -J # Format the output in JSON. 
""" default_subnets = "127.0.0.1/32,10.0.0.0/8," \ "172.16.0.0/12,192.168.0.0/16,0.0.0.0/0" parser = argparse.ArgumentParser( description="Summarize TCP send and aggregate by subnet", formatter_class=argparse.RawDescriptionHelpFormatter, epilog=examples) parser.add_argument("subnets", help="comma separated list of subnets", type=str, nargs="?", default=default_subnets) parser.add_argument("-v", "--verbose", action="store_true", help="output debug statements") parser.add_argument("-J", "--json", action="store_true", help="format output in JSON") parser.add_argument("--ebpf", action="store_true", help=argparse.SUPPRESS) parser.add_argument("-f", "--format", default="B", help="[bkmBKM] format to report: bits, Kbits, Mbits, bytes, " + "KBytes, MBytes (default B)", choices=["b", "k", "m", "B", "K", "M"]) parser.add_argument("-i", "--interval", default=1, type=int, help="output interval, in seconds (default 1)") args = parser.parse_args() level = logging.INFO if args.verbose: level = logging.DEBUG logging.basicConfig(level=level) logging.debug("Starting with the following args:") logging.debug(args) # args checking if int(args.interval) <= 0: logging.error("Invalid interval, must be > 0. Exiting.") exit(1) else: args.interval = int(args.interval) # map of supported formats formats = { "b": lambda x: (x * 8), "k": lambda x: ((x * 8) / 1024), "m": lambda x: ((x * 8) / pow(1024, 2)), "B": lambda x: x, "K": lambda x: x / 1024, "M": lambda x: x / pow(1024, 2) } # Let's swap the string with the actual numeric value # once here so we don't have to do it on every interval formatFn = formats[args.format] # define the basic structure of the BPF program bpf_text = """ #include <uapi/linux/ptrace.h> #include <net/sock.h> #include <bcc/proto.h> struct index_key_t { u32 index; }; BPF_HASH(ipv4_send_bytes, struct index_key_t); int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk, struct msghdr *msg, size_t size) { u16 family = sk->__sk_common.skc_family; if (family == AF_INET) { u32 dst = sk->__sk_common.skc_daddr; unsigned categorized = 0; __SUBNETS__ } return 0; } """ # Takes in a mask and returns the integer equivalent # e.g. # mask_to_int(8) returns 4278190080 def mask_to_int(n): return ((1 << n) - 1) << (32 - n) # Takes in a list of subnets and returns a list # of tuple-3 containing: # - The subnet info at index 0 # - The addr portion as an int at index 1 # - The mask portion as an int at index 2 # # e.g. # parse_subnets([10.10.0.0/24]) returns # [ # ['10.10.0.0/24', 168427520, 4294967040], # ] def parse_subnets(subnets): m = [] for s in subnets: parts = s.split("/") if len(parts) != 2: msg = "Subnet [%s] is invalid, please refer to the examples." % s raise ValueError(msg) netaddr_int = 0 mask_int = 0 try: netaddr_int = struct.unpack("!I", socket.inet_aton(parts[0]))[0] except: msg = ("Invalid net address in subnet [%s], " + "please refer to the examples.") % s raise ValueError(msg) try: mask_int = int(parts[1]) except: msg = "Invalid mask in subnet [%s]. Mask must be an int" % s raise ValueError(msg) if mask_int < 0 or mask_int > 32: msg = ("Invalid mask in subnet [%s]. 
Must be an " + "int between 0 and 32.") % s raise ValueError(msg) mask_int = mask_to_int(int(parts[1])) m.append([s, netaddr_int, mask_int]) return m def generate_bpf_subnets(subnets): template = """ if (!categorized && (__NET_ADDR__ & __NET_MASK__) == (dst & __NET_MASK__)) { struct index_key_t key = {.index = __POS__}; ipv4_send_bytes.increment(key, size); categorized = 1; } """ bpf = '' for i, s in enumerate(subnets): branch = template branch = branch.replace("__NET_ADDR__", str(socket.htonl(s[1]))) branch = branch.replace("__NET_MASK__", str(socket.htonl(s[2]))) branch = branch.replace("__POS__", str(i)) bpf += branch return bpf subnets = [] if args.subnets: subnets = args.subnets.split(",") subnets = parse_subnets(subnets) logging.debug("Packets are going to be categorized in the following subnets:") logging.debug(subnets) bpf_subnets = generate_bpf_subnets(subnets) # initialize BPF bpf_text = bpf_text.replace("__SUBNETS__", bpf_subnets) logging.debug("Done preprocessing the BPF program, " + "this is what will actually get executed:") logging.debug(bpf_text) if args.ebpf: print(bpf_text) exit() b = BPF(text=bpf_text) ipv4_send_bytes = b["ipv4_send_bytes"] if not args.json: print("Tracing... Output every %d secs. Hit Ctrl-C to end" % args.interval) # output exiting = 0 while (1): try: sleep(args.interval) except KeyboardInterrupt: exiting = 1 # IPv4: build dict of all seen keys keys = ipv4_send_bytes for k, v in ipv4_send_bytes.items(): if k not in keys: keys[k] = v # to hold json data data = {} # output now = dt.now() data['date'] = now.strftime('%x') data['time'] = now.strftime('%X') data['entries'] = {} if not args.json: print(now.strftime('[%x %X]')) for k, v in reversed(sorted(keys.items(), key=lambda keys: keys[1].value)): send_bytes = 0 if k in ipv4_send_bytes: send_bytes = int(ipv4_send_bytes[k].value) subnet = subnets[k.index][0] send = formatFn(send_bytes) if args.json: data['entries'][subnet] = send else: print("%-21s %6d" % (subnet, send)) if args.json: print(json.dumps(data)) ipv4_send_bytes.clear() if exiting: exit(0) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284657"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">postlund/home-assistant</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/dev</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">homeassistant/components/opentherm_gw/__init__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">"""Support for OpenTherm Gateway devices.""" import asyncio from datetime import date, datetime import logging import pyotgw import pyotgw.vars as gw_vars import voluptuous as vol from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR from homeassistant.components.climate import DOMAIN as COMP_CLIMATE from homeassistant.components.sensor import DOMAIN as COMP_SENSOR 
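A quick numeric check of the mask arithmetic used by tcpsubnet above; mask_to_int is copied verbatim from the tool, and the expected values follow directly from the shift (an illustrative sketch, not part of the original file):

    def mask_to_int(n):
        return ((1 << n) - 1) << (32 - n)

    assert mask_to_int(8) == 4278190080    # 255.0.0.0, the value quoted in the tool's comment
    assert mask_to_int(24) == 4294967040   # 255.255.255.0, matches the parse_subnets example
    assert mask_to_int(32) == 4294967295   # 255.255.255.255, a /32 host route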
======================================================================
Row 284657 -- postlund/home-assistant @ refs/heads/dev
File: homeassistant/components/opentherm_gw/__init__.py
Copies: 3
======================================================================

"""Support for OpenTherm Gateway devices."""
import asyncio
from datetime import date, datetime
import logging

import pyotgw
import pyotgw.vars as gw_vars
import voluptuous as vol

from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as COMP_CLIMATE
from homeassistant.components.sensor import DOMAIN as COMP_SENSOR
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
    ATTR_DATE,
    ATTR_ID,
    ATTR_MODE,
    ATTR_TEMPERATURE,
    ATTR_TIME,
    CONF_DEVICE,
    CONF_ID,
    CONF_NAME,
    EVENT_HOMEASSISTANT_STOP,
    PRECISION_HALVES,
    PRECISION_TENTHS,
    PRECISION_WHOLE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send

from .const import (
    ATTR_DHW_OVRD,
    ATTR_GW_ID,
    ATTR_LEVEL,
    CONF_CLIMATE,
    CONF_FLOOR_TEMP,
    CONF_PRECISION,
    DATA_GATEWAYS,
    DATA_OPENTHERM_GW,
    DOMAIN,
    SERVICE_RESET_GATEWAY,
    SERVICE_SET_CLOCK,
    SERVICE_SET_CONTROL_SETPOINT,
    SERVICE_SET_GPIO_MODE,
    SERVICE_SET_HOT_WATER_OVRD,
    SERVICE_SET_LED_MODE,
    SERVICE_SET_MAX_MOD,
    SERVICE_SET_OAT,
    SERVICE_SET_SB_TEMP,
)

_LOGGER = logging.getLogger(__name__)

CLIMATE_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_PRECISION): vol.In(
            [PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
        ),
        vol.Optional(CONF_FLOOR_TEMP, default=False): cv.boolean,
    }
)

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: cv.schema_with_slug_keys(
            {
                vol.Required(CONF_DEVICE): cv.string,
                vol.Optional(CONF_CLIMATE, default={}): CLIMATE_SCHEMA,
                vol.Optional(CONF_NAME): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

async def options_updated(hass, entry):
    """Handle options update."""
    gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
    async_dispatcher_send(hass, gateway.options_update_signal, entry)

async def async_setup_entry(hass, config_entry):
    """Set up the OpenTherm Gateway component."""
    if DATA_OPENTHERM_GW not in hass.data:
        hass.data[DATA_OPENTHERM_GW] = {DATA_GATEWAYS: {}}

    gateway = OpenThermGatewayDevice(hass, config_entry)
    hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]] = gateway

    config_entry.add_update_listener(options_updated)

    # Schedule directly on the loop to avoid blocking HA startup.
    hass.loop.create_task(gateway.connect_and_subscribe())

    for comp in [COMP_BINARY_SENSOR, COMP_CLIMATE, COMP_SENSOR]:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, comp)
        )

    register_services(hass)
    return True

async def async_setup(hass, config):
    """Set up the OpenTherm Gateway component."""
    if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config:
        conf = config[DOMAIN]
        for device_id, device_config in conf.items():
            device_config[CONF_ID] = device_id
            hass.async_create_task(
                hass.config_entries.flow.async_init(
                    DOMAIN, context={"source": SOURCE_IMPORT}, data=device_config
                )
            )
    return True

def register_services(hass):
    """Register services for the component."""
    service_reset_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            )
        }
    )
    service_set_clock_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Optional(ATTR_DATE, default=date.today()): cv.date,
            vol.Optional(ATTR_TIME, default=datetime.now().time()): cv.time,
        }
    )
    service_set_control_setpoint_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_TEMPERATURE): vol.All(
                vol.Coerce(float), vol.Range(min=0, max=90)
            ),
        }
    )
    service_set_hot_water_ovrd_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_DHW_OVRD): vol.Any(
                vol.Equal("A"), vol.All(vol.Coerce(int), vol.Range(min=0, max=1))
            ),
        }
    )
    service_set_gpio_mode_schema = vol.Schema(
        vol.Any(
            vol.Schema(
                {
                    vol.Required(ATTR_GW_ID): vol.All(
                        cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
                    ),
                    vol.Required(ATTR_ID): vol.Equal("A"),
                    vol.Required(ATTR_MODE): vol.All(
                        vol.Coerce(int), vol.Range(min=0, max=6)
                    ),
                }
            ),
            vol.Schema(
                {
                    vol.Required(ATTR_GW_ID): vol.All(
                        cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
                    ),
                    vol.Required(ATTR_ID): vol.Equal("B"),
                    vol.Required(ATTR_MODE): vol.All(
                        vol.Coerce(int), vol.Range(min=0, max=7)
                    ),
                }
            ),
        )
    )
    service_set_led_mode_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_ID): vol.In("ABCDEF"),
            vol.Required(ATTR_MODE): vol.In("RXTBOFHWCEMP"),
        }
    )
    service_set_max_mod_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_LEVEL): vol.All(
                vol.Coerce(int), vol.Range(min=-1, max=100)
            ),
        }
    )
    service_set_oat_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_TEMPERATURE): vol.All(
                vol.Coerce(float), vol.Range(min=-40, max=99)
            ),
        }
    )
    service_set_sb_temp_schema = vol.Schema(
        {
            vol.Required(ATTR_GW_ID): vol.All(
                cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
            ),
            vol.Required(ATTR_TEMPERATURE): vol.All(
                vol.Coerce(float), vol.Range(min=0, max=30)
            ),
        }
    )

    async def reset_gateway(call):
        """Reset the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        mode_rst = gw_vars.OTGW_MODE_RESET
        status = await gw_dev.gateway.set_mode(mode_rst)
        gw_dev.status = status
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_RESET_GATEWAY, reset_gateway, service_reset_schema
    )

    async def set_control_setpoint(call):
        """Set the control setpoint on the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.DATA_CONTROL_SETPOINT
        value = await gw_dev.gateway.set_control_setpoint(call.data[ATTR_TEMPERATURE])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN,
        SERVICE_SET_CONTROL_SETPOINT,
        set_control_setpoint,
        service_set_control_setpoint_schema,
    )

    async def set_dhw_ovrd(call):
        """Set the domestic hot water override on the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.OTGW_DHW_OVRD
        value = await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN,
        SERVICE_SET_HOT_WATER_OVRD,
        set_dhw_ovrd,
        service_set_hot_water_ovrd_schema,
    )

    async def set_device_clock(call):
        """Set the clock on the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        attr_date = call.data[ATTR_DATE]
        attr_time = call.data[ATTR_TIME]
        await gw_dev.gateway.set_clock(datetime.combine(attr_date, attr_time))

    hass.services.async_register(
        DOMAIN, SERVICE_SET_CLOCK, set_device_clock, service_set_clock_schema
    )

    async def set_gpio_mode(call):
        """Set the OpenTherm Gateway GPIO modes."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gpio_id = call.data[ATTR_ID]
        gpio_mode = call.data[ATTR_MODE]
        mode = await gw_dev.gateway.set_gpio_mode(gpio_id, gpio_mode)
        gpio_var = getattr(gw_vars, f"OTGW_GPIO_{gpio_id}")
        gw_dev.status.update({gpio_var: mode})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_GPIO_MODE, set_gpio_mode, service_set_gpio_mode_schema
    )

    async def set_led_mode(call):
        """Set the OpenTherm Gateway LED modes."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        led_id = call.data[ATTR_ID]
        led_mode = call.data[ATTR_MODE]
        mode = await gw_dev.gateway.set_led_mode(led_id, led_mode)
        led_var = getattr(gw_vars, f"OTGW_LED_{led_id}")
        gw_dev.status.update({led_var: mode})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_LED_MODE, set_led_mode, service_set_led_mode_schema
    )

    async def set_max_mod(call):
        """Set the max modulation level."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD
        level = call.data[ATTR_LEVEL]
        if level == -1:
            # Backend only clears setting on non-numeric values.
            level = "-"
        value = await gw_dev.gateway.set_max_relative_mod(level)
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_MAX_MOD, set_max_mod, service_set_max_mod_schema
    )

    async def set_outside_temp(call):
        """Provide the outside temperature to the OpenTherm Gateway."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.DATA_OUTSIDE_TEMP
        value = await gw_dev.gateway.set_outside_temp(call.data[ATTR_TEMPERATURE])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_OAT, set_outside_temp, service_set_oat_schema
    )

    async def set_setback_temp(call):
        """Set the OpenTherm Gateway SetBack temperature."""
        gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
        gw_var = gw_vars.OTGW_SB_TEMP
        value = await gw_dev.gateway.set_setback_temp(call.data[ATTR_TEMPERATURE])
        gw_dev.status.update({gw_var: value})
        async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_SB_TEMP, set_setback_temp, service_set_sb_temp_schema
    )

async def async_unload_entry(hass, entry):
    """Cleanup and disconnect from gateway."""
    await asyncio.gather(
        hass.config_entries.async_forward_entry_unload(entry, COMP_BINARY_SENSOR),
        hass.config_entries.async_forward_entry_unload(entry, COMP_CLIMATE),
        hass.config_entries.async_forward_entry_unload(entry, COMP_SENSOR),
    )
    gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
    await gateway.cleanup()
    return True

class OpenThermGatewayDevice:
    """OpenTherm Gateway device class."""

    def __init__(self, hass, config_entry):
        """Initialize the OpenTherm Gateway."""
        self.hass = hass
        self.device_path = config_entry.data[CONF_DEVICE]
        self.gw_id = config_entry.data[CONF_ID]
        self.name = config_entry.data[CONF_NAME]
        self.climate_config = config_entry.options
        self.status = {}
        self.update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_update"
        self.options_update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_options_update"
        self.gateway = pyotgw.pyotgw()
        self.gw_version = None

    async def cleanup(self, event=None):
        """Reset overrides on the gateway."""
        await self.gateway.set_control_setpoint(0)
        await self.gateway.set_max_relative_mod("-")
        await self.gateway.disconnect()

    async def connect_and_subscribe(self):
        """Connect to serial device and subscribe report handler."""
        self.status = await self.gateway.connect(self.hass.loop, self.device_path)
        _LOGGER.debug("Connected to OpenTherm Gateway at %s", self.device_path)
        self.gw_version = self.status.get(gw_vars.OTGW_BUILD)
        self.hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, self.cleanup)

        async def handle_report(status):
            """Handle reports from the OpenTherm Gateway."""
            _LOGGER.debug("Received report: %s", status)
            self.status = status
            async_dispatcher_send(self.hass, self.update_signal, status)

        self.gateway.subscribe(handle_report)

======================================================================
Row 284658 -- sunlianqiang/kbengine @ refs/heads/master
File: kbe/res/scripts/common/Lib/test/leakers/test_selftype.py
Copies: 195
======================================================================

# Reference cycles involving only the ob_type field are rather uncommon
# but possible.  Inspired by SF bug 1469629.

import gc

def leak():
    class T(type):
        pass
    class U(type, metaclass=T):
        pass
    U.__class__ = U
    del U
    gc.collect(); gc.collect(); gc.collect()
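The kbengine test above exercises the collector's handling of ob_type-only cycles. A minimal harness (a sketch, not part of the test suite) to confirm that repeated calls to leak() stay bounded:

    import gc

    def live_objects():
        gc.collect()
        return len(gc.get_objects())

    # assumes leak() from the record above is in scope
    before = live_objects()
    for _ in range(1000):
        leak()
    print(live_objects() - before)  # stays near zero when the cycle is collectable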
======================================================================
Row 284659 -- Grogdor/CouchPotatoServer @ refs/heads/master
File: libs/dateutil/__init__.py
Copies: 147
======================================================================

# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2010  Gustavo Niemeyer <gustavo@niemeyer.net>

This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__version__ = "2.1"
======================================================================
Row 284660 -- collmot/ardupilot @ refs/heads/master
File: Tools/scripts/build_binaries_history.py
Copies: 18
======================================================================

#!/usr/bin/env python

from __future__ import print_function

import os
import sqlite3

class BuildBinariesHistory():
    def __init__(self, db_filepath):
        self.db_filepath = db_filepath
        self.assure_db_present()

    def progress(self, msg):
        print("BBHIST: %s" % msg)

    def conn(self):
        return sqlite3.connect(self.db_filepath)

    def create_schema(self, c):
        '''create our tables and whatnot'''
        schema_version = 1
        c.execute("create table version (version integer)")
        c.execute("insert into version (version) values (?)",
                  (schema_version,))
        # at some stage we should probably directly associate build with runs....
        c.execute("create table build (hash text, tag text, vehicle text, "
                  "board text, frame text, text integer, data integer, "
                  "bss integer, start_time real, duration real)")
        c.execute("create table run (hash text, tag text, "
                  "start_time real, duration real)")
        c.commit()

    def sizes_for_file(self, filepath):
        cmd = "size %s" % (filepath,)
        stuff = os.popen(cmd).read()
        lines = stuff.split("\n")
        sizes = lines[1].split("\t")
        text = int(sizes[0])
        data = int(sizes[1])
        bss = int(sizes[2])
        self.progress("Binary size of %s:" % filepath)
        self.progress("text=%u" % text)
        self.progress("data=%u" % data)
        self.progress("bss=%u" % bss)
        return (text, data, bss)

    def assure_db_present(self):
        c = self.conn()
        need_schema_create = False
        try:
            version_cursor = c.execute("select version from version")
        except sqlite3.OperationalError as e:
            if "no such table" in str(e):
                # FIXME: do better here?  what's in "e"?
                print("need schema create")
                need_schema_create = True
        if need_schema_create:
            self.create_schema(c)
            version_cursor = c.execute("select version from version")
        version_results = version_cursor.fetchall()
        if len(version_results) == 0:
            raise IOError("No version number?")
        if len(version_results) > 1:
            raise IOError("More than one version result?")
        first = version_results[0]
        want_version = 1
        got_version = first[0]
        if got_version != want_version:
            raise IOError("Bad version number (want=%u got=%u)" %
                          (want_version, got_version))
        self.progress("Got history version %u" % got_version)

    def record_build(self, hash, tag, vehicle, board, frame, bare_path,
                     start_time, duration):
        if bare_path is None:
            (text, data, bss) = (None, None, None)
        else:
            (text, data, bss) = self.sizes_for_file(bare_path)
        c = self.conn()
        c.execute("replace into build (hash, tag, vehicle, board, frame, "
                  "text, data, bss, start_time, duration) "
                  "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                  (hash, tag, vehicle, board, frame,
                   text, data, bss, start_time, duration))
        c.commit()

    def record_run(self, hash, tag, start_time, duration):
        c = self.conn()
        c.execute("replace into run (hash, tag, start_time, duration) "
                  "values (?, ?, ?, ?)",
                  (hash, tag, start_time, duration))
        c.commit()
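sizes_for_file() above parses the second line of `size` output, splitting on tabs. For reference, typical GNU binutils Berkeley-format output looks like this (the numbers are illustrative):

    text    data    bss     dec     hex filename
    41123   2048    1320    44491   adcb ArduCopter.elf

so sizes[0], sizes[1] and sizes[2] pick up the text, data and bss columns that record_build() later stores.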
======================================================================
Row 284661 -- gerv/bedrock @ refs/heads/master
File: tests/functional/firefox/desktop/test_all.py
Copies: 1
======================================================================

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import pytest

from pages.firefox.desktop.all import FirefoxDesktopBasePage

@pytest.mark.skip_if_firefox(reason='Download button is not shown for up-to-date Firefox browsers.')
@pytest.mark.nondestructive
@pytest.mark.parametrize(('slug', 'locale'), [
    ('customize', None),
    ('fast', 'de'),
    ('trust', None)])
def test_download_button_is_displayed(slug, locale, base_url, selenium):
    locale = locale or 'en-US'
    page = FirefoxDesktopBasePage(selenium, base_url, locale, slug=slug).open()
    assert page.download_button.is_displayed
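The parametrize decorator above expands into three separate test invocations, one per (slug, locale) pair; locale=None falls back to en-US inside the test body. The generated test IDs would look roughly like this (an illustration of pytest's default naming, not output from this repo):

    # test_download_button_is_displayed[customize-None]
    # test_download_button_is_displayed[fast-de]
    # test_download_button_is_displayed[trust-None]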
======================================================================
Row 284662 -- kerr-huang/SL4A @ refs/heads/master
File: python/src/Lib/glob.py
Copies: 173
======================================================================

"""Filename globbing utility."""

import sys
import os
import re
import fnmatch

__all__ = ["glob", "iglob"]

def glob(pathname):
    """Return a list of paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la fnmatch.

    """
    return list(iglob(pathname))

def iglob(pathname):
    """Return an iterator which yields the paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la fnmatch.

    """
    if not has_magic(pathname):
        if os.path.lexists(pathname):
            yield pathname
        return
    dirname, basename = os.path.split(pathname)
    if not dirname:
        for name in glob1(os.curdir, basename):
            yield name
        return
    if has_magic(dirname):
        dirs = iglob(dirname)
    else:
        dirs = [dirname]
    if has_magic(basename):
        glob_in_dir = glob1
    else:
        glob_in_dir = glob0
    for dirname in dirs:
        for name in glob_in_dir(dirname, basename):
            yield os.path.join(dirname, name)

# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).

def glob1(dirname, pattern):
    if not dirname:
        dirname = os.curdir
    if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
        dirname = unicode(dirname, sys.getfilesystemencoding() or
                                   sys.getdefaultencoding())
    try:
        names = os.listdir(dirname)
    except os.error:
        return []
    if pattern[0] != '.':
        names = filter(lambda x: x[0] != '.', names)
    return fnmatch.filter(names, pattern)

def glob0(dirname, basename):
    if basename == '':
        # `os.path.split()` returns an empty basename for paths ending with a
        # directory separator.  'q*x/' should match only directories.
        if os.path.isdir(dirname):
            return [basename]
    else:
        if os.path.lexists(os.path.join(dirname, basename)):
            return [basename]
    return []

magic_check = re.compile('[*?[]')

def has_magic(s):
    return magic_check.search(s) is not None
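A usage sketch for the glob module above (Python 2, to match the module itself; the paths are illustrative):

    import glob

    print(glob.glob('*.py'))                    # eager: returns a list of matches
    for name in glob.iglob('src/*/test_*.py'):  # lazy: yields one path at a time
        print(name)
    print(glob.has_magic('plain.txt'))          # False: no '*', '?' or '[' present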
======================================================================
Row 284663 -- rasata/ansible @ refs/heads/devel
File: lib/ansible/executor/task_queue_manager.py
Copies: 9
======================================================================

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import multiprocessing
import os
import socket
import sys
import tempfile

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar

__all__ = ['TaskQueueManager']

class TaskQueueManager:

    '''
    This class handles the multiprocessing requirements of Ansible by
    creating a pool of worker forks, a result handler fork, and a
    manager object with shared datastructures/queues for coordinating
    work between all processes.

    The queue manager is responsible for loading the play strategy plugin,
    which dispatches the Play's tasks to hosts.
    '''

    def __init__(self, inventory, variable_manager, loader, display,
                 options, passwords, stdout_callback=None):

        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self._stats = AggregateStats()
        self.passwords = passwords
        self._stdout_callback = stdout_callback
        self._callbacks_loaded = False
        self._callback_plugins = []

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # create the pool of worker threads, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        # A temporary file (opened pre-fork) used by connection
        # plugins for inter-process locking.
        self._connection_lockfile = tempfile.TemporaryFile()

        self._workers = []
        for i in range(self._options.forks):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()

    def _initialize_notified_handlers(self, handlers):
        '''
        Clears and initializes the shared notified handlers dict with entries
        for each handler in the play, which is an empty array that will contain
        inventory hostnames for those hosts triggering the handler.
        '''

        # Zero the dictionary first by removing any entries there.
        # Proxied dicts don't support iteritems, so we have to use keys()
        for key in self._notified_handlers.keys():
            del self._notified_handlers[key]

        # FIXME: there is a block compile helper for this...
        handler_list = []
        for handler_block in handlers:
            for handler in handler_block.block:
                handler_list.append(handler)

        # then initialize it with the handler names from the handler list
        for handler in handler_list:
            self._notified_handlers[handler.get_name()] = []

    def load_callbacks(self):
        '''
        Loads all available callbacks, with the exception of those which
        utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to
        'stdout', only one such callback plugin will be loaded.
        '''

        if self._callbacks_loaded:
            return

        stdout_callback_loaded = False
        if self._stdout_callback is None:
            self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK

        if self._stdout_callback not in callback_loader:
            raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)

        for callback_plugin in callback_loader.all(class_only=True):
            if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
                # we only allow one callback of type 'stdout' to be loaded, so check
                # the name of the current plugin and type to see if we need to skip
                # loading this callback plugin
                callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
                (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
                if callback_type == 'stdout':
                    if callback_name != self._stdout_callback or stdout_callback_loaded:
                        continue
                    stdout_callback_loaded = True
                elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST:
                    continue

                self._callback_plugins.append(callback_plugin(self._display))
            else:
                self._callback_plugins.append(callback_plugin())

        self._callbacks_loaded = True

    def run(self, play):
        '''
        Iterates over the roles/tasks in a play, using the given (or default)
        strategy for queueing tasks. The default is the linear strategy, which
        operates like classic Ansible by keeping all hosts in lock-step with
        a given task (meaning no hosts move on to the next task until all hosts
        are done with the current task).
        '''

        if not self._callbacks_loaded:
            self.load_callbacks()

        all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
        templar = Templar(loader=self._loader, variables=all_vars)

        new_play = play.copy()
        new_play.post_validate(templar)

        play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
        for callback_plugin in self._callback_plugins:
            if hasattr(callback_plugin, 'set_play_context'):
                callback_plugin.set_play_context(play_context)

        self.send_callback('v2_playbook_on_play_start', new_play)

        # initialize the shared dictionary containing the notified handlers
        self._initialize_notified_handlers(new_play.handlers)

        # load the specified strategy (or the default linear one)
        strategy = strategy_loader.get(new_play.strategy, self)
        if strategy is None:
            raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

        # build the iterator
        iterator = PlayIterator(
            inventory=self._inventory,
            play=new_play,
            play_context=play_context,
            variable_manager=self._variable_manager,
            all_vars=all_vars,
        )

        # and run the play using the strategy
        return strategy.run(iterator, play_context)

    def cleanup(self):
        self._display.debug("RUNNING CLEANUP")

        self.terminate()

        self._final_q.close()
        self._result_prc.terminate()

        for (worker_prc, main_q, rslt_q) in self._workers:
            rslt_q.close()
            main_q.close()
            worker_prc.terminate()

    def clear_failed_hosts(self):
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

    def get_inventory(self):
        return self._inventory

    def get_variable_manager(self):
        return self._variable_manager

    def get_loader(self):
        return self._loader

    def get_notified_handlers(self):
        return self._notified_handlers

    def get_workers(self):
        return self._workers[:]

    def terminate(self):
        self._terminated = True

    def send_callback(self, method_name, *args, **kwargs):
        for callback_plugin in self._callback_plugins:
            # a plugin that set self.disabled to True will not be called
            # see osx_say.py example for such a plugin
            if getattr(callback_plugin, 'disabled', False):
                continue
            methods = [
                getattr(callback_plugin, method_name, None),
                getattr(callback_plugin, 'v2_on_any', None)
            ]
            for method in methods:
                if method is not None:
                    try:
                        method(*args, **kwargs)
                    except Exception as e:
                        self._display.warning('Error when using %s: %s' % (method, str(e)))
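send_callback() at the end of the class above dispatches every event to the exact event method and to the v2_on_any catch-all, skipping plugins that set disabled. A self-contained sketch of the same pattern (the plugin here is hypothetical, not Ansible code):

    class LoggingCallback(object):
        disabled = False

        def v2_playbook_on_play_start(self, play):
            print("play started: %s" % play)

        def v2_on_any(self, *args, **kwargs):
            print("event: %r %r" % (args, kwargs))

    def send_callback(plugins, method_name, *args, **kwargs):
        # mirror the dispatch loop: named method first, then the catch-all
        for plugin in plugins:
            if getattr(plugin, 'disabled', False):
                continue
            for method in [getattr(plugin, method_name, None),
                           getattr(plugin, 'v2_on_any', None)]:
                if method is not None:
                    method(*args, **kwargs)

    send_callback([LoggingCallback()], 'v2_playbook_on_play_start', 'demo play')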
======================================================================
Row 284664 -- yumaokao/gdrv @ refs/heads/master
File: gdrv/commands/command_base.py
Copies: 1
======================================================================

#!/usr/bin/python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

import os
import sys
import logging
import fnmatch
import httplib2

from apiclient import errors
from apiclient.discovery import build
from oauth2client.file import Storage

lg = logging.getLogger("BASE")
# lg.setLevel(logging.INFO)

class DriveCommand():
    """ A Drive Command Class """

    def __init__(self, pconfig):
        self.config = pconfig
        self.msgout = sys.stdout

    @staticmethod
    def static_add_sub_command_parser(psub_par):
        pass

    def __call__(self, args=None):
        if args is not None:
            self.args = args
            self.do_drive_command()

    def do_drive_command(self):
        pass

    # ## base command methods ##
    def info(self, *args):
        try:
            self.msgout.write(*args)
            self.msgout.write('\n')
            self.msgout.flush()
        except UnicodeError:
            pass

    def info_append(self, *args):
        try:
            self.msgout.write(*args)
            self.msgout.flush()
            # self.msgout.write('\n')
        except UnicodeError:
            pass

    def parse_input_string(self, pinstr, pmaxlen):
        idxs = []
        if pinstr == 'a':
            return range(pmaxlen)
        for acom in pinstr.split(','):
            arange = acom.split('-')
            # lg.debug("aidx ")
            # lg.debug(arange)
            try:
                if len(arange) == 1:
                    aidx = int(arange[0])
                    idxs.append(aidx)
                elif len(arange) == 2:
                    aidx = int(arange[0])
                    bidx = int(arange[1])
                    idxs.extend(range(aidx, bidx + 1))
            except ValueError:
                pass
            # lg.debug("aidx %d bidx %d") % (aidx, bidx)
        # ridx = filter(lambda x: x < pmaxlen, idxs)
        # lg.debug(ridx)
        return set(filter(lambda x: x < pmaxlen, idxs))

class DriveServiceCommand(DriveCommand):
    """ A Drive Service Command Class """

    def get_storage(self):
        self.storage = Storage(
            os.path.expanduser(self.config.get('api', 'storage')))

    def get_credentials(self):
        self.credentials = None
        self.get_storage()
        self.credentials = self.storage.get()

    def get_service(self):
        self.service = None
        self.get_credentials()
        if self.credentials is None or self.credentials.invalid:
            print "Please init oauth2 flow first"
        else:
            http = httplib2.Http()
            http = self.credentials.authorize(http)
            self.service = build('drive', 'v2', http=http)

    def do_drive_command(self):
        self.get_service()
        if self.service is not None:
            self.do_service_command()

    def do_service_command(self):
        pass

    # ## helper drive apis ##
    def find_drive_files(self, psrcdir, pname, hidedir=False,
                         hidetrashed=True):
        matches = []
        files = self.get_all_children(psrcdir, hidedir=hidedir,
                                      hidetrashed=hidetrashed)
        for afile in files:
            if fnmatch.fnmatch(afile['title'], pname):
                matches.append(afile)
        return matches

    def get_all_children(self, psrcdir, hidedir=False, hidetrashed=True):
        parentid = self.find_parent_id(psrcdir)
        if parentid is None:
            lg.error("Can't find directory %s in drive" % psrcdir)
            sys.exit("Can't find directory %s in drive" % psrcdir)
        query = "'%s' in parents" % parentid
        if hidedir is True:
            query += " and mimeType != 'application/vnd.google-apps.folder'"
        if hidetrashed is True:
            query += " and trashed = false"
        return self.file_list(query)

    def find_parent_id(self, pdir, pmkdir=False):
        dirs = pdir.split('/')
        parentid = 'root'
        # for aidx in range(len(dirs)):
        for adir in dirs:
            # lg.debug("dirs %s" % (adir))
            if adir == '':
                continue
            children_dirs = self.check_children_dirs(adir, parentid)
            dirs_nums = len(children_dirs)
            if dirs_nums == 0:
                lg.error("Can't find directory %s" % (adir))
                return None
            elif dirs_nums > 1:
                lg.warn("Find %d instances of directory %s" % (
                    dirs_nums, adir))
            parentid = children_dirs[0]['id']
        return parentid

    def check_children_dirs(self, dirname, parent="root"):
        query = "mimeType = 'application/vnd.google-apps.folder'"
        query += " and title = '%s'" % dirname
        query += " and '%s' in parents" % parent
        # lg.debug("query %s" % query)
        children_dirs = self.file_list(query)
        # for adir in children_dirs:
        #     lg.debug("children %s id %s" % (adir['title'], adir['id']))
        return children_dirs

    # ## basic drive apis ##
    def file_list(self, query=""):
        """Retrieve a list of File resources.

        Args:
            service: Drive API service instance.
        Returns:
            List of File resources.
        """
        # lg.debug("file_list query %s" % query)
        result = []
        page_token = None
        while True:
            try:
                param = {}
                if query != "":
                    param['q'] = query
                if page_token:
                    param['pageToken'] = page_token
                files = self.service.files().list(**param).execute()
                result.extend(files['items'])
                page_token = files.get('nextPageToken')
                if not page_token:
                    break
            except errors.HttpError, error:
                print 'An error occurred: %s' % error
                break
        return result

    def permission_list(self, pfile="root"):
        """Retrieve a list of permissions of the file

        Args:
            pfile: drive file id
        Returns:
            list of file permissions
        """
        # lg.debug("permission_list query %s" % query)
        result = []
        page_token = None
        while True:
            try:
                param = {}
                if page_token:
                    param['pageToken'] = page_token
                perms = self.service.permissions().list(fileId=pfile).execute()
                result.extend(perms['items'])
                page_token = perms.get('nextPageToken')
                if not page_token:
                    break
            except errors.HttpError, error:
                print 'An error occurred: %s' % error
                break
        return result

    # deprecated
    def children_list(self, parent="root", query=""):
        """Retrieve a list of File resources.

        Args:
            parent: parent id or alias 'root'
            query: query string
        Returns:
            List of File resources.
        """
        result = []
        page_token = None
        while True:
            try:
                param = {}
                if query != "":
                    param['q'] = query
                if page_token:
                    param['pageToken'] = page_token
                files = self.service.children().list(
                    folderId=parent, **param).execute()
                result.extend(files['items'])
                page_token = files.get('nextPageToken')
                if not page_token:
                    break
            except errors.HttpError, error:
                print 'An error occurred: %s' % error
                break
        return result

======================================================================
Row 284665 -- Ahn1/Clinq @ refs/heads/master
File: web/clinq/management/commands/updateindex.py
Copies: 1
======================================================================

import os

import clinq.models as model

from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
import clinq.management.commands.tagHandler as handler

class Command(BaseCommand):
    #option_list = BaseCommand.option_list + (
    #    make_option('--long', '-l', dest='long',
    #        help='Help for the long options'),
    #)
    help = 'Refresh media index'

    def handle(self, **options):
        path = settings.MEDIA_PATH
        print path
        self.IndexFolder(path)

    def IndexFolder(self, path):
        oslist = os.listdir(path)
        oslist = [os.path.join(path, f) for f in oslist]
        files = [f for f in oslist if os.path.isfile(f)]
        dirs = [f for f in oslist if os.path.isdir(f)]
        for subdir in dirs:
            self.IndexFolder(subdir)
            #print subdir
        for targetFile in files:
            self.IndexFile(targetFile)

    def IndexFile(self, path):
        try:
            fileName, fileExtension = os.path.splitext(path)
            relPath = os.path.relpath(path, settings.MEDIA_PATH)
            dbObj = None
            if model.File.objects.filter(path=relPath).count() == 0:
                dbObj = model.File()
                dbObj.path = relPath
            else:
                dbObj = model.File.objects.filter(path=relPath)[:1][0]
            lastEditTime = os.stat(path).st_mtime
            if dbObj.changeDate < lastEditTime:
                dbObj.changeDate = lastEditTime
                dbObj.save()
                if fileExtension in [".mp3"]:
                    self.HandleAudioFile(path, dbObj)
                else:
                    print("Skip file '%s'" % path)
        except Exception, e:
            print e

    def HandleAudioFile(self, path, refdbFile):
        print "Try to handle {0}".format(path)
        fileName, fileExtension = os.path.splitext(path)
        tagObject = None
        if model.AudioFile.objects.filter(refFile=refdbFile).count() == 0:
            tagObject = model.AudioFile()
            tagObject.refFile = refdbFile
            print "Create new mp3 Tag"
        else:
            tagObject = model.AudioFile.objects.filter(refFile=refdbFile)[:1][0]
            print "Load mp3 Tag"
        if fileExtension in handler.audio:
            handlerClass = handler.audio[fileExtension]
            handlerObj = handlerClass()
            handlerObj.UpdateTags(path, tagObject)
            print [tagObject]
            tagObject.save()
======================================================================
Row 284666 -- bqbn/addons-server @ refs/heads/master
File: src/olympia/addons/api_urls.py
Copies: 1
======================================================================

from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter

from olympia.activity.views import VersionReviewNotesViewSet

from .views import (
    AddonAutoCompleteSearchView, AddonFeaturedView, AddonRecommendationView,
    AddonSearchView, AddonVersionViewSet, AddonViewSet, CompatOverrideView,
    LanguageToolsView, ReplacementAddonView, StaticCategoryView)

addons = SimpleRouter()
addons.register(r'addon', AddonViewSet, basename='addon')

# Router for children of /addons/addon/{addon_pk}/.
sub_addons = NestedSimpleRouter(addons, r'addon', lookup='addon')
sub_addons.register('versions', AddonVersionViewSet, basename='addon-version')
sub_versions = NestedSimpleRouter(sub_addons, r'versions', lookup='version')
sub_versions.register(r'reviewnotes', VersionReviewNotesViewSet,
                      basename='version-reviewnotes')

urls = [
    url(r'', include(addons.urls)),
    url(r'', include(sub_addons.urls)),
    url(r'', include(sub_versions.urls)),
    url(r'^autocomplete/$', AddonAutoCompleteSearchView.as_view(),
        name='addon-autocomplete'),
    url(r'^search/$', AddonSearchView.as_view(), name='addon-search'),
    url(r'^categories/$', StaticCategoryView.as_view(), name='category-list'),
    url(r'^language-tools/$', LanguageToolsView.as_view(),
        name='addon-language-tools'),
    url(r'^replacement-addon/$', ReplacementAddonView.as_view(),
        name='addon-replacement-addon'),
    url(r'^recommendations/$', AddonRecommendationView.as_view(),
        name='addon-recommendations'),
]

addons_v3 = urls + [
    url(r'^compat-override/$', CompatOverrideView.as_view(),
        name='addon-compat-override'),
    url(r'^featured/$', AddonFeaturedView.as_view(), name='addon-featured'),
]

addons_v4 = urls
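For orientation, the routers registered above generate roughly the following routes (the names follow from the basenames; the exact patterns come from DRF and drf-nested-routers, so treat this as an approximation rather than the repo's actual URL table):

    # addon/                                              -> addon-list
    # addon/{pk}/                                          -> addon-detail
    # addon/{addon_pk}/versions/                           -> addon-version-list
    # addon/{addon_pk}/versions/{pk}/                      -> addon-version-detail
    # addon/{addon_pk}/versions/{version_pk}/reviewnotes/  -> version-reviewnotes-list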
======================================================================
Row 284667 -- Dubrzr/django-push-notifications @ refs/heads/master
File: tests/test_models.py
Copies: 14
======================================================================

import json

import mock

from django.test import TestCase
from django.utils import timezone

from push_notifications.models import GCMDevice, APNSDevice
from tests.mock_responses import GCM_PLAIN_RESPONSE, \
    GCM_MULTIPLE_JSON_RESPONSE, GCM_PLAIN_RESPONSE_ERROR, \
    GCM_JSON_RESPONSE_ERROR, GCM_PLAIN_RESPONSE_ERROR_B
from push_notifications.gcm import GCMError

class ModelTestCase(TestCase):
    def test_can_save_gcm_device(self):
        device = GCMDevice.objects.create(
            registration_id="a valid registration id"
        )
        assert device.id is not None
        assert device.date_created is not None
        assert device.date_created.date() == timezone.now().date()

    def test_can_create_save_device(self):
        device = APNSDevice.objects.create(
            registration_id="a valid registration id"
        )
        assert device.id is not None
        assert device.date_created is not None
        assert device.date_created.date() == timezone.now().date()

    def test_gcm_send_message(self):
        device = GCMDevice.objects.create(
            registration_id="abc",
        )
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_PLAIN_RESPONSE) as p:
            device.send_message("Hello world")
            p.assert_called_once_with(
                b"data.message=Hello+world&registration_id=abc",
                "application/x-www-form-urlencoded;charset=UTF-8")

    def test_gcm_send_message_extra(self):
        device = GCMDevice.objects.create(
            registration_id="abc",
        )
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_PLAIN_RESPONSE) as p:
            device.send_message("Hello world", extra={"foo": "bar"})
            p.assert_called_once_with(
                b"data.foo=bar&data.message=Hello+world&registration_id=abc",
                "application/x-www-form-urlencoded;charset=UTF-8")

    def test_gcm_send_message_collapse_key(self):
        device = GCMDevice.objects.create(
            registration_id="abc",
        )
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_PLAIN_RESPONSE) as p:
            device.send_message("Hello world", collapse_key="test_key")
            p.assert_called_once_with(
                b"collapse_key=test_key&data.message=Hello+world&registration_id=abc",
                "application/x-www-form-urlencoded;charset=UTF-8")

    def test_gcm_send_message_to_multiple_devices(self):
        GCMDevice.objects.create(
            registration_id="abc",
        )
        GCMDevice.objects.create(
            registration_id="abc1",
        )
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_MULTIPLE_JSON_RESPONSE) as p:
            GCMDevice.objects.all().send_message("Hello world")
            p.assert_called_once_with(
                json.dumps({
                    "data": {"message": "Hello world"},
                    "registration_ids": ["abc", "abc1"]
                }, separators=(",", ":"), sort_keys=True).encode("utf-8"),
                "application/json")

    def test_gcm_send_message_extra_to_multiple_devices(self):
        GCMDevice.objects.create(
            registration_id="abc",
        )
        GCMDevice.objects.create(
            registration_id="abc1",
        )
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_MULTIPLE_JSON_RESPONSE) as p:
            GCMDevice.objects.all().send_message("Hello world", extra={"foo": "bar"})
            p.assert_called_once_with(
                json.dumps({
                    "data": {"foo": "bar", "message": "Hello world"},
                    "registration_ids": ["abc", "abc1"]
                }, separators=(",", ":"), sort_keys=True).encode("utf-8"),
                "application/json")

    def test_gcm_send_message_collapse_to_multiple_devices(self):
        GCMDevice.objects.create(
            registration_id="abc",
        )
        GCMDevice.objects.create(
            registration_id="abc1",
        )
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_MULTIPLE_JSON_RESPONSE) as p:
            GCMDevice.objects.all().send_message("Hello world", collapse_key="test_key")
            p.assert_called_once_with(
                json.dumps({
                    "collapse_key": "test_key",
                    "data": {"message": "Hello world"},
                    "registration_ids": ["abc", "abc1"]
                }, separators=(",", ":"), sort_keys=True).encode("utf-8"),
                "application/json")

    def test_gcm_send_message_to_single_device_with_error(self):
        # these errors are device specific, device.active will be set false
        device_list = ['abc', 'abc1']
        self.create_devices(device_list)
        for index, error in enumerate(GCM_PLAIN_RESPONSE_ERROR):
            with mock.patch("push_notifications.gcm._gcm_send",
                            return_value=error) as p:
                device = GCMDevice.objects. \
                    get(registration_id=device_list[index])
                device.send_message("Hello World!")
            assert GCMDevice.objects.get(registration_id=device_list[index]) \
                .active is False

    def test_gcm_send_message_to_single_device_with_error_b(self):
        # these errors are not device specific, GCMError should be thrown
        device_list = ['abc']
        self.create_devices(device_list)
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_PLAIN_RESPONSE_ERROR_B) as p:
            device = GCMDevice.objects. \
                get(registration_id=device_list[0])
            with self.assertRaises(GCMError):
                device.send_message("Hello World!")
        assert GCMDevice.objects.get(registration_id=device_list[0]) \
            .active is True

    def test_gcm_send_message_to_multiple_devices_with_error(self):
        device_list = ['abc', 'abc1', 'abc2']
        self.create_devices(device_list)
        with mock.patch("push_notifications.gcm._gcm_send",
                        return_value=GCM_JSON_RESPONSE_ERROR) as p:
            devices = GCMDevice.objects.all()
            devices.send_message("Hello World")
            assert GCMDevice.objects.get(registration_id=device_list[0]) \
                .active is False
            assert GCMDevice.objects.get(registration_id=device_list[1]) \
                .active is True
            assert GCMDevice.objects.get(registration_id=device_list[2]) \
                .active is False

    def test_apns_send_message(self):
        device = APNSDevice.objects.create(
            registration_id="abc",
        )
        socket = mock.MagicMock()
        with mock.patch("push_notifications.apns._apns_pack_frame") as p:
            device.send_message("Hello world", socket=socket, expiration=1)
            p.assert_called_once_with("abc", b'{"aps":{"alert":"Hello world"}}', 0, 1, 10)

    def test_apns_send_message_extra(self):
        device = APNSDevice.objects.create(
            registration_id="abc",
        )
        socket = mock.MagicMock()
        with mock.patch("push_notifications.apns._apns_pack_frame") as p:
            device.send_message("Hello world", extra={"foo": "bar"}, socket=socket,
                                identifier=1, expiration=2, priority=5)
            p.assert_called_once_with("abc", b'{"aps":{"alert":"Hello world"},"foo":"bar"}', 1, 2, 5)

    def create_devices(self, devices):
        for device in devices:
            GCMDevice.objects.create(
                registration_id=device,
            )
class="block ">3</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import unicode_literals import re from .common import InfoExtractor from .mtv import MTVServicesInfoExtractor from ..utils import ( compat_str, compat_urllib_parse, ExtractorError, float_or_none, unified_strdate, ) class ComedyCentralIE(MTVServicesInfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/ (video-clips|episodes|cc-studios|video-collections|full-episodes) /(?P<title>.*)''' _FEED_URL = 'http://comedycentral.com/feeds/mrss/' _TEST = { 'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother', 'md5': 'c4f48e9eda1b16dd10add0744344b6d8', 'info_dict': { 'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354', 'ext': 'mp4', 'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother', 'description': 'After a certain point, breastfeeding becomes c**kblocking.', }, } class ComedyCentralShowsIE(InfoExtractor): IE_DESC = 'The Daily Show / The Colbert Report' # urls can be abbreviations like :thedailyshow or :colbert # urls for episodes like: # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524 _VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport) |https?://(:www\.)? (?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/ ((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)| (?P<clip> (?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+)) |(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?)) |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)) )| (?P<interview> extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?))) (?:[?#].*|$)''' _TESTS = [{ 'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart', 'md5': '4e2f5cb088a83cd8cdb7756132f9739d', 'info_dict': { 'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55', 'ext': 'mp4', 'upload_date': '20121213', 'description': 'Kristen Stewart learns to let loose in "On the Road."', 'uploader': 'thedailyshow', 'title': 'thedailyshow kristen-stewart part 1', } }, { 'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview', 'only_matching': True, }, { 'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news', 'only_matching': True, }, { 'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary', 'only_matching': True, }, { 'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food', 'only_matching': True, }, { 'url': 
'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel', 'only_matching': True, }] _available_formats = ['3500', '2200', '1700', '1200', '750', '400'] _video_extensions = { '3500': 'mp4', '2200': 'mp4', '1700': 'mp4', '1200': 'mp4', '750': 'mp4', '400': 'mp4', } _video_dimensions = { '3500': (1280, 720), '2200': (960, 540), '1700': (768, 432), '1200': (640, 360), '750': (512, 288), '400': (384, 216), } @staticmethod def _transform_rtmp_url(rtmp_video_url): m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url) if not m: raise ExtractorError('Cannot transform RTMP url') base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/' return base + m.group('finalid') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) if mobj.group('shortname'): if mobj.group('shortname') in ('tds', 'thedailyshow'): url = 'http://thedailyshow.cc.com/full-episodes/' else: url = 'http://thecolbertreport.cc.com/full-episodes/' mobj = re.match(self._VALID_URL, url, re.VERBOSE) assert mobj is not None if mobj.group('clip'): if mobj.group('videotitle'): epTitle = mobj.group('videotitle') elif mobj.group('showname') == 'thedailyshow': epTitle = mobj.group('tdstitle') else: epTitle = mobj.group('cntitle') dlNewest = False elif mobj.group('interview'): epTitle = mobj.group('interview_title') dlNewest = False else: dlNewest = not mobj.group('episode') if dlNewest: epTitle = mobj.group('showname') else: epTitle = mobj.group('episode') show_name = mobj.group('showname') webpage, htmlHandle = self._download_webpage_handle(url, epTitle) if dlNewest: url = htmlHandle.geturl() mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError('Invalid redirected URL: ' + url) if mobj.group('episode') == '': raise ExtractorError('Redirected URL is still not specific: ' + url) epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1] mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage) if len(mMovieParams) == 0: # The Colbert Report embeds the information in a without # a URL prefix; so extract the alternate reference # and then add the URL prefix manually. 
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage) if len(altMovieParams) == 0: raise ExtractorError('unable to find Flash URL in webpage ' + url) else: mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])] uri = mMovieParams[0][1] # Correct cc.com in uri uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri) index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri})) idoc = self._download_xml( index_url, epTitle, 'Downloading show index', 'Unable to download episode index') title = idoc.find('./channel/title').text description = idoc.find('./channel/description').text entries = [] item_els = idoc.findall('.//item') for part_num, itemEl in enumerate(item_els): upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text) thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url') content = itemEl.find('.//{http://search.yahoo.com/mrss/}content') duration = float_or_none(content.attrib.get('duration')) mediagen_url = content.attrib['url'] guid = itemEl.find('./guid').text.rpartition(':')[-1] cdoc = self._download_xml( mediagen_url, epTitle, 'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els))) turls = [] for rendition in cdoc.findall('.//rendition'): finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text) turls.append(finfo) formats = [] for format, rtmp_video_url in turls: w, h = self._video_dimensions.get(format, (None, None)) formats.append({ 'format_id': 'vhttp-%s' % format, 'url': self._transform_rtmp_url(rtmp_video_url), 'ext': self._video_extensions.get(format, 'mp4'), 'height': h, 'width': w, 'format_note': 'HTTP 400 at the moment (patches welcome!)', 'preference': -100, }) formats.append({ 'format_id': 'rtmp-%s' % format, 'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'), 'ext': self._video_extensions.get(format, 'mp4'), 'height': h, 'width': w, }) self._sort_formats(formats) virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1) entries.append({ 'id': guid, 'title': virtual_id, 'formats': formats, 'uploader': show_name, 'upload_date': upload_date, 'duration': duration, 'thumbnail': thumbnail, 'description': description, }) return { '_type': 'playlist', 'entries': entries, 'title': show_name + ' ' + title, 'description': description, } </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284669"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">MaizerGomes/youtube-dl</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">youtube_dl/extractor/collegerama.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">111</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import unicode_literals import 
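An aside on `_transform_rtmp_url` in the extractor above: it grafts the `gsp.comedystor/...` tail of an RTMP URL onto a fixed HTTP base. A standalone illustration follows; the sample RTMP URL is invented, while the regex and base string are copied from the row.

```python
import re

# Invented sample RTMP URL; only the 'gsp.comedystor/...' tail is kept.
rtmp = 'rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/com/dailyshow/TDS/clip.mp4'
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp)
base = ('http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+'
        '_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/')
print(base + m.group('finalid'))
# -> http://mtvnmobile.vo.llnwd.net/.../mtvnorigin/gsp.comedystor/com/dailyshow/TDS/clip.mp4
```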
Row 284669 · MaizerGomes/youtube-dl · refs/heads/master · youtube_dl/extractor/collegerama.py · 111

```python
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
    float_or_none,
    int_or_none,
)


class CollegeRamaIE(InfoExtractor):
    _VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
    _TESTS = [
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
            'md5': '481fda1c11f67588c0d9d8fbdced4e39',
            'info_dict': {
                'id': '585a43626e544bdd97aeb71a0ec907a01d',
                'ext': 'mp4',
                'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
                'description': '',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 7713.088,
                'timestamp': 1413309600,
                'upload_date': '20141014',
            },
        },
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
            'md5': 'ef1fdded95bdf19b12c5999949419c92',
            'info_dict': {
                'id': '86a9ea9f53e149079fbdb4202b521ed21d',
                'ext': 'wmv',
                'title': '64ste Vakantiecursus: Afvalwater',
                'description': 'md5:7fd774865cc69d972f542b157c328305',
                'duration': 10853,
                'timestamp': 1326446400,
                'upload_date': '20120113',
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        player_options_request = {
            "getPlayerOptionsRequest": {
                "ResourceId": video_id,
                "QueryString": "",
            }
        }

        request = compat_urllib_request.Request(
            'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
            json.dumps(player_options_request))
        request.add_header('Content-Type', 'application/json')

        player_options = self._download_json(request, video_id)

        presentation = player_options['d']['Presentation']
        title = presentation['Title']
        description = presentation.get('Description')
        thumbnail = None
        duration = float_or_none(presentation.get('Duration'), 1000)
        timestamp = int_or_none(presentation.get('UnixTime'), 1000)

        formats = []
        for stream in presentation['Streams']:
            for video in stream['VideoUrls']:
                thumbnail_url = stream.get('ThumbnailUrl')
                if thumbnail_url:
                    thumbnail = 'http://collegerama.tudelft.nl' + thumbnail_url
                format_id = video['MediaType']
                if format_id == 'SS':
                    continue
                formats.append({
                    'url': video['Location'],
                    'format_id': format_id,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }
```
Row 284670 · DVSBA/ajenti · refs/heads/master · ajenti/feedback.py · 17

```python
"""
Module for sending usage statistics to ajenti.org
"""

__all__ = ['send_stats', 'check_uid']

import os
import base64
import random

from ajenti.utils import *
from ajenti import version


global uid
uid = ''


def send_stats(server, plugins, addplugin=None, delplugin=None):
    """
    Sends usage statistics to the server.
    Statistics include: OS name, list of installed plugins and Ajenti version.

    :param  server:     server URL
    :type   server:     str
    :param  addplugin:  plugin being currently installed or None
    :type   addplugin:  str
    :param  delplugin:  plugin being currently removed or None
    :type   delplugin:  str
    """
    plugs = []
    plugs.extend(plugins)
    if addplugin:
        plugs.append(addplugin)
    if delplugin and delplugin in plugs:
        plugs.remove(delplugin)
    plugs = ','.join(plugs)
    data = '1|%s|%s|%s|,%s,' % (uid, version(), detect_platform(mapping=False), plugs)
    data = base64.b64encode(data)
    download('http://%s/api/submit?data=%s' % (server, data))


def check_uid():
    """
    Checks that installation UID is present and generates it if it's not.
    """
    global uid
    file = '/var/lib/ajenti/installation-uid'
    if not os.path.exists(file):
        uid = str(random.randint(1, 9000*9000))
        try:
            open(file, 'w').write(uid)
        except:
            uid = '0'
    else:
        uid = open(file).read()
```
""" from os.path import dirname, join from random import choice from zine.api import * from zine.views.admin import render_admin_response from zine.utils.admin import flash from zine.utils.http import redirect from zine.utils.forms import TextField from zine.config import ConfigurationTransactionError from zine.privileges import BLOG_ADMIN TEMPLATES = join(dirname(__file__), 'templates') def add_ad_sense_link(req, navigation_bar): if not req.user.has_privilege(BLOG_ADMIN): return for link_id, url, title, children in navigation_bar: if link_id == 'options': children.insert(-3, ('ad sense', url_for('ad_sense/config'), _('Ad sense'))) @require_privilege(BLOG_ADMIN) def view_ad_sense_config(req): client_code = req.args.get('client_code') banner_slot = req.args.get('banner_slot') width = req.args.get('width') height = req.args.get('height') if client_code and banner_slot and width and height: try: req.app.cfg.change_single('ad_sense/client_code', client_code) req.app.cfg.change_single('ad_sense/banner_slot', banner_slot) req.app.cfg.change_single('ad_sense/width', width) req.app.cfg.change_single('ad_sense/height', height) flash(_('Config updated!'), 'info') except ConfigurationTransactionError, e: flash(_('The code could not be changed.'), 'error') return redirect(url_for('ad_sense/config')) return render_admin_response('admin/ad_sense.html', 'config.ad_sense', client_code=req.app.cfg['ad_sense/client_code'], banner_slot=req.app.cfg['ad_sense/banner_slot'], width=req.app.cfg['ad_sense/width'], height=req.app.cfg['ad_sense/height'] ) def add_adsense_banner(post): conf = get_application().cfg client_code = conf['ad_sense/client_code'] banner_slot = conf['ad_sense/banner_slot'] banner_width = conf['ad_sense/width'] banner_height = conf['ad_sense/height'] if choice((True, False)): return ''' <span class="ad"> <script type="text/javascript"><!-- google_ad_client = "'''+client_code+'''"; google_ad_slot = "'''+banner_slot+'''"; google_ad_width = '''+banner_width+'''; google_ad_height = '''+banner_height+'''; //--> </script> <script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"> </script> </span> ''' else: return '' def insert_header_js(metadata): metadata.append(''' <script type="text/javascript"> var _gaq = _gaq || []; _gaq.push(['_setAccount', 'UA-23430110-1']); _gaq.push(['_trackPageview']); (function() { var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true; ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js'; var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s); })(); </script> ''') def setup(app, plugin): """This function is called by Zine in the application initialization phase. Here we connect to the events and register our template paths, url rules, views etc. """ app.connect_event('after-entry-rendered', add_adsense_banner) # our fish has a configurable skin. So we register one for it which # defaults to blue. app.add_config_var('ad_sense/client_code', TextField(default='')) app.add_config_var('ad_sense/banner_slot', TextField(default='')) app.add_config_var('ad_sense/width', TextField(default='')) app.add_config_var('ad_sense/height', TextField(default='')) app.connect_event('modify-admin-navigation-bar', add_ad_sense_link) app.connect_event('before-metadata-assembled', insert_header_js) # for the admin panel we add a url rule. Because it's an admin panel # page located in options we add such an url rule. 
app.add_url_rule('/options/ad_sense', prefix='admin', endpoint='ad_sense/config', view=view_ad_sense_config) # add our templates to the searchpath so that Zine can find the # admin panel template for the fish config panel. app.add_template_searchpath(TEMPLATES) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284672"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jonyroda97/redbot-amigosprovaveis</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/develop</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lib/pip/_vendor/urllib3/contrib/socks.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">65</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- """ This module contains provisional support for SOCKS proxies from within urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and SOCKS5. To enable its functionality, either install PySocks or install this module with the ``socks`` extra. The SOCKS implementation supports the full range of urllib3 features. It also supports the following SOCKS features: - SOCKS4 - SOCKS4a - SOCKS5 - Usernames and passwords for the SOCKS proxy Known Limitations: - Currently PySocks does not support contacting remote websites via literal IPv6 addresses. Any such connection attempt will fail. You must use a domain name. - Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any such connection attempt will fail. """ from __future__ import absolute_import try: import socks except ImportError: import warnings from ..exceptions import DependencyWarning warnings.warn(( 'SOCKS support in urllib3 requires the installation of optional ' 'dependencies: specifically, PySocks. For more information, see ' 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' ), DependencyWarning ) raise from socket import error as SocketError, timeout as SocketTimeout from ..connection import ( HTTPConnection, HTTPSConnection ) from ..connectionpool import ( HTTPConnectionPool, HTTPSConnectionPool ) from ..exceptions import ConnectTimeoutError, NewConnectionError from ..poolmanager import PoolManager from ..util.url import parse_url try: import ssl except ImportError: ssl = None class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ def __init__(self, *args, **kwargs): self._socks_options = kwargs.pop('_socks_options') super(SOCKSConnection, self).__init__(*args, **kwargs) def _new_conn(self): """ Establish a new connection via the SOCKS proxy. 
""" extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), proxy_type=self._socks_options['socks_version'], proxy_addr=self._socks_options['proxy_host'], proxy_port=self._socks_options['proxy_port'], proxy_username=self._socks_options['username'], proxy_password=self._socks_options['password'], proxy_rdns=self._socks_options['rdns'], timeout=self.timeout, **extra_kw ) except SocketTimeout as e: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout)) except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise # useful errors here. if e.socket_err: error = e.socket_err if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout) ) else: raise NewConnectionError( self, "Failed to establish a new connection: %s" % error ) else: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e ) except SocketError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( self, "Failed to establish a new connection: %s" % e) return conn # We don't need to duplicate the Verified/Unverified distinction from # urllib3/connection.py here because the HTTPSConnection will already have been # correctly set to either the Verified or Unverified form by that module. This # means the SOCKSHTTPSConnection will automatically be the correct type. class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): pass class SOCKSHTTPConnectionPool(HTTPConnectionPool): ConnectionCls = SOCKSConnection class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = SOCKSHTTPSConnection class SOCKSProxyManager(PoolManager): """ A version of the urllib3 ProxyManager that routes connections via the defined SOCKS proxy. 
""" pool_classes_by_scheme = { 'http': SOCKSHTTPConnectionPool, 'https': SOCKSHTTPSConnectionPool, } def __init__(self, proxy_url, username=None, password=None, num_pools=10, headers=None, **connection_pool_kw): parsed = parse_url(proxy_url) if username is None and password is None and parsed.auth is not None: split = parsed.auth.split(':') if len(split) == 2: username, password = split if parsed.scheme == 'socks5': socks_version = socks.PROXY_TYPE_SOCKS5 rdns = False elif parsed.scheme == 'socks5h': socks_version = socks.PROXY_TYPE_SOCKS5 rdns = True elif parsed.scheme == 'socks4': socks_version = socks.PROXY_TYPE_SOCKS4 rdns = False elif parsed.scheme == 'socks4a': socks_version = socks.PROXY_TYPE_SOCKS4 rdns = True else: raise ValueError( "Unable to determine SOCKS version from %s" % proxy_url ) self.proxy_url = proxy_url socks_options = { 'socks_version': socks_version, 'proxy_host': parsed.host, 'proxy_port': parsed.port, 'username': username, 'password': password, 'rdns': rdns } connection_pool_kw['_socks_options'] = socks_options super(SOCKSProxyManager, self).__init__( num_pools, headers, **connection_pool_kw ) self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284673"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">acshan/odoo</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/8.0</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">addons/google_calendar/google_calendar.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">59</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- import operator import simplejson import urllib2 import openerp from openerp import tools from openerp import SUPERUSER_ID from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, exception_to_unicode from openerp.tools.translate import _ from openerp.http import request from datetime import datetime, timedelta from dateutil import parser import pytz from openerp.osv import fields, osv import logging _logger = logging.getLogger(__name__) def status_response(status, substr=False): if substr: return int(str(status)[0]) else: return status_response(status, substr=True) == 2 class Meta(type): """ This Meta class allow to define class as a structure, and so instancied variable in __init__ to avoid to have side effect alike 'static' variable """ def __new__(typ, name, parents, attrs): methods = dict((k, v) for k, v in attrs.iteritems() if callable(v)) attrs = dict((k, v) for k, v in attrs.iteritems() if not callable(v)) def init(self, **kw): for k, v in attrs.iteritems(): setattr(self, k, v) for k, v in kw.iteritems(): assert k in attrs setattr(self, k, v) methods['__init__'] = init methods['__getitem__'] = getattr return type.__new__(typ, name, parents, methods) class Struct(object): __metaclass__ = Meta 
Row 284673 · acshan/odoo · refs/heads/8.0 · addons/google_calendar/google_calendar.py · 59

(The obfuscated email literal in `generate_data` is restored to its plain form; the row's content is cut off mid-line at the end, and is left truncated.)

```python
# -*- coding: utf-8 -*-
import operator
import simplejson
import urllib2

import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, exception_to_unicode
from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv

import logging
_logger = logging.getLogger(__name__)


def status_response(status, substr=False):
    if substr:
        return int(str(status)[0])
    else:
        return status_response(status, substr=True) == 2


class Meta(type):
    """ This Meta class allow to define class as a structure, and so instancied variable
        in __init__ to avoid to have side effect alike 'static' variable """
    def __new__(typ, name, parents, attrs):
        methods = dict((k, v) for k, v in attrs.iteritems() if callable(v))
        attrs = dict((k, v) for k, v in attrs.iteritems() if not callable(v))

        def init(self, **kw):
            for k, v in attrs.iteritems():
                setattr(self, k, v)
            for k, v in kw.iteritems():
                assert k in attrs
                setattr(self, k, v)

        methods['__init__'] = init
        methods['__getitem__'] = getattr
        return type.__new__(typ, name, parents, methods)


class Struct(object):
    __metaclass__ = Meta


class OpenerpEvent(Struct):
    event = False
    found = False
    event_id = False
    isRecurrence = False
    isInstance = False
    update = False
    status = False
    attendee_id = False
    synchro = False


class GmailEvent(Struct):
    event = False
    found = False
    isRecurrence = False
    isInstance = False
    update = False
    status = False


class SyncEvent(object):
    def __init__(self):
        self.OE = OpenerpEvent()
        self.GG = GmailEvent()
        self.OP = None

    def __getitem__(self, key):
        return getattr(self, key)

    def compute_OP(self, modeFull=True):
        #If event are already in Gmail and in OpenERP
        if self.OE.found and self.GG.found:
            is_owner = self.OE.event.env.user.id == self.OE.event.user_id.id
            #If the event has been deleted from one side, we delete on other side !
            if self.OE.status != self.GG.status and is_owner:
                self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
                                 'The event has been deleted from one side, we delete on other side !')
            #If event is not deleted !
            elif self.OE.status and (self.GG.status or not is_owner):
                if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
                    if self.OE.update < self.GG.update:
                        tmpSrc = 'GG'
                    elif self.OE.update > self.GG.update:
                        tmpSrc = 'OE'
                    assert tmpSrc in ['GG', 'OE']

                    #if self.OP.action == None:
                    if self[tmpSrc].isRecurrence:
                        if self[tmpSrc].status:
                            self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
                        else:
                            self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')
                    elif self[tmpSrc].isInstance:
                        self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
                    else:
                        self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
                else:
                    if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
                        self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
                    else:
                        self.OP = NothingToDo("", 'Not update needed')
            else:
                self.OP = NothingToDo("", "Both are already deleted")

        # New in openERP... Create on create_events of synchronize function
        elif self.OE.found and not self.GG.found:
            if self.OE.status:
                self.OP = Delete('OE', 'Update or delete from GOOGLE')
            else:
                if not modeFull:
                    self.OP = Delete('GG', 'Deleted from Odoo, need to delete it from Gmail if already created')
                else:
                    self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in Odoo")
        elif self.GG.found and not self.OE.found:
            tmpSrc = 'GG'
            if not self.GG.status and not self.GG.isInstance:
                # don't need to make something... because event has been created and deleted before the synchronization
                self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
            else:
                if self.GG.isInstance:
                    if self[tmpSrc].status:
                        self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
                    else:
                        self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
                else:
                    self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        myPrint = "\n\n---- A SYNC EVENT ---"
        myPrint += "\n    ID          OE: %s " % (self.OE.event and self.OE.event.id)
        myPrint += "\n    ID          GG: %s " % (self.GG.event and self.GG.event.get('id', False))
        myPrint += "\n    Name        OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
        myPrint += "\n    Name        GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
        myPrint += "\n    Found       OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
        myPrint += "\n    Recurrence  OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
        myPrint += "\n    Instance    OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
        myPrint += "\n    Synchro     OE: %10s " % (self.OE.synchro)
        myPrint += "\n    Update      OE: %10s " % (self.OE.update)
        myPrint += "\n    Update      GG: %10s " % (self.GG.update)
        myPrint += "\n    Status      OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
        if (self.OP is None):
            myPrint += "\n    Action      %s" % "---!!!---NONE---!!!---"
        else:
            myPrint += "\n    Action      %s" % type(self.OP).__name__
            myPrint += "\n    Source      %s" % (self.OP.src)
            myPrint += "\n    comment     %s" % (self.OP.info)
        return myPrint


class SyncOperation(object):
    def __init__(self, src, info, **kw):
        self.src = src
        self.info = info
        for k, v in kw.items():
            setattr(self, k, v)

    def __str__(self):
        return 'in__STR__'


class Create(SyncOperation):
    pass


class Update(SyncOperation):
    pass


class Delete(SyncOperation):
    pass


class NothingToDo(SyncOperation):
    pass


class Exclude(SyncOperation):
    pass


class google_calendar(osv.AbstractModel):
    STR_SERVICE = 'calendar'
    _name = 'google.%s' % STR_SERVICE

    def generate_data(self, cr, uid, event, isCreating=False, context=None):
        if not context:
            context = {}
        if event.allday:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=1), context=context).isoformat('T').split('T')[0]
            type = 'date'
            vstype = 'dateTime'
        else:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            type = 'dateTime'
            vstype = 'date'
        attendee_list = []
        for attendee in event.attendee_ids:
            email = tools.email_split(attendee.email)
            email = email[0] if email else 'NoEmail@mail.com'
            attendee_list.append({
                'email': email,
                'displayName': attendee.partner_id.name,
                'responseStatus': attendee.state or 'needsAction',
            })

        reminders = []
        for alarm in event.alarm_ids:
            reminders.append({
                "method": "email" if alarm.type == "email" else "popup",
                "minutes": alarm.duration_minutes
            })
        data = {
            "summary": event.name or '',
            "description": event.description or '',
            "start": {
                type: start_date,
                vstype: None,
                'timeZone': context.get('tz', 'UTC'),
            },
            "end": {
                type: final_date,
                vstype: None,
                'timeZone': context.get('tz', 'UTC'),
            },
            "attendees": attendee_list,
            "reminders": {
                "overrides": reminders,
                "useDefault": "false"
            },
            "location": event.location or '',
            "visibility": event['class'] or 'public',
        }
        if event.recurrency and event.rrule:
            data["recurrence"] = ["RRULE:" + event.rrule]

        if not event.active:
            data["state"] = "cancelled"

        if not self.get_need_synchro_attendee(cr, uid, context=context):
            data.pop("attendees")
        if isCreating:
            other_google_ids = [other_att.google_internal_event_id for other_att in event.attendee_ids if other_att.google_internal_event_id]
            if other_google_ids:
                data["id"] = other_google_ids[0]
        return data

    def create_an_event(self, cr, uid, event, context=None):
        gs_pool = self.pool['google.service']
        data = self.generate_data(cr, uid, event, isCreating=True, context=context)

        url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data_json = simplejson.dumps(data)
        return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)

    def delete_an_event(self, cr, uid, event_id, context=None):
        gs_pool = self.pool['google.service']
        params = {
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)
        return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)

    def get_calendar_primary_id(self, cr, uid, context=None):
        params = {
            'fields': 'id',
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        url = "/calendar/v3/calendars/primary"

        try:
            st, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
        except Exception, e:
            if (e.code == 401):  # Token invalid / Acces unauthorized
                error_msg = "Your token is invalid or has been revoked !"
                registry = openerp.modules.registry.RegistryManager.get(request.session.db)
                with registry.cursor() as cur:
                    self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)
                raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
            raise

        return (status_response(st), content['id'] or False, ask_time)

    def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
        if not token:
            token = self.get_token(cr, uid, context)

        params = {
            'fields': 'items,nextPageToken',
            'access_token': token,
            'maxResults': 1000,
            #'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
        }

        if lastSync:
            params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
            params['showDeleted'] = True
        else:
            params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")

        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/%s/events" % 'primary'
        if nextPageToken:
            params['pageToken'] = nextPageToken

        status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)

        google_events_dict = {}
        for google_event in content['items']:
            google_events_dict[google_event['id']] = google_event

        if content.get('nextPageToken'):
            google_events_dict.update(
                self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
            )
        return google_events_dict

    def get_one_event_synchro(self, cr, uid, google_id, context=None):
        token = self.get_token(cr, uid, context)

        params = {
            'access_token': token,
            'maxResults': 1000,
            'showDeleted': True,
        }

        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
        try:
            status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
        except:
            _logger.info("Calendar Synchro - In except of get_one_event_synchro")
            pass

        return status_response(status) and content or False

    def update_to_google(self, cr, uid, oe_event, google_event, context):
        calendar_event = self.pool['calendar.event']

        url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data = self.generate_data(cr, uid, oe_event, context=context)

        data['sequence'] = google_event.get('sequence', 0)
        data_json = simplejson.dumps(data)

        status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)

        update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
        calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})

        if context['curr_attendee']:
            self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)

    def update_an_event(self, cr, uid, event, context=None):
        data = self.generate_data(cr, uid, event, context=context)
        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
        headers = {}
        data['access_token'] = self.get_token(cr, uid, context)

        status, response, ask_time = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
        #TO_CHECK : , if http fail, no event, do DELETE ?
        return response

    def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
        gs_pool = self.pool['google.service']

        data = self.generate_data(cr, uid, event_new, context=context)

        data['recurringEventId'] = event_ori_google_id
        data['originalStartTime'] = event_new.recurrent_id_date

        url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json'}

        data['sequence'] = self.get_sequence(cr, uid, instance_id, context)

        data_json = simplejson.dumps(data)
        return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)

    def update_from_google(self, cr, uid, event, single_event_dict, type, context):
        if context is None:
            context = []

        calendar_event = self.pool['calendar.event']
        res_partner_obj = self.pool['res.partner']
        calendar_attendee_obj = self.pool['calendar.attendee']
        calendar_alarm_obj = self.pool['calendar.alarm']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
        attendee_record = []
        alarm_record = set()
        partner_record = [(4, myPartnerID)]
        result = {}

        if self.get_need_synchro_attendee(cr, uid, context=context):
            for google_attendee in single_event_dict.get('attendees', []):
                partner_email = google_attendee.get('email', False)
                if type == "write":
                    for oe_attendee in event['attendee_ids']:
                        if oe_attendee.email == partner_email:
                            calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
                            google_attendee['found'] = True
                            continue

                if google_attendee.get('found'):
                    continue

                attendee_id = res_partner_obj.search(cr, uid, [('email', '=', partner_email)], context=context)
                if not attendee_id:
                    data = {
                        'email': partner_email,
                        'customer': False,
                        'name': google_attendee.get("displayName", False) or partner_email
                    }
                    attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
                attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
                partner_record.append((4, attendee.get('id')))
                attendee['partner_id'] = attendee.pop('id')
                attendee['state'] = google_attendee['responseStatus']
                attendee_record.append((0, 0, attendee))
        for google_alarm in single_event_dict.get('reminders', {}).get('overrides', []):
            alarm_id = calendar_alarm_obj.search(
                cr,
                uid,
                [
                    ('type', '=', google_alarm['method'] if google_alarm['method'] == 'email' else 'notification'),
                    ('duration_minutes', '=', google_alarm['minutes'])
                ],
                context=context
            )
            if not alarm_id:
                data = {
                    'type': google_alarm['method'] if google_alarm['method'] == 'email' else 'notification',
                    'duration': google_alarm['minutes'],
                    'interval': 'minutes',
                    'name': "%s minutes - %s" % (google_alarm['minutes'], google_alarm['method'])
                }
                alarm_id = [calendar_alarm_obj.create(cr, uid, data, context=context)]
            alarm_record.add(alarm_id[0])

        UTC = pytz.timezone('UTC')
        if single_event_dict.get('start') and single_event_dict.get('end'):  # If not cancelled
            if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
                date = parser.parse(single_event_dict['start']['dateTime'])
                stop = parser.parse(single_event_dict['end']['dateTime'])
                date = str(date.astimezone(UTC))[:-6]
                stop = str(stop.astimezone(UTC))[:-6]
                allday = False
            else:
                date = (single_event_dict['start']['date'])
                stop = (single_event_dict['end']['date'])
                d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
                allday = True
                d_end = d_end + timedelta(days=-1)
                stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)

            update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
            result.update({
                'start': date,
                'stop': stop,
                'allday': allday
            })
        result.update({
            'attendee_ids': attendee_record,
            'partner_ids': list(set(partner_record)),
            'alarm_ids': [(6, 0, list(alarm_record))],
            'name': single_event_dict.get('summary', 'Event'),
            'description': single_event_dict.get('description', False),
            'location': single_event_dict.get('location', False),
            'class': single_event_dict.get('visibility', 'public'),
            'oe_update_date': update_date,
        })

        if single_event_dict.get("recurrence", False):
            rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
            result['rrule'] = rrule

        context = dict(context or {}, no_mail_to_attendees=True)
        if type == "write":
            res = calendar_event.write(cr, uid, event['id'], result, context=context)
        elif type == "copy":
            result['recurrence'] = True
            res = calendar_event.write(cr, uid, [event['id']], result, context=context)
        elif type == "create":
            res = calendar_event.create(cr, uid, result, context=context)

        if context['curr_attendee']:
            self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
        return res

    def remove_references(self, cr, uid, context=None):
        current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
        reset_data = {
            'google_calendar_rtoken': False,
            'google_calendar_token': False,
            'google_calendar_token_validity': False,
            'google_calendar_last_sync_date': False,
            'google_calendar_cal_id': False,
        }

        all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
        self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
        current_user.write(reset_data)
        return True

    def synchronize_events_cron(self, cr, uid, context=None):
        ids = self.pool['res.users'].search(cr, uid, [('google_calendar_last_sync_date', '!=', False)], context=context)
        _logger.info("Calendar Synchro - Started by cron")

        for user_to_sync in ids:
            _logger.info("Calendar Synchro - Starting synchronization for a new user [%s] " % user_to_sync)
            try:
                resp = self.synchronize_events(cr, user_to_sync, False, lastSync=True, context=None)
                if resp.get("status") == "need_reset":
                    _logger.info("[%s] Calendar Synchro - Failed - NEED RESET !" % user_to_sync)
                else:
                    _logger.info("[%s] Calendar Synchro - Done with status : %s !" % (user_to_sync, resp.get("status")))
            except Exception, e:
                _logger.info("[%s] Calendar Synchro - Exception : %s !" % (user_to_sync, exception_to_unicode(e)))
        _logger.info("Calendar Synchro - Ended by cron")

    def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
        if context is None:
            context = {}

        # def isValidSync(syncToken):
        #     gs_pool = self.pool['google.service']
        #     params = {
        #         'maxResults': 1,
        #         'fields': 'id',
        #         'access_token': self.get_token(cr, uid, context),
        #         'syncToken': syncToken,
        #     }
        #     url = "/calendar/v3/calendars/primary/events"
        #     status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
        #     return int(status) != 410

        user_to_sync = ids and ids[0] or uid
        current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_to_sync, context=context)

        st, current_google, ask_time = self.get_calendar_primary_id(cr, user_to_sync, context=context)

        if current_user.google_calendar_cal_id:
            if current_google != current_user.google_calendar_cal_id:
                return {
                    "status": "need_reset",
                    "info": {
                        "old_name": current_user.google_calendar_cal_id,
                        "new_name": current_google
                    },
                    "url": ''
                }

            if lastSync and self.get_last_sync_date(cr, user_to_sync, context=context) and not self.get_disable_since_synchro(cr, user_to_sync, context=context):
                lastSync = self.get_last_sync_date(cr, user_to_sync, context)
                _logger.info("[%s] Calendar Synchro - MODE SINCE_MODIFIED : %s !" % (user_to_sync, lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT)))
            else:
                lastSync = False
                _logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO FORCED" % user_to_sync)
        else:
            current_user.write({'google_calendar_cal_id': current_google})
            lastSync = False
            _logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID" % user_to_sync)

        new_ids = []
        new_ids += self.create_new_events(cr, user_to_sync, context=context)
        new_ids += self.bind_recurring_events_to_google(cr, user_to_sync, context)

        res = self.update_events(cr, user_to_sync, lastSync, context)

        current_user.write({'google_calendar_last_sync_date': ask_time})
        return {
            "status": res and "need_refresh" or "no_new_event_from_google",
            "url": ''
        }

    def create_new_events(self, cr, uid, context=None):
        if context is None:
            context = {}

        new_ids = []
        ev_obj = self.pool['calendar.event']
        att_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id

        context_norecurrent = context.copy()
        context_norecurrent['virtual_id'] = False
        my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
                                              ('google_internal_event_id', '=', False),
                                              '|',
                                              ('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ], context=context_norecurrent)
        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            other_google_ids = [other_att.google_internal_event_id for other_att in att.event_id.attendee_ids
                                if other_att.google_internal_event_id and other_att.id != att.id]
            for other_google_id in other_google_ids:
                if self.get_one_event_synchro(cr, uid, other_google_id, context=context):
                    att_obj.write(cr, uid, [att.id], {'google_internal_event_id': other_google_id})
                    break
            else:
                if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
                    st, response, ask_time = self.create_an_event(cr, uid, att.event_id, context=context)
                    if status_response(st):
                        update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
                        ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
                        new_ids.append(response['id'])
                        att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
                        cr.commit()
                    else:
                        _logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
                        _logger.warning("Response : %s" % response)
        return new_ids

    def get_context_no_virtual(self, context):
        context_norecurrent = context.copy()
        context_norecurrent['virtual_id'] = False
        context_norecurrent['active_test'] = False
        return context_norecurrent

    def bind_recurring_events_to_google(self, cr, uid, context=None):
        if context is None:
            context = {}

        new_ids = []
        ev_obj = self.pool['calendar.event']
        att_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id

        context_norecurrent = self.get_context_no_virtual(context)
        my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)
        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:
                new_google_internal_event_id = False

                source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)
                source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)
                if not source_attendee_record_id:
                    continue
                source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]

                if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:
                    new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')
                elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:
                    new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'

                if new_google_internal_event_id:
                    #TODO WARNING, NEED TO CHECK THAT EVENT and ALL instance NOT DELETE IN GMAIL BEFORE !
                    try:
                        st, response, ask_time = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)
                        if status_response(st):
                            att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)
                            new_ids.append(new_google_internal_event_id)
                            cr.commit()
                        else:
                            _logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
                            _logger.warning("Response : %s" % response)
                    except:
                        pass
        return new_ids

    def update_events(self, cr, uid, lastSync=False, context=None):
        context = dict(context or {})

        calendar_event = self.pool['calendar.event']
        user_obj = self.pool['res.users']
        att_obj = self.pool['calendar.attendee']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
        context_novirtual = self.get_context_no_virtual(context)

        if lastSync:
            try:
                all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)
            except urllib2.HTTPError, e:
                if e.code == 410:  # GONE, Google is lost.
                    # we need to force the rollback from this cursor, because it locks my res_users but I need to write in this tuple before to raise.
                    cr.rollback()
                    registry = openerp.modules.registry.RegistryManager.get(request.session.db)
                    with registry.cursor() as cur:
                        self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_last_sync_date': False}, context=context)
                error_key = simplejson.loads(str(e))
                error_key = error_key.get('error', {}).get('message', 'nc')
                error_msg = "Google is lost... the next synchro will be a full synchro. \n\n %s" % error_key
                raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)

            my_google_att_ids = att_obj.search(cr, uid, [
                ('partner_id', '=', myPartnerID),
                ('google_internal_event_id', 'in', all_event_from_google.keys())
            ], context=context_novirtual)

            my_openerp_att_ids = att_obj.search(cr, uid, [
                ('partner_id', '=', myPartnerID),
                ('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                ('google_internal_event_id', '!=', False),
            ], context=context_novirtual)

            my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)

            if self.get_print_log(cr, uid, context=context):
                _logger.info("Calendar Synchro - \n\nUPDATE IN GOOGLE\n%s\n\nRETRIEVE FROM OE\n%s\n\nUPDATE IN OE\n%s\n\nRETRIEVE FROM GG\n%s\n\n" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))

            for giid in my_openerp_googleinternal_ids:
                active = True  # if not sure, we request google
                if giid.get('event_id'):
                    active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active

                if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:
                    one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)
                    if one_event:
                        all_event_from_google[one_event['id']] = one_event

            my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))
        else:
            domain = [
                ('partner_id', '=', myPartnerID),
                ('google_internal_event_id', '!=', False),
                '|',
                ('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                ('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
            ]

            # Select all events from OpenERP which have been already synchronized in gmail
            my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)
            all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)

        event_to_synchronize = {}
        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            event = att.event_id

            base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]

            if base_event_id not in event_to_synchronize:
                event_to_synchronize[base_event_id] = {}

            if att.google_internal_event_id not in event_to_synchronize[base_event_id]:
                event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()

            ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]

            ev_to_sync.OE.attendee_id = att.id
            ev_to_sync.OE.event = event
            ev_to_sync.OE.found = True
            ev_to_sync.OE.event_id = event.id
            ev_to_sync.OE.isRecurrence = event.recurrency
            ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)
            ev_to_sync.OE.update = event.oe_update_date
            ev_to_sync.OE.status = event.active
            ev_to_sync.OE.synchro = att.oe_synchro_date

        for event in all_event_from_google.values():
            event_id = event.get('id')

            base_event_id = event_id.rsplit('_', 1)[0]

            if base_event_id not in event_to_synchronize:
                event_to_synchronize[base_event_id] = {}

            if event_id not in event_to_synchronize[base_event_id]:
                event_to_synchronize[base_event_id][event_id] = SyncEvent()

            ev_to_sync = event_to_synchronize[base_event_id][event_id]

            ev_to_sync.GG.event = event
            ev_to_sync.GG.found = True
            ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))
            ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))
            ev_to_sync.GG.update = event.get('updated', None)  # if deleted, no date without browse event
            if ev_to_sync.GG.update:
                ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')
            ev_to_sync.GG.status = (event.get('status') != 'cancelled')

        ######################
        #   PRE-PROCESSING   #
        ######################
        for base_event in event_to_synchronize:
            for current_event in event_to_synchronize[base_event]:
                event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)
            if self.get_print_log(cr, uid, context=context):
                if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):
                    _logger.info(event_to_synchronize[base_event])

        ######################
        #      DO ACTION     #
        ######################
        for base_event in event_to_synchronize:
            event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))
            for current_event in event_to_synchronize[base_event]:
                cr.commit()
                event = current_event[1]  # event is an Sync Event !
                actToDo = event.OP
                actSrc = event.OP.src

                context['curr_attendee'] = event.OE.attendee_id

                if isinstance(actToDo, NothingToDo):
                    continue
                elif isinstance(actToDo, Create):
                    context_tmp = context.copy()
                    context_tmp['NewMeeting'] = True
                    if actSrc == 'GG':
                        res = self.update_from_google(cr, uid, False, event.GG.event, "create", context=context_tmp)
                        event.OE.event_id = res
                        meeting = calendar_event.browse(cr, uid, res, context=context)
                        attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)
                        self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)
                    elif actSrc == 'OE':
                        raise "Should be never here, creation for OE is done before update !"
                    #TODO Add to batch
                elif isinstance(actToDo, Update):
                    if actSrc == 'GG':
                        self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', context)
                    elif actSrc == 'OE':
                        self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)
                elif isinstance(actToDo, Exclude):
                    if actSrc == 'OE':
                        self.delete_an_event(cr, uid, current_event[0], context=context)
                    elif actSrc == 'GG':
                        new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]
                        if 'T' in new_google_event_id:
                            new_google_event_id = new_google_event_id.replace('T', '')[:-1]
                        else:
                            new_google_event_id = new_google_event_id + "000000"

                        if event.GG.status:
                            parent_event = {}
                            if not event_to_synchronize[base_event][0][1].OE.event_id:
                                main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)
                                event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]

                            parent_event['id'] = "%s-%s" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)
                            res = self.update_from_google(cr, uid, parent_event, event.GG.event, "copy", context)
                        else:
                            parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id
                            if parent_oe_id:
                                calendar_event.unlink(cr, uid, "%s-%s" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)

                elif isinstance(actToDo, Delete):
                    if actSrc == 'GG':
                        try:
                            self.delete_an_event(cr, uid, current_event[0], context=context)
                        except Exception, e:
                            error = simplejson.loads(e.read())
                            error_nr = error.get('error', {}).get('code')
                            # if already deleted from gmail or never created
                            if error_nr in (404, 410,):
                                pass
                            else:
                                raise e
                    elif actSrc == 'OE':
                        calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)
        return True

    def check_and_sync(self, cr, uid, oe_event, google_event, context):
        if datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") > datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
            self.update_to_google(cr, uid, oe_event, google_event, context)
        elif datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") < datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
            self.update_from_google(cr, uid, oe_event, google_event, 'write', context)

    def get_sequence(self, cr, uid, instance_id, context=None):
        gs_pool = self.pool['google.service']
        params = {
            'fields': 'sequence',
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json'}
        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', instance_id)
        st, content, ask_time = gs_pool._do_request(cr, uid, url, params, headers, type='GET', context=context)
        return content.get('sequence', 0)

    #################################
    ##  MANAGE CONNEXION TO GMAIL  ##
    #################################

    def get_token(self, cr, uid, context=None):
        current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        if not current_user.google_calendar_token_validity or \
                datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):
            self.do_refresh_token(cr, uid, context=context)
            current_user.refresh()
        return current_user.google_calendar_token

    def get_last_sync_date(self, cr, uid, context=None):
        current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        return current_user.google_calendar_last_sync_date and datetime.strptime(current_user.google_calendar_last_sync_date, DEFAULT_SERVER_DATETIME_FORMAT) +
```
timedelta(minutes=0) or False def do_refresh_token(self, cr, uid, context=None): current_user = self.pool['res.users'].browse(cr, uid, uid, context=context) gs_pool = self.pool['google.service'] all_token = gs_pool._refresh_google_token_json(cr, uid, current_user.google_calendar_rtoken, self.STR_SERVICE, context=context) vals = {} vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in')) vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token') self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context) def need_authorize(self, cr, uid, context=None): current_user = self.pool['res.users'].browse(cr, uid, uid, context=context) return current_user.google_calendar_rtoken is False def get_calendar_scope(self, RO=False): readonly = RO and '.readonly' or '' return 'https://www.googleapis.com/auth/calendar%s' % (readonly) def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None): url = self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=self.get_calendar_scope(), context=context) return url def can_authorize_google(self, cr, uid, context=None): return self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager') def set_all_tokens(self, cr, uid, authorization_code, context=None): gs_pool = self.pool['google.service'] all_token = gs_pool._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context) vals = {} vals['google_%s_rtoken' % self.STR_SERVICE] = all_token.get('refresh_token') vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in')) vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token') self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context) def get_minTime(self, cr, uid, context=None): number_of_week = int(self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13)) return datetime.now() - timedelta(weeks=number_of_week) def get_need_synchro_attendee(self, cr, uid, context=None): return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True) def get_disable_since_synchro(self, cr, uid, context=None): return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False) def get_print_log(self, cr, uid, context=None): return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False) class res_users(osv.Model): _inherit = 'res.users' _columns = { 'google_calendar_rtoken': fields.char('Refresh Token'), 'google_calendar_token': fields.char('User token'), 'google_calendar_token_validity': fields.datetime('Token Validity'), 'google_calendar_last_sync_date': fields.datetime('Last synchro date'), 'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID who has been synchronized. 
If it is changed, we remove \ all links between GoogleID and Odoo Google Internal ID') } class calendar_event(osv.Model): _inherit = "calendar.event" def get_fields_need_update_google(self, cr, uid, context=None): return ['name', 'description', 'allday', 'start', 'date_end', 'stop', 'attendee_ids', 'alarm_ids', 'location', 'class', 'active', 'start_date', 'start_datetime', 'stop_date', 'stop_datetime'] def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} sync_fields = set(self.get_fields_need_update_google(cr, uid, context)) if (set(vals.keys()) & sync_fields) and 'oe_update_date' not in vals.keys() and 'NewMeeting' not in context: vals['oe_update_date'] = datetime.now() return super(calendar_event, self).write(cr, uid, ids, vals, context=context) def copy(self, cr, uid, id, default=None, context=None): default = default or {} if default.get('write_type', False): del default['write_type'] elif default.get('recurrent_id', False): default['oe_update_date'] = datetime.now() else: default['oe_update_date'] = False return super(calendar_event, self).copy(cr, uid, id, default, context) def unlink(self, cr, uid, ids, can_be_deleted=False, context=None): return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context) _columns = { 'oe_update_date': fields.datetime('Odoo Update Date'), } class calendar_attendee(osv.Model): _inherit = 'calendar.attendee' _columns = { 'google_internal_event_id': fields.char('Google Calendar Event Id'), 'oe_synchro_date': fields.datetime('Odoo Synchro Date'), } _sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')] def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} for id in ids: ref = vals.get('event_id', self.browse(cr, uid, id, context=context).event_id.id) # If attendees are updated, we need to specify that next synchro need an action # Except if it come from an update_from_google if not context.get('curr_attendee', False) and not context.get('NewMeeting', False): self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context) return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284674"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">dfalt974/SickRage</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lib/html5lib/_trie/_base.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">79</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import absolute_import, division, unicode_literals from collections import Mapping class Trie(Mapping): """Abstract base class for tries""" def keys(self, prefix=None): # 
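The recurrence handling in bind_recurring_events_to_google above hinges on Google Calendar's naming scheme for instances of a recurring event: the instance ID is the base event ID plus a start-timestamp suffix. A minimal standalone sketch of that derivation follows; the helper name and sample values are illustrative, not part of the module:

# Illustrative helper mirroring how bind_recurring_events_to_google builds
# instance IDs above; the sample IDs and dates are made up.
def instance_id(base_google_id, recurrent_id_date, allday):
    if allday:
        # all-day instance: <base>_YYYYMMDD
        return base_google_id + '_' + recurrent_id_date.split(' ')[0].replace('-', '')
    # timed instance: <base>_YYYYMMDDTHHMMSSZ
    return (base_google_id + '_' +
            recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z')

print(instance_id('abc123', '2015-06-01 09:30:00', False))  # abc123_20150601T093000Z
print(instance_id('abc123', '2015-06-01 00:00:00', True))   # abc123_20150601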
==== dfalt974/SickRage | refs/heads/master | lib/html5lib/_trie/_base.py | 79 ====

from __future__ import absolute_import, division, unicode_literals

from collections import Mapping


class Trie(Mapping):
    """Abstract base class for tries"""

    def keys(self, prefix=None):
        # pylint:disable=arguments-differ
        keys = super(Trie, self).keys()

        if prefix is None:
            return set(keys)

        return {x for x in keys if x.startswith(prefix)}

    def has_keys_with_prefix(self, prefix):
        for key in self.keys():
            if key.startswith(prefix):
                return True

        return False

    def longest_prefix(self, prefix):
        if prefix in self:
            return prefix

        for i in range(1, len(prefix) + 1):
            if prefix[:-i] in self:
                return prefix[:-i]

        raise KeyError(prefix)

    def longest_prefix_item(self, prefix):
        lprefix = self.longest_prefix(prefix)
        return (lprefix, self[lprefix])
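A minimal concrete subclass makes the contract above easier to see: since Trie inherits from the Mapping ABC, only __getitem__, __iter__ and __len__ are needed, and the prefix methods come for free. DictTrie and its backing dict are illustrative, not part of html5lib:

# Illustrative only: a dict-backed subclass of the Trie ABC above.
class DictTrie(Trie):
    def __init__(self, data):
        self._data = data

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

trie = DictTrie({"foo": 1, "foobar": 2, "bar": 3})
print(trie.keys("foo"))                 # {'foo', 'foobar'}
print(trie.has_keys_with_prefix("ba"))  # True
print(trie.longest_prefix("foobaz"))    # 'foo'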
==== edisonlz/fruit | refs/heads/master | web_project/base/site-packages/grappelli/__init__.py | 1 ====

VERSION = '2.3.6'

==== ULHPC/modules | refs/heads/devel | easybuild/easybuild-easyblocks/easybuild/easyblocks/m/metavelvet.py | 12 ====

##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2015 Uni.Lu/LCSB, NTUA
# Authors::   Cedric Laczny <[email protected]>, Fotis Georgatos <[email protected]>, Kenneth Hoste
# License::   MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing MetaVelvet, implemented as an easyblock

@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil

from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError


class EB_MetaVelvet(ConfigureMake):
    """
    Support for building MetaVelvet
    """

    def configure_step(self):
        """
        No configure
        """
        pass

    def install_step(self):
        """
        Install by copying files to install dir
        """
        srcdir = self.cfg['start_dir']
        destdir = os.path.join(self.installdir, 'bin')
        srcfile = None
        # Get executable files: for i in $(find . -maxdepth 1 -type f -perm +111 -print | sed -e 's/\.\///g' | awk '{print "\""$0"\""}' | grep -vE "\.sh|\.html"); do echo -ne "$i, "; done && echo
        try:
            os.makedirs(destdir)
            for filename in ["meta-velvetg"]:
                srcfile = os.path.join(srcdir, filename)
                shutil.copy2(srcfile, destdir)
        except OSError, err:
            raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)

    def sanity_check_step(self):
        """Custom sanity check for MetaVelvet."""
        custom_paths = {
            'files': ['bin/meta-velvetg'],
            'dirs': []
        }
        super(EB_MetaVelvet, self).sanity_check_step(custom_paths=custom_paths)
==== igorg1312/googlepythonsskeleton | refs/heads/master | lib/jinja2/loaders.py | 333 ====

# -*- coding: utf-8 -*-
"""
    jinja2.loaders
    ~~~~~~~~~~~~~~

    Jinja loader classes.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems


def split_template_path(template):
    """Split a path into segments and perform a sanity check.  If it detects
    '..' in the path it will raise a `TemplateNotFound` error.
    """
    pieces = []
    for piece in template.split('/'):
        if path.sep in piece \
           or (path.altsep and path.altsep in piece) or \
           piece == path.pardir:
            raise TemplateNotFound(template)
        elif piece and piece != '.':
            pieces.append(piece)
    return pieces


class BaseLoader(object):
    """Baseclass for all loaders.  Subclass this and override `get_source` to
    implement a custom loading mechanism.  The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic example for a loader that looks up templates on the file
    system could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Get the template source, filename and reload helper for a template.
        It's passed the environment and template name and has to return a
        tuple in the form ``(source, filename, uptodate)`` or raise a
        `TemplateNotFound` error if it can't locate the template.

        The source part of the returned tuple must be the source of the
        template as unicode string or a ASCII bytestring.  The filename should
        be the name of the file on the filesystem if it was loaded from there,
        otherwise `None`.  The filename is used by python for the tracebacks
        if no loader extension is used.

        The last item in the tuple is the `uptodate` function.  If auto
        reloading is enabled it's always called to check if the template
        changed.  No arguments are passed so the function must store the
        old state somewhere (for example in a closure).  If it returns `False`
        the template will be reloaded.
        """
        if not self.has_source_access:
            raise RuntimeError('%s cannot provide access to the source' %
                               self.__class__.__name__)
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterates over all templates.  If the loader does not support that
        it should raise a :exc:`TypeError` which is the default behavior.
        """
        raise TypeError('this loader cannot iterate over all templates')

    @internalcode
    def load(self, environment, name, globals=None):
        """Loads a template.  This method looks up the template in the cache
        or loads one by calling :meth:`get_source`.  Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.
        """
        code = None
        if globals is None:
            globals = {}

        # first we try to get the source for this template together
        # with the filename and the uptodate function.
        source, filename, uptodate = self.get_source(environment, name)

        # try to load the code from the bytecode cache if there is a
        # bytecode cache configured.
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # if we don't have code so far (not cached, no longer up to
        # date) etc. we compile the template
        if code is None:
            code = environment.compile(source, name, filename)

        # if the bytecode cache is available and the bucket doesn't
        # have a code so far, we give the bucket the new code and put
        # it back to the bytecode cache.
        if bcc is not None and bucket.code is None:
            bucket.code = code
            bcc.set_bucket(bucket)

        return environment.template_class.from_code(environment, code,
                                                    globals, uptodate)


class FileSystemLoader(BaseLoader):
    """Loads templates from the file system.  This loader can find templates
    in folders on the file system and is the preferred way to load them.

    The loader takes the path to the templates as string, or if multiple
    locations are wanted a list of them which is then looked up in the
    given order::

    >>> loader = FileSystemLoader('/path/to/templates')
    >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])

    Per default the template encoding is ``'utf-8'`` which can be changed
    by setting the `encoding` parameter to something else.

    To follow symbolic links, set the *followlinks* parameter to ``True``::

    >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)

    .. versionchanged:: 2.8+
       The *followlinks* parameter was added.
    """

    def __init__(self, searchpath, encoding='utf-8', followlinks=False):
        if isinstance(searchpath, string_types):
            searchpath = [searchpath]
        self.searchpath = list(searchpath)
        self.encoding = encoding
        self.followlinks = followlinks

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        for searchpath in self.searchpath:
            filename = path.join(searchpath, *pieces)
            f = open_if_exists(filename)
            if f is None:
                continue
            try:
                contents = f.read().decode(self.encoding)
            finally:
                f.close()

            mtime = path.getmtime(filename)

            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False
            return contents, filename, uptodate
        raise TemplateNotFound(template)

    def list_templates(self):
        found = set()
        for searchpath in self.searchpath:
            walk_dir = os.walk(searchpath, followlinks=self.followlinks)
            for dirpath, dirnames, filenames in walk_dir:
                for filename in filenames:
                    template = os.path.join(dirpath, filename) \
                        [len(searchpath):].strip(os.path.sep) \
                        .replace(os.path.sep, '/')
                    if template[:2] == './':
                        template = template[2:]
                    if template not in found:
                        found.add(template)
        return sorted(found)


class PackageLoader(BaseLoader):
    """Load templates from python eggs or packages.  It is constructed with
    the name of the python package and the path to the templates in that
    package::

        loader = PackageLoader('mypackage', 'views')

    If the package path is not given, ``'templates'`` is assumed.

    Per default the template encoding is ``'utf-8'`` which can be changed
    by setting the `encoding` parameter to something else.  Due to the nature
    of eggs it's only possible to reload templates if the package was loaded
    from the file system and not a zip file.
    """

    def __init__(self, package_name, package_path='templates',
                 encoding='utf-8'):
        from pkg_resources import DefaultProvider, ResourceManager, \
                                  get_provider
        provider = get_provider(package_name)
        self.encoding = encoding
        self.manager = ResourceManager()
        self.filesystem_bound = isinstance(provider, DefaultProvider)
        self.provider = provider
        self.package_path = package_path

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        p = '/'.join((self.package_path,) + tuple(pieces))
        if not self.provider.has_resource(p):
            raise TemplateNotFound(template)

        filename = uptodate = None
        if self.filesystem_bound:
            filename = self.provider.get_resource_filename(self.manager, p)
            mtime = path.getmtime(filename)

            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

        source = self.provider.get_resource_string(self.manager, p)
        return source.decode(self.encoding), filename, uptodate

    def list_templates(self):
        path = self.package_path
        if path[:2] == './':
            path = path[2:]
        elif path == '.':
            path = ''
        offset = len(path)
        results = []

        def _walk(path):
            for filename in self.provider.resource_listdir(path):
                fullname = path + '/' + filename
                if self.provider.resource_isdir(fullname):
                    _walk(fullname)
                else:
                    results.append(fullname[offset:].lstrip('/'))
        _walk(path)
        results.sort()
        return results


class DictLoader(BaseLoader):
    """Loads a template from a python dict.  It's passed a dict of unicode
    strings bound to template names.  This loader is useful for unittesting:

    >>> loader = DictLoader({'index.html': 'source here'})

    Because auto reloading is rarely useful this is disabled per default.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        if template in self.mapping:
            source = self.mapping[template]
            return source, None, lambda: source == self.mapping.get(template)
        raise TemplateNotFound(template)

    def list_templates(self):
        return sorted(self.mapping)


class FunctionLoader(BaseLoader):
    """A loader that is passed a function which does the loading.  The
    function receives the name of the template and has to return either
    an unicode string with the template source, a tuple in the form ``(source,
    filename, uptodatefunc)`` or `None` if the template does not exist.

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    The `uptodatefunc` is a function that is called if autoreload is enabled
    and has to return `True` if the template is still up to date.  For more
    details have a look at :meth:`BaseLoader.get_source` which has the same
    return value.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        rv = self.load_func(template)
        if rv is None:
            raise TemplateNotFound(template)
        elif isinstance(rv, string_types):
            return rv, None, None
        return rv


class PrefixLoader(BaseLoader):
    """A loader that is passed a dict of loaders where each loader is bound
    to a prefix.  The prefix is delimited from the template by a slash per
    default, which can be changed by setting the `delimiter` argument to
    something else::

        loader = PrefixLoader({
            'app1':     PackageLoader('mypackage.app1'),
            'app2':     PackageLoader('mypackage.app2')
        })

    By loading ``'app1/index.html'`` the file from the app1 package is loaded,
    by loading ``'app2/index.html'`` the file from the second.
    """

    def __init__(self, mapping, delimiter='/'):
        self.mapping = mapping
        self.delimiter = delimiter

    def get_loader(self, template):
        try:
            prefix, name = template.split(self.delimiter, 1)
            loader = self.mapping[prefix]
        except (ValueError, KeyError):
            raise TemplateNotFound(template)
        return loader, name

    def get_source(self, environment, template):
        loader, name = self.get_loader(template)
        try:
            return loader.get_source(environment, name)
        except TemplateNotFound:
            # re-raise the exception with the correct filename here.
            # (the one that includes the prefix)
            raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        loader, local_name = self.get_loader(name)
        try:
            return loader.load(environment, local_name, globals)
        except TemplateNotFound:
            # re-raise the exception with the correct filename here.
            # (the one that includes the prefix)
            raise TemplateNotFound(name)

    def list_templates(self):
        result = []
        for prefix, loader in iteritems(self.mapping):
            for template in loader.list_templates():
                result.append(prefix + self.delimiter + template)
        return result


class ChoiceLoader(BaseLoader):
    """This loader works like the `PrefixLoader` just that no prefix is
    specified.  If a template could not be found by one loader the next one
    is tried.

    >>> loader = ChoiceLoader([
    ...     FileSystemLoader('/path/to/user/templates'),
    ...     FileSystemLoader('/path/to/system/templates')
    ... ])

    This is useful if you want to allow users to override builtin templates
    from a different location.
    """

    def __init__(self, loaders):
        self.loaders = loaders

    def get_source(self, environment, template):
        for loader in self.loaders:
            try:
                return loader.get_source(environment, template)
            except TemplateNotFound:
                pass
        raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        for loader in self.loaders:
            try:
                return loader.load(environment, name, globals)
            except TemplateNotFound:
                pass
        raise TemplateNotFound(name)

    def list_templates(self):
        found = set()
        for loader in self.loaders:
            found.update(loader.list_templates())
        return sorted(found)


class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references"""


class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])

    Templates can be precompiled with :meth:`Environment.compile_templates`.
    """

    has_source_access = False

    def __init__(self, path):
        package_name = '_jinja2_module_templates_%x' % id(self)

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)
        if isinstance(path, string_types):
            path = [path]
        else:
            path = list(path)
        mod.__path__ = path

        sys.modules[package_name] = weakref.proxy(mod,
            lambda x: sys.modules.pop(package_name, None))

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()

    @staticmethod
    def get_module_filename(name):
        return ModuleLoader.get_template_key(name) + '.py'

    @internalcode
    def load(self, environment, name, globals=None):
        key = self.get_template_key(name)
        module = '%s.%s' % (self.package_name, key)
        mod = getattr(self.module, module, None)
        if mod is None:
            try:
                mod = __import__(module, None, None, ['root'])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals)
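A short usage sketch ties the loader classes together: ChoiceLoader falls through to the next loader on TemplateNotFound, so earlier loaders shadow later ones. The template names and sources below are made up, with DictLoader standing in for real template directories:

# A minimal sketch, assuming jinja2 is installed; template names are hypothetical.
from jinja2 import Environment, ChoiceLoader, DictLoader

env = Environment(loader=ChoiceLoader([
    DictLoader({'index.html': 'user: {{ user }}'}),           # overrides index.html
    DictLoader({'index.html': 'default', 'base.html': 'base'}),
]))

print(env.get_template('index.html').render(user='alice'))  # user: alice
print(env.get_template('base.html').render())               # base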
position available") return altitude = float(args[0]) print("Guided %s %s" % (str(latlon), str(altitude))) self.master.mav.mission_item_int_send (self.settings.target_system, self.settings.target_component, 0, self.module('wp').get_default_frame(), mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 2, 0, 0, 0, 0, 0, int(latlon[0]*1.0e7), int(latlon[1]*1.0e7), altitude) def init(mpstate): '''initialise module''' return ModeModule(mpstate) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284679"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">drxos/python-social-auth</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">social/tests/test_utils.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">73</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import sys import unittest2 as unittest from mock import Mock from social.utils import sanitize_redirect, user_is_authenticated, \ user_is_active, slugify, build_absolute_uri, \ partial_pipeline_data PY3 = sys.version_info[0] == 3 class SanitizeRedirectTest(unittest.TestCase): def test_none_redirect(self): self.assertEqual(sanitize_redirect('myapp.com', None), None) def test_empty_redirect(self): self.assertEqual(sanitize_redirect('myapp.com', ''), None) def test_dict_redirect(self): self.assertEqual(sanitize_redirect('myapp.com', {}), None) def test_invalid_redirect(self): self.assertEqual(sanitize_redirect('myapp.com', {'foo': 'bar'}), None) def test_wrong_path_redirect(self): self.assertEqual( sanitize_redirect('myapp.com', 'http://notmyapp.com/path/'), None ) def test_valid_absolute_redirect(self): self.assertEqual( sanitize_redirect('myapp.com', 'http://myapp.com/path/'), 'http://myapp.com/path/' ) def test_valid_relative_redirect(self): self.assertEqual(sanitize_redirect('myapp.com', '/path/'), '/path/') class UserIsAuthenticatedTest(unittest.TestCase): def test_user_is_none(self): self.assertEqual(user_is_authenticated(None), False) def test_user_is_not_none(self): self.assertEqual(user_is_authenticated(object()), True) def test_user_has_is_authenticated(self): class User(object): is_authenticated = True self.assertEqual(user_is_authenticated(User()), True) def test_user_has_is_authenticated_callable(self): class User(object): def is_authenticated(self): return True self.assertEqual(user_is_authenticated(User()), True) class UserIsActiveTest(unittest.TestCase): def test_user_is_none(self): self.assertEqual(user_is_active(None), False) def test_user_is_not_none(self): self.assertEqual(user_is_active(object()), True) def test_user_has_is_active(self): class User(object): is_active = True self.assertEqual(user_is_active(User()), True) def test_user_has_is_active_callable(self): class User(object): def is_active(self): return True self.assertEqual(user_is_active(User()), True) class SlugifyTest(unittest.TestCase): 
==== drxos/python-social-auth | refs/heads/master | social/tests/test_utils.py | 73 ====

import sys

import unittest2 as unittest

from mock import Mock

from social.utils import sanitize_redirect, user_is_authenticated, \
                         user_is_active, slugify, build_absolute_uri, \
                         partial_pipeline_data


PY3 = sys.version_info[0] == 3


class SanitizeRedirectTest(unittest.TestCase):
    def test_none_redirect(self):
        self.assertEqual(sanitize_redirect('myapp.com', None), None)

    def test_empty_redirect(self):
        self.assertEqual(sanitize_redirect('myapp.com', ''), None)

    def test_dict_redirect(self):
        self.assertEqual(sanitize_redirect('myapp.com', {}), None)

    def test_invalid_redirect(self):
        self.assertEqual(sanitize_redirect('myapp.com', {'foo': 'bar'}), None)

    def test_wrong_path_redirect(self):
        self.assertEqual(
            sanitize_redirect('myapp.com', 'http://notmyapp.com/path/'),
            None
        )

    def test_valid_absolute_redirect(self):
        self.assertEqual(
            sanitize_redirect('myapp.com', 'http://myapp.com/path/'),
            'http://myapp.com/path/'
        )

    def test_valid_relative_redirect(self):
        self.assertEqual(sanitize_redirect('myapp.com', '/path/'), '/path/')


class UserIsAuthenticatedTest(unittest.TestCase):
    def test_user_is_none(self):
        self.assertEqual(user_is_authenticated(None), False)

    def test_user_is_not_none(self):
        self.assertEqual(user_is_authenticated(object()), True)

    def test_user_has_is_authenticated(self):
        class User(object):
            is_authenticated = True
        self.assertEqual(user_is_authenticated(User()), True)

    def test_user_has_is_authenticated_callable(self):
        class User(object):
            def is_authenticated(self):
                return True
        self.assertEqual(user_is_authenticated(User()), True)


class UserIsActiveTest(unittest.TestCase):
    def test_user_is_none(self):
        self.assertEqual(user_is_active(None), False)

    def test_user_is_not_none(self):
        self.assertEqual(user_is_active(object()), True)

    def test_user_has_is_active(self):
        class User(object):
            is_active = True
        self.assertEqual(user_is_active(User()), True)

    def test_user_has_is_active_callable(self):
        class User(object):
            def is_active(self):
                return True
        self.assertEqual(user_is_active(User()), True)


class SlugifyTest(unittest.TestCase):
    def test_slugify_formats(self):
        if PY3:
            self.assertEqual(slugify('FooBar'), 'foobar')
            self.assertEqual(slugify('Foo Bar'), 'foo-bar')
            self.assertEqual(slugify('Foo (Bar)'), 'foo-bar')
        else:
            self.assertEqual(slugify('FooBar'.decode('utf-8')), 'foobar')
            self.assertEqual(slugify('Foo Bar'.decode('utf-8')), 'foo-bar')
            self.assertEqual(slugify('Foo (Bar)'.decode('utf-8')), 'foo-bar')


class BuildAbsoluteURITest(unittest.TestCase):
    def setUp(self):
        self.host = 'http://foobar.com'

    def tearDown(self):
        self.host = None

    def test_path_none(self):
        self.assertEqual(build_absolute_uri(self.host), self.host)

    def test_path_empty(self):
        self.assertEqual(build_absolute_uri(self.host, ''), self.host)

    def test_path_http(self):
        self.assertEqual(build_absolute_uri(self.host, 'http://barfoo.com'),
                         'http://barfoo.com')

    def test_path_https(self):
        self.assertEqual(build_absolute_uri(self.host, 'https://barfoo.com'),
                         'https://barfoo.com')

    def test_host_ends_with_slash_and_path_starts_with_slash(self):
        self.assertEqual(build_absolute_uri(self.host + '/', '/foo/bar'),
                         'http://foobar.com/foo/bar')

    def test_absolute_uri(self):
        self.assertEqual(build_absolute_uri(self.host, '/foo/bar'),
                         'http://foobar.com/foo/bar')


class PartialPipelineData(unittest.TestCase):
    def test_kwargs_included_in_result(self):
        backend = self._backend()
        key, val = ('foo', 'bar')
        _, xkwargs = partial_pipeline_data(backend, None,
                                           *(), **dict([(key, val)]))
        self.assertTrue(key in xkwargs)
        self.assertEqual(xkwargs[key], val)

    def test_update_user(self):
        user = object()
        backend = self._backend(session_kwargs={'user': None})
        _, xkwargs = partial_pipeline_data(backend, user)
        self.assertTrue('user' in xkwargs)
        self.assertEqual(xkwargs['user'], user)

    def _backend(self, session_kwargs=None):
        strategy = Mock()
        strategy.request = None
        strategy.session_get.return_value = object()
        strategy.partial_from_session.return_value = \
            (0, 'mock-backend', [], session_kwargs or {})

        backend = Mock()
        backend.name = 'mock-backend'
        backend.strategy = strategy
        return backend
==== kstrauser/ansible | refs/heads/devel | lib/ansible/plugins/shell/__init__.py | 7690 ====

# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
==== MungoRae/home-assistant | refs/heads/dev | homeassistant/components/remote/itach.py | 3 ====

"""
Support for iTach IR Devices.

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/remote.itach/
"""
import logging

import voluptuous as vol

import homeassistant.helpers.config_validation as cv
import homeassistant.components.remote as remote
from homeassistant.const import (
    DEVICE_DEFAULT_NAME, CONF_NAME, CONF_MAC, CONF_HOST, CONF_PORT,
    CONF_DEVICES)
from homeassistant.components.remote import PLATFORM_SCHEMA

REQUIREMENTS = ['pyitachip2ir==0.0.6']

_LOGGER = logging.getLogger(__name__)

DEFAULT_PORT = 4998
CONNECT_TIMEOUT = 5000

CONF_MODADDR = 'modaddr'
CONF_CONNADDR = 'connaddr'
CONF_COMMANDS = 'commands'
CONF_DATA = 'data'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_MAC): cv.string,
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [{
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_MODADDR): vol.Coerce(int),
        vol.Required(CONF_CONNADDR): vol.Coerce(int),
        vol.Required(CONF_COMMANDS): vol.All(cv.ensure_list, [{
            vol.Required(CONF_NAME): cv.string,
            vol.Required(CONF_DATA): cv.string
        }])
    }])
})


# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the ITach connection and devices."""
    import pyitachip2ir
    itachip2ir = pyitachip2ir.ITachIP2IR(
        config.get(CONF_MAC), config.get(CONF_HOST),
        int(config.get(CONF_PORT)))

    if not itachip2ir.ready(CONNECT_TIMEOUT):
        _LOGGER.error("Unable to find iTach")
        return False

    devices = []
    for data in config.get(CONF_DEVICES):
        name = data.get(CONF_NAME)
        modaddr = int(data.get(CONF_MODADDR, 1))
        connaddr = int(data.get(CONF_CONNADDR, 1))
        cmddatas = ""
        for cmd in data.get(CONF_COMMANDS):
            cmdname = cmd[CONF_NAME].strip()
            if not cmdname:
                cmdname = '""'
            cmddata = cmd[CONF_DATA].strip()
            if not cmddata:
                cmddata = '""'
            cmddatas += "{}\n{}\n".format(cmdname, cmddata)
        itachip2ir.addDevice(name, modaddr, connaddr, cmddatas)
        devices.append(ITachIP2IRRemote(itachip2ir, name))
    add_devices(devices, True)
    return True


class ITachIP2IRRemote(remote.RemoteDevice):
    """Device that sends commands to an ITachIP2IR device."""

    def __init__(self, itachip2ir, name):
        """Initialize device."""
        self.itachip2ir = itachip2ir
        self._power = False
        self._name = name or DEVICE_DEFAULT_NAME

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._power

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._power = True
        self.itachip2ir.send(self._name, "ON", 1)
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._power = False
        self.itachip2ir.send(self._name, "OFF", 1)
        self.schedule_update_ha_state()

    def send_command(self, command, **kwargs):
        """Send a command to one device."""
        for single_command in command:
            self.itachip2ir.send(self._name, single_command, 1)

    def update(self):
        """Update the device."""
        self.itachip2ir.update()
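The nested PLATFORM_SCHEMA above is the dense part, so here is a standalone sketch of the same voluptuous pattern: a list-of-dict validator with defaults and coercion. The keys and sample values are simplified stand-ins, not Home Assistant's real schema:

# Sketch only: nested voluptuous validation, assuming voluptuous is installed.
import voluptuous as vol

SCHEMA = vol.Schema({
    vol.Required('host'): str,
    vol.Optional('port', default=4998): int,
    vol.Required('devices'): [{
        vol.Required('connaddr'): vol.Coerce(int),
        vol.Required('commands'): [{
            vol.Required('name'): str,
            vol.Required('data'): str,
        }],
    }],
})

config = SCHEMA({
    'host': '192.168.1.70',                       # hypothetical address
    'devices': [{'connaddr': '2',                 # string coerced to int
                 'commands': [{'name': 'ON', 'data': 'sendir,...'}]}],
})
print(config['port'], config['devices'][0]['connaddr'])  # 4998 2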
==== Yichuans/ccv | refs/heads/master | languages/hu.py | 162 ====

# coding: utf8
{
'!langcode!': 'hu',
'!langname!': 'Magyar',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s sorok törlődtek',
'%s %%{row} updated': '%s sorok frissítődtek',
'%s selected': '%s kiválasztott',
'%Y-%m-%d': '%Y.%m.%d.',
'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'az adminisztrációs felületért kattints ide',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'az appadmin a biztonságtalan csatorna miatt letiltva',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Elérhető adatbázisok és táblák',
'Buy this book': 'Buy this book',
'cache': 'gyorsítótár',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nem lehet üres',
'change password': 'jelszó megváltoztatása',
'Check to delete': 'Törléshez válaszd ki',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Jelenlegi lekérdezés',
'Current response': 'Jelenlegi válasz',
'Current session': 'Jelenlegi folyamat',
'customize me!': 'változtass meg!',
'data uploaded': 'adat feltöltve',
'Database': 'adatbázis',
'Database %s select': 'adatbázis %s kiválasztás',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Töröl:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'kész!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Szerkeszt',
'Edit current record': 'Aktuális bejegyzés szerkesztése',
'edit profile': 'profil szerkesztése',
'Edit This App': 'Alkalmazást szerkeszt',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportál csv fájlba',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Hello Világ',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'új beillesztése',
'insert new %s': 'új beillesztése %s',
'Internal State': 'Internal State',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Invalid Query': 'Hibás lekérdezés',
'invalid request': 'hibás kérés',
'Key': 'Key',
'Last name': 'Last name',
'Layout': 'Szerkezet',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'login': 'belép',
'logout': 'kilép',
'lost password': 'elveszett jelszó',
'Lost Password': 'Lost Password',
'Main Menu': 'Főmenü',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menü model',
'My Sites': 'My Sites',
'Name': 'Name',
'New Record': 'Új bejegyzés',
'new record inserted': 'új bejegyzés felvéve',
'next 100 rows': 'következő 100 sor',
'No databases in this application': 'Nincs adatbázis ebben az alkalmazásban',
'Online examples': 'online példákért kattints ide',
'or import from csv file': 'vagy betöltés csv fájlból',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'előző 100 sor',
'Python': 'Python',
'Query:': 'Lekérdezés:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'bejegyzés',
'record does not exist': 'bejegyzés nem létezik',
'Record ID': 'Record ID',
'Record id': 'bejegyzés id',
'Register': 'Register',
'register': 'regisztráció',
'Registration key': 'Registration key',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Rows in Table': 'Sorok a táblában',
'Rows selected': 'Kiválasztott sorok',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'állapot',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Biztos törli ezt az objektumot?',
'Table': 'tábla',
'Table name': 'Table name',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'nem lehet a csv fájlt beolvasni',
'Update:': 'Frissít:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...)  for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...)  for NOT to build more complex queries.',
'User ID': 'User ID',
'Videos': 'Videos',
'View': 'Nézet',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Isten hozott a web2py-ban',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
==== sjerdo/letsencrypt | refs/heads/master | letsencrypt/plugins/webroot_test.py | 2 ====

"""Tests for letsencrypt.plugins.webroot."""
import os
import shutil
import tempfile
import unittest

import mock

from acme import challenges
from acme import jose

from letsencrypt import achallenges
from letsencrypt import errors

from letsencrypt.tests import acme_util
from letsencrypt.tests import test_util

KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))


class AuthenticatorTest(unittest.TestCase):
    """Tests for letsencrypt.plugins.webroot.Authenticator."""

    achall = achallenges.KeyAuthorizationAnnotatedChallenge(
        challb=acme_util.HTTP01_P, domain=None, account_key=KEY)

    def setUp(self):
        from letsencrypt.plugins.webroot import Authenticator
        self.path = tempfile.mkdtemp()
        self.validation_path = os.path.join(
            self.path, ".well-known", "acme-challenge",
            "ZXZhR3hmQURzNnBTUmIyTEF2OUlaZjE3RHQzanV4R0orUEN0OTJ3citvQQ")
        self.config = mock.MagicMock(webroot_path=self.path)
        self.auth = Authenticator(self.config, "webroot")
        self.auth.prepare()

    def tearDown(self):
        shutil.rmtree(self.path)

    def test_more_info(self):
        more_info = self.auth.more_info()
        self.assertTrue(isinstance(more_info, str))
        self.assertTrue(self.path in more_info)

    def test_add_parser_arguments(self):
        add = mock.MagicMock()
        self.auth.add_parser_arguments(add)
        self.assertEqual(1, add.call_count)

    def test_prepare_bad_root(self):
        self.config.webroot_path = os.path.join(self.path, "null")
        self.assertRaises(errors.PluginError, self.auth.prepare)

    def test_prepare_missing_root(self):
        self.config.webroot_path = None
        self.assertRaises(errors.PluginError, self.auth.prepare)

    def test_prepare_full_root_exists(self):
        # prepare() has already been called once in setUp()
        self.auth.prepare()  # shouldn't raise any exceptions

    def test_prepare_reraises_other_errors(self):
        self.auth.full_path = os.path.join(self.path, "null")
        os.chmod(self.path, 0o000)
        self.assertRaises(errors.PluginError, self.auth.prepare)
        os.chmod(self.path, 0o700)

    def test_perform_cleanup(self):
        responses = self.auth.perform([self.achall])

        self.assertEqual(1, len(responses))
        self.assertTrue(os.path.exists(self.validation_path))
        with open(self.validation_path) as validation_f:
            validation = validation_f.read()
        self.assertTrue(
            challenges.KeyAuthorizationChallengeResponse(
                key_authorization=validation).verify(
                    self.achall.chall, KEY.public_key()))

        self.auth.cleanup([self.achall])
        self.assertFalse(os.path.exists(self.validation_path))


if __name__ == "__main__":
    unittest.main()  # pragma: no cover
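The mechanism under test is simple enough to sketch standalone: for an http-01 challenge, the webroot plugin writes the key authorization to a well-known path under the webroot, where the CA can fetch it over plain HTTP. The webroot path, token, and thumbprint below are made-up placeholders, and the real plugin additionally manages permissions and cleanup:

# Sketch only: the core of the webroot http-01 approach, with hypothetical values.
import os

webroot = "/var/www/html"                                   # hypothetical webroot
token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ"                  # hypothetical token
key_authorization = token + "." + "<account-key-thumbprint>"  # token '.' thumbprint

challenge_dir = os.path.join(webroot, ".well-known", "acme-challenge")
if not os.path.isdir(challenge_dir):
    os.makedirs(challenge_dir)                              # plugin creates this hierarchy
with open(os.path.join(challenge_dir, token), "w") as f:
    f.write(key_authorization)  # served at http://<domain>/.well-known/acme-challenge/<token>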
ddepaoli3/magnum · refs/heads/master · magnum/__init__.py · 19

```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import threading

import pbr.version

__version__ = pbr.version.VersionInfo('magnum').version_string()

# Make a project global TLS trace storage repository
TLS = threading.local()
```
chvalean/lis-test · refs/heads/master · WS2012R2/lisa/tools/middleware_bench/utils/setup.py · 7

```python
"""
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation

All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import sys
import time
import logging

from utils import constants
from utils.cmdshell import SSHClient
from report.db_utils import upload_results
from paramiko.ssh_exception import NoValidConnectionsError
from providers.amazon_service import AWSConnector
from providers.azure_service import AzureConnector
from providers.gcp_service import GCPConnector

logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)


class SetupTestEnv:
    """
    Setup test environment.
    """
    def __init__(self, provider=None, vm_count=None, test_type=None, disk_size=None,
                 raid=None, keyid=None, secret=None, token=None, subscriptionid=None,
                 tenantid=None, projectid=None, imageid=None, instancetype=None,
                 user=None, localpath=None, region=None, zone=None, sriov=False,
                 kernel=None):
        """
        Init AWS connector to create and configure AWS ec2 instances.
        :param provider: Service provider to be used e.g. azure, aws, gce.
        :param vm_count: Number of VMs to prepare
        :param test_type: vm_disk > 1 VM with disk (Orion and Sysbench)
                          no_disk > No disk attached (Redis, Memcached, Apache_bench)
                          db_disk > Second VM with disk (MariaDB, MongoDB)
                          cluster_disk > All VMs have disks (Terasort)
        :param disk_size:
        :param raid: Bool or Int (the number of disks), to specify if a RAID
                     will be configured
        :param keyid: user key for executing remote connection
        :param secret: user secret for executing remote connection
        :param token: GCE refresh token obtained with gcloud sdk
        :param subscriptionid: Azure specific subscription id
        :param tenantid: Azure specific tenant id
        :param projectid: GCE specific project id
        :param imageid: AWS OS AMI image id or Azure image references offer and
                        sku: e.g. 'UbuntuServer#16.04.0-LTS' or GCE image family,
                        e.g. 'ubuntu-1604-lts'
        :param instancetype: AWS instance resource type e.g 'd2.4xlarge' or
                             Azure hardware profile vm size e.g. 'Standard_DS14_v2'
                             or GCE instance size e.g. 'n1-highmem-16'
        :param user: remote ssh user for the instance
        :param localpath: localpath where the logs should be downloaded, and the
                          default path for other necessary tools
        :param region: region to connect to
        :param zone: zone where other resources should be available
        :param sriov: bool for configuring SR-IOV or not
        :param kernel: kernel deb name to install provided in localpath
        :rtype Tuple
        :return: connector <Connector>, vm_ips <VM private IPs dict>,
                 device <attached disk devices>, ssh_client <ssh clients dict>
        """
        self.provider = provider
        self.vm_count = vm_count
        self.test_type = test_type
        self.disk_size = disk_size
        self.raid = raid
        self.keyid = keyid
        self.secret = secret
        self.token = token
        self.subscriptionid = subscriptionid
        self.tenantid = tenantid
        self.projectid = projectid
        self.imageid = imageid
        self.instancetype = instancetype
        self.user = user
        self.localpath = localpath
        self.region = region
        self.zone = zone
        self.sriov = sriov
        self.kernel = kernel
        # create and generate setup details
        try:
            self.connector = self.create_connector()
            self.vms = self.create_instances()
            self.device = self.get_disk_devices()
            self.ssh_client, self.vm_ips = self.get_instance_details()
            self.perf_tuning()
            self.reconnect_sshclient()
        except Exception as e:
            log.exception(e)
            if self.connector:
                self.connector.teardown()
            raise

    def create_connector(self):
        """
        Create connector by provider.
        :return: connector
        """
        connector = None
        if self.provider == constants.AWS:
            connector = AWSConnector(keyid=self.keyid, secret=self.secret,
                                     imageid=self.imageid,
                                     instancetype=self.instancetype, user=self.user,
                                     localpath=self.localpath, region=self.region,
                                     zone=self.zone)
        elif self.provider == constants.AZURE:
            connector = AzureConnector(clientid=self.keyid, secret=self.secret,
                                       subscriptionid=self.subscriptionid,
                                       tenantid=self.tenantid, imageid=self.imageid,
                                       instancetype=self.instancetype, user=self.user,
                                       localpath=self.localpath,
                                       location=self.region, sriov=self.sriov)
        elif self.provider == constants.GCE:
            connector = GCPConnector(clientid=self.keyid, secret=self.secret,
                                     token=self.token, projectid=self.projectid,
                                     imageid=self.imageid,
                                     instancetype=self.instancetype, user=self.user,
                                     localpath=self.localpath, zone=self.zone)
        if connector:
            connector.connect()
            return connector
        else:
            raise Exception('Unsupported provider or connector failed.')

    def create_instances(self):
        """
        Create instances.
        :return: VM instances
        """
        open(self.connector.host_key_file, 'w').close()
        vms = {}
        for i in xrange(1, self.vm_count + 1):
            vms[i] = self.connector.create_vm()
        return vms

    def reconnect_sshclient(self):
        if self.provider == constants.AWS:
            log.info('The provider is AWS, reconnect sshclient')
            for i in xrange(1, self.vm_count + 1):
                self.ssh_client[i].connect()

    def get_instance_details(self):
        """
        Create ssh client and get vm IPs
        :return: ssh_client, vm_ips
        """
        ssh_client = {}
        vm_ips = {}
        for i in xrange(1, self.vm_count + 1):
            if self.provider == constants.AWS:
                ssh_client[i] = self.connector.wait_for_ping(self.vms[i])
                # SRIOV is enabled by default on AWS for the tested platforms
                # if sriov == constants.ENABLED:
                #     ssh_client[i] = connector.enable_sr_iov(vms[i], ssh_client[i])
                self.vms[i].update()
                vm_ips[i] = self.vms[i].private_ip_address
            elif self.provider == constants.AZURE:
                ssh_client[i] = SSHClient(server=self.vms[i].name + self.connector.dns_suffix,
                                          host_key_file=self.connector.host_key_file,
                                          user=self.connector.user,
                                          ssh_key_file=os.path.join(
                                              self.connector.localpath,
                                              self.connector.key_name + '.pem'))
                ip = ssh_client[i].run(
                    'ifconfig eth0 | grep "inet\ addr" | cut -d: -f2 | cut -d" " -f1')
                vm_ips[i] = ip[1].strip()
            elif self.provider == constants.GCE:
                ssh_client[i] = self.connector.wait_for_ping(self.vms[i])
                vm_ips[i] = self.vms[i]['networkInterfaces'][0]['networkIP']
        return ssh_client, vm_ips

    def attach_raid_disks(self, vm_tag, disk_args):
        device = []
        for i in xrange(self.raid):
            if self.provider == constants.AWS:
                disk_args['device'] = '/dev/sd{}'.format(chr(120 - i))
                device.append(disk_args['device'].replace('sd', 'xvd'))
            elif self.provider == constants.AZURE:
                disk_args['device'] = i
                device.append('/dev/sd{}'.format(chr(99 + i)))
            elif self.provider == constants.GCE:
                device.append('/dev/sd{}'.format(chr(98 + i)))
            self.connector.attach_disk(self.vms[vm_tag], disk_size=self.disk_size,
                                       **disk_args)
        return device

    def get_disk_devices(self):
        if not self.test_type:
            return None
        device = None
        disk_args = {}
        if self.provider == constants.AWS:
            device = constants.DEVICE_AWS.replace('sd', 'xvd')
            disk_args['iops'] = 5000
            disk_args['volume_type'] = self.connector.volume_type['ssd_io1']
            disk_args['device'] = constants.DEVICE_AWS
        elif self.provider == constants.AZURE:
            device = constants.DEVICE_AZURE
        elif self.provider == constants.GCE:
            # Note: using disk device order prediction, GCE API is not consistent
            # in the disk naming
            # device = constants.DEVICE_GCE + disk_name
            device = constants.TEMP_DEVICE_GCE
        if self.test_type == constants.CLUSTER_DISK:
            self.connector.attach_disk(self.vms[1], disk_size=self.disk_size + 200,
                                       **disk_args)
            for i in xrange(2, self.vm_count + 1):
                self.connector.attach_disk(self.vms[i], disk_size=self.disk_size,
                                           **disk_args)
                time.sleep(3)
            return device
        vm_tag = None
        if self.test_type == constants.VM_DISK:
            vm_tag = 1
        elif self.test_type == constants.DB_DISK:
            vm_tag = 2
        if self.raid and type(self.raid) is int:
            return self.attach_raid_disks(vm_tag, disk_args)
        else:
            self.connector.attach_disk(self.vms[vm_tag], disk_size=self.disk_size,
                                       **disk_args)
        return device

    def perf_tuning(self):
        current_path = os.path.dirname(sys.modules['__main__'].__file__)
        for i in range(1, self.vm_count + 1):
            log.info('Running perf tuning on {}'.format(self.vm_ips[i]))
            self.ssh_client[i].connect()
            self.ssh_client[i].put_file(os.path.join(current_path, 'tests',
                                                     'perf_tuning.sh'),
                                        '/tmp/perf_tuning.sh')
            self.ssh_client[i].run('chmod +x /tmp/perf_tuning.sh')
            self.ssh_client[i].run("sed -i 's/\r//' /tmp/perf_tuning.sh")
            params = [self.provider]
            if '.deb' in self.kernel:
                log.info('Uploading kernel {} on {}'.format(self.kernel,
                                                            self.vm_ips[i]))
                self.ssh_client[i].put_file(os.path.join(self.localpath, self.kernel),
                                            '/tmp/{}'.format(self.kernel))
                params.append('/tmp/{}'.format(self.kernel))
            self.ssh_client[i].run('/tmp/perf_tuning.sh {}'.format(' '.join(params)))
            if self.provider in [constants.AWS, constants.GCE]:
                self.ssh_client[i] = self.connector.restart_vm(self.vms[i])
            elif self.provider == constants.AZURE:
                self.vms[i] = self.connector.restart_vm(self.vms[i].name)
                # TODO add custom kernel support for all providers - only azure support
                self.ssh_client[i] = SSHClient(server=self.vms[i].name + self.connector.dns_suffix,
                                               host_key_file=self.connector.host_key_file,
                                               user=self.connector.user,
                                               ssh_key_file=os.path.join(
                                                   self.connector.localpath,
                                                   self.connector.key_name + '.pem'))
                ip = self.ssh_client[i].run(
                    'ifconfig eth0 | grep "inet\ addr" | cut -d: -f2 | cut -d" " -f1')
                self.vm_ips[i] = ip[1].strip()

    def run_test(self, ssh_vm_conf=0, testname=None, test_cmd=None,
                 results_path=None, raid=False, ssh_raid=1,
                 timeout=constants.TIMEOUT):
        try:
            if all(client is not None for client in self.ssh_client.values()):
                current_path = os.path.dirname(sys.modules['__main__'].__file__)
                # enable key auth between instances
                for i in xrange(1, ssh_vm_conf + 1):
                    self.ssh_client[i].put_file(
                        os.path.join(self.localpath,
                                     self.connector.key_name + '.pem'),
                        '/home/{}/.ssh/id_rsa'.format(self.user))
                    self.ssh_client[i].run(
                        'chmod 0600 /home/{0}/.ssh/id_rsa'.format(self.user))

                if raid:
                    self.ssh_client[ssh_raid].put_file(os.path.join(
                        current_path, 'tests', 'raid.sh'), '/tmp/raid.sh')
                    self.ssh_client[ssh_raid].run('chmod +x /tmp/raid.sh')
                    self.ssh_client[ssh_raid].run("sed -i 's/\r//' /tmp/raid.sh")
                    self.ssh_client[ssh_raid].run('/tmp/raid.sh 0 {} {}'.format(
                        raid, ' '.join(self.device)))

                bash_testname = 'run_{}.sh'.format(testname)
                self.ssh_client[1].put_file(os.path.join(current_path, 'tests',
                                                         bash_testname),
                                            '/tmp/{}'.format(bash_testname))
                self.ssh_client[1].run('chmod +x /tmp/{}'.format(bash_testname))
                self.ssh_client[1].run("sed -i 's/\r//' /tmp/{}".format(bash_testname))
                log.info('Starting background command {}'.format(test_cmd))
                channel = self.ssh_client[1].run_pty(test_cmd)
                _, pid, _ = self.ssh_client[1].run(
                    "ps aux | grep -v grep | grep {} | awk '{{print $2}}'".format(
                        bash_testname))
                self._wait_for_pid(self.ssh_client[1], bash_testname, pid,
                                   timeout=timeout)
                channel.close()
                self.ssh_client[1].get_file('/tmp/{}.zip'.format(testname),
                                            results_path)
        except Exception as e:
            log.exception(e)
            raise
        finally:
            if self.connector:
                self.connector.teardown()

    @staticmethod
    def _wait_for_pid(ssh_client, bash_testname, pid, timeout=constants.TIMEOUT):
        t = 0
        while t < timeout:
            try:
                _, new_pid, _ = ssh_client.run(
                    "ps aux | grep -v grep | grep {} | awk '{{print $2}}'".format(
                        bash_testname))
                if new_pid != pid:
                    return
            except NoValidConnectionsError:
                log.debug('NoValidConnectionsError, will retry in 60 seconds')
                time.sleep(60)
                t += 60
            time.sleep(60)
            t += 60
        else:
            raise Exception('Timeout of {}s waiting for process to end.'.format(
                timeout))

    def run_test_nohup(self, ssh_vm_conf=0, test_cmd=None,
                       timeout=constants.TIMEOUT, track=None):
        try:
            if all(client is not None for client in self.ssh_client.values()):
                current_path = os.path.dirname(sys.modules['__main__'].__file__)
                # enable key auth between instances
                for i in xrange(1, ssh_vm_conf + 1):
                    self.ssh_client[i].put_file(
                        os.path.join(self.localpath,
                                     self.connector.key_name + '.pem'),
                        '/home/{}/.ssh/id_rsa'.format(self.user))
                    self.ssh_client[i].run(
                        'chmod 0600 /home/{0}/.ssh/id_rsa'.format(self.user))
                log.info('Starting run nohup command {}'.format(test_cmd))
                self.ssh_client[1].run(test_cmd)
                self._wait_for_command(self.ssh_client[1], track, timeout=timeout)
        except Exception as e:
            log.exception(e)
            raise
        finally:
            log.info('Finish to run nohup command {}'.format(test_cmd))

    @staticmethod
    def _wait_for_command(ssh_client, track, timeout=constants.TIMEOUT):
        t = 0
        while t < timeout:
            try:
                _, p_count, _ = ssh_client.run(
                    "ps aux | grep -v grep | grep {} | awk '{{print $2}}' | wc -l".format(
                        track))
                if int(p_count) == 0:
                    return
            except NoValidConnectionsError:
                log.debug('NoValidConnectionsError, will retry in 60 seconds')
                time.sleep(60)
                t += 60
            time.sleep(60)
            t += 60
        else:
            raise Exception('Timeout of {}s waiting for process to end.'.format(
                timeout))
```
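Both `_wait_for_pid` and `_wait_for_command` are instances of the same poll-until-timeout idiom: probe a remote condition at a fixed interval, treat transient connection errors as "not done yet", and give up after a deadline. A self-contained sketch of that idiom, where the `check` callable stands in for the remote `ps` probe:

```python
import time

def wait_until(check, timeout=3600, interval=60, transient=(IOError,)):
    """Poll `check` every `interval` seconds until it returns True;
    transient errors count as "not done yet". Raise after `timeout`."""
    elapsed = 0
    while elapsed < timeout:
        try:
            if check():
                return
        except transient:
            pass  # e.g. the SSH connection dropped while the VM reboots
        time.sleep(interval)
        elapsed += interval
    raise RuntimeError('condition not met within {}s'.format(timeout))

# usage, mirroring _wait_for_command (remote_process_count is hypothetical):
#   wait_until(lambda: remote_process_count('my_bench') == 0, timeout=1800)
```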
jalavik/inspire-next · refs/heads/master · inspire/testsuite/test_export.py · 2

```python
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

import pkg_resources
import os

from dojson.contrib.marc21.utils import create_record

from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite

from inspire.dojson.hep import hep
from invenio.base.wrappers import lazy_import

Export = lazy_import('inspire.utils.export.Export')


class ExportTests(InvenioTestCase):

    def setUp(self):
        self.marcxml = pkg_resources.resource_string(
            'inspire.testsuite',
            os.path.join('fixtures', 'test_hep_formats.xml'))
        record = create_record(self.marcxml)
        self.hep_record = hep.do(record)
        self.export = Export(self.hep_record)
        self.sample_export_good = {
            'citation_key': 'Aad:2015wqa',
            'doi': '10.1140/epjc/s10052-015-3661-9, 10.1140/epjc/s10052-015-3518-2',
            'arxiv_field': {u'categories': [u'hep-ex'], u'value': u'arXiv:1503.03290'},
            'arxiv': 'arXiv:1503.03290 [hep-ex]',
            'reportNumber': 'CERN-PH-EP-2015-038',
            'SLACcitation': '%%CITATION = ARXIV:1503.03290;%%',
        }

    def test_citation_key(self):
        """Test if citation key is created correctly"""
        self.assertEqual(self.sample_export_good['citation_key'],
                         self.export._get_citation_key())

    def test_doi(self):
        """Test if doi is created correctly"""
        self.assertEqual(self.sample_export_good['doi'],
                         self.export._get_doi())

    def test_arxiv_field(self):
        """Test if arxiv_field is created correctly"""
        self.assertEqual(self.sample_export_good['arxiv_field'],
                         self.export.arxiv_field)

    def test_arxiv(self):
        """Test if arxiv is created correctly"""
        self.assertEqual(self.sample_export_good['arxiv'],
                         self.export._get_arxiv())

    def test_report_number(self):
        """Test if report number is created correctly"""
        self.assertEqual(self.sample_export_good['reportNumber'],
                         self.export._get_report_number())

    def test_slac_citations(self):
        """Test if slac citation is created correctly"""
        self.assertEqual(self.sample_export_good['SLACcitation'],
                         self.export._get_slac_citation())


TEST_SUITE = make_test_suite(ExportTests)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
```
p0psicles/SickRage · refs/heads/master · lib/feedparser/util.py · 36

```python
# Copyright 2010-2015 Kurt McKee <[email protected]>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import, unicode_literals

import warnings


class FeedParserDict(dict):
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'description_detail': ['summary_detail', 'subtitle_detail'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        '''
        :return: A :class:`FeedParserDict`.
        '''
        if key == 'category':
            try:
                return dict.__getitem__(self, 'tags')[0]['term']
            except IndexError:
                raise KeyError("object doesn't have key 'category'")
        elif key == 'enclosures':
            norel = lambda link: FeedParserDict(
                [(name, value) for (name, value) in link.items() if name != 'rel'])
            return [norel(link) for link in dict.__getitem__(self, 'links')
                    if link['rel'] == 'enclosure']
        elif key == 'license':
            for link in dict.__getitem__(self, 'links'):
                if link['rel'] == 'license' and 'href' in link:
                    return link['href']
        elif key == 'updated':
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            if not dict.__contains__(self, 'updated') and \
                    dict.__contains__(self, 'published'):
                warnings.warn("To avoid breaking existing software while "
                              "fixing issue 310, a temporary mapping has been created "
                              "from `updated` to `published` if `updated` doesn't "
                              "exist. This fallback will be removed in a future version "
                              "of feedparser.", DeprecationWarning)
                return dict.__getitem__(self, 'published')
            return dict.__getitem__(self, 'updated')
        elif key == 'updated_parsed':
            if not dict.__contains__(self, 'updated_parsed') and \
                    dict.__contains__(self, 'published_parsed'):
                warnings.warn("To avoid breaking existing software while "
                              "fixing issue 310, a temporary mapping has been created "
                              "from `updated_parsed` to `published_parsed` if "
                              "`updated_parsed` doesn't exist. This fallback will be "
                              "removed in a future version of feedparser.",
                              DeprecationWarning)
                return dict.__getitem__(self, 'published_parsed')
            return dict.__getitem__(self, 'updated_parsed')
        else:
            realkey = self.keymap.get(key, key)
            if isinstance(realkey, list):
                for k in realkey:
                    if dict.__contains__(self, k):
                        return dict.__getitem__(self, k)
            elif dict.__contains__(self, realkey):
                return dict.__getitem__(self, realkey)
        return dict.__getitem__(self, key)

    def __contains__(self, key):
        if key in ('updated', 'updated_parsed'):
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            return dict.__contains__(self, key)
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        else:
            return True

    has_key = __contains__

    def get(self, key, default=None):
        '''
        :return: A :class:`FeedParserDict`.
        '''
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __setitem__(self, key, value):
        key = self.keymap.get(key, key)
        if isinstance(key, list):
            key = key[0]
        return dict.__setitem__(self, key, value)

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]

    def __getattr__(self, key):
        # __getattribute__() is called first; this will be called
        # only if an attribute was not already found
        try:
            return self.__getitem__(key)
        except KeyError:
            raise AttributeError("object has no attribute '%s'" % key)

    def __hash__(self):
        return id(self)
```
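The point of `keymap` is backward compatibility: legacy feedparser key names transparently alias the current ones, on both read and write, and `__getattr__` additionally lets every key be read as an attribute. A short usage sketch, assuming the `FeedParserDict` class above is in scope:

```python
d = FeedParserDict()

# __setitem__ normalizes legacy keys: 'url' is stored under 'href'
d['url'] = 'http://example.org/feed'
print(dict.__getitem__(d, 'href'))   # 'http://example.org/feed'

# __getitem__ resolves the same alias on the way back out...
print(d['url'])                      # 'http://example.org/feed'

# ...and __getattr__ falls back to __getitem__, so keys read as attributes
print(d.href)                        # 'http://example.org/feed'
```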
VielSoft/odoo · refs/heads/8.0 · addons/l10n_ar/__openerp__.py · 260

```python
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Argentina Localization Chart Account',
    'version': '1.0',
    'description': """
Argentinian accounting chart and tax localization.
==================================================

Plan contable argentino e impuestos de acuerdo a disposiciones vigentes
""",
    'author': ['Cubic ERP'],
    'website': 'http://cubicERP.com',
    'category': 'Localization/Account Charts',
    'depends': ['account_chart'],
    'data': [
        'account_tax_code.xml',
        'l10n_ar_chart.xml',
        'account_tax.xml',
        'l10n_ar_wizard.xml',
    ],
    'demo': [],
    'active': False,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
```
agiliq/django · refs/heads/master · django/conf/locale/fr/formats.py · 116

```python
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',   # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y',   # Swiss (fr_CH), '25.10.2006', '25.10.06'
    # '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',      # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',   # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',         # '25/10/2006 14:30'
    '%d/%m/%Y',               # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',      # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',   # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',         # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y',               # Swiss (fr_CH), '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
```
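The `*_INPUT_FORMATS` tuples are plain `strptime` patterns, so their behavior is easy to check in isolation. A quick sketch of how input parsing typically walks such a list, first matching format wins:

```python
from datetime import datetime

DATE_INPUT_FORMATS = ('%d/%m/%Y', '%d/%m/%y', '%d.%m.%Y', '%d.%m.%y')

def parse_date(value, formats=DATE_INPUT_FORMATS):
    """Try each accepted format in order; raise if none matches."""
    for fmt in formats:
        try:
            return datetime.strptime(value, fmt).date()
        except ValueError:
            continue
    raise ValueError('no format matched %r' % value)

print(parse_date('25/10/2006'))  # 2006-10-25
print(parse_date('25.10.06'))    # 2006-10-25 (Swiss fr_CH style)
```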
sonium0/pymatgen · refs/heads/master · pymatgen/alchemy/materials.py · 1

```python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from __future__ import unicode_literals

"""
This module provides various representations of transformed structures. A
TransformedStructure is a structure that has been modified by undergoing a
series of transformations.
"""

__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 2, 2012"

import os
import re
import json
import datetime
from copy import deepcopy
from warnings import warn

from monty.json import MontyDecoder

from pymatgen.core.structure import Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.matproj.snl import StructureNL

dec = MontyDecoder()


class TransformedStructure(PMGSONable):
    """
    Container object for new structures that include history of
    transformations.

    Each transformed structure is made up of a sequence of structures with
    associated transformation history.
    """

    def __init__(self, structure, transformations=None, history=None,
                 other_parameters=None):
        """
        Initializes a transformed structure from a structure.

        Args:
            structure (Structure): Input structure
            transformations ([Transformations]): List of transformations to
                apply.
            history (list): Previous history.
            other_parameters (dict): Additional parameters to be added.
        """
        self.final_structure = structure
        self.history = history or []
        self.other_parameters = other_parameters or {}
        self._undone = []
        transformations = transformations or []
        for t in transformations:
            self.append_transformation(t)

    def undo_last_change(self):
        """
        Undo the last change in the TransformedStructure.

        Raises:
            IndexError: If already at the oldest change.
        """
        if len(self.history) == 0:
            raise IndexError("Can't undo. Already at oldest change.")
        if 'input_structure' not in self.history[-1]:
            raise IndexError("Can't undo. Latest history has no "
                             "input_structure")
        h = self.history.pop()
        self._undone.append((h, self.final_structure))
        s = h["input_structure"]
        if isinstance(s, dict):
            s = Structure.from_dict(s)
        self.final_structure = s

    def redo_next_change(self):
        """
        Redo the last undone change in the TransformedStructure.

        Raises:
            IndexError: If already at the latest change.
        """
        if len(self._undone) == 0:
            raise IndexError("Can't redo. Already at latest change.")
        h, s = self._undone.pop()
        self.history.append(h)
        self.final_structure = s

    def __getattr__(self, name):
        s = object.__getattribute__(self, 'final_structure')
        return getattr(s, name)

    def __len__(self):
        return len(self.history)

    def append_transformation(self, transformation, return_alternatives=False,
                              clear_redo=True):
        """
        Appends a transformation to the TransformedStructure.

        Args:
            transformation: Transformation to append
            return_alternatives: Whether to return alternative
                TransformedStructures for one-to-many transformations.
                return_alternatives can be a number, which stipulates the
                total number of structures to return.
            clear_redo: Boolean indicating whether to clear the redo list.
                By default, this is True, meaning any appends clears the
                history of undoing. However, when using append_transformation
                to do a redo, the redo list should not be cleared to allow
                multiple redos.
        """
        if clear_redo:
            self._undone = []

        if return_alternatives and transformation.is_one_to_many:
            ranked_list = transformation.apply_transformation(
                self.final_structure, return_ranked_list=return_alternatives)

            input_structure = self.final_structure.as_dict()
            alts = []
            for x in ranked_list[1:]:
                s = x.pop("structure")
                actual_transformation = x.pop("transformation", transformation)
                hdict = actual_transformation.as_dict()
                hdict["input_structure"] = input_structure
                hdict["output_parameters"] = x
                self.final_structure = s
                d = self.as_dict()
                d['history'].append(hdict)
                d['final_structure'] = s.as_dict()
                alts.append(TransformedStructure.from_dict(d))

            x = ranked_list[0]
            s = x.pop("structure")
            actual_transformation = x.pop("transformation", transformation)
            hdict = actual_transformation.as_dict()
            hdict["input_structure"] = self.final_structure.as_dict()
            hdict["output_parameters"] = x
            self.history.append(hdict)
            self.final_structure = s
            return alts
        else:
            s = transformation.apply_transformation(self.final_structure)
            hdict = transformation.as_dict()
            hdict["input_structure"] = self.final_structure.as_dict()
            hdict["output_parameters"] = {}
            self.history.append(hdict)
            self.final_structure = s

    def append_filter(self, structure_filter):
        """
        Adds a filter.

        Args:
            structure_filter (StructureFilter): A filter implementing the
                AbstractStructureFilter API. Tells transmuter what structures
                to retain.
        """
        hdict = structure_filter.as_dict()
        hdict["input_structure"] = self.final_structure.as_dict()
        self.history.append(hdict)

    def extend_transformations(self, transformations,
                               return_alternatives=False):
        """
        Extends a sequence of transformations to the TransformedStructure.

        Args:
            transformations: Sequence of Transformations
            return_alternatives: Whether to return alternative
                TransformedStructures for one-to-many transformations.
                return_alternatives can be a number, which stipulates the
                total number of structures to return.
        """
        for t in transformations:
            self.append_transformation(
                t, return_alternatives=return_alternatives)

    def get_vasp_input(self, vasp_input_set, generate_potcar=True):
        """
        Returns VASP input as a dict of vasp objects.

        Args:
            vasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set
                to create vasp input files from structures
            generate_potcar (bool): Set to False to generate a POTCAR.spec
                file instead of a POTCAR, which contains the POTCAR labels
                but not the actual POTCAR. Defaults to True.
        """
        d = vasp_input_set.get_all_vasp_input(self.final_structure,
                                              generate_potcar)
        d["transformations.json"] = json.dumps(self.as_dict())
        return d

    def write_vasp_input(self, vasp_input_set, output_dir,
                         create_directory=True):
        """
        Writes VASP input to an output_dir.

        Args:
            vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet like object
                that creates vasp input files from structures
            output_dir: Directory to output files
            create_directory: Create the directory if not present. Defaults
                to True.
        """
        vasp_input_set.write_input(self.final_structure, output_dir,
                                   make_dir_if_not_present=create_directory)
        with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
            json.dump(self.as_dict(), fp)

    def __str__(self):
        output = ["Current structure", "------------",
                  str(self.final_structure),
                  "\nHistory",
                  "------------"]
        for h in self.history:
            h.pop('input_structure', None)
            output.append(str(h))
        output.append("\nOther parameters")
        output.append("------------")
        output.append(str(self.other_parameters))
        return "\n".join(output)

    def set_parameter(self, key, value):
        self.other_parameters[key] = value

    @property
    def was_modified(self):
        """
        Boolean describing whether the last transformation on the structure
        made any alterations to it. One example of when this would return
        False is performing a substitution transformation when the specie to
        replace isn't in the structure.
        """
        return not self.final_structure == self.structures[-2]

    @property
    def structures(self):
        """
        Copy of all structures in the TransformedStructure. A structure is
        stored after every single transformation.
        """
        hstructs = [Structure.from_dict(s['input_structure'])
                    for s in self.history if 'input_structure' in s]
        return hstructs + [self.final_structure]

    @staticmethod
    def from_cif_string(cif_string, transformations=None, primitive=True,
                        occupancy_tolerance=1.):
        """
        Generates TransformedStructure from a cif string.

        Args:
            cif_string (str): Input cif string. Should contain only one
                structure. For cifs containing multiple structures, please
                use CifTransmuter.
            transformations ([Transformations]): Sequence of transformations
                to be applied to the input structure.
            primitive (bool): Option to set if the primitive cell should be
                extracted. Defaults to True. However, there are certain
                instances where you might want to use a non-primitive cell,
                e.g., if you are trying to generate all possible orderings of
                partial removals or order a disordered structure.
            occupancy_tolerance (float): If total occupancy of a site is
                between 1 and occupancy_tolerance, the occupancies will be
                scaled down to 1.

        Returns:
            TransformedStructure
        """
        parser = CifParser.from_string(cif_string, occupancy_tolerance)
        raw_string = re.sub("'", "\"", cif_string)
        cif_dict = parser.as_dict()
        cif_keys = list(cif_dict.keys())
        s = parser.get_structures(primitive)[0]
        partial_cif = cif_dict[cif_keys[0]]
        if "_database_code_ICSD" in partial_cif:
            source = partial_cif["_database_code_ICSD"] + "-ICSD"
        else:
            source = "uploaded cif"
        source_info = {"source": source,
                       "datetime": str(datetime.datetime.now()),
                       "original_file": raw_string,
                       "cif_data": cif_dict[cif_keys[0]]}
        return TransformedStructure(s, transformations, history=[source_info])

    @staticmethod
    def from_poscar_string(poscar_string, transformations=None):
        """
        Generates TransformedStructure from a poscar string.

        Args:
            poscar_string (str): Input POSCAR string.
            transformations ([Transformations]): Sequence of transformations
                to be applied to the input structure.
        """
        p = Poscar.from_string(poscar_string)
        if not p.true_names:
            raise ValueError("Transformation can be created only from POSCAR "
                             "strings with proper VASP5 element symbols.")
        raw_string = re.sub("'", "\"", poscar_string)
        s = p.structure
        source_info = {"source": "POSCAR",
                       "datetime": str(datetime.datetime.now()),
                       "original_file": raw_string}
        return TransformedStructure(s, transformations, history=[source_info])

    def as_dict(self):
        """
        Dict representation of the TransformedStructure.
        """
        d = self.final_structure.as_dict()
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["history"] = deepcopy(self.history)
        d["version"] = __version__
        d["last_modified"] = str(datetime.datetime.utcnow())
        d["other_parameters"] = deepcopy(self.other_parameters)
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Creates a TransformedStructure from a dict.
        """
        s = Structure.from_dict(d)
        return cls(s, history=d["history"],
                   other_parameters=d.get("other_parameters", None))

    def to_snl(self, authors, projects=None, references='', remarks=None,
               data=None, created_at=None):
        if self.other_parameters:
            warn('Data in TransformedStructure.other_parameters discarded '
                 'during type conversion to SNL')
        hist = []
        for h in self.history:
            snl_metadata = h.pop('_snl', {})
            hist.append({'name': snl_metadata.pop('name', 'pymatgen'),
                         'url': snl_metadata.pop(
                             'url', 'http://pypi.python.org/pypi/pymatgen'),
                         'description': h})
        return StructureNL(self.final_structure, authors, projects,
                           references, remarks, data, hist, created_at)

    @classmethod
    def from_snl(cls, snl):
        """
        Create TransformedStructure from SNL.

        Args:
            snl (StructureNL): Starting snl

        Returns:
            TransformedStructure
        """
        hist = []
        for h in snl.history:
            d = h.description
            d['_snl'] = {'url': h.url, 'name': h.name}
            hist.append(d)
        return cls(snl.structure, history=hist)
```
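A typical round trip through this class: build a TransformedStructure from a POSCAR string, apply a transformation, then walk the history back and forward. A sketch of that flow, assuming the pymatgen version in the listing above; `SupercellTransformation` is one of pymatgen's standard transformations, and the POSCAR text is a made-up two-atom silicon cell:

```python
from pymatgen.transformations.standard_transformations import (
    SupercellTransformation)

poscar = """Si2
1.0
3.84 0.00 0.00
1.92 3.32 0.00
0.00 0.00 6.27
Si
2
direct
0.00 0.00 0.00 Si
0.25 0.25 0.25 Si
"""

ts = TransformedStructure.from_poscar_string(poscar)
ts.append_transformation(SupercellTransformation(
    [[2, 0, 0], [0, 2, 0], [0, 0, 2]]))

print(len(ts))         # 2 history entries: POSCAR source + supercell
ts.undo_last_change()  # back to the 2-atom cell
ts.redo_next_change()  # and forward again
```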
openiitbombayx/edx-platform · refs/heads/master · lms/djangoapps/lms_xblock/admin.py · 173

```python
"""
Django admin dashboard configuration for LMS XBlock infrastructure.
"""

from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig

admin.site.register(XBlockAsidesConfig, ConfigurationModelAdmin)
```

cw0100/cwse · refs/heads/master · nodejs/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py · 899

```python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys

generator_supports_multiple_toolsets = True

generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''


def CalculateVariables(default_variables, params):
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(
        msvs_generator, 'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(
        msvs_generator, 'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)


def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True


def GenerateOutput(target_list, target_dicts, data, params):
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  f = open(filename, 'w')
  json.dump(edges, f)
  f.close()
  print 'Wrote json to %s.' % filename
```
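`GenerateOutput` is a straightforward worklist traversal: pop a target, record its outgoing dependency edges, and push unseen dependencies back onto the stack. The same idiom in isolation, on a toy dependency dict (Python 3 here for readability):

```python
def collect_edges(roots, deps_of):
    """Depth-first worklist: map every reachable target to its direct deps."""
    edges = {}
    to_visit = list(roots)
    while to_visit:
        target = to_visit.pop()
        if target in edges:             # already visited
            continue
        edges[target] = list(deps_of.get(target, []))
        to_visit.extend(edges[target])  # schedule deps for visiting
    return edges

deps_of = {'app': ['lib_a', 'lib_b'], 'lib_a': ['lib_b'], 'lib_b': []}
print(collect_edges(['app'], deps_of))
# {'app': ['lib_a', 'lib_b'], 'lib_b': [], 'lib_a': ['lib_b']}
```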
lexyan/SickBeard · refs/heads/master · lib/hachoir_core/field/timestamp.py · 90

```python
from lib.hachoir_core.tools import (humanDatetime, humanDuration,
    timestampUNIX, timestampMac32, timestampUUID60, timestampWin64,
    durationWin64)
from lib.hachoir_core.field import Bits, FieldSet
from datetime import datetime


class GenericTimestamp(Bits):
    def __init__(self, parent, name, size, description=None):
        Bits.__init__(self, parent, name, size, description)

    def createDisplay(self):
        return humanDatetime(self.value)

    def createRawDisplay(self):
        value = Bits.createValue(self)
        return unicode(value)

    def __nonzero__(self):
        return Bits.createValue(self) != 0


def timestampFactory(cls_name, handler, size):
    class Timestamp(GenericTimestamp):
        def __init__(self, parent, name, description=None):
            GenericTimestamp.__init__(self, parent, name, size, description)

        def createValue(self):
            value = Bits.createValue(self)
            return handler(value)
    cls = Timestamp
    cls.__name__ = cls_name
    return cls


TimestampUnix32 = timestampFactory("TimestampUnix32", timestampUNIX, 32)
TimestampUnix64 = timestampFactory("TimestampUnix64", timestampUNIX, 64)
TimestampMac32 = timestampFactory("TimestampUnix32", timestampMac32, 32)
TimestampUUID60 = timestampFactory("TimestampUUID60", timestampUUID60, 60)
TimestampWin64 = timestampFactory("TimestampWin64", timestampWin64, 64)


class TimeDateMSDOS32(FieldSet):
    """
    32-bit MS-DOS timestamp (16-bit time, 16-bit date)
    """
    static_size = 32

    def createFields(self):
        # TODO: Create type "MSDOS_Second" : value*2
        yield Bits(self, "second", 5, "Second/2")
        yield Bits(self, "minute", 6)
        yield Bits(self, "hour", 5)
        yield Bits(self, "day", 5)
        yield Bits(self, "month", 4)
        # TODO: Create type "MSDOS_Year" : value+1980
        yield Bits(self, "year", 7, "Number of year after 1980")

    def createValue(self):
        return datetime(
            1980 + self["year"].value, self["month"].value, self["day"].value,
            self["hour"].value, self["minute"].value,
            2 * self["second"].value)

    def createDisplay(self):
        return humanDatetime(self.value)


class DateTimeMSDOS32(TimeDateMSDOS32):
    """
    32-bit MS-DOS timestamp (16-bit date, 16-bit time)
    """
    def createFields(self):
        yield Bits(self, "day", 5)
        yield Bits(self, "month", 4)
        yield Bits(self, "year", 7, "Number of year after 1980")
        yield Bits(self, "second", 5, "Second/2")
        yield Bits(self, "minute", 6)
        yield Bits(self, "hour", 5)


class TimedeltaWin64(GenericTimestamp):
    def __init__(self, parent, name, description=None):
        GenericTimestamp.__init__(self, parent, name, 64, description)

    def createDisplay(self):
        return humanDuration(self.value)

    def createValue(self):
        value = Bits.createValue(self)
        return durationWin64(value)
```
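`timestampFactory` is a small class factory: it closes over a decoding handler and a bit size, and renames the generated class so reprs and tracebacks stay readable. The same pattern stripped of the hachoir machinery, with hypothetical decoders standing in for timestampUNIX and friends:

```python
def converter_factory(cls_name, handler):
    """Build a class whose .value applies `handler` to the raw payload."""
    class Converter(object):
        def __init__(self, raw):
            self.raw = raw

        @property
        def value(self):
            return handler(self.raw)
    Converter.__name__ = cls_name  # rename for readable reprs/tracebacks
    return Converter

# Hypothetical decoders; hachoir would pass timestampUNIX, timestampMac32, ...
HexView = converter_factory("HexView", hex)
Doubler = converter_factory("Doubler", lambda v: v * 2)

print(HexView(255).value)  # '0xff'
print(Doubler(21).value)   # 42
print(HexView.__name__)    # 'HexView'
```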
class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/local/bin/python # KeyError.py x = {'a' : 1, 'b' : 2} #(A) print x['a'] # 1 #(B) print x['b'] # 2 #(C) print x['c'] # KeyError: 'c' #(D) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284697"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">psav/cfme_tests</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cfme/utils/net.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from collections import defaultdict import socket import os import re from cfme.fixtures.pytest_store import store from cfme.utils.log import logger _ports = defaultdict(dict) _dns_cache = {} ip_address = re.compile( r"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}" r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$") def random_port(tcp=True): """Get a random port number for making a socket Args: tcp: Return a TCP port number if True, UDP if False This may not be reliable at all due to an inherent race condition. This works by creating a socket on an ephemeral port, inspecting it to see what port was used, closing it, and returning that port number. In the time between closing the socket and opening a new one, it's possible for the OS to reopen that port for another purpose. In practical testing, this race condition did not result in a failure to (re)open the returned port number, making this solution squarely "good enough for now". """ # Port 0 will allocate an ephemeral port socktype = socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM s = socket.socket(socket.AF_INET, socktype) s.bind(('', 0)) addr, port = s.getsockname() s.close() return port def my_ip_address(http=False): """Get the ip address of the host running tests using the service listed in cfme_data['ip_echo'] The ip echo endpoint is expected to write the ip address to the socket and close the connection. See a working example of this in :py:func:`ip_echo_socket`. 
""" # the pytest store does this work, it's included here for convenience return store.my_ip_address def ip_echo_socket(port=32123): """A simple socket server, for use with :py:func:`my_ip_address`""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', port)) s.listen(0) while True: conn, addr = s.accept() conn.sendall(addr[0]) conn.close() def net_check(port, addr=None, force=False): """Checks the availablility of a port""" port = int(port) if not addr: addr = store.current_appliance.hostname if port not in _ports[addr] or force: # First try DNS resolution try: addr = socket.gethostbyname(addr) # Then try to connect to the port try: socket.create_connection((addr, port), timeout=10) _ports[addr][port] = True except socket.error: _ports[addr][port] = False except: _ports[addr][port] = False return _ports[addr][port] def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False): """Checks the availability of a port from outside using another machine (over SSH)""" from cfme.utils.ssh import SSHClient port = int(port) if not addr: addr = my_ip_address() if port not in _ports[addr] or force: if not machine_addr: machine_addr = store.current_appliance.hostname if not ssh_creds: ssh_client = store.current_appliance.ssh_client else: ssh_client = SSHClient( hostname=machine_addr, username=ssh_creds['username'], password=ssh_creds['password'] ) with ssh_client: # on exception => fails with return code 1 cmd = '''python -c " import sys, socket addr = socket.gethostbyname('%s') socket.create_connection((addr, %d), timeout=10) sys.exit(0) "''' % (addr, port) result = ssh_client.run_command(cmd) _ports[addr][port] = result.success return _ports[addr][port] def resolve_hostname(hostname, force=False): """Cached DNS resolver. If the hostname does not resolve to an IP, returns None.""" if hostname not in _dns_cache or force: try: _dns_cache[hostname] = socket.gethostbyname(hostname) except socket.gaierror: _dns_cache[hostname] = None return _dns_cache[hostname] def resolve_ips(host_iterable, force_dns=False): """Takes list of hostnames, ips and another things. If the item is not an IP, it will be tried to be converted to an IP. If that succeeds, it is appended to the set together with original hostname. If it can't be resolved, just the original hostname is appended. """ result = set([]) for host in map(str, host_iterable): result.add(host) # It is already an IP address if ip_address.match(host) is None: ip = resolve_hostname(host, force=force_dns) if ip is not None: result.add(ip) return result def is_pingable(ip_addr): """verifies the specified ip_address is reachable or not. Args: ip_addr: ip_address to verify the PING. returns: return True is ip_address is pinging else returns False. 
""" try: status = os.system("ping -c1 -w2 {}".format(ip_addr)) if status == 0: logger.info('IP: %s is UP !', ip_addr) return True logger.info('IP: %s is DOWN !', ip_addr) return False except Exception as e: logger.exception(e) return False </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="284698"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">NewPresident1/kitsune</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">refs/heads/master</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">kitsune/questions/tests/test_utils.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">16</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from nose.tools import eq_ from kitsune.questions.models import Question, Answer from kitsune.questions.tests import question, answer from kitsune.questions.utils import ( num_questions, num_answers, num_solutions, mark_content_as_spam) from kitsune.sumo.tests import TestCase from kitsune.users.tests import user class ContributionCountTestCase(TestCase): def test_num_questions(self): """Answers are counted correctly on a user.""" u = user(save=True) eq_(num_questions(u), 0) q1 = question(creator=u, save=True) eq_(num_questions(u), 1) q2 = question(creator=u, save=True) eq_(num_questions(u), 2) q1.delete() eq_(num_questions(u), 1) q2.delete() eq_(num_questions(u), 0) def test_num_answers(self): u = user(save=True) q = question(save=True) eq_(num_answers(u), 0) a1 = answer(creator=u, question=q, save=True) eq_(num_answers(u), 1) a2 = answer(creator=u, question=q, save=True) eq_(num_answers(u), 2) a1.delete() eq_(num_answers(u), 1) a2.delete() eq_(num_answers(u), 0) def test_num_solutions(self): u = user(save=True) q1 = question(save=True) q2 = question(save=True) a1 = answer(creator=u, question=q1, save=True) a2 = answer(creator=u, question=q2, save=True) eq_(num_solutions(u), 0) q1.solution = a1 q1.save() eq_(num_solutions(u), 1) q2.solution = a2 q2.save() eq_(num_solutions(u), 2) q1.solution = None q1.save() eq_(num_solutions(u), 1) a2.delete() eq_(num_solutions(u), 0) class FlagUserContentAsSpamTestCase(TestCase): def test_flag_content_as_spam(self): # Create some questions and answers by the user. u = user(save=True) question(creator=u, save=True) question(creator=u, save=True) answer(creator=u, save=True) answer(creator=u, save=True) answer(creator=u, save=True) # Verify they are not marked as spam yet. eq_(2, Question.objects.filter(is_spam=False, creator=u).count()) eq_(0, Question.objects.filter(is_spam=True, creator=u).count()) eq_(3, Answer.objects.filter(is_spam=False, creator=u).count()) eq_(0, Answer.objects.filter(is_spam=True, creator=u).count()) # Flag content as spam and verify it is updated. 
Row 284698: NewPresident1/kitsune · refs/heads/master · kitsune/questions/tests/test_utils.py · 16

```python
from nose.tools import eq_

from kitsune.questions.models import Question, Answer
from kitsune.questions.tests import question, answer
from kitsune.questions.utils import (
    num_questions, num_answers, num_solutions, mark_content_as_spam)
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import user


class ContributionCountTestCase(TestCase):
    def test_num_questions(self):
        """Answers are counted correctly on a user."""
        u = user(save=True)
        eq_(num_questions(u), 0)

        q1 = question(creator=u, save=True)
        eq_(num_questions(u), 1)

        q2 = question(creator=u, save=True)
        eq_(num_questions(u), 2)

        q1.delete()
        eq_(num_questions(u), 1)

        q2.delete()
        eq_(num_questions(u), 0)

    def test_num_answers(self):
        u = user(save=True)
        q = question(save=True)
        eq_(num_answers(u), 0)

        a1 = answer(creator=u, question=q, save=True)
        eq_(num_answers(u), 1)

        a2 = answer(creator=u, question=q, save=True)
        eq_(num_answers(u), 2)

        a1.delete()
        eq_(num_answers(u), 1)

        a2.delete()
        eq_(num_answers(u), 0)

    def test_num_solutions(self):
        u = user(save=True)
        q1 = question(save=True)
        q2 = question(save=True)
        a1 = answer(creator=u, question=q1, save=True)
        a2 = answer(creator=u, question=q2, save=True)
        eq_(num_solutions(u), 0)

        q1.solution = a1
        q1.save()
        eq_(num_solutions(u), 1)

        q2.solution = a2
        q2.save()
        eq_(num_solutions(u), 2)

        q1.solution = None
        q1.save()
        eq_(num_solutions(u), 1)

        a2.delete()
        eq_(num_solutions(u), 0)


class FlagUserContentAsSpamTestCase(TestCase):
    def test_flag_content_as_spam(self):
        # Create some questions and answers by the user.
        u = user(save=True)
        question(creator=u, save=True)
        question(creator=u, save=True)
        answer(creator=u, save=True)
        answer(creator=u, save=True)
        answer(creator=u, save=True)

        # Verify they are not marked as spam yet.
        eq_(2, Question.objects.filter(is_spam=False, creator=u).count())
        eq_(0, Question.objects.filter(is_spam=True, creator=u).count())
        eq_(3, Answer.objects.filter(is_spam=False, creator=u).count())
        eq_(0, Answer.objects.filter(is_spam=True, creator=u).count())

        # Flag content as spam and verify it is updated.
        mark_content_as_spam(u, user(save=True))
        eq_(0, Question.objects.filter(is_spam=False, creator=u).count())
        eq_(2, Question.objects.filter(is_spam=True, creator=u).count())
        eq_(0, Answer.objects.filter(is_spam=False, creator=u).count())
        eq_(3, Answer.objects.filter(is_spam=True, creator=u).count())
```
Row 284699: brainelectronics/towerdefense · refs/heads/master · examples/pyglet/image/codecs/quicktime.py · 43

```python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of pyglet nor the names of its
#    contributors may be used to endorse or promote products
#    derived from this software without specific prior written
#    permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------

'''
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id: pil.py 163 2006-11-13 04:15:46Z Alex.Holkner $'

import sys
from ctypes import *

from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *

from pyglet.window.carbon import carbon, quicktime, _oscheck
from pyglet.libs.darwin.constants import _name
from pyglet.libs.darwin.types import *

Handle = POINTER(POINTER(c_byte))
GWorldPtr = c_void_p
carbon.NewHandle.restype = Handle
HandleDataHandlerSubType = _name('hndl')
PointerDataHandlerSubType = _name('ptr ')
kDataHCanRead = 1
kDataRefExtensionFileName = _name('fnam')
kDataRefExtensionMIMEType = _name('mime')
ComponentInstance = c_void_p

k1MonochromePixelFormat = 0x00000001
k2IndexedPixelFormat = 0x00000002
k4IndexedPixelFormat = 0x00000004
k8IndexedPixelFormat = 0x00000008
k16BE555PixelFormat = 0x00000010
k24RGBPixelFormat = 0x00000018
k32ARGBPixelFormat = 0x00000020
k32BGRAPixelFormat = _name('BGRA')
k1IndexedGrayPixelFormat = 0x00000021
k2IndexedGrayPixelFormat = 0x00000022
k4IndexedGrayPixelFormat = 0x00000024
k8IndexedGrayPixelFormat = 0x00000028

kNativeEndianPixMap = 1 << 8

kGraphicsImporterDontDoGammaCorrection = 1 << 0
kGraphicsImporterDontUseColorMatching = 1 << 3

newMovieActive = 1
noErr = 0
movieTrackMediaType = 1 << 0
movieTrackCharacteristic = 1 << 1
movieTrackEnabledOnly = 1 << 2
VisualMediaCharacteristic = _name('eyes')
nextTimeMediaSample = 1


class PointerDataRefRecord(Structure):
    _fields_ = [
        ('data', c_void_p),
        ('dataLength', c_long)
    ]


def Str255(value):
    return create_string_buffer(chr(len(value)) + value)


class QuickTimeImageDecoder(ImageDecoder):
    def get_file_extensions(self):
        # Only most common ones shown here
        return ['.bmp', '.cur', '.gif', '.ico', '.jpg', '.jpeg', '.pcx',
                '.png', '.tga', '.tif', '.tiff', '.xbm', '.xpm']

    def get_animation_file_extensions(self):
        return ['.gif']

    def _get_data_ref(self, file, filename):
        self._data_hold = data = create_string_buffer(file.read())

        dataref = carbon.NewHandle(sizeof(PointerDataRefRecord))
        datarec = cast(dataref,
            POINTER(POINTER(PointerDataRefRecord))).contents.contents
        datarec.data = addressof(data)
        datarec.dataLength = len(data)

        self._data_handler_holder = data_handler = ComponentInstance()
        r = quicktime.OpenADataHandler(dataref, PointerDataHandlerSubType,
            None, 0, None, kDataHCanRead, byref(data_handler))
        _oscheck(r)

        extension_handle = Handle()
        self._filename_hold = filename = Str255(filename)
        r = carbon.PtrToHand(filename, byref(extension_handle),
                             len(filename))
        r = quicktime.DataHSetDataRefExtension(data_handler,
            extension_handle, kDataRefExtensionFileName)
        _oscheck(r)
        quicktime.DisposeHandle(extension_handle)

        quicktime.DisposeHandle(dataref)
        dataref = c_void_p()
        r = quicktime.DataHGetDataRef(data_handler, byref(dataref))
        _oscheck(r)

        quicktime.CloseComponent(data_handler)

        return dataref

    def _get_formats(self):
        # TODO choose 24 bit where appropriate.
        if sys.byteorder == 'big':
            format = 'ARGB'
            qtformat = k32ARGBPixelFormat
        else:
            format = 'BGRA'
            qtformat = k32BGRAPixelFormat
        return format, qtformat

    def decode(self, file, filename):
        dataref = self._get_data_ref(file, filename)
        importer = ComponentInstance()
        quicktime.GetGraphicsImporterForDataRef(dataref,
            PointerDataHandlerSubType, byref(importer))

        if not importer:
            raise ImageDecodeException(filename or file)

        rect = Rect()
        quicktime.GraphicsImportGetNaturalBounds(importer, byref(rect))
        width = rect.right
        height = rect.bottom

        format, qtformat = self._get_formats()

        buffer = (c_byte * (width * height * len(format)))()
        world = GWorldPtr()
        quicktime.QTNewGWorldFromPtr(byref(world), qtformat, byref(rect),
            c_void_p(), c_void_p(), 0, buffer, len(format) * width)

        flags = (kGraphicsImporterDontUseColorMatching |
                 kGraphicsImporterDontDoGammaCorrection)
        quicktime.GraphicsImportSetFlags(importer, flags)
        quicktime.GraphicsImportSetGWorld(importer, world, c_void_p())

        result = quicktime.GraphicsImportDraw(importer)
        quicktime.DisposeGWorld(world)
        quicktime.CloseComponent(importer)

        if result != 0:
            raise ImageDecodeException(filename or file)

        pitch = len(format) * width

        return ImageData(width, height, format, buffer, -pitch)

    def decode_animation(self, file, filename):
        # TODO: Stop playing chicken with the GC
        # TODO: Cleanup in errors

        quicktime.EnterMovies()

        data_ref = self._get_data_ref(file, filename)
        if not data_ref:
            raise ImageDecodeException(filename or file)

        movie = c_void_p()
        id = c_short()
        result = quicktime.NewMovieFromDataRef(byref(movie),
                                               newMovieActive,
                                               0,
                                               data_ref,
                                               PointerDataHandlerSubType)

        if not movie:
            #_oscheck(result)
            raise ImageDecodeException(filename or file)

        quicktime.GoToBeginningOfMovie(movie)

        time_scale = float(quicktime.GetMovieTimeScale(movie))

        format, qtformat = self._get_formats()

        # Get movie width and height
        rect = Rect()
        quicktime.GetMovieBox(movie, byref(rect))
        width = rect.right
        height = rect.bottom
        pitch = len(format) * width

        # Set gworld
        buffer = (c_byte * (width * height * len(format)))()
        world = GWorldPtr()
        quicktime.QTNewGWorldFromPtr(byref(world), qtformat, byref(rect),
            c_void_p(), c_void_p(), 0, buffer, len(format) * width)
        quicktime.SetGWorld(world, 0)
        quicktime.SetMovieGWorld(movie, world, 0)

        visual = quicktime.GetMovieIndTrackType(movie, 1,
                                                VisualMediaCharacteristic,
                                                movieTrackCharacteristic)
        if not visual:
            raise ImageDecodeException('No video track')

        time = 0

        interesting_time = c_int()
        quicktime.GetTrackNextInterestingTime(
            visual, nextTimeMediaSample, time, 1,
            byref(interesting_time), None)
        duration = interesting_time.value / time_scale

        frames = []
        while time >= 0:
            result = quicktime.GetMoviesError()
            if result == noErr:
                # force redraw
                result = quicktime.UpdateMovie(movie)
            if result == noErr:
                # process movie
                quicktime.MoviesTask(movie, 0)
                result = quicktime.GetMoviesError()
            _oscheck(result)

            buffer_copy = (c_byte * len(buffer))()
            memmove(buffer_copy, buffer, len(buffer))
            image = ImageData(width, height, format, buffer_copy, -pitch)
            frames.append(AnimationFrame(image, duration))

            interesting_time = c_int()
            duration = c_int()
            quicktime.GetTrackNextInterestingTime(
                visual, nextTimeMediaSample, time, 1,
                byref(interesting_time), byref(duration))

            quicktime.SetMovieTimeValue(movie, interesting_time)
            time = interesting_time.value
            duration = duration.value / time_scale
            if duration <= 0.01:
                duration = 0.1

        quicktime.DisposeMovie(movie)
        carbon.DisposeHandle(data_ref)
        quicktime.ExitMovies()

        return Animation(frames)


def get_decoders():
    return [QuickTimeImageDecoder()]


def get_encoders():
    return []
```
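As a side note (not part of the dataset row): this module follows pyglet's codec-module convention, exposing module-level `get_decoders()`/`get_encoders()`. A minimal sketch of how such a module is consumed, assuming the pyglet 1.x registry functions `add_decoders()` and `get_decoders()` and noting that this particular codec is Carbon-based, so it only ever ran on old Mac OS X:

```python
# Illustrative sketch only: pyglet.image.codecs.add_decoders() calls the
# module-level get_decoders() shown above and appends each decoder to a
# global registry keyed by file extension.
import pyglet.image.codecs
from pyglet.image.codecs import quicktime  # the module shown above

pyglet.image.codecs.add_decoders(quicktime)
print(pyglet.image.codecs.get_decoders('movie.gif'))
# -> decoder list for .gif, including QuickTimeImageDecoder
```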