{ observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \"\n components.html(html, width=200, scrolling=True)\n\n el = self.get_delta_from_queue().new_element\n self.assertEqual(el.iframe.src, \"\")\n self.assertEqual(el.iframe.srcdoc, html)\n self.assertEqual(el.iframe.width, 200)\n self.assertTrue(el.iframe.has_width)\n self.assertTrue(el.iframe.scrolling)\n"}}},{"rowIdx":542616,"cells":{"filename":{"kind":"string","value":"the-stack_106_30920"},"text":{"kind":"string","value":"\"\"\"\nPerform general agent monitoring, like:\n 1. Status of the agent processes\n 2. Status of the agent threads\n 3. Couchdb replication status (and status of its database)\n 4. Disk usage status\n\"\"\"\nfrom __future__ import division\nfrom future.utils import viewitems\n\nimport time\nimport logging\nimport threading\nfrom pprint import pformat\nfrom Utils.Timers import timeFunction\nfrom Utils.Utilities import numberCouchProcess\nfrom Utils.PortForward import PortForward\nfrom WMComponent.AgentStatusWatcher.DrainStatusPoller import DrainStatusPoller\nfrom WMComponent.AnalyticsDataCollector.DataCollectAPI import WMAgentDBData, initAgentInfo\nfrom WMCore.Credential.Proxy import Proxy\nfrom WMCore.Database.CMSCouch import CouchMonitor\nfrom WMCore.Lexicon import sanitizeURL\nfrom WMCore.Services.ReqMgrAux.ReqMgrAux import isDrainMode, listDiskUsageOverThreshold\nfrom WMCore.Services.WMStats.WMStatsWriter import WMStatsWriter\nfrom WMCore.Services.WorkQueue.WorkQueue import WorkQueue as WorkQueueDS\nfrom WMCore.WorkQueue.DataStructs.WorkQueueElementsSummary import getGlobalSiteStatusSummary\nfrom WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread\n\n# CMSMonitoring modules\nfrom CMSMonitoring.StompAMQ import StompAMQ\n\n\nclass AgentStatusPoller(BaseWorkerThread):\n \"\"\"\n Gether the summary data for request (workflow) from local queue,\n local job couchdb, wmbs/boss air and populate summary db for monitoring\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n initialize properties specified from config\n \"\"\"\n BaseWorkerThread.__init__(self)\n # set the workqueue service for REST call\n self.config = config\n # need to get campaign, user, owner info\n self.agentInfo = initAgentInfo(self.config)\n self.summaryLevel = config.AnalyticsDataCollector.summaryLevel\n\n proxyArgs = {'logger': logging.getLogger(), 'cleanEnvironment': True}\n self.proxy = Proxy(proxyArgs)\n self.proxyFile = self.proxy.getProxyFilename() # X509_USER_PROXY\n self.userCertFile = self.proxy.getUserCertFilename() # X509_USER_CERT\n # credential lifetime warning/error thresholds, in days\n self.credThresholds = {'proxy': {'error': 3, 'warning': 5},\n 'certificate': {'error': 10, 'warning': 20}}\n\n # create a portForwarder to be used for rerouting the replication process\n self.portForwarder = PortForward(8443)\n\n # Monitoring setup\n self.userAMQ = getattr(config.AgentStatusWatcher, \"userAMQ\", None)\n self.passAMQ = getattr(config.AgentStatusWatcher, \"passAMQ\", None)\n self.postToAMQ = getattr(config.AgentStatusWatcher, \"enableAMQ\", False)\n self.topicAMQ = getattr(config.AgentStatusWatcher, \"topicAMQ\", None)\n self.hostPortAMQ = getattr(config.AgentStatusWatcher, \"hostPortAMQ\", [('cms-mb.cern.ch', 61313)])\n\n # T0 doesn't have WorkQueue, so some monitoring/replication code has to be skipped here\n if hasattr(self.config, \"Tier0Feeder\"):\n self.isT0agent 
= True\n self.producer = \"tier0wmagent\"\n else:\n self.isT0agent = False\n self.producer = \"wmagent\"\n localWQUrl = config.AnalyticsDataCollector.localQueueURL\n self.workqueueDS = WorkQueueDS(localWQUrl)\n\n def setUpCouchDBReplication(self):\n\n self.replicatorDocs = []\n # set up common replication code\n wmstatsSource = self.config.JobStateMachine.jobSummaryDBName\n wmstatsTarget = self.config.General.centralWMStatsURL\n wmstatsTarget = self.portForwarder(wmstatsTarget)\n\n self.replicatorDocs.append({'source': wmstatsSource, 'target': wmstatsTarget,\n 'filter': \"WMStatsAgent/repfilter\"})\n if self.isT0agent:\n t0Source = self.config.Tier0Feeder.requestDBName\n t0Target = self.config.AnalyticsDataCollector.centralRequestDBURL\n self.replicatorDocs.append({'source': t0Source, 'target': t0Target,\n 'filter': \"T0Request/repfilter\"})\n else:\n # set up workqueue replication\n wqfilter = 'WorkQueue/queueFilter'\n parentQURL = self.config.WorkQueueManager.queueParams[\"ParentQueueCouchUrl\"]\n parentQURL = self.portForwarder(parentQURL)\n childURL = self.config.WorkQueueManager.queueParams[\"QueueURL\"]\n childURL = self.portForwarder(childURL)\n query_params = {'childUrl': childURL, 'parentUrl': sanitizeURL(parentQURL)['url']}\n localQInboxURL = \"%s_inbox\" % self.config.AnalyticsDataCollector.localQueueURL\n self.replicatorDocs.append({'source': sanitizeURL(parentQURL)['url'], 'target': localQInboxURL,\n 'filter': wqfilter, 'query_params': query_params})\n self.replicatorDocs.append({'source': sanitizeURL(localQInboxURL)['url'], 'target': parentQURL,\n 'filter': wqfilter, 'query_params': query_params})\n\n # delete old replicator docs before setting up\n self.localCouchMonitor.deleteReplicatorDocs()\n\n for rp in self.replicatorDocs:\n self.localCouchMonitor.couchServer.replicate(\n rp['source'], rp['target'], filter=rp['filter'],\n query_params=rp.get('query_params', False),\n continuous=True)\n # First cicle need to be skipped since document is not updated that fast\n self.skipReplicationCheck = True\n\n def setup(self, parameters):\n \"\"\"\n set db connection(couchdb, wmbs) to prepare to gather information\n \"\"\"\n\n # interface to WMBS/BossAir db\n myThread = threading.currentThread()\n # set wmagent db data\n self.wmagentDB = WMAgentDBData(self.summaryLevel, myThread.dbi, myThread.logger)\n\n self.centralWMStatsCouchDB = WMStatsWriter(self.config.General.centralWMStatsURL)\n\n self.localCouchMonitor = CouchMonitor(self.config.JobStateMachine.couchurl)\n self.setUpCouchDBReplication()\n\n @timeFunction\n def algorithm(self, parameters):\n \"\"\"\n get information from wmbs, workqueue and local couch\n \"\"\"\n try:\n agentInfo = self.collectAgentInfo()\n self.checkCredLifetime(agentInfo, \"proxy\")\n self.checkCredLifetime(agentInfo, \"certificate\")\n\n timeSpent, wmbsInfo, _ = self.collectWMBSInfo()\n wmbsInfo['total_query_time'] = int(timeSpent)\n agentInfo[\"WMBS_INFO\"] = wmbsInfo\n logging.info(\"WMBS data collected in: %d secs\", timeSpent)\n\n if not self.isT0agent:\n timeSpent, localWQInfo, _ = self.collectWorkQueueInfo()\n localWQInfo['total_query_time'] = int(timeSpent)\n agentInfo[\"LocalWQ_INFO\"] = localWQInfo\n logging.info(\"Local WorkQueue data collected in: %d secs\", timeSpent)\n\n self.uploadAgentInfoToCentralWMStats(agentInfo)\n\n self.buildMonITDocs(agentInfo)\n\n except Exception as ex:\n logging.exception(\"Error occurred, will retry later.\\nDetails: %s\", str(ex))\n\n @timeFunction\n def collectWorkQueueInfo(self):\n \"\"\"\n Collect information 
from local workqueue database\n :return:\n \"\"\"\n results = {}\n wqStates = ['Available', 'Acquired']\n\n results['workByStatus'] = self.workqueueDS.getJobsByStatus()\n results['workByStatusAndPriority'] = self.workqueueDS.getJobsByStatusAndPriority()\n\n elements = self.workqueueDS.getElementsByStatus(wqStates)\n uniSites, posSites = getGlobalSiteStatusSummary(elements, status=wqStates, dataLocality=True)\n results['uniqueJobsPerSite'] = uniSites\n results['possibleJobsPerSite'] = posSites\n\n return results\n\n def collectCouchDBInfo(self):\n\n couchInfo = {'name': 'CouchServer', 'status': 'ok', 'error_message': \"\"}\n\n if self.skipReplicationCheck:\n # skipping the check this round set if False so it can be checked next round.\n self.skipReplicationCheck = False\n return couchInfo\n\n for rp in self.replicatorDocs:\n cInfo = self.localCouchMonitor.checkCouchServerStatus(rp['source'],\n rp['target'], checkUpdateSeq=False)\n if cInfo['status'] != 'ok':\n couchInfo['status'] = 'error'\n couchInfo['error_message'] = cInfo['error_message']\n\n return couchInfo\n\n def collectAgentInfo(self):\n \"\"\"\n Monitors the general health of the agent, as:\n 1. status of the agent processes\n 2. status of the agent threads based on the database info\n 3. couchdb active tasks and its replications\n 4. check the disk usage\n 5. check the number of couch processes\n\n :return: a dict with all the info collected\n \"\"\"\n logging.info(\"Getting agent info ...\")\n agentInfo = self.wmagentDB.getComponentStatus(self.config)\n agentInfo.update(self.agentInfo)\n\n agentInfo['disk_warning'] = listDiskUsageOverThreshold(self.config, updateDB=True)\n\n if isDrainMode(self.config):\n logging.info(\"Agent is in DrainMode\")\n agentInfo['drain_mode'] = True\n agentInfo['drain_stats'] = DrainStatusPoller.getDrainInfo()\n else:\n agentInfo['drain_mode'] = False\n\n couchInfo = self.collectCouchDBInfo()\n if couchInfo['status'] != 'ok':\n agentInfo['down_components'].append(couchInfo['name'])\n agentInfo['status'] = couchInfo['status']\n agentInfo['down_component_detail'].append(couchInfo)\n\n # Couch process warning\n couchProc = numberCouchProcess()\n logging.info(\"CouchDB is running with %d processes\", couchProc)\n couchProcessThreshold = self.config.AnalyticsDataCollector.couchProcessThreshold\n if couchProc >= couchProcessThreshold:\n agentInfo['couch_process_warning'] = couchProc\n else:\n agentInfo['couch_process_warning'] = 0\n\n # Change status if there is data_error, couch process maxed out or disk full problems.\n if agentInfo['status'] == 'ok' and (agentInfo['drain_mode'] or agentInfo['disk_warning']):\n agentInfo['status'] = \"warning\"\n\n if agentInfo['status'] == 'ok' or agentInfo['status'] == 'warning':\n if agentInfo.get('data_error', 'ok') != 'ok' or agentInfo.get('couch_process_warning', 0):\n agentInfo['status'] = \"error\"\n\n logging.info(\"List of agent components down: %s\", agentInfo['down_components'])\n\n return agentInfo\n\n def uploadAgentInfoToCentralWMStats(self, agentInfo):\n \"\"\"\n Add some required fields to the document before it can get uploaded\n to WMStats.\n :param agentInfo: dict with agent stats to be posted to couchdb\n \"\"\"\n agentInfo['_id'] = agentInfo[\"agent_url\"]\n agentInfo['timestamp'] = int(time.time())\n agentInfo['type'] = \"agent_info\"\n # directly upload to the remote to prevent data conflict when agent is cleaned up and redeployed\n try:\n self.centralWMStatsCouchDB.updateAgentInfo(agentInfo,\n propertiesToKeep=[\"data_last_update\", 
\"data_error\"])\n except Exception as e:\n logging.error(\"Failed to upload agent statistics to WMStats. Error: %s\", str(e))\n\n @timeFunction\n def collectWMBSInfo(self):\n \"\"\"\n Fetches WMBS job information.\n In addition to WMBS, also collects RunJob info from BossAir\n :return: dict with the number of jobs in each status\n \"\"\"\n logging.info(\"Getting wmbs job info ...\")\n results = {}\n\n # first retrieve the site thresholds\n results['thresholds'] = self.wmagentDB.getJobSlotInfo()\n logging.debug(\"Running and pending site thresholds: %s\", results['thresholds'])\n\n # now fetch the amount of jobs in each state and the amount of created\n # jobs grouped by task\n results.update(self.wmagentDB.getAgentMonitoring())\n\n logging.debug(\"Total number of jobs in WMBS sorted by status: %s\", results['wmbsCountByState'])\n logging.debug(\"Total number of 'created' jobs in WMBS sorted by type: %s\", results['wmbsCreatedTypeCount'])\n logging.debug(\"Total number of 'executing' jobs in WMBS sorted by type: %s\", results['wmbsExecutingTypeCount'])\n\n logging.debug(\"Total number of active jobs in BossAir sorted by status: %s\", results['activeRunJobByStatus'])\n logging.debug(\"Total number of complete jobs in BossAir sorted by status: %s\",\n results['completeRunJobByStatus'])\n\n logging.debug(\"Available slots thresholds to pull work from GQ to LQ: %s\", results['thresholdsGQ2LQ'])\n logging.debug(\"List of jobs pending for each site, sorted by priority: %s\", results['sitePendCountByPrio'])\n\n return results\n\n def checkCredLifetime(self, agInfo, credType):\n \"\"\"\n Check the credential lifetime. Usually X509_USER_PROXY or X509_USER_CERT\n and raise either a warning or an error if the proxy validity is about to expire.\n :param agInfo: dictionary with plenty of agent monitoring information in place.\n :param credType: credential type, can be: \"proxy\" or \"certificate\"\n :return: same dictionary object plus additional keys/values if needed.\n \"\"\"\n if credType == \"proxy\":\n credFile = self.proxyFile\n secsLeft = self.proxy.getTimeLeft(proxy=credFile)\n elif credType == \"certificate\":\n credFile = self.userCertFile\n secsLeft = self.proxy.getUserCertTimeLeft(openSSL=True)\n else:\n logging.error(\"Unknown credential type. Available options are: [proxy, certificate]\")\n return\n\n logging.debug(\"%s '%s' lifetime is %d seconds\", credType, credFile, secsLeft)\n\n daysLeft = secsLeft / (60 * 60 * 24)\n\n if daysLeft <= self.credThresholds[credType]['error']:\n credWarning = True\n agInfo['status'] = \"error\"\n elif daysLeft <= self.credThresholds[credType]['warning']:\n credWarning = True\n if agInfo['status'] == \"ok\":\n agInfo['status'] = \"warning\"\n else:\n credWarning = False\n\n if credWarning:\n warnMsg = \"Agent %s '%s' must be renewed ASAP. \" % (credType, credFile)\n warnMsg += \"Its time left is: %.2f hours;\" % (secsLeft / 3600.)\n agInfo['proxy_warning'] = agInfo.get('proxy_warning', \"\") + warnMsg\n logging.warning(warnMsg)\n\n return\n\n def buildMonITDocs(self, dataStats):\n \"\"\"\n Convert agent statistics into MonIT-friendly documents to be posted\n to AMQ/ES. 
It creates 5 different type of documents:\n * priority information\n * site information\n * work information\n * agent information\n * agent health information\n Note that the internal methods are popping some metrics out of dataStats\n \"\"\"\n if not self.postToAMQ:\n return\n\n logging.info(\"Preparing documents to be posted to AMQ/MonIT..\")\n allDocs = self._buildMonITPrioDocs(dataStats)\n allDocs.extend(self._buildMonITSitesDocs(dataStats))\n allDocs.extend(self._buildMonITWorkDocs(dataStats))\n allDocs.extend(self._buildMonITWMBSDocs(dataStats))\n allDocs.extend(self._buildMonITAgentDocs(dataStats))\n allDocs.extend(self._buildMonITHealthDocs(dataStats))\n allDocs.extend(self._buildMonITSummaryDocs(dataStats))\n\n # and finally post them all to AMQ\n logging.info(\"Found %d documents to post to AMQ\", len(allDocs))\n self.uploadToAMQ(allDocs, dataStats['agent_url'], dataStats['timestamp'])\n\n\n def _buildMonITPrioDocs(self, dataStats):\n \"\"\"\n Uses the `sitePendCountByPrio` metric in order to build documents\n reporting the site name, job priority and amount of jobs within that\n priority.\n :param dataStats: dictionary with metrics previously posted to WMStats\n :return: list of dictionaries with the wma_prio_info MonIT docs\n \"\"\"\n docType = \"wma_prio_info\"\n prioDocs = []\n sitePendCountByPrio = dataStats['WMBS_INFO'].pop('sitePendCountByPrio', [])\n\n for site, item in viewitems(sitePendCountByPrio):\n # it seems sites with no jobs are also always here as \"Sitename\": {0: 0}\n if list(item) == [0]:\n continue\n for prio, jobs in viewitems(item):\n prioDoc = {}\n prioDoc['site_name'] = site\n prioDoc['type'] = docType\n prioDoc['priority'] = prio\n prioDoc['job_count'] = jobs\n prioDocs.append(prioDoc)\n return prioDocs\n\n def _buildMonITSitesDocs(self, dataStats):\n \"\"\"\n Uses the site thresholds and job information for each site in order\n to build a `site_info` document type for MonIT.\n :param dataStats: dictionary with metrics previously posted to WMStats\n :return: list of dictionaries with the wma_site_info MonIT docs\n \"\"\"\n docType = \"wma_site_info\"\n siteDocs = []\n thresholds = dataStats['WMBS_INFO'].pop('thresholds', {})\n thresholdsGQ2LQ = dataStats['WMBS_INFO'].pop('thresholdsGQ2LQ', {})\n if self.isT0agent:\n possibleJobsPerSite = {}\n uniqueJobsPerSite = {}\n else:\n possibleJobsPerSite = dataStats['LocalWQ_INFO'].pop('possibleJobsPerSite', {})\n uniqueJobsPerSite = dataStats['LocalWQ_INFO'].pop('uniqueJobsPerSite', {})\n\n for site in sorted(thresholds):\n siteDoc = {}\n siteDoc['site_name'] = site\n siteDoc['type'] = docType\n siteDoc['thresholds'] = thresholds[site]\n siteDoc['state'] = siteDoc['thresholds'].pop('state', 'Unknown')\n siteDoc['thresholdsGQ2LQ'] = thresholdsGQ2LQ.get(site, 0)\n\n for status in possibleJobsPerSite:\n # make sure these keys are always present in the documents\n jobKey = \"possible_%s_jobs\" % status.lower()\n elemKey = \"num_%s_elem\" % status.lower()\n uniJobKey = \"unique_%s_jobs\" % status.lower()\n siteDoc[jobKey], siteDoc[elemKey], siteDoc[uniJobKey] = 0, 0, 0\n if site in possibleJobsPerSite[status]:\n siteDoc[jobKey] = possibleJobsPerSite[status][site]['sum_jobs']\n siteDoc[elemKey] = possibleJobsPerSite[status][site]['num_elem']\n if site in uniqueJobsPerSite[status]:\n siteDoc[uniJobKey] = uniqueJobsPerSite[status][site]['sum_jobs']\n\n siteDocs.append(siteDoc)\n\n return siteDocs\n\n def _buildMonITWorkDocs(self, dataStats):\n \"\"\"\n Uses the local workqueue information order by WQE status and build\n 
statistics for the workload in terms of workqueue elements and top\n level jobs.\n Using the WMBS data, also builds documents to show the amount of\n work in 'created' and 'executing' WMBS status.\n :param dataStats: dictionary with metrics previously posted to WMStats\n :return: list of dictionaries with the wma_work_info MonIT docs\n \"\"\"\n workDocs = []\n if self.isT0agent:\n return workDocs\n\n docType = \"wma_work_info\"\n workByStatus = dataStats['LocalWQ_INFO'].pop('workByStatus', {})\n for status, info in viewitems(workByStatus):\n workDoc = {}\n workDoc['type'] = docType\n workDoc['status'] = status\n workDoc['num_elem'] = info.get('num_elem', 0)\n workDoc['sum_jobs'] = info.get('sum_jobs', 0)\n workDocs.append(workDoc)\n\n return workDocs\n\n def _buildMonITWMBSDocs(self, dataStats):\n \"\"\"\n Using the WMBS data, builds documents to show the amount of work in\n 'created' and 'executing' WMBS status.\n It also builds a document for every single wmbs_status in the database.\n :param dataStats: dictionary with metrics previously posted to WMStats\n :return: list of dictionaries with the wma_wmbs_info and wma_wmbs_state_info docs\n \"\"\"\n docType = \"wma_wmbs_info\"\n wmbsDocs = []\n wmbsCreatedTypeCount = dataStats['WMBS_INFO'].pop('wmbsCreatedTypeCount', {})\n wmbsExecutingTypeCount = dataStats['WMBS_INFO'].pop('wmbsExecutingTypeCount', {})\n for jobType in wmbsCreatedTypeCount:\n wmbsDoc = {}\n wmbsDoc['type'] = docType\n wmbsDoc['job_type'] = jobType\n wmbsDoc['created_jobs'] = wmbsCreatedTypeCount[jobType]\n wmbsDoc['executing_jobs'] = wmbsExecutingTypeCount[jobType]\n wmbsDocs.append(wmbsDoc)\n\n docType = \"wma_wmbs_state_info\"\n wmbsCountByState = dataStats['WMBS_INFO'].pop('wmbsCountByState', {})\n for wmbsStatus in wmbsCountByState:\n wmbsDoc = {}\n wmbsDoc['type'] = docType\n wmbsDoc['wmbs_status'] = wmbsStatus\n wmbsDoc['num_jobs'] = wmbsCountByState[wmbsStatus]\n wmbsDocs.append(wmbsDoc)\n\n return wmbsDocs\n\n def _buildMonITAgentDocs(self, dataStats):\n \"\"\"\n Uses the BossAir and WMBS table information in order to build a\n view of amount of jobs in different statuses.\n :param dataStats: dictionary with metrics previously posted to WMStats\n :return: list of dictionaries with the wma_agent_info MonIT docs\n \"\"\"\n docType = \"wma_agent_info\"\n agentDocs = []\n activeRunJobByStatus = dataStats['WMBS_INFO'].pop('activeRunJobByStatus', {})\n completeRunJobByStatus = dataStats['WMBS_INFO'].pop('completeRunJobByStatus', {})\n for schedStatus in activeRunJobByStatus:\n agentDoc = {}\n agentDoc['type'] = docType\n agentDoc['schedd_status'] = schedStatus\n agentDoc['active_jobs'] = activeRunJobByStatus[schedStatus]\n agentDoc['completed_jobs'] = completeRunJobByStatus[schedStatus]\n agentDocs.append(agentDoc)\n\n return agentDocs\n\n def _buildMonITHealthDocs(self, dataStats):\n \"\"\"\n Creates documents with specific agent information, status of\n each component and worker thread (similar to what is shown in\n wmstats) and also some very basic performance numbers.\n :param dataStats: dictionary with metrics previously posted to WMStats\n :return: list of dictionaries with the wma_health_info MonIT docs\n \"\"\"\n docType = \"wma_health_info\"\n healthDocs = []\n workersStatus = dataStats.pop('workers', {})\n for worker in workersStatus:\n healthDoc = {}\n healthDoc['type'] = docType\n healthDoc['worker_name'] = worker['name']\n healthDoc['worker_state'] = worker['state']\n healthDoc['worker_poll'] = worker['poll_interval']\n healthDoc['worker_last_hb'] = 
worker['last_updated']\n healthDoc['worker_cycle_time'] = worker['cycle_time']\n healthDocs.append(healthDoc)\n\n return healthDocs\n\n def _buildMonITSummaryDocs(self, dataStats):\n \"\"\"\n Creates a document with the very basic agent info used\n in the wmstats monitoring tab.\n :param dataStats: dictionary with metrics previously posted to WMStats\n :return: list of dictionaries with the wma_health_info MonIT docs\n \"\"\"\n docType = \"wma_summary_info\"\n summaryDocs = []\n summaryDoc = {}\n summaryDoc['type'] = docType\n summaryDoc['agent_team'] = dataStats['agent_team']\n summaryDoc['agent_version'] = dataStats['agent_version']\n summaryDoc['agent_status'] = dataStats['status']\n if not self.isT0agent:\n summaryDoc['wq_query_time'] = dataStats['LocalWQ_INFO']['total_query_time']\n summaryDoc['wmbs_query_time'] = dataStats['WMBS_INFO']['total_query_time']\n summaryDoc['drain_mode'] = dataStats['drain_mode']\n summaryDoc['down_components'] = dataStats['down_components']\n summaryDocs.append(summaryDoc)\n return summaryDocs\n\n def uploadToAMQ(self, docs, agentUrl, timeS):\n \"\"\"\n _uploadToAMQ_\n\n Sends data to AMQ, which ends up in the MonIT infrastructure.\n :param docs: list of documents/dicts to be posted\n \"\"\"\n if not docs:\n logging.info(\"There are no documents to send to AMQ\")\n return\n # add mandatory information for every single document\n for doc in docs:\n doc['agent_url'] = agentUrl\n\n docType = \"cms_%s_info\" % self.producer\n notifications = []\n\n logging.debug(\"Sending the following data to AMQ %s\", pformat(docs))\n try:\n stompSvc = StompAMQ(username=self.userAMQ,\n password=self.passAMQ,\n producer=self.producer,\n topic=self.topicAMQ,\n validation_schema=None,\n host_and_ports=self.hostPortAMQ,\n logger=logging)\n\n for doc in docs:\n singleNotif, _, _ = stompSvc.make_notification(payload=doc, docType=docType,\n ts=timeS, dataSubfield=\"payload\")\n notifications.append(singleNotif)\n\n failures = stompSvc.send(notifications)\n msg = \"%i out of %i documents successfully sent to AMQ\" % (len(notifications) - len(failures),\n len(notifications))\n logging.info(msg)\n except Exception as ex:\n logging.exception(\"Failed to send data to StompAMQ. Error %s\", str(ex))\n\n return\n"}}},{"rowIdx":542617,"cells":{"filename":{"kind":"string","value":"the-stack_106_30921"},"text":{"kind":"string","value":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
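# A minimal illustrative sketch (not part of the original WMCore module) of the
# behaviour this poller assumes from the @timeFunction decorator imported from
# Utils.Timers: call sites such as algorithm() unpack the result as
# (timeSpent, result, _), so the decorator is assumed to return the elapsed
# seconds, the wrapped function's return value and the function name. The name
# timeFunctionSketch below is hypothetical, chosen to avoid clashing with the
# real decorator.
import functools
import time


def timeFunctionSketch(func):
    """Wrap func so callers receive (elapsedSeconds, result, funcName)."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        return time.time() - start, result, func.__name__
    return wrapper

# Example usage mirroring the unpacking pattern used in algorithm() above:
#     @timeFunctionSketch
#     def collectSomething():
#         return {'status': 'ok'}
#     timeSpent, data, _ = collectSomething()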
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Main entry point into the Resource service.\"\"\"\n\nfrom oslo_log import log\nimport six\n\nfrom keystone import assignment\nfrom keystone.common import cache\nfrom keystone.common import driver_hints\nfrom keystone.common import manager\nfrom keystone.common import provider_api\nfrom keystone.common import utils\nimport keystone.conf\nfrom keystone import exception\nfrom keystone.i18n import _\nfrom keystone import notifications\nfrom keystone.resource.backends import base\nfrom keystone.resource.backends import sql as resource_sql\nfrom keystone.token import provider as token_provider\n\nCONF = keystone.conf.CONF\nLOG = log.getLogger(__name__)\nMEMOIZE = cache.get_memoization_decorator(group='resource')\nPROVIDERS = provider_api.ProviderAPIs\n\n\nTAG_SEARCH_FILTERS = ('tags', 'tags-any', 'not-tags', 'not-tags-any')\n\n\nclass Manager(manager.Manager):\n \"\"\"Default pivot point for the Resource backend.\n\n See :mod:`keystone.common.manager.Manager` for more details on how this\n dynamically calls the backend.\n\n \"\"\"\n\n driver_namespace = 'keystone.resource'\n _provides_api = 'resource_api'\n\n _DOMAIN = 'domain'\n _PROJECT = 'project'\n _PROJECT_TAG = 'project tag'\n\n def __init__(self):\n # NOTE(morgan): The resource driver must be SQL. This is because there\n # is a FK between identity and resource. Almost every deployment uses\n # SQL Identity in some form. Even if SQL Identity is not used, there\n # is almost no reason to have non-SQL Resource. Keystone requires\n # SQL in a number of ways, this simply codifies it plainly for resource\n # the driver_name = None simply implies we don't need to load a driver.\n self.driver = resource_sql.Resource()\n super(Manager, self).__init__(driver_name=None)\n\n def _get_hierarchy_depth(self, parents_list):\n return len(parents_list) + 1\n\n def _assert_max_hierarchy_depth(self, project_id, parents_list=None):\n if parents_list is None:\n parents_list = self.list_project_parents(project_id)\n # NOTE(henry-nash): In upgrading to a scenario where domains are\n # represented as projects acting as domains, we will effectively\n # increase the depth of any existing project hierarchy by one. 
To avoid\n # pushing any existing hierarchies over the limit, we add one to the\n # maximum depth allowed, as specified in the configuration file.\n max_depth = CONF.max_project_tree_depth + 1\n\n # NOTE(wxy): If the hierarchical limit enforcement model is used, the\n # project depth should be not greater than the model's limit as well.\n #\n # TODO(wxy): Deprecate and remove CONF.max_project_tree_depth, let the\n # depth check only based on the limit enforcement model.\n limit_model = PROVIDERS.unified_limit_api.enforcement_model\n if limit_model.MAX_PROJECT_TREE_DEPTH is not None:\n max_depth = min(max_depth, limit_model.MAX_PROJECT_TREE_DEPTH + 1)\n if self._get_hierarchy_depth(parents_list) > max_depth:\n raise exception.ForbiddenNotSecurity(\n _('Max hierarchy depth reached for %s branch.') % project_id)\n\n def _assert_is_domain_project_constraints(self, project_ref):\n \"\"\"Enforce specific constraints of projects that act as domains.\n\n Called when is_domain is true, this method ensures that:\n\n * multiple domains are enabled\n * the project name is not the reserved name for a federated domain\n * the project is a root project\n\n :raises keystone.exception.ValidationError: If one of the constraints\n was not satisfied.\n \"\"\"\n if (not PROVIDERS.identity_api.multiple_domains_supported and\n project_ref['id'] != CONF.identity.default_domain_id and\n project_ref['id'] != base.NULL_DOMAIN_ID):\n raise exception.ValidationError(\n message=_('Multiple domains are not supported'))\n\n self.assert_domain_not_federated(project_ref['id'], project_ref)\n\n if project_ref['parent_id']:\n raise exception.ValidationError(\n message=_('only root projects are allowed to act as '\n 'domains.'))\n\n def _assert_regular_project_constraints(self, project_ref):\n \"\"\"Enforce regular project hierarchy constraints.\n\n Called when is_domain is false. The project must contain a valid\n domain_id and parent_id. 
The goal of this method is to check\n that the domain_id specified is consistent with the domain of its\n parent.\n\n :raises keystone.exception.ValidationError: If one of the constraints\n was not satisfied.\n :raises keystone.exception.DomainNotFound: In case the domain is not\n found.\n \"\"\"\n # Ensure domain_id is valid, and by inference will not be None.\n domain = self.get_domain(project_ref['domain_id'])\n parent_ref = self.get_project(project_ref['parent_id'])\n\n if parent_ref['is_domain']:\n if parent_ref['id'] != domain['id']:\n raise exception.ValidationError(\n message=_('Cannot create project, the parent '\n '(%(parent_id)s) is acting as a domain, '\n 'but this project\\'s domain id (%(domain_id)s) '\n 'does not match the parent\\'s id.')\n % {'parent_id': parent_ref['id'],\n 'domain_id': domain['id']})\n else:\n parent_domain_id = parent_ref.get('domain_id')\n if parent_domain_id != domain['id']:\n raise exception.ValidationError(\n message=_('Cannot create project, since it specifies '\n 'its domain_id %(domain_id)s, but '\n 'specifies a parent in a different domain '\n '(%(parent_domain_id)s).')\n % {'domain_id': domain['id'],\n 'parent_domain_id': parent_domain_id})\n\n def _enforce_project_constraints(self, project_ref):\n if project_ref.get('is_domain'):\n self._assert_is_domain_project_constraints(project_ref)\n else:\n self._assert_regular_project_constraints(project_ref)\n # The whole hierarchy (upwards) must be enabled\n parent_id = project_ref['parent_id']\n parents_list = self.list_project_parents(parent_id)\n parent_ref = self.get_project(parent_id)\n parents_list.append(parent_ref)\n for ref in parents_list:\n if not ref.get('enabled', True):\n raise exception.ValidationError(\n message=_('cannot create a project in a '\n 'branch containing a disabled '\n 'project: %s') % ref['id'])\n\n self._assert_max_hierarchy_depth(project_ref.get('parent_id'),\n parents_list)\n\n def _raise_reserved_character_exception(self, entity_type, name):\n msg = _('%(entity)s name cannot contain the following reserved '\n 'characters: %(chars)s')\n raise exception.ValidationError(\n message=msg % {\n 'entity': entity_type,\n 'chars': utils.list_url_unsafe_chars(name)\n })\n\n def _generate_project_name_conflict_msg(self, project):\n if project['is_domain']:\n return _('it is not permitted to have two projects '\n 'acting as domains with the same name: %s'\n ) % project['name']\n else:\n return _('it is not permitted to have two projects '\n 'with either the same name or same id in '\n 'the same domain: '\n 'name is %(name)s, project id %(id)s'\n ) % project\n\n def create_project(self, project_id, project, initiator=None):\n project = project.copy()\n\n if (CONF.resource.project_name_url_safe != 'off' and\n utils.is_not_url_safe(project['name'])):\n self._raise_reserved_character_exception('Project',\n project['name'])\n\n project.setdefault('enabled', True)\n project['name'] = project['name'].strip()\n project.setdefault('description', '')\n\n # For regular projects, the controller will ensure we have a valid\n # domain_id. 
For projects acting as a domain, the project_id\n # is, effectively, the domain_id - and for such projects we don't\n # bother to store a copy of it in the domain_id attribute.\n project.setdefault('domain_id', None)\n project.setdefault('parent_id', None)\n if not project['parent_id']:\n project['parent_id'] = project['domain_id']\n project.setdefault('is_domain', False)\n\n self._enforce_project_constraints(project)\n\n # We leave enforcing name uniqueness to the underlying driver (instead\n # of doing it in code in the project_constraints above), so as to allow\n # this check to be done at the storage level, avoiding race conditions\n # in multi-process keystone configurations.\n try:\n ret = self.driver.create_project(project_id, project)\n except exception.Conflict:\n raise exception.Conflict(\n type='project',\n details=self._generate_project_name_conflict_msg(project))\n\n if project.get('is_domain'):\n notifications.Audit.created(self._DOMAIN, project_id, initiator)\n else:\n notifications.Audit.created(self._PROJECT, project_id, initiator)\n if MEMOIZE.should_cache(ret):\n self.get_project.set(ret, self, project_id)\n self.get_project_by_name.set(ret, self, ret['name'],\n ret['domain_id'])\n\n assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()\n\n return ret\n\n def assert_domain_enabled(self, domain_id, domain=None):\n \"\"\"Assert the Domain is enabled.\n\n :raise AssertionError: if domain is disabled.\n \"\"\"\n if domain is None:\n domain = self.get_domain(domain_id)\n if not domain.get('enabled', True):\n raise AssertionError(_('Domain is disabled: %s') % domain_id)\n\n def assert_domain_not_federated(self, domain_id, domain):\n \"\"\"Assert the Domain's name and id do not match the reserved keyword.\n\n Note that the reserved keyword is defined in the configuration file,\n by default, it is 'Federated', it is also case insensitive.\n If config's option is empty the default hardcoded value 'Federated'\n will be used.\n\n :raise AssertionError: if domain named match the value in the config.\n\n \"\"\"\n # NOTE(marek-denis): We cannot create this attribute in the __init__ as\n # config values are always initialized to default value.\n federated_domain = CONF.federation.federated_domain_name.lower()\n if (domain.get('name') and domain['name'].lower() == federated_domain):\n raise AssertionError(_('Domain cannot be named %s')\n % domain['name'])\n if (domain_id.lower() == federated_domain):\n raise AssertionError(_('Domain cannot have ID %s')\n % domain_id)\n\n def assert_project_enabled(self, project_id, project=None):\n \"\"\"Assert the project is enabled and its associated domain is enabled.\n\n :raise AssertionError: if the project or domain is disabled.\n \"\"\"\n if project is None:\n project = self.get_project(project_id)\n # If it's a regular project (i.e. 
it has a domain_id), we need to make\n # sure the domain itself is not disabled\n if project['domain_id']:\n self.assert_domain_enabled(domain_id=project['domain_id'])\n if not project.get('enabled', True):\n raise AssertionError(_('Project is disabled: %s') % project_id)\n\n def _assert_all_parents_are_enabled(self, project_id):\n parents_list = self.list_project_parents(project_id)\n for project in parents_list:\n if not project.get('enabled', True):\n raise exception.ForbiddenNotSecurity(\n _('Cannot enable project %s since it has disabled '\n 'parents') % project_id)\n\n def _check_whole_subtree_is_disabled(self, project_id, subtree_list=None):\n if not subtree_list:\n subtree_list = self.list_projects_in_subtree(project_id)\n subtree_enabled = [ref.get('enabled', True) for ref in subtree_list]\n return (not any(subtree_enabled))\n\n def _update_project(self, project_id, project, initiator=None,\n cascade=False):\n # Use the driver directly to prevent using old cached value.\n original_project = self.driver.get_project(project_id)\n project = project.copy()\n self._require_matching_domain_id(project, original_project)\n\n if original_project['is_domain']:\n domain = self._get_domain_from_project(original_project)\n self.assert_domain_not_federated(project_id, domain)\n url_safe_option = CONF.resource.domain_name_url_safe\n exception_entity = 'Domain'\n else:\n url_safe_option = CONF.resource.project_name_url_safe\n exception_entity = 'Project'\n\n project_name_changed = ('name' in project and project['name'] !=\n original_project['name'])\n if (url_safe_option != 'off' and project_name_changed and\n utils.is_not_url_safe(project['name'])):\n self._raise_reserved_character_exception(exception_entity,\n project['name'])\n elif project_name_changed:\n project['name'] = project['name'].strip()\n parent_id = original_project.get('parent_id')\n if 'parent_id' in project and project.get('parent_id') != parent_id:\n raise exception.ForbiddenNotSecurity(\n _('Update of `parent_id` is not allowed.'))\n\n if ('is_domain' in project and\n project['is_domain'] != original_project['is_domain']):\n raise exception.ValidationError(\n message=_('Update of `is_domain` is not allowed.'))\n\n original_project_enabled = original_project.get('enabled', True)\n project_enabled = project.get('enabled', True)\n if not original_project_enabled and project_enabled:\n self._assert_all_parents_are_enabled(project_id)\n if original_project_enabled and not project_enabled:\n # NOTE(htruta): In order to disable a regular project, all its\n # children must already be disabled. However, to keep\n # compatibility with the existing domain behaviour, we allow a\n # project acting as a domain to be disabled irrespective of the\n # state of its children. 
Disabling a project acting as domain\n # effectively disables its children.\n if (not original_project.get('is_domain') and not cascade and not\n self._check_whole_subtree_is_disabled(project_id)):\n raise exception.ForbiddenNotSecurity(\n _('Cannot disable project %(project_id)s since its '\n 'subtree contains enabled projects.')\n % {'project_id': project_id})\n\n notifications.Audit.disabled(self._PROJECT, project_id,\n public=False)\n # Drop the computed assignments if the project is being disabled.\n # This ensures an accurate list of projects is returned when\n # listing projects/domains for a user based on role assignments.\n assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()\n\n if cascade:\n self._only_allow_enabled_to_update_cascade(project,\n original_project)\n self._update_project_enabled_cascade(project_id, project_enabled)\n\n try:\n project['is_domain'] = (project.get('is_domain') or\n original_project['is_domain'])\n ret = self.driver.update_project(project_id, project)\n except exception.Conflict:\n raise exception.Conflict(\n type='project',\n details=self._generate_project_name_conflict_msg(project))\n\n try:\n self.get_project.invalidate(self, project_id)\n self.get_project_by_name.invalidate(self, original_project['name'],\n original_project['domain_id'])\n if ('domain_id' in project and\n project['domain_id'] != original_project['domain_id']):\n # If the project's domain_id has been updated, invalidate user\n # role assignments cache region, as it may be caching inherited\n # assignments from the old domain to the specified project\n assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()\n finally:\n # attempt to send audit event even if the cache invalidation raises\n notifications.Audit.updated(self._PROJECT, project_id, initiator)\n if original_project['is_domain']:\n notifications.Audit.updated(self._DOMAIN, project_id,\n initiator)\n # If the domain is being disabled, issue the disable\n # notification as well\n if original_project_enabled and not project_enabled:\n # NOTE(lbragstad): When a domain is disabled, we have to\n # invalidate the entire token cache. With persistent\n # tokens, we did something similar where all tokens for a\n # specific domain were deleted when that domain was\n # disabled. This effectively offers the same behavior for\n # non-persistent tokens by removing them from the cache and\n # requiring the authorization context to be rebuilt the\n # next time they're validated.\n token_provider.TOKENS_REGION.invalidate()\n notifications.Audit.disabled(self._DOMAIN, project_id,\n public=False)\n\n return ret\n\n def _only_allow_enabled_to_update_cascade(self, project, original_project):\n for attr in project:\n if attr != 'enabled':\n if project.get(attr) != original_project.get(attr):\n raise exception.ValidationError(\n message=_('Cascade update is only allowed for '\n 'enabled attribute.'))\n\n def _update_project_enabled_cascade(self, project_id, enabled):\n subtree = self.list_projects_in_subtree(project_id)\n # Update enabled only if different from original value\n subtree_to_update = [child for child in subtree\n if child['enabled'] != enabled]\n for child in subtree_to_update:\n child['enabled'] = enabled\n\n if not enabled:\n # Does not in fact disable the project, only emits a\n # notification that it was disabled. 
The actual disablement\n # is done in the next line.\n notifications.Audit.disabled(self._PROJECT, child['id'],\n public=False)\n\n self.driver.update_project(child['id'], child)\n\n def update_project(self, project_id, project, initiator=None,\n cascade=False):\n ret = self._update_project(project_id, project, initiator, cascade)\n if ret['is_domain']:\n self.get_domain.invalidate(self, project_id)\n self.get_domain_by_name.invalidate(self, ret['name'])\n\n return ret\n\n def _post_delete_cleanup_project(self, project_id, project,\n initiator=None):\n try:\n self.get_project.invalidate(self, project_id)\n self.get_project_by_name.invalidate(self, project['name'],\n project['domain_id'])\n PROVIDERS.assignment_api.delete_project_assignments(project_id)\n # Invalidate user role assignments cache region, as it may\n # be caching role assignments where the target is\n # the specified project\n assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()\n PROVIDERS.credential_api.delete_credentials_for_project(project_id)\n PROVIDERS.trust_api.delete_trusts_for_project(project_id)\n PROVIDERS.unified_limit_api.delete_limits_for_project(project_id)\n finally:\n # attempt to send audit event even if the cache invalidation raises\n notifications.Audit.deleted(self._PROJECT, project_id, initiator)\n\n def delete_project(self, project_id, initiator=None, cascade=False):\n \"\"\"Delete one project or a subtree.\n\n :param cascade: If true, the specified project and all its\n sub-projects are deleted. Otherwise, only the specified\n project is deleted.\n :type cascade: boolean\n :raises keystone.exception.ValidationError: if project is a domain\n :raises keystone.exception.Forbidden: if project is not a leaf\n \"\"\"\n project = self.driver.get_project(project_id)\n if project.get('is_domain'):\n self._delete_domain(project, initiator)\n else:\n self._delete_project(project, initiator, cascade)\n\n def _delete_project(self, project, initiator=None, cascade=False):\n project_id = project['id']\n if project['is_domain'] and project['enabled']:\n raise exception.ValidationError(\n message=_('cannot delete an enabled project acting as a '\n 'domain. Please disable the project %s first.')\n % project.get('id'))\n\n if not self.is_leaf_project(project_id) and not cascade:\n raise exception.ForbiddenNotSecurity(\n _('Cannot delete the project %s since it is not a leaf in the '\n 'hierarchy. Use the cascade option if you want to delete a '\n 'whole subtree.')\n % project_id)\n\n if cascade:\n # Getting reversed project's subtrees list, i.e. from the leaves\n # to the root, so we do not break parent_id FK.\n subtree_list = self.list_projects_in_subtree(project_id)\n subtree_list.reverse()\n if not self._check_whole_subtree_is_disabled(\n project_id, subtree_list=subtree_list):\n raise exception.ForbiddenNotSecurity(\n _('Cannot delete project %(project_id)s since its subtree '\n 'contains enabled projects.')\n % {'project_id': project_id})\n\n project_list = subtree_list + [project]\n projects_ids = [x['id'] for x in project_list]\n\n ret = self.driver.delete_projects_from_ids(projects_ids)\n for prj in project_list:\n self._post_delete_cleanup_project(prj['id'], prj, initiator)\n else:\n ret = self.driver.delete_project(project_id)\n self._post_delete_cleanup_project(project_id, project, initiator)\n\n reason = (\n 'The token cache is being invalidate because project '\n '%(project_id)s was deleted. 
Authorization will be recalculated '\n 'and enforced accordingly the next time users authenticate or '\n 'validate a token.' % {'project_id': project_id}\n )\n notifications.invalidate_token_cache_notification(reason)\n return ret\n\n def _filter_projects_list(self, projects_list, user_id):\n user_projects = PROVIDERS.assignment_api.list_projects_for_user(\n user_id\n )\n user_projects_ids = set([proj['id'] for proj in user_projects])\n # Keep only the projects present in user_projects\n return [proj for proj in projects_list\n if proj['id'] in user_projects_ids]\n\n def _assert_valid_project_id(self, project_id):\n if project_id is None:\n msg = _('Project field is required and cannot be empty.')\n raise exception.ValidationError(message=msg)\n # Check if project_id exists\n self.get_project(project_id)\n\n def _include_limits(self, projects):\n \"\"\"Modify a list of projects to include limit information.\n\n :param projects: a list of project references including an `id`\n :type projects: list of dictionaries\n \"\"\"\n for project in projects:\n hints = driver_hints.Hints()\n hints.add_filter('project_id', project['id'])\n limits = PROVIDERS.unified_limit_api.list_limits(hints)\n project['limits'] = limits\n\n def list_project_parents(self, project_id, user_id=None,\n include_limits=False):\n self._assert_valid_project_id(project_id)\n parents = self.driver.list_project_parents(project_id)\n # If a user_id was provided, the returned list should be filtered\n # against the projects this user has access to.\n if user_id:\n parents = self._filter_projects_list(parents, user_id)\n if include_limits:\n self._include_limits(parents)\n return parents\n\n def _build_parents_as_ids_dict(self, project, parents_by_id):\n # NOTE(rodrigods): we don't rely in the order of the projects returned\n # by the list_project_parents() method. Thus, we create a project cache\n # (parents_by_id) in order to access each parent in constant time and\n # traverse up the hierarchy.\n def traverse_parents_hierarchy(project):\n parent_id = project.get('parent_id')\n if not parent_id:\n return None\n\n parent = parents_by_id[parent_id]\n return {parent_id: traverse_parents_hierarchy(parent)}\n\n return traverse_parents_hierarchy(project)\n\n def get_project_parents_as_ids(self, project):\n \"\"\"Get the IDs from the parents from a given project.\n\n The project IDs are returned as a structured dictionary traversing up\n the hierarchy to the top level project. 
For example, considering the\n following project hierarchy::\n\n A\n |\n +-B-+\n | |\n C D\n\n If we query for project C parents, the expected return is the following\n dictionary::\n\n 'parents': {\n B['id']: {\n A['id']: None\n }\n }\n\n \"\"\"\n parents_list = self.list_project_parents(project['id'])\n parents_as_ids = self._build_parents_as_ids_dict(\n project, {proj['id']: proj for proj in parents_list})\n return parents_as_ids\n\n def list_projects_in_subtree(self, project_id, user_id=None,\n include_limits=False):\n self._assert_valid_project_id(project_id)\n subtree = self.driver.list_projects_in_subtree(project_id)\n # If a user_id was provided, the returned list should be filtered\n # against the projects this user has access to.\n if user_id:\n subtree = self._filter_projects_list(subtree, user_id)\n if include_limits:\n self._include_limits(subtree)\n return subtree\n\n def _build_subtree_as_ids_dict(self, project_id, subtree_by_parent):\n # NOTE(rodrigods): we perform a depth first search to construct the\n # dictionaries representing each level of the subtree hierarchy. In\n # order to improve this traversal performance, we create a cache of\n # projects (subtree_py_parent) that accesses in constant time the\n # direct children of a given project.\n def traverse_subtree_hierarchy(project_id):\n children = subtree_by_parent.get(project_id)\n if not children:\n return None\n\n children_ids = {}\n for child in children:\n children_ids[child['id']] = traverse_subtree_hierarchy(\n child['id'])\n return children_ids\n\n return traverse_subtree_hierarchy(project_id)\n\n def get_projects_in_subtree_as_ids(self, project_id):\n \"\"\"Get the IDs from the projects in the subtree from a given project.\n\n The project IDs are returned as a structured dictionary representing\n their hierarchy. 
For example, considering the following project\n hierarchy::\n\n A\n |\n +-B-+\n | |\n C D\n\n If we query for project A subtree, the expected return is the following\n dictionary::\n\n 'subtree': {\n B['id']: {\n C['id']: None,\n D['id']: None\n }\n }\n\n \"\"\"\n def _projects_indexed_by_parent(projects_list):\n projects_by_parent = {}\n for proj in projects_list:\n parent_id = proj.get('parent_id')\n if parent_id:\n if parent_id in projects_by_parent:\n projects_by_parent[parent_id].append(proj)\n else:\n projects_by_parent[parent_id] = [proj]\n return projects_by_parent\n\n subtree_list = self.list_projects_in_subtree(project_id)\n subtree_as_ids = self._build_subtree_as_ids_dict(\n project_id, _projects_indexed_by_parent(subtree_list))\n return subtree_as_ids\n\n def list_domains_from_ids(self, domain_ids):\n \"\"\"List domains for the provided list of ids.\n\n :param domain_ids: list of ids\n\n :returns: a list of domain_refs.\n\n This method is used internally by the assignment manager to bulk read\n a set of domains given their ids.\n\n \"\"\"\n # Retrieve the projects acting as domains get their correspondent\n # domains\n projects = self.list_projects_from_ids(domain_ids)\n domains = [self._get_domain_from_project(project)\n for project in projects]\n\n return domains\n\n @MEMOIZE\n def get_domain(self, domain_id):\n try:\n # Retrieve the corresponding project that acts as a domain\n project = self.driver.get_project(domain_id)\n # the DB backend might not operate in case sensitive mode,\n # therefore verify for exact match of IDs\n if domain_id != project['id']:\n raise exception.DomainNotFound(domain_id=domain_id)\n except exception.ProjectNotFound:\n raise exception.DomainNotFound(domain_id=domain_id)\n\n # Return its correspondent domain\n return self._get_domain_from_project(project)\n\n @MEMOIZE\n def get_domain_by_name(self, domain_name):\n try:\n # Retrieve the corresponding project that acts as a domain\n project = self.driver.get_project_by_name(domain_name,\n domain_id=None)\n except exception.ProjectNotFound:\n raise exception.DomainNotFound(domain_id=domain_name)\n\n # Return its correspondent domain\n return self._get_domain_from_project(project)\n\n def _get_domain_from_project(self, project_ref):\n \"\"\"Create a domain ref from a project ref.\n\n Based on the provided project ref, create a domain ref, so that the\n result can be returned in response to a domain API call.\n \"\"\"\n if not project_ref['is_domain']:\n LOG.error('Asked to convert a non-domain project into a '\n 'domain - Domain: %(domain_id)s, Project ID: '\n '%(id)s, Project Name: %(project_name)s',\n {'domain_id': project_ref['domain_id'],\n 'id': project_ref['id'],\n 'project_name': project_ref['name']})\n raise exception.DomainNotFound(domain_id=project_ref['id'])\n\n domain_ref = project_ref.copy()\n # As well as the project specific attributes that we need to remove,\n # there is an old compatibility issue in that update project (as well\n # as extracting an extra attributes), also includes a copy of the\n # actual extra dict as well - something that update domain does not do.\n for k in ['parent_id', 'domain_id', 'is_domain', 'extra']:\n domain_ref.pop(k, None)\n\n return domain_ref\n\n def create_domain(self, domain_id, domain, initiator=None):\n if (CONF.resource.domain_name_url_safe != 'off' and\n utils.is_not_url_safe(domain['name'])):\n self._raise_reserved_character_exception('Domain', domain['name'])\n project_from_domain = base.get_project_from_domain(domain)\n is_domain_project = 
self.create_project(\n domain_id, project_from_domain, initiator)\n\n return self._get_domain_from_project(is_domain_project)\n\n @manager.response_truncated\n def list_domains(self, hints=None):\n projects = self.list_projects_acting_as_domain(hints)\n domains = [self._get_domain_from_project(project)\n for project in projects]\n return domains\n\n def update_domain(self, domain_id, domain, initiator=None):\n # TODO(henry-nash): We shouldn't have to check for the federated domain\n # here as well as _update_project, but currently our tests assume the\n # checks are done in a specific order. The tests should be refactored.\n self.assert_domain_not_federated(domain_id, domain)\n project = base.get_project_from_domain(domain)\n try:\n original_domain = self.driver.get_project(domain_id)\n project = self._update_project(domain_id, project, initiator)\n except exception.ProjectNotFound:\n raise exception.DomainNotFound(domain_id=domain_id)\n\n domain_from_project = self._get_domain_from_project(project)\n self.get_domain.invalidate(self, domain_id)\n self.get_domain_by_name.invalidate(self, original_domain['name'])\n\n return domain_from_project\n\n def delete_domain(self, domain_id, initiator=None):\n # Use the driver directly to get the project that acts as a domain and\n # prevent using old cached value.\n try:\n domain = self.driver.get_project(domain_id)\n except exception.ProjectNotFound:\n raise exception.DomainNotFound(domain_id=domain_id)\n self._delete_domain(domain, initiator)\n\n def _delete_domain(self, domain, initiator=None):\n # To help avoid inadvertent deletes, we insist that the domain\n # has been previously disabled. This also prevents a user deleting\n # their own domain since, once it is disabled, they won't be able\n # to get a valid token to issue this delete.\n if domain['enabled']:\n raise exception.ForbiddenNotSecurity(\n _('Cannot delete a domain that is enabled, please disable it '\n 'first.'))\n\n domain_id = domain['id']\n self._delete_domain_contents(domain_id)\n notifications.Audit.internal(\n notifications.DOMAIN_DELETED, domain_id\n )\n self._delete_project(domain, initiator)\n try:\n self.get_domain.invalidate(self, domain_id)\n self.get_domain_by_name.invalidate(self, domain['name'])\n # Delete any database stored domain config\n PROVIDERS.domain_config_api.delete_config_options(domain_id)\n PROVIDERS.domain_config_api.release_registration(domain_id)\n finally:\n # attempt to send audit event even if the cache invalidation raises\n notifications.Audit.deleted(self._DOMAIN, domain_id, initiator)\n\n def _delete_domain_contents(self, domain_id):\n \"\"\"Delete the contents of a domain.\n\n Before we delete a domain, we need to remove all the entities\n that are owned by it, i.e. Projects. 
To do this we\n call the delete function for these entities, which are\n themselves responsible for deleting any credentials and role grants\n associated with them as well as revoking any relevant tokens.\n\n \"\"\"\n def _delete_projects(project, projects, examined):\n if project['id'] in examined:\n msg = ('Circular reference or a repeated entry found '\n 'projects hierarchy - %(project_id)s.')\n LOG.error(msg, {'project_id': project['id']})\n return\n\n examined.add(project['id'])\n children = [proj for proj in projects\n if proj.get('parent_id') == project['id']]\n for proj in children:\n _delete_projects(proj, projects, examined)\n\n try:\n self._delete_project(project, initiator=None)\n except exception.ProjectNotFound:\n LOG.debug(('Project %(projectid)s not found when '\n 'deleting domain contents for %(domainid)s, '\n 'continuing with cleanup.'),\n {'projectid': project['id'],\n 'domainid': domain_id})\n\n proj_refs = self.list_projects_in_domain(domain_id)\n\n # Deleting projects recursively\n roots = [x for x in proj_refs if x.get('parent_id') == domain_id]\n examined = set()\n for project in roots:\n _delete_projects(project, proj_refs, examined)\n\n @manager.response_truncated\n def list_projects(self, hints=None):\n if hints:\n tag_filters = {}\n # Handle project tag filters separately\n for f in list(hints.filters):\n if f['name'] in TAG_SEARCH_FILTERS:\n tag_filters[f['name']] = f['value']\n hints.filters.remove(f)\n if tag_filters:\n tag_refs = self.driver.list_projects_by_tags(tag_filters)\n project_refs = self.driver.list_projects(hints)\n ref_ids = [ref['id'] for ref in tag_refs]\n return [ref for ref in project_refs if ref['id'] in ref_ids]\n return self.driver.list_projects(hints or driver_hints.Hints())\n\n # NOTE(henry-nash): list_projects_in_domain is actually an internal method\n # and not exposed via the API. 
Therefore there is no need to support\n # driver hints for it.\n def list_projects_in_domain(self, domain_id):\n return self.driver.list_projects_in_domain(domain_id)\n\n def list_projects_acting_as_domain(self, hints=None):\n return self.driver.list_projects_acting_as_domain(\n hints or driver_hints.Hints())\n\n @MEMOIZE\n def get_project(self, project_id):\n return self.driver.get_project(project_id)\n\n @MEMOIZE\n def get_project_by_name(self, project_name, domain_id):\n return self.driver.get_project_by_name(project_name, domain_id)\n\n def _require_matching_domain_id(self, new_ref, orig_ref):\n \"\"\"Ensure the current domain ID matches the reference one, if any.\n\n Provided we want domain IDs to be immutable, check whether any\n domain_id specified in the ref dictionary matches the existing\n domain_id for this entity.\n\n :param new_ref: the dictionary of new values proposed for this entity\n :param orig_ref: the dictionary of original values proposed for this\n entity\n :raises: :class:`keystone.exception.ValidationError`\n \"\"\"\n if 'domain_id' in new_ref:\n if new_ref['domain_id'] != orig_ref['domain_id']:\n raise exception.ValidationError(_('Cannot change Domain ID'))\n\n def create_project_tag(self, project_id, tag, initiator=None):\n \"\"\"Create a new tag on project.\n\n :param project_id: ID of a project to create a tag for\n :param tag: The string value of a tag to add\n\n :returns: The value of the created tag\n \"\"\"\n project = self.driver.get_project(project_id)\n tag_name = tag.strip()\n project['tags'].append(tag_name)\n self.update_project(project_id, {'tags': project['tags']})\n\n notifications.Audit.created(\n self._PROJECT_TAG, tag_name, initiator)\n return tag_name\n\n def get_project_tag(self, project_id, tag_name):\n \"\"\"Return information for a single tag on a project.\n\n :param project_id: ID of a project to retrive a tag from\n :param tag_name: Name of a tag to return\n\n :raises keystone.exception.ProjectTagNotFound: If the tag name\n does not exist on the project\n :returns: The tag value\n \"\"\"\n project = self.driver.get_project(project_id)\n if tag_name not in project.get('tags'):\n raise exception.ProjectTagNotFound(project_tag=tag_name)\n return tag_name\n\n def list_project_tags(self, project_id):\n \"\"\"List all tags on project.\n\n :param project_id: The ID of a project\n\n :returns: A list of tags from a project\n \"\"\"\n project = self.driver.get_project(project_id)\n return project.get('tags', [])\n\n def update_project_tags(self, project_id, tags, initiator=None):\n \"\"\"Update all tags on a project.\n\n :param project_id: The ID of the project to update\n :param tags: A list of tags to update on the project\n\n :returns: A list of tags\n \"\"\"\n self.driver.get_project(project_id)\n tag_list = [t.strip() for t in tags]\n project = {'tags': tag_list}\n self.update_project(project_id, project)\n return tag_list\n\n def delete_project_tag(self, project_id, tag):\n \"\"\"Delete single tag from project.\n\n :param project_id: The ID of the project\n :param tag: The tag value to delete\n\n :raises keystone.exception.ProjectTagNotFound: If the tag name\n does not exist on the project\n \"\"\"\n project = self.driver.get_project(project_id)\n try:\n project['tags'].remove(tag)\n except ValueError:\n raise exception.ProjectTagNotFound(project_tag=tag)\n self.update_project(project_id, project)\n notifications.Audit.deleted(self._PROJECT_TAG, tag)\n\n def check_project_depth(self, max_depth=None):\n \"\"\"Check project depth whether greater 
than input or not.\"\"\"\n if max_depth:\n exceeded_project_ids = self.driver.check_project_depth(max_depth)\n if exceeded_project_ids:\n raise exception.LimitTreeExceedError(exceeded_project_ids,\n max_depth)\n\n\nMEMOIZE_CONFIG = cache.get_memoization_decorator(group='domain_config')\n\n\nclass DomainConfigManager(manager.Manager):\n \"\"\"Default pivot point for the Domain Config backend.\"\"\"\n\n # NOTE(henry-nash): In order for a config option to be stored in the\n # standard table, it must be explicitly whitelisted. Options marked as\n # sensitive are stored in a separate table. Attempting to store options\n # that are not listed as either whitelisted or sensitive will raise an\n # exception.\n #\n # Only those options that affect the domain-specific driver support in\n # the identity manager are supported.\n\n driver_namespace = 'keystone.resource.domain_config'\n _provides_api = 'domain_config_api'\n\n # We explicitly state each whitelisted option instead of pulling all ldap\n # options from CONF and selectively pruning them to prevent a security\n # lapse. That way if a new ldap CONF key/value were to be added it wouldn't\n # automatically be added to the whitelisted options unless that is what was\n # intended. In which case, we explicitly add it to the list ourselves.\n whitelisted_options = {\n 'identity': ['driver', 'list_limit'],\n 'ldap': [\n 'url', 'user', 'suffix', 'query_scope', 'page_size',\n 'alias_dereferencing', 'debug_level', 'chase_referrals',\n 'user_tree_dn', 'user_filter', 'user_objectclass',\n 'user_id_attribute', 'user_name_attribute', 'user_mail_attribute',\n 'user_description_attribute', 'user_pass_attribute',\n 'user_enabled_attribute', 'user_enabled_invert',\n 'user_enabled_mask', 'user_enabled_default',\n 'user_attribute_ignore', 'user_default_project_id_attribute',\n 'user_enabled_emulation', 'user_enabled_emulation_dn',\n 'user_enabled_emulation_use_group_config',\n 'user_additional_attribute_mapping', 'group_tree_dn',\n 'group_filter', 'group_objectclass', 'group_id_attribute',\n 'group_name_attribute', 'group_members_are_ids',\n 'group_member_attribute', 'group_desc_attribute',\n 'group_attribute_ignore', 'group_additional_attribute_mapping',\n 'tls_cacertfile', 'tls_cacertdir', 'use_tls', 'tls_req_cert',\n 'use_pool', 'pool_size', 'pool_retry_max', 'pool_retry_delay',\n 'pool_connection_timeout', 'pool_connection_lifetime',\n 'use_auth_pool', 'auth_pool_size', 'auth_pool_connection_lifetime'\n ]\n }\n sensitive_options = {\n 'identity': [],\n 'ldap': ['password']\n }\n\n def __init__(self):\n super(DomainConfigManager, self).__init__(CONF.domain_config.driver)\n\n def _assert_valid_config(self, config):\n \"\"\"Ensure the options in the config are valid.\n\n This method is called to validate the request config in create and\n update manager calls.\n\n :param config: config structure being created or updated\n\n \"\"\"\n # Something must be defined in the request\n if not config:\n raise exception.InvalidDomainConfig(\n reason=_('No options specified'))\n\n # Make sure the groups/options defined in config itself are valid\n for group in config:\n if (not config[group] or not\n isinstance(config[group], dict)):\n msg = _('The value of group %(group)s specified in the '\n 'config should be a dictionary of options') % {\n 'group': group}\n raise exception.InvalidDomainConfig(reason=msg)\n for option in config[group]:\n self._assert_valid_group_and_option(group, option)\n\n def _assert_valid_group_and_option(self, group, option):\n \"\"\"Ensure the 
combination of group and option is valid.\n\n :param group: optional group name, if specified it must be one\n we support\n :param option: optional option name, if specified it must be one\n we support and a group must also be specified\n\n \"\"\"\n if not group and not option:\n # For all calls, it's OK for neither to be defined, it means you\n # are operating on all config options for that domain.\n return\n\n if not group and option:\n # Our API structure should prevent this from ever happening, so if\n # it does, then this is coding error.\n msg = _('Option %(option)s found with no group specified while '\n 'checking domain configuration request') % {\n 'option': option}\n raise exception.UnexpectedError(exception=msg)\n\n if (group and group not in self.whitelisted_options and\n group not in self.sensitive_options):\n msg = _('Group %(group)s is not supported '\n 'for domain specific configurations') % {'group': group}\n raise exception.InvalidDomainConfig(reason=msg)\n\n if option:\n if (option not in self.whitelisted_options[group] and option not in\n self.sensitive_options[group]):\n msg = _('Option %(option)s in group %(group)s is not '\n 'supported for domain specific configurations') % {\n 'group': group, 'option': option}\n raise exception.InvalidDomainConfig(reason=msg)\n\n def _is_sensitive(self, group, option):\n return option in self.sensitive_options[group]\n\n def _config_to_list(self, config):\n \"\"\"Build list of options for use by backend drivers.\"\"\"\n option_list = []\n for group in config:\n for option in config[group]:\n option_list.append({\n 'group': group, 'option': option,\n 'value': config[group][option],\n 'sensitive': self._is_sensitive(group, option)})\n\n return option_list\n\n def _option_dict(self, group, option):\n group_attr = getattr(CONF, group)\n return {'group': group, 'option': option,\n 'value': getattr(group_attr, option)}\n\n def _list_to_config(self, whitelisted, sensitive=None, req_option=None):\n \"\"\"Build config dict from a list of option dicts.\n\n :param whitelisted: list of dicts containing options and their groups,\n this has already been filtered to only contain\n those options to include in the output.\n :param sensitive: list of dicts containing sensitive options and their\n groups, this has already been filtered to only\n contain those options to include in the output.\n :param req_option: the individual option requested\n\n :returns: a config dict, including sensitive if specified\n\n \"\"\"\n the_list = whitelisted + (sensitive or [])\n if not the_list:\n return {}\n\n if req_option:\n # The request was specific to an individual option, so\n # no need to include the group in the output. 
We first check that\n # there is only one option in the answer (and that it's the right\n # one) - if not, something has gone wrong and we raise an error\n if len(the_list) > 1 or the_list[0]['option'] != req_option:\n LOG.error('Unexpected results in response for domain '\n 'config - %(count)s responses, first option is '\n '%(option)s, expected option %(expected)s',\n {'count': len(the_list), 'option': list[0]['option'],\n 'expected': req_option})\n raise exception.UnexpectedError(\n _('An unexpected error occurred when retrieving domain '\n 'configs'))\n return {the_list[0]['option']: the_list[0]['value']}\n\n config = {}\n for option in the_list:\n config.setdefault(option['group'], {})\n config[option['group']][option['option']] = option['value']\n\n return config\n\n def create_config(self, domain_id, config):\n \"\"\"Create config for a domain.\n\n :param domain_id: the domain in question\n :param config: the dict of config groups/options to assign to the\n domain\n\n Creates a new config, overwriting any previous config (no Conflict\n error will be generated).\n\n :returns: a dict of group dicts containing the options, with any that\n are sensitive removed\n :raises keystone.exception.InvalidDomainConfig: when the config\n contains options we do not support\n\n \"\"\"\n self._assert_valid_config(config)\n option_list = self._config_to_list(config)\n self.create_config_options(domain_id, option_list)\n # Since we are caching on the full substituted config, we just\n # invalidate here, rather than try and create the right result to\n # cache.\n self.get_config_with_sensitive_info.invalidate(self, domain_id)\n return self._list_to_config(self.list_config_options(domain_id))\n\n def get_config(self, domain_id, group=None, option=None):\n \"\"\"Get config, or partial config, for a domain.\n\n :param domain_id: the domain in question\n :param group: an optional specific group of options\n :param option: an optional specific option within the group\n\n :returns: a dict of group dicts containing the whitelisted options,\n filtered by group and option specified\n :raises keystone.exception.DomainConfigNotFound: when no config found\n that matches domain_id, group and option specified\n :raises keystone.exception.InvalidDomainConfig: when the config\n and group/option parameters specify an option we do not\n support\n\n An example response::\n\n {\n 'ldap': {\n 'url': 'myurl'\n 'user_tree_dn': 'OU=myou'},\n 'identity': {\n 'driver': 'ldap'}\n\n }\n\n \"\"\"\n self._assert_valid_group_and_option(group, option)\n whitelisted = self.list_config_options(domain_id, group, option)\n if whitelisted:\n return self._list_to_config(whitelisted, req_option=option)\n\n if option:\n msg = _('option %(option)s in group %(group)s') % {\n 'group': group, 'option': option}\n elif group:\n msg = _('group %(group)s') % {'group': group}\n else:\n msg = _('any options')\n raise exception.DomainConfigNotFound(\n domain_id=domain_id, group_or_option=msg)\n\n def get_security_compliance_config(self, domain_id, group, option=None):\n r\"\"\"Get full or partial security compliance config from configuration.\n\n :param domain_id: the domain in question\n :param group: a specific group of options\n :param option: an optional specific option within the group\n\n :returns: a dict of group dicts containing the whitelisted options,\n filtered by group and option specified\n :raises keystone.exception.InvalidDomainConfig: when the config\n and group/option parameters specify an option we do not\n support\n\n An example 
response::\n\n {\n 'security_compliance': {\n 'password_regex': '^(?=.*\\d)(?=.*[a-zA-Z]).{7,}$'\n 'password_regex_description':\n 'A password must consist of at least 1 letter, '\n '1 digit, and have a minimum length of 7 characters'\n }\n }\n\n \"\"\"\n if domain_id != CONF.identity.default_domain_id:\n msg = _('Reading security compliance information for any domain '\n 'other than the default domain is not allowed or '\n 'supported.')\n raise exception.InvalidDomainConfig(reason=msg)\n\n config_list = []\n readable_options = ['password_regex', 'password_regex_description']\n if option and option not in readable_options:\n msg = _('Reading security compliance values other than '\n 'password_regex and password_regex_description is not '\n 'allowed.')\n raise exception.InvalidDomainConfig(reason=msg)\n elif option and option in readable_options:\n config_list.append(self._option_dict(group, option))\n elif not option:\n for op in readable_options:\n config_list.append(self._option_dict(group, op))\n # We already validated that the group is the security_compliance group\n # so we can move along and start validating the options\n return self._list_to_config(config_list, req_option=option)\n\n def update_config(self, domain_id, config, group=None, option=None):\n \"\"\"Update config, or partial config, for a domain.\n\n :param domain_id: the domain in question\n :param config: the config dict containing and groups/options being\n updated\n :param group: an optional specific group of options, which if specified\n must appear in config, with no other groups\n :param option: an optional specific option within the group, which if\n specified must appear in config, with no other options\n\n The contents of the supplied config will be merged with the existing\n config for this domain, updating or creating new options if these did\n not previously exist. If group or option is specified, then the update\n will be limited to those specified items and the inclusion of other\n options in the supplied config will raise an exception, as will the\n situation when those options do not already exist in the current\n config.\n\n :returns: a dict of groups containing all whitelisted options\n :raises keystone.exception.InvalidDomainConfig: when the config\n and group/option parameters specify an option we do not\n support or one that does not exist in the original config\n\n \"\"\"\n def _assert_valid_update(domain_id, config, group=None, option=None):\n \"\"\"Ensure the combination of config, group and option is valid.\"\"\"\n self._assert_valid_config(config)\n self._assert_valid_group_and_option(group, option)\n\n # If a group has been specified, then the request is to\n # explicitly only update the options in that group - so the config\n # must not contain anything else. Further, that group must exist in\n # the original config. 
Likewise, if an option has been specified,\n # then the group in the config must only contain that option and it\n # also must exist in the original config.\n if group:\n if len(config) != 1 or (option and len(config[group]) != 1):\n if option:\n msg = _('Trying to update option %(option)s in group '\n '%(group)s, so that, and only that, option '\n 'must be specified in the config') % {\n 'group': group, 'option': option}\n else:\n msg = _('Trying to update group %(group)s, so that, '\n 'and only that, group must be specified in '\n 'the config') % {'group': group}\n raise exception.InvalidDomainConfig(reason=msg)\n\n # So we now know we have the right number of entries in the\n # config that align with a group/option being specified, but we\n # must also make sure they match.\n if group not in config:\n msg = _('request to update group %(group)s, but config '\n 'provided contains group %(group_other)s '\n 'instead') % {\n 'group': group,\n 'group_other': list(config.keys())[0]}\n raise exception.InvalidDomainConfig(reason=msg)\n if option and option not in config[group]:\n msg = _('Trying to update option %(option)s in group '\n '%(group)s, but config provided contains option '\n '%(option_other)s instead') % {\n 'group': group, 'option': option,\n 'option_other': list(config[group].keys())[0]}\n raise exception.InvalidDomainConfig(reason=msg)\n\n # Finally, we need to check if the group/option specified\n # already exists in the original config - since if not, to keep\n # with the semantics of an update, we need to fail with\n # a DomainConfigNotFound\n if not self._get_config_with_sensitive_info(domain_id,\n group, option):\n if option:\n msg = _('option %(option)s in group %(group)s') % {\n 'group': group, 'option': option}\n raise exception.DomainConfigNotFound(\n domain_id=domain_id, group_or_option=msg)\n else:\n msg = _('group %(group)s') % {'group': group}\n raise exception.DomainConfigNotFound(\n domain_id=domain_id, group_or_option=msg)\n\n update_config = config\n if group and option:\n # The config will just be a dict containing the option and\n # its value, so make it look like a single option under the\n # group in question\n update_config = {group: config}\n\n _assert_valid_update(domain_id, update_config, group, option)\n\n option_list = self._config_to_list(update_config)\n self.update_config_options(domain_id, option_list)\n\n self.get_config_with_sensitive_info.invalidate(self, domain_id)\n return self.get_config(domain_id)\n\n def delete_config(self, domain_id, group=None, option=None):\n \"\"\"Delete config, or partial config, for the domain.\n\n :param domain_id: the domain in question\n :param group: an optional specific group of options\n :param option: an optional specific option within the group\n\n If group and option are None, then the entire config for the domain\n is deleted. If group is not None, then just that group of options will\n be deleted. 
If group and option are both specified, then just that\n option is deleted.\n\n :raises keystone.exception.InvalidDomainConfig: when group/option\n parameters specify an option we do not support or one that\n does not exist in the original config.\n\n \"\"\"\n self._assert_valid_group_and_option(group, option)\n if group:\n # As this is a partial delete, then make sure the items requested\n # are valid and exist in the current config\n current_config = self._get_config_with_sensitive_info(domain_id)\n # Raise an exception if the group/options specified don't exist in\n # the current config so that the delete method provides the\n # correct error semantics.\n current_group = current_config.get(group)\n if not current_group:\n msg = _('group %(group)s') % {'group': group}\n raise exception.DomainConfigNotFound(\n domain_id=domain_id, group_or_option=msg)\n if option and not current_group.get(option):\n msg = _('option %(option)s in group %(group)s') % {\n 'group': group, 'option': option}\n raise exception.DomainConfigNotFound(\n domain_id=domain_id, group_or_option=msg)\n\n self.delete_config_options(domain_id, group, option)\n self.get_config_with_sensitive_info.invalidate(self, domain_id)\n\n def _get_config_with_sensitive_info(self, domain_id, group=None,\n option=None):\n \"\"\"Get config for a domain/group/option with sensitive info included.\n\n This is only used by the methods within this class, which may need to\n check individual groups or options.\n\n \"\"\"\n whitelisted = self.list_config_options(domain_id, group, option)\n sensitive = self.list_config_options(domain_id, group, option,\n sensitive=True)\n\n # Check if there are any sensitive substitutions needed. We first try\n # and simply ensure any sensitive options that have valid substitution\n # references in the whitelisted options are substituted. We then check\n # the resulting whitelisted option and raise a warning if there\n # appears to be an unmatched or incorrectly constructed substitution\n # reference. To avoid the risk of logging any sensitive options that\n # have already been substituted, we first take a copy of the\n # whitelisted option.\n\n # Build a dict of the sensitive options ready to try substitution\n sensitive_dict = {s['option']: s['value'] for s in sensitive}\n\n for each_whitelisted in whitelisted:\n if not isinstance(each_whitelisted['value'], six.string_types):\n # We only support substitutions into string types, if its an\n # integer, list etc. then just continue onto the next one\n continue\n\n # Store away the original value in case we need to raise a warning\n # after substitution.\n original_value = each_whitelisted['value']\n warning_msg = ''\n try:\n each_whitelisted['value'] = (\n each_whitelisted['value'] % sensitive_dict)\n except KeyError:\n warning_msg = (\n 'Found what looks like an unmatched config option '\n 'substitution reference - domain: %(domain)s, group: '\n '%(group)s, option: %(option)s, value: %(value)s. 
Perhaps '\n 'the config option to which it refers has yet to be '\n 'added?')\n except (ValueError, TypeError):\n warning_msg = (\n 'Found what looks like an incorrectly constructed '\n 'config option substitution reference - domain: '\n '%(domain)s, group: %(group)s, option: %(option)s, '\n 'value: %(value)s.')\n\n if warning_msg:\n LOG.warning(warning_msg, {\n 'domain': domain_id,\n 'group': each_whitelisted['group'],\n 'option': each_whitelisted['option'],\n 'value': original_value})\n\n return self._list_to_config(whitelisted, sensitive)\n\n @MEMOIZE_CONFIG\n def get_config_with_sensitive_info(self, domain_id):\n \"\"\"Get config for a domain with sensitive info included.\n\n This method is not exposed via the public API, but is used by the\n identity manager to initialize a domain with the fully formed config\n options.\n\n \"\"\"\n return self._get_config_with_sensitive_info(domain_id)\n\n def get_config_default(self, group=None, option=None):\n \"\"\"Get default config, or partial default config.\n\n :param group: an optional specific group of options\n :param option: an optional specific option within the group\n\n :returns: a dict of group dicts containing the default options,\n filtered by group and option if specified\n :raises keystone.exception.InvalidDomainConfig: when the config\n and group/option parameters specify an option we do not\n support (or one that is not whitelisted).\n\n An example response::\n\n {\n 'ldap': {\n 'url': 'myurl',\n 'user_tree_dn': 'OU=myou',\n ....},\n 'identity': {\n 'driver': 'ldap'}\n\n }\n\n \"\"\"\n self._assert_valid_group_and_option(group, option)\n config_list = []\n if group:\n if option:\n if option not in self.whitelisted_options[group]:\n msg = _('Reading the default for option %(option)s in '\n 'group %(group)s is not supported') % {\n 'option': option, 'group': group}\n raise exception.InvalidDomainConfig(reason=msg)\n config_list.append(self._option_dict(group, option))\n else:\n for each_option in self.whitelisted_options[group]:\n config_list.append(self._option_dict(group, each_option))\n else:\n for each_group in self.whitelisted_options:\n for each_option in self.whitelisted_options[each_group]:\n config_list.append(\n self._option_dict(each_group, each_option)\n )\n\n return self._list_to_config(config_list, req_option=option)\n"}}},{"rowIdx":542618,"cells":{"filename":{"kind":"string","value":"the-stack_106_30922"},"text":{"kind":"string","value":"\"\"\"\nThis module lets you practice one form of the ACCUMULATOR pattern,\nnamely, the \"IN GRAPHICS\" form which features:\n -- DRAWING OBJECTS via ACCUMULATING positions and/or sizes,\n as in: x = x + pixels\n\nAdditionally, it emphasizes that you must\n ** DO A CONCRETE EXAMPLE BY HAND **\nbefore you can implement a solution to the problem in Python. \n \nAuthors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays,\n Aaron Wilkin, their colleagues, and Eddie Mannan.\n\"\"\" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.\n\nimport rosegraphics as rg\n\n\n# -----------------------------------------------------------------------------\n# Students: As you work each of these problems, ask yourself:\n# 1. Do I need a loop?\n# If so, HOW MANY LOOPS?\n#\n# 2. Where I need a loop, what needs to happen:\n# -- BEFORE the loop?\n# -- IN the loop?\n# -- AFTER the loop?\n# -----------------------------------------------------------------------------\ndef main():\n \"\"\" Calls the TEST functions in this module. 
\"\"\"\n run_test_draw_squares_from_circle()\n run_test_draw_circles_from_rectangle()\n run_test_draw_lines_from_rectangles()\n\n\ndef run_test_draw_squares_from_circle():\n \"\"\" Tests the draw_squares_from_circle function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the draw_squares_from_circle function:')\n print(' See the graphics windows that pop up.')\n print('--------------------------------------------------')\n\n # -------------------------------------------------------------------------\n # TWO tests on ONE window.\n # -------------------------------------------------------------------------\n title = 'Tests 1 and 2 of DRAW_SQUARES_FROM_CIRCLE: '\n title = title + ' 7 little squares from green circle, 4 big squares'\n window1 = rg.RoseWindow(650, 350, title)\n\n # Test 1:\n circle = rg.Circle(rg.Point(100, 100), 20)\n circle.fill_color = 'green'\n draw_squares_from_circle(7, circle, window1)\n\n # Test 2:\n circle = rg.Circle(rg.Point(350, 70), 50)\n draw_squares_from_circle(4, circle, window1)\n window1.close_on_mouse_click()\n\n # -------------------------------------------------------------------------\n # A third test on ANOTHER window.\n # -------------------------------------------------------------------------\n title = 'Test 3 of DRAW_SQUARES_FROM_CIRCLE: '\n title += ' 20 teeny squares from blue circle!'\n window2 = rg.RoseWindow(525, 300, title)\n\n # Test 3:\n circle = rg.Circle(rg.Point(50, 50), 10)\n circle.fill_color = 'blue'\n draw_squares_from_circle(20, circle, window2)\n\n window2.close_on_mouse_click()\n\n\ndef draw_squares_from_circle(n, circle, window):\n \"\"\"\n What comes in: Three arguments:\n -- A positive integer n.\n -- An rg.Circle.\n -- An rg.RoseWindow.\n What goes out: Nothing (i.e., None).\n Side effects:\n See draw_squares_from_circle.pdf in this project for pictures\n that may help you better understand the following specification:\n\n First draws the given rg.Circle on the given rg.RoseWindow.\n Then draws n rg.Squares on the given rg.RoseWindow, such that:\n -- The first rg.Square circumscribes the given rg.Circle.\n -- Each subsequent rg.Square has its upper-left quarter\n on top of the lower-right quarter of the previous rg.Square,\n so that the squares form an overlapping sequence\n that goes down and to the right.\n Must ** render ** but ** NOT close ** the window.\n\n Type hints:\n :type n: int\n :type circle: rg.Circle\n :type window: rg.RoseWindow\n \"\"\"\n circle.attach_to(window)\n square = rg.Square(circle.center, (circle.radius * 2))\n square.attach_to(window)\n for k in range(n - 1):\n square = rg.Square(rg.Point(square.center.x + circle.radius, square.center.y + circle.radius), (circle.radius * 2))\n square.attach_to(window)\n window.render()\n # -------------------------------------------------------------------------\n # DONE: 2. Implement and test this function.\n # Tests have been written for you (above).\n #\n # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,\n # as in draw_row_of_circles in m1e,\n # instead of directly using the loop variable.\n #\n ###########################################################################\n # HINT: To figure out the code that computes the necessary\n # positions of each square,\n # ** FIRST DO A CONCRETE EXAMPLE BY HAND! 
**\n ###########################################################################\n # -------------------------------------------------------------------------\n\n\ndef run_test_draw_circles_from_rectangle():\n \"\"\" Tests the draw_circles_from_rectangle function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the draw_circles_from_rectangle function:')\n print(' See the graphics windows that pop up.')\n print('--------------------------------------------------')\n\n title = 'Tests 1 and 2 of CIRCLES_FROM_RECTANGLE: '\n title = title + ' 4 Green Circles to Left, 5 Circles Up, 8 Blue Circles to Left, 3 Circles Up'\n window1 = rg.RoseWindow(650, 350, title)\n\n # Test 1:\n rectangle = rg.Rectangle(rg.Point(200, 200), rg.Point(250, 250))\n rectangle.fill_color = 'green'\n draw_circles_from_rectangle(4, 5, rectangle, window1)\n\n # Test 2:\n rectangle = rg.Rectangle(rg.Point(350, 350), rg.Point(370, 370))\n rectangle.fill_color = 'blue'\n draw_circles_from_rectangle(8, 3, rectangle, window1)\n window1.close_on_mouse_click()\n\n # -------------------------------------------------------------------------\n # A third test on ANOTHER window.\n # -------------------------------------------------------------------------\n title = 'Test 3 of CIRCLES_FROM-RECTANGLE: '\n title += ' 6 Yellow Circles Left, 10 Circles Up'\n window2 = rg.RoseWindow(525, 300, title)\n\n # Test 3:\n rectangle = rg.Rectangle(rg.Point(200, 200), rg.Point(250, 250))\n rectangle.fill_color = 'yellow'\n draw_circles_from_rectangle(6, 10, rectangle, window2)\n\n window2.close_on_mouse_click()\n # -------------------------------------------------------------------------\n # DONE: 3. Implement this TEST function.\n # It TESTS the draw_circles_from_rectangle function\n # defined below. Include at least ** 3 ** tests, of which\n # *** at least TWO tests are on ONE window and\n # *** at least ONE test is on a DIFFERENT window.\n #\n ###########################################################################\n # HINT: Consider using the same test cases as suggested by the\n # pictures in draw_circles_from_rectangle.pdf in this project.\n # Follow the same form as the example in a previous problem.\n ###########################################################################\n # -------------------------------------------------------------------------\n\n\ndef draw_circles_from_rectangle(m, n, rectangle, window):\n \"\"\"\n What comes in: Four arguments:\n -- Positive integers m and n.\n -- An rg.Rectangle.\n -- An rg.RoseWindow.\n What goes out: Nothing (i.e., None).\n Side effects:\n See draw_circles_from_rectangle.pdf in this project for pictures\n that may help you better understand the following specification:\n\n First draws the given rg.Rectangle on the given rg.RoseWindow.\n Then draws m rg.Circles on the given rg.RoseWindow, such that:\n -- The diameter of each rg.Circle is the same as the height\n of the given rg.Rectangle.\n -- The first rg.Circle is immediately to the left of the\n given rg.Rectangle\n -- Each subsequent rg.Circle is immediately to the left\n of the previous rg.Circle, so that the circles form a row\n that goes to the left.\n -- Each rg. 
Circle has the same fill_color as the given\n rg.Rectangle (and has no outline_color).\n Then draws n rg.Circles on the given RoseWindow, such that:\n -- The diameter of each rg.Circle is the same as the width\n of the given rg.Rectangle.\n -- The first rg.Circle is immediately above the\n given rg.Rectangle\n -- Each subsequent rg.Circle is immediately above the previous\n rg.Circle, so that the circles form a column that goes up.\n -- Each rg.Circle has the same outline_color as the given\n rg.Rectangle (and has no fill_color).\n Must ** render ** but ** NOT close ** the window.\n\n Type hints:\n :type m: int\n :type n: int\n :type rectangle: rg.Rectangle\n :type window: rg.RoseWindow\n \"\"\"\n rectangle.attach_to(window)\n circle1 = rg.Circle(rg.Point(rectangle.get_center().x, rectangle.corner_1.y - ((rectangle.get_width())/2)), (rectangle.get_width()/2))\n circle1.attach_to(window)\n for k in range(n - 1):\n circle1 = rg.Circle(rg.Point(circle1.center.x, (circle1.center.y - (circle1.radius * 2))), circle1.radius)\n circle1.attach_to(window)\n radius = rectangle.get_height()/2\n center = rg.Point(rectangle.get_center().x - (radius * 2), rectangle.get_center().y)\n circle2 = rg.Circle(center, radius)\n circle2.fill_color = 'green'\n circle2.attach_to(window)\n for k in range(m - 1):\n circle2 = rg.Circle(rg.Point(circle2.center.x - (radius * 2), circle2.center.y), radius)\n circle2.fill_color = 'green'\n circle2.attach_to(window)\n window.render()\n # -------------------------------------------------------------------------\n # DONE: 4. Implement and test this function.\n # Tests have been written for you (above).\n #\n # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,\n # as in draw_row_of_circles in m1e,\n # instead of directly using the loop variable.\n #\n ###########################################################################\n # HINT: To figure out the code that computes the necessary\n # positions of each circle,\n # ** FIRST DO A CONCRETE EXAMPLE BY HAND! **\n ###########################################################################\n # -------------------------------------------------------------------------\n\n\ndef run_test_draw_lines_from_rectangles():\n \"\"\" Tests the draw_lines_from_rectangles function. 
\"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the draw_lines_from_rectangles function:')\n print(' See the graphics windows that pop up.')\n print('--------------------------------------------------')\n\n # TWO tests on ONE window.\n title = 'Tests 1 & 2 of DRAW_LINES_FROM_RECTANGLES:'\n title += ' 5 lines, 8 lines!'\n window1 = rg.RoseWindow(900, 400, title)\n\n rectangle1 = rg.Rectangle(rg.Point(100, 25), rg.Point(150, 125))\n rectangle2 = rg.Rectangle(rg.Point(300, 150), rg.Point(400, 175))\n rectangle1.outline_color = 'red'\n rectangle2.outline_color = 'blue'\n draw_lines_from_rectangles(rectangle1, rectangle2, 5, window1)\n\n rectangle1 = rg.Rectangle(rg.Point(870, 30), rg.Point(750, 100))\n rectangle2 = rg.Rectangle(rg.Point(700, 90), rg.Point(650, 60))\n rectangle2.outline_color = 'green'\n draw_lines_from_rectangles(rectangle1, rectangle2, 8, window1)\n\n window1.close_on_mouse_click()\n\n # A third test on ANOTHER window.\n title = 'Test 3 of DRAW_LINES_FROM_RECTANGLES: 11 lines!'\n window2 = rg.RoseWindow(700, 700, title)\n\n rectangle1 = rg.Rectangle(rg.Point(550, 200), rg.Point(650, 100))\n rectangle2 = rg.Rectangle(rg.Point(600, 50), rg.Point(650, 75))\n rectangle1.outline_color = 'brown'\n rectangle2.outline_color = 'cyan'\n rectangle2.outline_thickness = 10\n draw_lines_from_rectangles(rectangle1, rectangle2, 11, window2)\n\n window2.close_on_mouse_click()\n\n\ndef draw_lines_from_rectangles(rectangle1, rectangle2, n, window):\n \"\"\"\n What comes in: Four arguments:\n -- Two rg.Rectangles.\n -- A positive integer n.\n -- An rg.RoseWindow.\n What goes out: Nothing (i.e., None).\n Side effects:\n See draw_lines_from_rectangles.pdf in this project\n for pictures that may help you better understand\n the following specification:\n\n First draws the given rg.Rectangles on the given rg.RoseWindow.\n Then draws n rg.Lines on the given rg.RoseWindow, such that:\n -- The 1st rg.Line goes from the center of one of the\n 1st rg.Rectangle to the center of the 2nd rg.Rectangle.\n -- The 2nd rg.Line goes from the lower-left corner of the\n 1st rg.Rectangle and is parallel to the 1st rg.Line,\n with the same length and direction as the 1st rg.Line.\n -- Subsequent rg.Lines are shifted from the previous rg.Line in\n the same way that the 2nd rg.Line is shifted from the 1st.\n -- Each of the rg.Lines has thickness 5.\n -- The colors of the rg.Lines alternate, as follows:\n - The 1st, 3rd, 5th, ... rg.Line has color R1_color\n - The 2nd, 4th, 6th, ... rg.Line has color R2_color\n where\n - R1_color is the outline color of the 1st rg.Rectangle\n - R2_color is the outline color of the 2nd rg.Rectangle\n Must ** render ** but ** NOT close ** the window.\n\n Type hints:\n :type rectangle1: rg.Rectangle\n :type rectangle2: rg.Rectangle\n :type n: int\n :type window: rg.RoseWindow\n \"\"\"\n\n # -------------------------------------------------------------------------\n # TODO: 5. Implement and test this function.\n # Tests have been written for you (above).\n #\n # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,\n # as in draw_row_of_circles in m1e,\n # instead of directly using the loop variable.\n #\n ###########################################################################\n # HINT: To figure out the code that computes the necessary\n # endpoints for each line,\n # ** FIRST DO A CONCRETE EXAMPLE BY HAND! 
**\n ###########################################################################\n # -------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------\n# Calls main to start the ball rolling.\n# -----------------------------------------------------------------------------\nmain()\n"}}},{"rowIdx":542619,"cells":{"filename":{"kind":"string","value":"the-stack_106_30924"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n\nfrom setuptools import setup, find_packages\n\nlong_description = \"\"\"\ntropohelper is a library to speed up creating resources using tropospher\nand cloudformation on AWS. Troposphere makes it much easier, but it can really\nmake a file for creating a stack large and repedative. Using these helper\nfunctions keeps things much more DRY.\n\"\"\"\n\nsetup(\n name='tropohelper',\n version=\"1.4.0\",\n description='tropohelper is a collection of troposphere helpers to promote DRY.',\n long_description=long_description,\n author='Michael Gorman',\n author_email='michael@michaeljgorman.com',\n url='https://github.com/mjgorman/tropohelper',\n packages=find_packages(),\n install_requires=['troposphere==2.4.6', 'awacs>=0.7.2'],\n test_suite='nose.collector',\n tests_require=['nose<2.0']\n)\n"}}},{"rowIdx":542620,"cells":{"filename":{"kind":"string","value":"the-stack_106_30925"},"text":{"kind":"string","value":"from random import *\n\nfrom PD_Naive.naive_pd import *\n\n# in1 = [(0, 000001), (0, 000010), (0, 000011), (1, 000100), (3, 000101), (3, 000110), (4, 000111), (4, 001000)]\ninp_1 = [(0, '00001'), (0, '00010'), (0, '00011'), (1, '00100'),\n (3, '00101'), (3, '00110'), (4, '00111'), (4, '01000')]\n\n\ndef coin():\n return randint(0, 1)\n\n\n#\n# in1q = [0, 0, 0, 1, 3, 3, 4, 4]\n# in1 = [(in1q[i], to_bin_with_leading_zeros(i+1, 5)) for i in range(len(in1q))]\n# print(in1)\n# len(in1q)\n# in1 = [(0, to_bin_with_leading_zeros(0))]\n# in1 = [(0, to_bin_with_leading_zeros(0))]\n\n\ndef to_bin_with_leading_zeros(n: int, length: int) -> str:\n s = bin(n)[2:]\n diff = length - len(s)\n if diff > 0:\n return \"0\" * diff + s\n elif diff == 0:\n return s\n else:\n assert False\n\n\ndef t1(m: int, f: int, l: int):\n \"\"\"\n :param m Quotients (q_i) range interval. (forall q_i, q_i in [m])\n :param f Number of elements in PD.\n :param l Remainder (r_i) length. 
(|r_i| = l)\n :return:\n \"\"\"\n assert f <= m\n quotient = list(range(f))\n remainder = [to_bin_with_leading_zeros(i, l) for i in range(f)]\n d = naive_pd(m, f, l)\n for i in range(f):\n q, r = quotient[i], remainder[i]\n d.insert(q, r)\n temp = d.lookup(q, r)\n if not temp:\n print(\"Failed in {:} lookup\".format(i))\n print(d)\n return False\n print(\"Passed t1.\")\n return True\n\n\ndef t2(m: int, f: int, l: int):\n assert f <= m\n quotient = list(range(f))\n remainder = [to_bin_with_leading_zeros(i, l) for i in range(f)]\n d = naive_pd(m, f, l)\n for i in range(f):\n q, r = quotient[i], remainder[i]\n d.insert(q, r)\n temp = d.lookup(q, r)\n if not temp:\n print(\"Failed in {:} lookup\".format(i))\n print(d)\n return False\n d.remove(q, r)\n temp = d.lookup(q, r)\n if temp:\n print(\"Failed in {:} lookup after deletion\".format(i))\n print(d)\n return False\n print(\"Passed t2.\")\n return True\n\n\n# random insertion and deletion\n\n\ndef t3(reps: int = 1 << 7):\n f = randint(8, 128)\n m = randint(f + 1, f * 2)\n l = randint(11, 31)\n assert f <= m\n\n def single_key():\n return randint(0, m), to_bin_with_leading_zeros(randint(1, (1 << l) - 1), l)\n\n elements = {single_key() for _ in range(f)}\n inserted_elements = set()\n assert len(elements) == f\n # quotient=sample(range(m), f)\n # assert len(quotient) == len(set(quotient))\n # remainder=[to_bin_with_leading_zeros(randint, l) for i in range(f)]\n d = naive_pd(m, f, l)\n for i in range(reps):\n # print(d.get_head_as_runs())\n if coin():\n if not elements:\n continue\n\n temp = sample(elements, 1)[0]\n elements.remove(temp)\n d.insert(*temp)\n inserted_elements.add(temp)\n if not d.lookup(*temp):\n print(\"Failed in {:} lookup. case 1.\".format(i))\n # print(d)\n return False\n else:\n if not inserted_elements:\n continue\n\n temp = sample(inserted_elements, 1)[0]\n if not d.lookup(*temp):\n print(\"Failed in {:} lookup. case 2.\".format(i))\n # print(d)\n return False\n\n inserted_elements.remove(temp)\n d.remove(*temp)\n elements.add(temp)\n if d.lookup(*temp):\n print(\"Failed in {:} lookup. 
False positive\".format(i))\n # print(d)\n return False\n\n #\n # q, r = quotient[i], remainder[i]\n # d.insert(q, r)\n # temp = d.lookup(q, r)\n # if not temp:\n # print(\"Failed in {:} lookup\".format(i))\n # print(d)\n # return False\n # d.remove(q, r)\n # temp = d.lookup(q, r)\n # if temp:\n # print(\"Failed in {:} lookup after deletion\".format(i))\n # print(d)\n # return False\n # print(\"Passed t3.\")\n return True\n\nint(\"10\"*4,2)\nint(\"110\"*2 +\"00\",2)\nbi = lambda n:bin(n)[2:]\ns = bi(170)\nbi(170)\nbi(216)\ns\n[i for i in range(len(s)) if s[i] == '0']\n\n170 & ()\ns1 = \"1110100110110\"\nlen(s1)\nt1(16, 3, 5)\nt1(16, 9, 5)\nt1(16, 15, 5)\n#\nt2(16, 3, 5)\nt2(16, 9, 5)\nt2(16, 15, 5)\n# sample(list(range(4)), 2)\nt3(1 << 9)\n\nfor i in range(5):\n assert t3(1 << 9)\n\nfor i in range(1 << 8):\n assert t3()\n print(i, end=\"\\t\")\n"}}},{"rowIdx":542621,"cells":{"filename":{"kind":"string","value":"the-stack_106_30927"},"text":{"kind":"string","value":"# SPDX-License-Identifier: MIT\n# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs\n# Copyright (C) 2012-2014 Bastian Kleineidam\n# Copyright (C) 2015-2020 Tobias Gruetzmacher\nimport re\nimport os\n\nimport pytest\nfrom xdist.dsession import LoadScopeScheduling\n\nfrom dosagelib.scraper import scrapers\n\n\ndef get_test_scrapers():\n \"\"\"Return scrapers that should be tested.\"\"\"\n if 'TESTALL' in os.environ:\n # test all comics (this will take some time)\n # ignore mangadex for now (site is temporary down)\n scraper_pattern = '^(?!MangaDex)'\n elif 'TESTCOMICS' in os.environ:\n scraper_pattern = os.environ['TESTCOMICS']\n else:\n # Get limited number of scraper tests as default\n testscrapernames = [\n # \"classic\" _BasicScraper\n 'AbstruseGoose',\n # complex _ParserScraper\n 'GoComics/CalvinAndHobbes',\n # _WordPressScraper\n 'GrrlPower',\n ]\n scraper_pattern = '^(' + '|'.join(testscrapernames) + ')$'\n\n matcher = re.compile(scraper_pattern)\n return [\n scraperobj for scraperobj in scrapers.get()\n if matcher.match(scraperobj.name)\n ]\n\n\ndef pytest_generate_tests(metafunc):\n if 'scraperobj' in metafunc.fixturenames:\n scrapers = get_test_scrapers()\n scraperids = [x.name for x in scrapers]\n metafunc.parametrize('scraperobj', scrapers, ids=scraperids)\n\n\nclass LoadModScheduling(LoadScopeScheduling):\n \"\"\"Implement load scheduling for comic modules. 
See xdist for details.\"\"\"\n\n def _split_scope(self, nodeid):\n mod, test = nodeid.split(\"::\", 1)\n return mod + \"::\" + test.split(\"/\", 1)[0]\n\n\n@pytest.mark.trylast\ndef pytest_xdist_make_scheduler(config, log):\n return LoadModScheduling(config, log)\n"}}},{"rowIdx":542622,"cells":{"filename":{"kind":"string","value":"the-stack_106_30928"},"text":{"kind":"string","value":"import os\r\nimport telegram\r\nfrom telegram.ext import Updater, CommandHandler\r\nfrom telegram import InlineKeyboardMarkup, InlineKeyboardButton\r\n\r\ndef start(update, context):\r\n \r\n \r\n Button1 = InlineKeyboardButton(\r\n text='Github',\r\n url='https://github.com/drewdev02'\r\n )\r\n Button2= InlineKeyboardButton(\r\n text='Me',\r\n url='https://t.me/Adrewdev'\r\n )\r\n \r\n update.message.reply_text(\r\n text='Hoy es un gran dia mi vida, usa \"/more\" para algo especial',\r\n reply_markup=InlineKeyboardMarkup([\r\n [Button1, Button2]\r\n ])\r\n )\r\n\r\ndef more(update, context):\r\n \r\n Button3 = InlineKeyboardButton(\r\n text='Tocame, My love',\r\n url='https://telegra.ph/De-mi-para-ti-por-ser-tu-d%C3%ADa-especial-03-08' # hacer un post en telegraph\r\n )\r\n \r\n \r\n update.message.reply_text(\r\n text='Happy Brithday!!!',\r\n reply_markup=InlineKeyboardMarkup([\r\n [Button3]\r\n ])\r\n )\r\n\r\nif __name__ == '__main__':\r\n \r\n token = os.environ['TOKEN']\r\n \r\n bot = telegram.Bot(token=token) \r\n \r\n updater = Updater(token=token, use_context=True)\r\n \r\n dp = updater.dispatcher\r\n\r\n dp.add_handler(CommandHandler('start', start))\r\n dp.add_handler(CommandHandler('more', more))\r\n \r\n updater.start_polling()\r\n print('bot is polling')\r\n updater.idle()\r\n"}}},{"rowIdx":542623,"cells":{"filename":{"kind":"string","value":"the-stack_106_30930"},"text":{"kind":"string","value":"from leapp.libraries.actor import library\nfrom leapp import reporting\nfrom leapp.libraries.common.testutils import create_report_mocked\n\n\nclass extract_tgz64_mocked(object):\n def __init__(self):\n self.called = 0\n self.s = None\n\n def __call__(self, s):\n self.called += 1\n self.s = s\n\n\nclass enable_service_mocked(object):\n def __init__(self):\n self.called = 0\n self.names = []\n\n def __call__(self, name):\n self.called += 1\n self.names.append(name)\n\n\nclass write_file_mocked(object):\n def __init__(self):\n self.called = 0\n self.name = None\n self.content = None\n\n def __call__(self, name, content):\n self.called += 1\n self.name = name\n self.content = content\n\n\nclass ntp2chrony_mocked(object):\n def __init__(self, lines):\n self.called = 0\n self.ignored_lines = lines\n self.args = None\n\n def __call__(self, *args):\n self.called += 1\n self.args = args\n return self.ignored_lines * ['a line']\n\n\ndef test_migration(monkeypatch):\n for ntp_services, chrony_services, ignored_lines in [\n ([], [], 0),\n (['ntpd'], ['chronyd'], 0),\n (['ntpdate'], ['chronyd'], 1),\n (['ntp-wait'], ['chrony-wait'], 0),\n (['ntpd', 'ntpdate', 'ntp-wait'], ['chronyd', 'chronyd', 'chrony-wait'], 1),\n ]:\n monkeypatch.setattr(reporting, 'create_report', create_report_mocked())\n monkeypatch.setattr(library, 'extract_tgz64', extract_tgz64_mocked())\n monkeypatch.setattr(library, 'enable_service', enable_service_mocked())\n monkeypatch.setattr(library, 'write_file', write_file_mocked())\n monkeypatch.setattr(library, 'ntp2chrony', ntp2chrony_mocked(ignored_lines))\n\n library.migrate_ntp(ntp_services, 'abcdef')\n\n if ntp_services:\n assert reporting.create_report.called == 1\n if ignored_lines > 0:\n 
assert 'configuration partially migrated to chrony' in \\\n reporting.create_report.report_fields['title']\n else:\n assert 'configuration migrated to chrony' in \\\n reporting.create_report.report_fields['title']\n\n assert library.extract_tgz64.called == 1\n assert library.extract_tgz64.s == 'abcdef'\n assert library.enable_service.called == len(chrony_services)\n assert library.enable_service.names == chrony_services\n assert library.write_file.called == (0 if 'ntpd' in ntp_services else 1)\n if library.write_file.called:\n assert library.write_file.name == '/etc/ntp.conf.nosources'\n assert 'without ntp configuration' in library.write_file.content\n assert library.ntp2chrony.called == 1\n assert library.ntp2chrony.args == (\n '/',\n '/etc/ntp.conf' if 'ntpd' in ntp_services else '/etc/ntp.conf.nosources',\n '/etc/ntp/step-tickers' if 'ntpdate' in ntp_services else '')\n else:\n assert reporting.create_report.called == 0\n assert library.extract_tgz64.called == 0\n assert library.enable_service.called == 0\n assert library.write_file.called == 0\n assert library.ntp2chrony.called == 0\n"}}},{"rowIdx":542624,"cells":{"filename":{"kind":"string","value":"the-stack_106_30933"},"text":{"kind":"string","value":"# Zulip's OpenAPI-based API documentation system is documented at\n# https://zulip.readthedocs.io/en/latest/documentation/api.html\n#\n# This file contains helper functions for generating cURL examples\n# based on Zulip's OpenAPI definitions, as well as test setup and\n# fetching of appropriate parameter values to use when running the\n# cURL examples as part of the tools/test-api test suite.\nfrom functools import wraps\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple\n\nfrom django.utils.timezone import now as timezone_now\n\nfrom zerver.lib.actions import (\n do_add_linkifier,\n do_add_reaction,\n do_add_realm_playground,\n do_create_user,\n update_user_presence,\n)\nfrom zerver.lib.events import do_events_register\nfrom zerver.lib.initial_password import initial_password\nfrom zerver.lib.test_classes import ZulipTestCase\nfrom zerver.lib.upload import upload_message_file\nfrom zerver.lib.users import get_api_key\nfrom zerver.models import Client, Message, UserGroup, UserPresence, get_realm, get_user\n\nGENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {}\nREGISTERED_GENERATOR_FUNCTIONS: Set[str] = set()\nCALLED_GENERATOR_FUNCTIONS: Set[str] = set()\n# This is a List rather than just a string in order to make it easier\n# to write to it from another module.\nAUTHENTICATION_LINE: List[str] = [\"\"]\n\nhelpers = ZulipTestCase()\n\n\ndef openapi_param_value_generator(\n endpoints: List[str],\n) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]:\n \"\"\"This decorator is used to register OpenAPI param value genarator functions\n with endpoints. 
Example usage:\n\n @openapi_param_value_generator([\"/messages/render:post\"])\n def ...\n \"\"\"\n\n def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]:\n @wraps(generator_func)\n def _record_calls_wrapper() -> Dict[str, object]:\n CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__)\n return generator_func()\n\n REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__)\n for endpoint in endpoints:\n GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper\n\n return _record_calls_wrapper\n\n return wrapper\n\n\ndef assert_all_helper_functions_called() -> None:\n \"\"\"Throws an exception if any registered helpers were not called by tests\"\"\"\n if REGISTERED_GENERATOR_FUNCTIONS == CALLED_GENERATOR_FUNCTIONS:\n return\n\n uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS)\n\n raise Exception(f\"Registered curl API generators were not called: {uncalled_functions}\")\n\n\ndef patch_openapi_example_values(\n entry: str,\n params: List[Dict[str, Any]],\n request_body: Optional[Dict[str, Any]] = None,\n) -> Tuple[List[Dict[str, object]], Optional[Dict[str, object]]]:\n if entry not in GENERATOR_FUNCTIONS:\n return params, request_body\n func = GENERATOR_FUNCTIONS[entry]\n realm_example_values: Dict[str, object] = func()\n\n for param in params:\n param_name = param[\"name\"]\n if param_name in realm_example_values:\n if \"content\" in param:\n param[\"content\"][\"application/json\"][\"example\"] = realm_example_values[param_name]\n else:\n param[\"example\"] = realm_example_values[param_name]\n\n if request_body is not None:\n properties = request_body[\"content\"][\"multipart/form-data\"][\"schema\"][\"properties\"]\n for key, property in properties.items():\n if key in realm_example_values:\n property[\"example\"] = realm_example_values[key]\n return params, request_body\n\n\n@openapi_param_value_generator([\"/fetch_api_key:post\"])\ndef fetch_api_key() -> Dict[str, object]:\n email = helpers.example_email(\"iago\")\n password = initial_password(email)\n\n return {\n \"username\": email,\n \"password\": password,\n }\n\n\n@openapi_param_value_generator(\n [\n \"/messages/{message_id}:get\",\n \"/messages/{message_id}/history:get\",\n \"/messages/{message_id}:patch\",\n \"/messages/{message_id}:delete\",\n ]\n)\ndef iago_message_id() -> Dict[str, object]:\n return {\n \"message_id\": helpers.send_stream_message(helpers.example_user(\"iago\"), \"Denmark\"),\n }\n\n\n@openapi_param_value_generator([\"/messages/{message_id}/reactions:delete\"])\ndef add_emoji_to_message() -> Dict[str, object]:\n user_profile = helpers.example_user(\"iago\")\n\n # from OpenAPI format data in zulip.yaml\n message_id = 43\n emoji_name = \"octopus\"\n emoji_code = \"1f419\"\n reaction_type = \"unicode_emoji\"\n\n message = Message.objects.select_related().get(id=message_id)\n do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)\n\n return {}\n\n\n@openapi_param_value_generator([\"/messages/flags:post\"])\ndef update_flags_message_ids() -> Dict[str, object]:\n stream_name = \"Venice\"\n helpers.subscribe(helpers.example_user(\"iago\"), stream_name)\n\n messages = []\n for _ in range(3):\n messages.append(helpers.send_stream_message(helpers.example_user(\"iago\"), stream_name))\n return {\n \"messages\": messages,\n }\n\n\n@openapi_param_value_generator([\"/mark_stream_as_read:post\", \"/users/me/{stream_id}/topics:get\"])\ndef get_venice_stream_id() -> Dict[str, object]:\n return {\n \"stream_id\": 
helpers.get_stream_id(\"Venice\"),\n }\n\n\n@openapi_param_value_generator([\"/streams/{stream_id}:patch\"])\ndef update_stream() -> Dict[str, object]:\n stream = helpers.subscribe(helpers.example_user(\"iago\"), \"temp_stream 1\")\n return {\n \"stream_id\": stream.id,\n }\n\n\n@openapi_param_value_generator([\"/streams/{stream_id}:delete\"])\ndef create_temp_stream_and_get_id() -> Dict[str, object]:\n stream = helpers.subscribe(helpers.example_user(\"iago\"), \"temp_stream 2\")\n return {\n \"stream_id\": stream.id,\n }\n\n\n@openapi_param_value_generator([\"/mark_topic_as_read:post\"])\ndef get_denmark_stream_id_and_topic() -> Dict[str, object]:\n stream_name = \"Denmark\"\n topic_name = \"Tivoli Gardens\"\n\n helpers.subscribe(helpers.example_user(\"iago\"), stream_name)\n helpers.send_stream_message(helpers.example_user(\"hamlet\"), stream_name, topic_name=topic_name)\n\n return {\n \"stream_id\": helpers.get_stream_id(stream_name),\n \"topic_name\": topic_name,\n }\n\n\n@openapi_param_value_generator([\"/users/me/subscriptions/properties:post\"])\ndef update_subscription_data() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n helpers.subscribe(profile, \"Verona\")\n helpers.subscribe(profile, \"social\")\n return {\n \"subscription_data\": [\n {\"stream_id\": helpers.get_stream_id(\"Verona\"), \"property\": \"pin_to_top\", \"value\": True},\n {\"stream_id\": helpers.get_stream_id(\"social\"), \"property\": \"color\", \"value\": \"#f00f00\"},\n ],\n }\n\n\n@openapi_param_value_generator([\"/users/me/subscriptions:delete\"])\ndef delete_subscription_data() -> Dict[str, object]:\n iago = helpers.example_user(\"iago\")\n zoe = helpers.example_user(\"ZOE\")\n helpers.subscribe(iago, \"Verona\")\n helpers.subscribe(iago, \"social\")\n helpers.subscribe(zoe, \"Verona\")\n helpers.subscribe(zoe, \"social\")\n return {}\n\n\n@openapi_param_value_generator([\"/events:get\"])\ndef get_events() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n helpers.subscribe(profile, \"Verona\")\n client = Client.objects.create(name=\"curl-test-client-1\")\n response = do_events_register(profile, client, event_types=[\"message\", \"realm_emoji\"])\n helpers.send_stream_message(helpers.example_user(\"hamlet\"), \"Verona\")\n return {\n \"queue_id\": response[\"queue_id\"],\n \"last_event_id\": response[\"last_event_id\"],\n }\n\n\n@openapi_param_value_generator([\"/events:delete\"])\ndef delete_event_queue() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n client = Client.objects.create(name=\"curl-test-client-2\")\n response = do_events_register(profile, client, event_types=[\"message\"])\n return {\n \"queue_id\": response[\"queue_id\"],\n \"last_event_id\": response[\"last_event_id\"],\n }\n\n\n@openapi_param_value_generator([\"/users/{user_id_or_email}/presence:get\"])\ndef get_user_presence() -> Dict[str, object]:\n iago = helpers.example_user(\"iago\")\n client = Client.objects.create(name=\"curl-test-client-3\")\n update_user_presence(iago, client, timezone_now(), UserPresence.ACTIVE, False)\n return {}\n\n\n@openapi_param_value_generator([\"/users:post\"])\ndef create_user() -> Dict[str, object]:\n return {\n \"email\": helpers.nonreg_email(\"test\"),\n }\n\n\n@openapi_param_value_generator([\"/user_groups/create:post\"])\ndef create_user_group_data() -> Dict[str, object]:\n return {\n \"members\": [helpers.example_user(\"hamlet\").id, helpers.example_user(\"othello\").id],\n }\n\n\n@openapi_param_value_generator(\n 
[\"/user_groups/{user_group_id}:patch\", \"/user_groups/{user_group_id}:delete\"]\n)\ndef get_temp_user_group_id() -> Dict[str, object]:\n user_group, _ = UserGroup.objects.get_or_create(name=\"temp\", realm=get_realm(\"zulip\"))\n return {\n \"user_group_id\": user_group.id,\n }\n\n\n@openapi_param_value_generator([\"/realm/filters/{filter_id}:delete\"])\ndef remove_realm_filters() -> Dict[str, object]:\n filter_id = do_add_linkifier(\n get_realm(\"zulip\"), \"#(?P[0-9]{2,8})\", \"https://github.com/zulip/zulip/pull/%(id)s\"\n )\n return {\n \"filter_id\": filter_id,\n }\n\n\n@openapi_param_value_generator([\"/realm/emoji/{emoji_name}:post\", \"/user_uploads:post\"])\ndef upload_custom_emoji() -> Dict[str, object]:\n return {\n \"filename\": \"zerver/tests/images/animated_img.gif\",\n }\n\n\n@openapi_param_value_generator([\"/realm/playgrounds:post\"])\ndef add_realm_playground() -> Dict[str, object]:\n return {\n \"name\": \"Python2 playground\",\n \"pygments_language\": \"Python2\",\n \"url_prefix\": \"https://python2.example.com\",\n }\n\n\n@openapi_param_value_generator([\"/realm/playgrounds/{playground_id}:delete\"])\ndef remove_realm_playground() -> Dict[str, object]:\n playground_info = dict(\n name=\"Python playground\",\n pygments_language=\"Python\",\n url_prefix=\"https://python.example.com\",\n )\n playground_id = do_add_realm_playground(get_realm(\"zulip\"), **playground_info)\n return {\n \"playground_id\": playground_id,\n }\n\n\n@openapi_param_value_generator([\"/users/{user_id}:delete\"])\ndef deactivate_user() -> Dict[str, object]:\n user_profile = do_create_user(\n email=\"testuser@zulip.com\",\n password=None,\n full_name=\"test_user\",\n realm=get_realm(\"zulip\"),\n acting_user=None,\n )\n return {\"user_id\": user_profile.id}\n\n\n@openapi_param_value_generator([\"/users/me:delete\"])\ndef deactivate_own_user() -> Dict[str, object]:\n test_user_email = \"delete-test@zulip.com\"\n deactivate_test_user = do_create_user(\n test_user_email,\n \"secret\",\n get_realm(\"zulip\"),\n \"Mr. Delete\",\n role=200,\n acting_user=None,\n )\n realm = get_realm(\"zulip\")\n test_user = get_user(test_user_email, realm)\n test_user_api_key = get_api_key(test_user)\n # change authentication line to allow test_client to delete itself.\n AUTHENTICATION_LINE[0] = f\"{deactivate_test_user.email}:{test_user_api_key}\"\n return {}\n\n\n@openapi_param_value_generator([\"/attachments/{attachment_id}:delete\"])\ndef remove_attachment() -> Dict[str, object]:\n user_profile = helpers.example_user(\"iago\")\n url = upload_message_file(\"dummy.txt\", len(b\"zulip!\"), \"text/plain\", b\"zulip!\", user_profile)\n attachment_id = url.replace(\"/user_uploads/\", \"\").split(\"/\")[0]\n\n return {\"attachment_id\": attachment_id}\n"}}},{"rowIdx":542625,"cells":{"filename":{"kind":"string","value":"the-stack_106_30934"},"text":{"kind":"string","value":"# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Lmdb(MakefilePackage):\n \"\"\"Symas LMDB is an extraordinarily fast, memory-efficient database we\n developed for the Symas OpenLDAP Project. 
With memory-mapped files, it\n has the read performance of a pure in-memory database while retaining\n the persistence of standard disk-based databases.\"\"\"\n\n homepage = \"https://lmdb.tech/\"\n url = \"https://github.com/LMDB/lmdb/archive/LMDB_0.9.21.tar.gz\"\n\n version('0.9.29', sha256='22054926b426c66d8f2bc22071365df6e35f3aacf19ad943bc6167d4cae3bebb')\n version('0.9.24', sha256='44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26')\n version('0.9.22', sha256='f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28')\n version('0.9.21', sha256='1187b635a4cc415bb6972bba346121f81edd996e99b8f0816151d4090f90b559')\n version('0.9.16', sha256='49d7b40949f2ced9bc8b23ea6a89e75471a1c9126537a8b268c318a00b84322b')\n\n build_directory = 'libraries/liblmdb'\n\n @property\n def build_targets(self):\n return ['CC={0}'.format(spack_cc)]\n\n @property\n def install_targets(self):\n return ['prefix={0}'.format(self.prefix), 'install']\n\n @run_after('install')\n def install_pkgconfig(self):\n mkdirp(self.prefix.lib.pkgconfig)\n\n with open(join_path(self.prefix.lib.pkgconfig, 'lmdb.pc'), 'w') as f:\n f.write('prefix={0}\\n'.format(self.prefix))\n f.write('exec_prefix=${prefix}\\n')\n f.write('libdir={0}\\n'.format(self.prefix.lib))\n f.write('includedir={0}\\n'.format(self.prefix.include))\n f.write('\\n')\n f.write('Name: LMDB\\n')\n f.write('Description: Symas LMDB is an extraordinarily fast, '\n 'memory-efficient database.\\n')\n f.write('Version: {0}\\n'.format(self.spec.version))\n f.write('Cflags: -I${includedir}\\n')\n f.write('Libs: -L${libdir} -llmdb\\n')\n"}}},{"rowIdx":542626,"cells":{"filename":{"kind":"string","value":"the-stack_106_30935"},"text":{"kind":"string","value":"# basic functions are inspired by Tinygrad's own implementation\nimport numpy as np\n\nfrom .function import Function\n\n\ndef unbroadcast(out, in_shape):\n \"\"\"Sum the gradients of the output in the case that broadcasting was performed\n during the calculation of a result. This effectively avoids explicitly splitting\n a broadcasting operation into several clone modules beforehand.\n \"\"\"\n # if the input is a scalar, sum over every dimension\n if in_shape == (1,):\n sum_axis = None\n return out.sum(axis=sum_axis).reshape(in_shape)\n\n original_in_shape = in_shape\n\n # if it's an (n,) shape vector change its shape to mimic (1, n, 1, ...) according to output shape\n if len(in_shape) == 1:\n n = in_shape[0]\n index = out.shape[::-1].index(n)\n temp_axis = [n if i == index else 1 for i in range(len(out.shape))]\n in_shape = temp_axis[::-1]\n\n # finally, sum the axis where broadcasting took place\n sum_axis = tuple([dim for dim in range(len(in_shape)) if in_shape[dim]==1 and out.shape[dim]>1])\n return out.sum(axis=sum_axis).reshape(original_in_shape)\n\n\n# basic tensor operations\nclass Add(Function):\n @staticmethod\n def forward(context, x1, x2):\n context.save_for_backward(x1.shape, x2.shape)\n return x1 + x2\n\n @staticmethod\n def backward(context, output_grads):\n # y = x1 + x2 ||| dy/dx1 = dy/dx2 = 1\n # the local gradient of the sum operator will be 1 for both inputs. Now just multiply the\n # local gradient with the incoming gradient to get the gradient of the target function\n # w.r.t. 
the inputs and keep the chain rule going\n x1_shape, x2_shape = context.saved_data\n return unbroadcast(output_grads, x1_shape), unbroadcast(output_grads, x2_shape)\n\n\nclass Sub(Function):\n @staticmethod\n def forward(context, x1, x2):\n context.save_for_backward(x1.shape, x2.shape)\n return x1 - x2\n\n @staticmethod\n def backward(context, output_grads):\n # y = x1 - x2 ||| dy/x1 = 1 dy/x2 = -1\n x1_shape, x2_shape = context.saved_data\n return unbroadcast(output_grads, x1_shape), unbroadcast(-output_grads, x2_shape)\n\n\nclass Mul(Function):\n @staticmethod\n def forward(context, x1, x2):\n context.save_for_backward(x1, x2)\n return x1 * x2\n\n @staticmethod\n def backward(context, output_grads):\n # y = x1 * x2 ||| dy/x1 = x2 dy/x2 = x1\n x1, x2 = context.saved_data\n return unbroadcast(x2 * output_grads, x1.shape), unbroadcast(x1 * output_grads, x2.shape)\n\n\nclass Div(Function):\n @staticmethod\n def forward(context, x1, x2):\n context.save_for_backward(x1, x2)\n return x1 / x2\n\n @staticmethod\n def backward(context, output_grads):\n # y = x1 / x2 ||| dy/x1 = (1x2) dy/x2 = x1 * d(1/x2)/x2 = x1 * -(1/x2**2)\n x1, x2 = context.saved_data\n return (unbroadcast((1/x2) * output_grads, x1.shape),\n unbroadcast(x1 * (-1/x2**2) * output_grads, x2.shape))\n\n\nclass Pow(Function):\n @staticmethod\n def forward(context, x, y):\n context.save_for_backward(x, y)\n return x ** y\n\n @staticmethod\n def backward(context, output_grads):\n x, y = context.saved_data\n x_non_negative = x.copy()\n x_non_negative[x_non_negative < 0] = np.nan\n return (unbroadcast(y * (x**(y-1.0)) * output_grads, x.shape),\n unbroadcast((x**y) * np.log(x_non_negative) * output_grads, y.shape))\n\n\nclass Matmul(Function):\n @staticmethod\n def forward(context, x, y):\n context.save_for_backward(x, y)\n return x @ y\n\n @staticmethod\n def backward(context, output_grads):\n x, y = context.saved_data\n x_shape = x.shape\n y_shape = y.shape\n\n if len(x.shape) == 1:\n x = np.expand_dims(x, axis=0)\n if len(y.shape) == 1:\n y = np.expand_dims(y, axis=1)\n if len(output_grads.shape) == 1:\n output_grads = np.expand_dims(output_grads, axis=0)\n\n x_grad = unbroadcast(output_grads@y.T, x.shape)\n y_grad = unbroadcast(x.T@output_grads, y.shape)\n\n return x_grad.reshape(x_shape), y_grad.reshape(y_shape)\n\n\nclass Mean(Function):\n @staticmethod\n def forward(context, array):\n div_coeff = 1 / array.size\n context.save_for_backward(div_coeff, array.shape)\n\n pre_sum = array * div_coeff\n return pre_sum.sum()\n\n @staticmethod\n def backward(context, output_grads):\n div_coeff, input_shape = context.saved_data\n weighted_grads = output_grads * div_coeff\n return np.ones(input_shape) * weighted_grads\n\n\nclass Reshape(Function):\n @staticmethod\n def forward(context, array, shape):\n shape = shape.astype('int')\n context.save_for_backward(array.shape)\n return array.reshape(shape)\n\n @staticmethod\n def backward(context, output_grads):\n input_shape, = context.saved_data\n return output_grads.reshape(input_shape)\n\n\nclass Transpose(Function):\n @staticmethod\n def forward(context, array, order):\n if np.isnan(order).all():\n order = None\n else:\n order = order.astype('int')\n\n context.save_for_backward(order)\n return array.transpose(order)\n\n @staticmethod\n def backward(context, output_grads):\n order, = context.saved_data\n if order is None:\n return output_grads.transpose()\n un_transpose = [order[idx] for idx in order]\n return output_grads.transpose(un_transpose)\n\n\nclass SumSelf(Function):\n @staticmethod\n 
def forward(context, array, axis=None, keepdims=False):\n context.save_for_backward(axis, array.shape, keepdims)\n return array.sum(axis, keepdims=keepdims, dtype='float32')\n\n @staticmethod\n def backward(context, output_grads):\n # the dimensions of the output grad are the ones from the original input\n # regardless of keepdims\n axis, input_shape, keepdims = context.saved_data\n if not keepdims and input_shape != (1,):\n output_grads = np.expand_dims(output_grads, axis)\n\n grads = np.zeros(input_shape, dtype='float32') + output_grads\n return grads.reshape(input_shape)\n\n\nclass Exp(Function):\n @staticmethod\n def forward(context, array):\n result = np.exp(array)\n context.save_for_backward(result)\n return result\n\n @staticmethod\n def backward(context, output_grads):\n forward_result, = context.saved_data\n return forward_result * output_grads\n\n\nclass Log(Function):\n @staticmethod\n def forward(context, array):\n context.save_for_backward(array)\n return np.log(array)\n\n @staticmethod\n def backward(context, output_grads):\n EPSILON = 1e-9\n forward_input, = context.saved_data\n return (1/(forward_input+EPSILON)) * output_grads\n\n\n# nn functions\nclass ReLU(Function):\n @staticmethod\n def forward(context, array):\n mask = array > 0\n context.save_for_backward(mask)\n return array * mask\n\n @staticmethod\n def backward(context, output_grads):\n mask, = context.saved_data\n return output_grads * mask\n\n\nclass SoftMax(Function):\n @staticmethod\n def forward(context, array):\n # if there are problems, look into the numerically stable implementation\n input_shape = array.shape\n n_dims = len(input_shape)\n\n # treat all vectors as column vectors\n if n_dims == 1:\n array = np.expand_dims(array, axis=0)\n n_dims = 2\n\n exp = np.exp(array)\n result = exp / np.sum(exp, axis=(n_dims-1), keepdims=True)\n\n context.save_for_backward(input_shape, result)\n\n return result.reshape(input_shape)\n\n @staticmethod\n def backward(context, output_grads):\n input_shape, forward_result = context.saved_data\n\n # great further explanation from https://stackoverflow.com/a/36280783\n # compute J[i, j] for i != j resulting in -softmax_i * softmax_j\n jacobian = -forward_result[..., np.newaxis] * forward_result[:, np.newaxis, :]\n\n # get the diagonal indices (i=j) and fill them with softmax_i * (1 - softmax_i)\n idx_y, idx_x = np.diag_indices_from(jacobian[0])\n jacobian[:, idx_y, idx_x] = forward_result * (1. - forward_result)\n\n # reduce the jacobian down to a gradient w.r.t. 
the inputs:\n # a column of the jacobian tells you how every output is affected by a particular input,\n # output_grads tell you how every output affects the target function,\n # so by multiplying output_grads by column j and summing the result\n # you will get the total influence of input j over all the outputs\n output_grads = output_grads[..., np.newaxis, :]\n return (output_grads @ jacobian).reshape(input_shape)\n\n\nclass CrossEntropy(Function):\n @staticmethod\n def forward(context, in_tensor, targets):\n # targets will be used as indices so integers are required\n targets = targets.astype('int')\n context.save_for_backward(in_tensor, targets)\n\n # select only the inputs that will affect the loss\n n = in_tensor.shape[0]\n inputs_in_target_indices = in_tensor[range(n), targets]\n\n # apply cross-entropy loss to those inputs and return the average\n log_result = -np.log(inputs_in_target_indices)\n return np.sum(log_result) * (1/n)\n\n @staticmethod\n def backward(context, output_grads):\n EPSILON = 1e-9\n in_tensor, targets = context.saved_data\n\n n = in_tensor.shape[0]\n\n # every local gradient will be 0, except the ones corresponding to the inputs\n # used to calculate the forward pass, those will have regular -1/x grad\n local_grads = np.zeros_like(in_tensor)\n local_grads[range(n), targets] = -1/(in_tensor[range(n), targets]+EPSILON)\n local_grads *= (1/n)\n return local_grads * output_grads\n\n\n# nn module operations\nclass Linear(Function):\n # i = out_features\n # j = in_features\n # m = number of examples in the batch\n @staticmethod\n def forward(context, array, weight, bias):\n context.save_for_backward(array, weight, bias.shape)\n return array @ weight.T + bias # Y[mxi] = X[mxj] @ W.T[jxi] + b[1xi]\n\n @staticmethod\n def backward(context, output_grads):\n array, weight, bias_shape = context.saved_data\n dX = output_grads @ weight # dJ/dX[mxj] = dJ/dY[mxi] @ W[ixj]\n dW = output_grads.T @ array # dJ/dW[ixj] = dJ/dY.T[ixm] @ X[mxj]\n db = unbroadcast(output_grads, bias_shape) # dJ/db[ix1] = unbroadcast(dJ/db, b.shape)\n return dX, dW, db\n\n\nclass NaiveConv2d(Function):\n @staticmethod\n def forward(context, array, weight, stride, padding):\n pass\n\n @staticmethod\n def backward(context, output_grads):\n pass\n\n\nclass Conv2d(Function):\n @staticmethod\n def forward(context, array, weight, stride, padding):\n pass\n\n @staticmethod\n def backward(context, output_grads):\n pass\n\n\n"}}},{"rowIdx":542627,"cells":{"filename":{"kind":"string","value":"the-stack_106_30936"},"text":{"kind":"string","value":"def special_for(iterable):\n iterator = iter(iterable)\n while True:\n try:\n iterator*5\n next(iterator)\n except StopIteration:\n break\n\n\nclass MyGen:\n current = 0\n def __init__(self, first, last):\n self.first = first\n self.last = last\n MyGen.current = self.first #this line allows us to use the current number as the starting point for the iteration\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if MyGen.current < self.last:\n num = MyGen.current\n MyGen.current += 1\n return num\n raise StopIteration\n\ngen = MyGen(1,100)\nfor i in gen:\n print(i)\n"}}},{"rowIdx":542628,"cells":{"filename":{"kind":"string","value":"the-stack_106_30937"},"text":{"kind":"string","value":"import torch\n\nimport utility\nimport data\nimport model\nimport loss\nfrom option import args\nfrom trainer import Trainer\n\ntorch.manual_seed(args.seed)\ncheckpoint = utility.checkpoint(args)\n\ndef main():\n global model\n global loss\n if args.data_test == ['video']:\n 
from videotester import VideoTester\n model = model.Model(args, checkpoint)\n t = VideoTester(args, model, checkpoint)\n t.test()\n else:\n if checkpoint.ok:\n loader = data.Data(args)\n model = model.Model(args, checkpoint)\n loss = loss.Loss(args, checkpoint) if not args.test_only else None\n t = Trainer(args, loader, model, loss, checkpoint)\n while not t.terminate():\n t.train()\n t.test()\n\n checkpoint.done()\n\nif __name__ == '__main__':\n main()\n"}}},{"rowIdx":542629,"cells":{"filename":{"kind":"string","value":"the-stack_106_30938"},"text":{"kind":"string","value":"import logging\nimport os\n\nfrom airflow.models import BaseOperator\nfrom airflow.exceptions import AirflowException\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom subprocess import check_output, CalledProcessError\n\n\nclass JavaOperator(BaseOperator):\n \"\"\"\n :param maven_coordinate: Metadata of the jar in Maven you want to download\n :type maven_coordinate: list\n :param repositories: Where the jar is located in Maven.\n :type repositories: list\n :param main_class: The location of user-defined main class you want to execute\n :type main_class: string\n :type op_args: list\n :param op_args: a list of positional arguments that will get unpacked in the order you provided\n when executing your jar\n :param op_kwargs: a dictionary of keyword arguments that will get unpacked in an arbitary but deterministic order\n when executing your jar after the positional arguments\n :type op_kwargs: dict\n :param fetch_mode: Mode to use when downloading the jars\n By default, it will fetch things missing from cache.\n Here is a list of modes available \n :type fetch_mode: string\n :param cache_directory: The location of where the jars are cached.\n By default, they are located at your user folder under '.coursier/cache/v1'\n :type cache_directory: string\n :param extra_coursier_params: a list of strings that can be args or kwargs\n :type extra_coursier_params: list\n \"\"\"\n template_fields = ('main_class', 'repositories', 'op_args', 'op_kwargs',\n 'fetch_mode', 'cache_directory', 'extra_coursier_params')\n ui_color = '#F5C957'\n\n @apply_defaults\n def __init__(self,\n maven_coordinates,\n repositories=None,\n main_class=None,\n op_args=None,\n op_kwargs=None,\n fetch_mode='missing',\n cache_directory=os.path.join(os.path.expanduser('~'), '.coursier/cache/v1'),\n extra_coursier_params=None,\n *args, **kwargs):\n super(JavaOperator, self).__init__(*args, **kwargs)\n\n self.maven_coordinates = maven_coordinates\n self.repositories = repositories\n self.main_class = main_class\n self.op_args = op_args or []\n self.op_kwargs = op_kwargs or {}\n self.fetch_mode = fetch_mode\n self.cache_directory = cache_directory\n self.extra_coursier_params = extra_coursier_params or []\n\n def run_coursier(self):\n \"\"\"\n Builds a bash command to download all transitive dependencies of a maven coordinate.\n It can return java -cp compatible output for executing.\n\n This is done through coursier. 
Find more information at: https://github.com/coursier/coursier.\n \"\"\"\n cmd = ['coursier', 'fetch']\n\n cmd.extend(self.extra_coursier_params)\n cmd.extend(['--mode', self.fetch_mode])\n cmd.extend(['--cache', self.cache_directory])\n\n if self.main_class:\n cmd.extend(['--classpath'])\n\n for repo in self.repositories:\n cmd.extend(['--repository', repo])\n for coordinate in self.maven_coordinates:\n if not isinstance(coordinate, MavenCoordinate):\n raise AirflowException('Please use the MavenCoordinate class. Current type: {0}, current value: {1}'\n .format(type(coordinate), coordinate))\n cmd.extend([coordinate.get_coordinate(), '--artifact-type', coordinate.packaging])\n\n logging.info('Executing %s', cmd)\n try:\n return check_output(cmd)\n except CalledProcessError:\n raise AirflowException(\"Failed to fetch requested maven coordinates\")\n\n def execute(self, context):\n \"\"\"\n Runs the coursier command.\n\n Returns jvm exit code instead of main's exit code. When an exception is\n caught, the exit code returned will be 0 meaning a false positive result.\n It is best to include `System.exit()` with some non-zero value when an exception happens\n if that is the intended flow.\n \"\"\"\n output = self.run_coursier()\n if self.main_class:\n cmd = ['java', '-cp', '\"'+output+'\"', self.main_class]\n cmd.extend(self.op_args)\n for k, v in self.op_kwargs.items():\n cmd.extend([k, v])\n\n bash_command = ' '.join(cmd)\n BashOperator(bash_command=bash_command, task_id='inner_bash').execute(self)\n else:\n logging.info(output)\n\n\nclass MavenCoordinate:\n \"\"\"\n For accuracy, copy/paste information direct from Nexus.\n Generally, leave packaging as default.\n\n Find more information here: https://maven.apache.org/pom.html#Maven_Coordinates\n :param group_id: Unique identification amongst an organization or a project\n :type group_id: string\n :param artifact_id: The name that the project is known by\n :type artifact_id: string\n :param version: Version of project\n :type version: string\n :param packaging: Type of project\n :type packaging: string\n \"\"\"\n def __init__(self,\n group_id,\n artifact_id,\n version,\n packaging='jar,bundle'\n ):\n self.group_id = group_id\n self.artifact_id = artifact_id\n self.version = version\n self.packaging = packaging\n\n def __repr__(self):\n return self.get_coordinate()\n\n def get_coordinate(self):\n return ':'.join([self.group_id, self.artifact_id, self.version])\n"}}},{"rowIdx":542630,"cells":{"filename":{"kind":"string","value":"the-stack_106_30940"},"text":{"kind":"string","value":"\"\"\"\nEDIT NOTICE\n\nFile edited from original in https://github.com/castorini/hedwig\nby Bernal Jimenez Gutierrez (jimenezgutierrez.1@osu.edu)\nin May 2020\n\"\"\"\n\nimport csv\n\nimport sys\nimport numpy as np\nfrom nltk.tokenize import sent_tokenize\nimport torch\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\nclass InputFeaturesText(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, label_id):\n self.input_ids = input_ids\n self.label_id = label_id\n\nclass BertProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"\n Gets a collection of `InputExample`s for the train set\n :param data_dir:\n :return:\n \"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"\n Gets a collection of `InputExample`s for the dev set\n :param data_dir:\n :return:\n \"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"\n Gets a collection of `InputExample`s for the test set\n :param data_dir:\n :return:\n \"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"\n Gets a list of possible labels in the dataset\n :return:\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"\n Reads a Tab Separated Values (TSV) file\n :param input_file:\n :param quotechar:\n :return:\n \"\"\"\n import sys\n csv.field_size_limit(sys.maxsize)\n\n with open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(str(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\n\ndef convert_examples_to_features(examples, max_seq_length, tokenizer, print_examples=False):\n \"\"\"\n Loads a data file into a list of InputBatch objects\n :param examples:\n :param max_seq_length:\n :param tokenizer:\n :param print_examples:\n :return: a list of InputBatch objects\n \"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n #Replacing new lines with [SEP] tokens\n text_a = example.text_a.replace('\\\\n','[SEP]')\n tokens_a = tokenizer.tokenize(text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). 
This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = [float(x) for x in example.label]\n\n if print_examples and ex_index < 5:\n print(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n print(\"label: %s\" % example.label)\n\n features.append(InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\ndef convert_examples_to_features_long(examples, max_seq_length, tokenizer, print_examples=False, model_type='longformer'):\n \"\"\"\n Loads a data file into a list of InputBatch objects\n :param examples:\n :param max_seq_length:\n :param tokenizer:\n :param print_examples:\n :return: a list of InputBatch objects\n \"\"\"\n\n features = []\n\n encoded_out = tokenizer.batch_encode_plus([example.text_a.replace('\\\\n','') for example in examples],\n add_special_tokens=True,\n max_length=max_seq_length,\n pad_to_max_length=True,\n return_token_type_ids=True)\n\n input_ids = encoded_out['input_ids']\n attention_masks = encoded_out['attention_mask']\n segment_ids = encoded_out['token_type_ids']\n\n for example,ids, masks, segments in zip(examples,input_ids, attention_masks, segment_ids):\n\n if model_type == 'longformer':\n masks[0] = 2\n\n label_id = [float(x) for x in example.label]\n\n features.append(InputFeatures(input_ids=ids,\n input_mask=masks,\n segment_ids=segments,\n label_id=label_id))\n return features\n\n\ndef convert_examples_to_hierarchical_features(examples, max_seq_length, tokenizer, print_examples=False):\n \"\"\"\n Loads a data file into a list of InputBatch objects\n :param examples:\n :param max_seq_length:\n :param tokenizer:\n :param print_examples:\n :return: a list of InputBatch objects\n \"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = [tokenizer.tokenize(line) for line in sent_tokenize(example.text_a)]\n tokens_b = None\n\n if example.text_b:\n tokens_b = [tokenizer.tokenize(line) for line in sent_tokenize(example.text_b)]\n # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length\n # Account for [CLS], [SEP], [SEP]\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP]\n for i0 in range(len(tokens_a)):\n if len(tokens_a[i0]) > max_seq_length - 2:\n tokens_a[i0] = 
tokens_a[i0][:(max_seq_length - 2)]\n\n tokens = [[\"[CLS]\"] + line + [\"[SEP]\"] for line in tokens_a]\n segment_ids = [[0] * len(line) for line in tokens]\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = list()\n for line in tokens:\n input_ids.append(tokenizer.convert_tokens_to_ids(line))\n\n # Input mask has 1 for real tokens and 0 for padding tokens\n input_mask = [[1] * len(line_ids) for line_ids in input_ids]\n\n # Zero-pad up to the sequence length.\n padding = [[0] * (max_seq_length - len(line_ids)) for line_ids in input_ids]\n for i0 in range(len(input_ids)):\n input_ids[i0] += padding[i0]\n input_mask[i0] += padding[i0]\n segment_ids[i0] += padding[i0]\n\n label_id = [float(x) for x in example.label]\n\n if print_examples and ex_index < 5:\n print(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n print(\"label: %s\" % example.label)\n\n features.append(InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"\n Truncates a sequence pair in place to the maximum length\n :param tokens_a:\n :param tokens_b:\n :param max_length:\n :return:\n \"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n"}}},{"rowIdx":542631,"cells":{"filename":{"kind":"string","value":"the-stack_106_30941"},"text":{"kind":"string","value":"# Copyright 2011 OpenStack Foundation.\n# Copyright 2012, Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nException related utilities.\n\"\"\"\n\nimport logging\nimport sys\nimport time\nimport traceback\n\nimport six\n\nfrom egodocker.common.gettextutils import _LE\n\n\nclass save_and_reraise_exception(object):\n \"\"\"Save current exception, run some code and then re-raise.\n\n In some cases the exception context can be cleared, resulting in None\n being attempted to be re-raised after an exception handler is run. This\n can happen when eventlet switches greenthreads or when running an\n exception handler, code raises and catches an exception. In both\n cases the exception context will be cleared.\n\n To work around this, we save the exception state, run handler code, and\n then re-raise the original exception. 
If another exception occurs, the\n saved exception is logged and the new exception is re-raised.\n\n In some cases the caller may not want to re-raise the exception, and\n for those circumstances this context provides a reraise flag that\n can be used to suppress the exception. For example::\n\n except Exception:\n with save_and_reraise_exception() as ctxt:\n decide_if_need_reraise()\n if not should_be_reraised:\n ctxt.reraise = False\n\n If another exception occurs and reraise flag is False,\n the saved exception will not be logged.\n\n If the caller wants to raise new exception during exception handling\n he/she sets reraise to False initially with an ability to set it back to\n True if needed::\n\n except Exception:\n with save_and_reraise_exception(reraise=False) as ctxt:\n [if statements to determine whether to raise a new exception]\n # Not raising a new exception, so reraise\n ctxt.reraise = True\n \"\"\"\n def __init__(self, reraise=True):\n self.reraise = reraise\n\n def __enter__(self):\n self.type_, self.value, self.tb, = sys.exc_info()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is not None:\n if self.reraise:\n logging.error(_LE('Original exception being dropped: %s'),\n traceback.format_exception(self.type_,\n self.value,\n self.tb))\n return False\n if self.reraise:\n six.reraise(self.type_, self.value, self.tb)\n\n\ndef forever_retry_uncaught_exceptions(infunc):\n def inner_func(*args, **kwargs):\n last_log_time = 0\n last_exc_message = None\n exc_count = 0\n while True:\n try:\n return infunc(*args, **kwargs)\n except Exception as exc:\n this_exc_message = six.u(str(exc))\n if this_exc_message == last_exc_message:\n exc_count += 1\n else:\n exc_count = 1\n # Do not log any more frequently than once a minute unless\n # the exception message changes\n cur_time = int(time.time())\n if (cur_time - last_log_time > 60 or\n this_exc_message != last_exc_message):\n logging.exception(\n _LE('Unexpected exception occurred %d time(s)... '\n 'retrying.') % exc_count)\n last_log_time = cur_time\n last_exc_message = this_exc_message\n exc_count = 0\n # This should be a very rare event. In case it isn't, do\n # a sleep.\n time.sleep(1)\n return inner_func\n"}}},{"rowIdx":542632,"cells":{"filename":{"kind":"string","value":"the-stack_106_30942"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n\n\"\"\"\nThis module handles all interaction with NCBI's BLAST API, including launching new\nremote searches, polling for completion status, and retrieval of results.\n\"\"\"\n\nimport re\nimport time\nimport logging\nimport requests\n\nfrom cblaster import helpers\nfrom cblaster.classes import Hit\n\n\nLOG = logging.getLogger(__name__)\n\nBLAST_API_URL = \"https://blast.ncbi.nlm.nih.gov/Blast.cgi\"\n\n\ndef start(\n sequences=None,\n query_file=None,\n query_ids=None,\n database=\"nr\",\n program=\"blastp\",\n megablast=False,\n filtering=\"F\",\n evalue=10,\n nucl_reward=None,\n nucl_penalty=None,\n gap_costs=\"11 1\",\n matrix=\"BLOSUM62\",\n hitlist_size=5000,\n threshold=11,\n word_size=6,\n comp_based_stats=2,\n entrez_query=None,\n):\n \"\"\"Launch a remote BLAST search using NCBI BLAST API.\n\n Note that the HITLIST_SIZE, ALIGNMENTS and DESCRIPTIONS parameters must all be set\n together in order to mimic max_target_seqs behaviour.\n\n Usage guidelines:\n 1. Don't contact server more than once every 10 seconds\n 2. Don't poll for a single RID more than once a minute\n 3. Use URL parameter email/tool\n 4. 
Run scripts weekends or 9pm-5am Eastern time on weekdays if >50 searches\n\n For a full description of the parameters, see:\n\n 1. `BLAST API documentation`\n 2. `BLAST documentation\n `\n\n Parameters:\n sequences (dict): Query sequence dict generated by helpers.get_sequences()\n query_file (str): Path to a query FASTA file\n query_ids (list): Collection of NCBI sequence identifiers\n database (str): Target NCBI BLAST database\n program (str): BLAST variant to run\n megablast (bool): Enable megaBLAST option (only with BLASTn)\n filtering (str): Low complexity filtering\n evalue (float): E-value cutoff\n nucl_reward (int): Reward for matching bases (only with BLASTN/megaBLAST)\n nucl_penalty (int): Penalty for mismatched bases (only with BLASTN/megaBLAST)\n gap_costs (str): Gap existence and extension costs\n matrix (str): Scoring matrix name\n hitlist_size (int): Number of database sequences to keep\n threshold (int): Neighbouring score for initial words\n word_size (int): Size of word for initial matches\n comp_based_stats (int): Composition based statistics algorithm\n entrez_query (str): NCBI Entrez search term for pre-filtering the BLAST database\n Returns:\n rid (str): Request Identifier (RID) assigned to the search\n rtoe (int): Request Time Of Execution (RTOE), estimated run time of the search\n \"\"\"\n if not sequences:\n sequences = helpers.get_sequences(\n query_file=query_file,\n query_ids=query_ids\n )\n\n query = helpers.sequences_to_fasta(sequences)\n\n parameters = {\n \"CMD\": \"PUT\",\n \"DATABASE\": database,\n \"PROGRAM\": program,\n \"FILTER\": filtering,\n \"EXPECT\": evalue,\n \"GAPCOSTS\": gap_costs,\n \"MATRIX\": matrix,\n \"HITLIST_SIZE\": hitlist_size,\n \"ALIGNMENTS\": hitlist_size,\n \"DESCRIPTIONS\": hitlist_size,\n \"WORD_SIZE\": word_size,\n \"COMPOSITION_BASED_STATISTICS\": comp_based_stats,\n }\n\n if entrez_query:\n parameters[\"ENTREZ_QUERY\"] = entrez_query\n\n if program == \"blastn\":\n if megablast:\n parameters[\"MEGABLAST\"] = \"on\"\n if nucl_reward:\n parameters[\"NUCL_REWARD\"] = nucl_reward\n if nucl_penalty:\n parameters[\"NUCL_PENALTY\"] = nucl_penalty\n else:\n # Does not apply to blastn\n parameters[\"THRESHOLD\"] = threshold\n\n response = requests.post(BLAST_API_URL, files={\"QUERY\": query}, params=parameters)\n\n LOG.debug(\"Search parameters: %s\", parameters)\n LOG.debug(\"Search URL: %s\", response.url)\n\n rid, rtoe = re.findall(r\"(?:RID|RTOE) = (.+?)[\\n\\s]\", response.text)\n return rid, int(rtoe)\n\n\ndef check(rid):\n \"\"\"Check completion status of a BLAST search given a Request Identifier (RID).\n\n Arguments:\n rid (str): NCBI BLAST search request identifier (RID)\n Returns:\n bool: Search has completed successfully and hits were reported\n Raises:\n ValueError:\n Search has failed. 
This is caused either by program error (in which case,\n NCBI requests you submit an error report with the RID) or expiration of\n the RID (only stored for 24 hours).\n ValueError:\n Search has completed successfully, but no hits were reported.\n \"\"\"\n parameters = {\"CMD\": \"Get\", \"RID\": rid, \"FORMAT_OBJECT\": \"SearchInfo\"}\n\n response = requests.get(BLAST_API_URL, params=parameters)\n\n LOG.debug(response.url)\n\n search = re.findall(r\"(?:Status|ThereAreHits)=(.+?)[\\n\\s]\", response.text)\n\n if len(search) == 1:\n status = search[0]\n if status in (\"UNKNOWN\", \"FAILED\"):\n raise ValueError(f\"Search {rid} failed (status={status})\")\n if status == \"WAITING\":\n return False\n\n if search == [\"READY\", \"yes\"]:\n return True\n\n raise ValueError(\"Search completed, but found no hits\")\n\n\ndef retrieve(rid, hitlist_size=5000):\n \"\"\"Retrieve BLAST results corresponding to a given Request Identifier (RID).\n\n Arguments:\n rid (str): NCBI BLAST search request identifiers (RID)\n hitlist_size (int): Total number of hits to retrieve\n Returns:\n list: BLAST search results split by newline, with HTML parts removed\n \"\"\"\n\n parameters = {\n \"CMD\": \"Get\",\n \"RID\": rid,\n \"FORMAT_TYPE\": \"Tabular\",\n \"FORMAT_OBJECT\": \"Alignment\",\n \"HITLIST_SIZE\": hitlist_size,\n \"ALIGNMENTS\": hitlist_size,\n \"DESCRIPTIONS\": hitlist_size,\n \"NCBI_GI\": \"F\",\n }\n\n LOG.debug(parameters)\n\n response = requests.get(BLAST_API_URL, params=parameters)\n\n LOG.debug(response.url)\n\n # Remove HTML junk and info lines\n # BLAST results are stored inside
<PRE> tags\n    return [\n        line\n        for line in re.search(\"<PRE>(.+?)</PRE>
\", response.text, re.DOTALL)\n .group(1)\n .split(\"\\n\")\n if line and not line.startswith(\"#\")\n ]\n\n\ndef poll(rid, delay=60, max_retries=-1):\n \"\"\"Poll BLAST API with given Request Identifier (RID) until results are returned.\n\n As per NCBI usage guidelines, this function will only poll once per minute; this is\n calculated each time such that wait is constant (i.e. accounts for differing\n response time on the status check).\n\n Arguments:\n rid (str): NCBI BLAST search request identifier (RID)\n delay (int): Total delay (seconds) between polling\n max_retries (int): Maximum number of polling attempts (-1 for unlimited)\n Returns:\n list: BLAST search results split by newline\n \"\"\"\n if delay < 60:\n raise ValueError(\"Delay must be at least 60s\")\n\n retries, previous = 0, 0\n while True:\n current = time.time()\n wait = previous - current + delay\n if wait > 0:\n time.sleep(wait)\n previous = current + wait\n else:\n previous = current\n\n LOG.info(\"Checking search status...\")\n\n if check(rid):\n LOG.info(\"Search has completed successfully!\")\n return\n\n if max_retries > 0 and retries == max_retries:\n raise ValueError(f\"Reached maximum retry limit {max_retries}\")\n\n retries += 1\n\n\ndef parse(\n handle,\n sequences=None,\n query_file=None,\n query_ids=None,\n max_evalue=0.01,\n min_identity=30,\n min_coverage=50,\n):\n \"\"\"Parse Tabular results from remote BLAST search performed via API.\n\n Since the API provides no option for returning query coverage, which is a metric we\n want to use for filtering hits, query sequences must be passed to this function so\n that their lengths can be compared to the alignment length.\n\n Arguments:\n handle (list):\n File handle (or file handle-like) object corresponding to BLAST results. 
Note\n that this function expects an iterable of tab-delimited lines and performs no\n validation/error checking\n sequences (dict): Query sequences\n query_file (str): Path to FASTA format query file\n query_ids (list): NCBI sequence identifiers\n max_evalue (float): Maximum e-value\n min_identity (float): Minimum percent identity\n min_coverage (float): Minimum percent query coverage\n Returns:\n list: Hit objects corresponding to criteria passing BLAST hits\n \"\"\"\n if not sequences:\n sequences = helpers.get_sequences(query_file, query_ids)\n\n hits = []\n for line in handle:\n qid, sid, pident, *_, qstart, qend, _, _, evalue, score, _ = line.split(\"\\t\")\n\n # Manually calculate query coverage\n coverage = (int(qend) - int(qstart) + 1) / len(sequences[qid]) * 100\n\n hit = Hit(\n query=qid,\n subject=sid,\n identity=pident,\n coverage=coverage,\n evalue=evalue,\n bitscore=score,\n )\n\n if (\n float(hit.identity) > min_identity\n and float(hit.coverage) > min_coverage\n and hit.evalue < max_evalue\n ):\n hits.append(hit)\n\n if len(hits) == 0:\n raise ValueError(\"No results found\")\n\n return hits\n\n\ndef search(\n rid=None,\n sequences=None,\n query_file=None,\n query_ids=None,\n min_identity=0.3,\n min_coverage=0.5,\n max_evalue=0.01,\n blast_file=None,\n **kwargs,\n):\n \"\"\"Perform a remote BLAST search via the NCBI's BLAST API.\n\n This function launches a new search given a query FASTA file or list of valid NCBI\n identifiers, polls the API to check the completion status of the search, then\n retrieves and parses the results.\n\n It is also possible to call other BLAST variants using the program argument.\n\n Arguments:\n rid (str): NCBI BLAST search request identifier (RID)\n sequences (dict): Query sequences\n query_file (str): Path to FASTA format query file\n query_ids (list): NCBI sequence identifiers\n min_identity (float): Minimum percent identity\n min_coverage (float): Minimum percent query coverage\n max_evalue (float): Maximum e-value\n blast_file (TextIOWrapper): file blast results are written to\n Returns:\n list: Hit objects corresponding to criteria passing BLAST hits\n \"\"\"\n if not rid:\n LOG.info(\"Launching new search\")\n\n # Start search, get request identifier (RID) and execution ETA (RTOE)\n rid, rtoe = start(\n sequences=sequences,\n query_file=query_file,\n query_ids=query_ids,\n **kwargs\n )\n\n LOG.info(\"Request Identifier (RID): %s\", rid)\n LOG.info(\"Request Time Of Execution (RTOE): %ss\", rtoe)\n\n # Wait the RTOE (sec) before bothering to poll\n time.sleep(rtoe)\n\n LOG.info(\"Polling NCBI for completion status\")\n poll(rid)\n\n LOG.info(\"Retrieving results for search %s\", rid)\n results = retrieve(rid)\n\n if blast_file:\n LOG.info(\"Writing BLAST hit table to %s\", blast_file.name)\n blast = \"\\n\".join(results)\n blast_file.write(blast)\n\n # Parse results for hits\n LOG.info(\"Parsing results...\")\n results = parse(\n results,\n sequences=sequences,\n query_file=query_file,\n query_ids=query_ids,\n max_evalue=max_evalue,\n min_identity=min_identity,\n min_coverage=min_coverage,\n )\n\n return rid, results\n"}}},{"rowIdx":542633,"cells":{"filename":{"kind":"string","value":"the-stack_106_30943"},"text":{"kind":"string","value":"from typing import Set, Callable, Dict, List, Any\nfrom structures import Point\n\n\nclass Vertex:\n __position: Point\n __children: Set['Vertex']\n __parents: Set['Vertex']\n __connectivity: Dict['Vertex', 'Vertex']\n __aux: Dict[Any, Any]\n\n def __init__(self, position: Point, store_connectivity: bool 
= False) -> None:\n self.__position = position\n self.__children = set()\n self.__parents = set()\n self.__connectivity = {self: self}\n self.__store_connectivity = store_connectivity\n self.__cost = None\n self.__aux = {}\n\n def __add_connectivity(self, vertex_added: 'Vertex'):\n\n if vertex_added is self:\n return\n\n # update connectivity of this vertex\n self.__connectivity[vertex_added] = vertex_added\n\n connectivity_keys = self.connectivity.keys()\n vertex_added_connectivity_keys = vertex_added.connectivity.keys()\n new_connection_keys = vertex_added_connectivity_keys - connectivity_keys\n\n for vertex_key in new_connection_keys:\n if vertex_key:\n self.__connectivity[vertex_key] = vertex_added\n\n # get connectivity of all connections correct\n connectivity_keys = self.connectivity.keys()\n vertex_added.connectivity[self] = self\n for vertex_to_update in self.__connectivity:\n vertex_connectivity_keys = vertex_to_update.connectivity.keys()\n new_connection_keys = connectivity_keys - vertex_connectivity_keys\n for vertex_key in new_connection_keys:\n if self in vertex_to_update.connectivity:\n new_vertex_path_target = self\n else:\n new_vertex_path_target = vertex_added\n new_vertex_path_step = vertex_to_update.connectivity[new_vertex_path_target]\n vertex_to_update.connectivity[vertex_key] = new_vertex_path_step\n\n # Adding #\n\n def add_child(self, child: 'Vertex') -> None:\n self.__children.add(child)\n if self.__store_connectivity:\n self.__add_connectivity(child)\n\n\n def add_parent(self, parent: 'Vertex') -> None:\n self.__parents.add(parent)\n if self.__store_connectivity:\n self.__add_connectivity(parent)\n\n # Removing #\n\n def remove_child(self, child: 'Vertex') -> None:\n self.__children.remove(child)\n # ToDo No remove connecitivty implemented, as it is not trivial\n\n def remove_parent(self, parent: 'Vertex') -> None:\n self.__parents.remove(parent)\n # No remove connecitivty implemented, as it is not trivial\n\n # Setting #\n\n def set_child(self, child: 'Vertex'):\n self.__children.clear()\n # ToDo No remove connecitivty implemented, as it is not trivial\n self.__children.add(child)\n if self.__store_connectivity:\n child.__add_connectivity(self)\n self.__add_connectivity(child)\n\n def set_parent(self, parent: 'Vertex'):\n self.__parents.clear()\n # ToDo No remove connecitivty implemented, as it is not trivial\n self.__parents.add(parent)\n if self.__store_connectivity:\n parent.__add_connectivity(self)\n self.__add_connectivity(parent)\n\n # Visiting #\n\n def visit_children(self, f: Callable[['Vertex'], bool]) -> None:\n for child in self.__children:\n child.visit_children(f)\n if not f(self):\n return\n\n def visit_parents(self, f: Callable[['Vertex'], bool]) -> None:\n for child in self.__children:\n child.visit_children(f)\n if not f(self):\n return\n\n # Properties #\n\n @property\n def cost(self) -> float:\n return self.__cost\n\n @property\n def position(self) -> Point:\n return self.__position\n\n @property\n def children(self) -> Set['Vertex']:\n return self.__children\n\n @property\n def parents(self) -> Set['Vertex']:\n return self.__parents\n\n @property\n def connectivity(self) -> Dict['Vertex', 'Vertex']:\n return self.__connectivity\n\n @property\n def aux(self) -> Dict[Any, Any]:\n return self.__aux\n\n # Setters #\n\n @cost.setter\n def cost(self, val: float):\n self.__cost = val\n\n"}}},{"rowIdx":542634,"cells":{"filename":{"kind":"string","value":"the-stack_106_30944"},"text":{"kind":"string","value":"__version__ = '2021.12'\n__url__ = 
'https://github.com/Paradoxis/Flask-Unsign-Wordlist'\n__author__ = 'Luke Paris (Paradoxis)'\n\nimport os, sys\n\nfrom flask_unsign_wordlist.exceptions import NoSuchWordlist\n\n\ndef get(name: str='all') -> str:\n \"\"\"\n Get the path to a flask-unsign wordlist\n :param name: Wordlist name ('.txt' is implied)\n :return: Absolute path to a wordlist\n \"\"\"\n cwd = os.path.dirname(__file__)\n path = os.path.join(cwd, 'wordlists', name + '.txt')\n\n if not os.path.isfile(path):\n raise NoSuchWordlist(f'No known wordlist found with name: {name!r}')\n\n return path\n\n\ndef main():\n \"\"\"CLI entry point\"\"\"\n try:\n path = get(sys.argv[1] if len(sys.argv) != 1 else 'all')\n sys.stdout.write(path)\n\n except NoSuchWordlist as e:\n print(str(e), file=sys.stderr)\n return 1\n\n\nif __name__ == '__main__':\n exit(main() or 0)\n\n"}}},{"rowIdx":542635,"cells":{"filename":{"kind":"string","value":"the-stack_106_30945"},"text":{"kind":"string","value":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA wrapper class for Spark DataFrame to behave similar to pandas DataFrame.\n\"\"\"\nfrom collections import defaultdict, namedtuple\nfrom collections.abc import Mapping\nimport re\nimport warnings\nimport inspect\nimport json\nimport types\nfrom functools import partial, reduce\nimport sys\nfrom itertools import zip_longest, chain\nfrom types import TracebackType\nfrom typing import (\n Any,\n Callable,\n Dict,\n Generic,\n IO,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n no_type_check,\n TYPE_CHECKING,\n)\nimport datetime\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import ( # type: ignore[attr-defined]\n is_bool_dtype,\n is_list_like,\n is_dict_like,\n is_scalar,\n)\nfrom pandas.tseries.frequencies import DateOffset, to_offset\n\nif TYPE_CHECKING:\n from pandas.io.formats.style import Styler\n\nfrom pandas.core.dtypes.common import infer_dtype_from_object\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.dtypes.inference import is_sequence\nfrom pyspark import StorageLevel\nfrom pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F\nfrom pyspark.sql.functions import pandas_udf\nfrom pyspark.sql.types import (\n ArrayType,\n BooleanType,\n DataType,\n DoubleType,\n NumericType,\n Row,\n StringType,\n StructField,\n StructType,\n DecimalType,\n TimestampType,\n TimestampNTZType,\n)\nfrom pyspark.sql.window import Window\n\nfrom pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.\nfrom pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, Label, Name, Scalar, T\nfrom pyspark.pandas.accessors import PandasOnSparkFrameMethods\nfrom pyspark.pandas.config import option_context, get_option\nfrom pyspark.pandas.spark 
import functions as SF\nfrom pyspark.pandas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods\nfrom pyspark.pandas.utils import (\n align_diff_frames,\n column_labels_level,\n combine_frames,\n default_session,\n is_name_like_tuple,\n is_name_like_value,\n is_testing,\n name_like_string,\n same_anchor,\n scol_for,\n validate_arguments_and_invoke_function,\n validate_axis,\n validate_bool_kwarg,\n validate_how,\n validate_mode,\n verify_temp_column_name,\n log_advice,\n)\nfrom pyspark.pandas.generic import Frame\nfrom pyspark.pandas.internal import (\n InternalField,\n InternalFrame,\n HIDDEN_COLUMNS,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_INDEX_NAME_FORMAT,\n SPARK_DEFAULT_INDEX_NAME,\n SPARK_DEFAULT_SERIES_NAME,\n SPARK_INDEX_NAME_PATTERN,\n)\nfrom pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame\nfrom pyspark.pandas.ml import corr\nfrom pyspark.pandas.typedef.typehints import (\n as_spark_type,\n infer_return_type,\n pandas_on_spark_type,\n spark_type_to_pandas_dtype,\n DataFrameType,\n SeriesType,\n ScalarType,\n create_tuple_for_frame_type,\n)\nfrom pyspark.pandas.plot import PandasOnSparkPlotAccessor\n\nif TYPE_CHECKING:\n from pyspark.sql._typing import OptionalPrimitiveType\n\n from pyspark.pandas.groupby import DataFrameGroupBy\n from pyspark.pandas.resample import DataFrameResampler\n from pyspark.pandas.indexes import Index\n from pyspark.pandas.series import Series\n\n\n# These regular expression patterns are complied and defined here to avoid to compile the same\n# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.\n# Two patterns basically seek the footer string from Pandas'\nREPR_PATTERN = re.compile(r\"\\n\\n\\[(?P[0-9]+) rows x (?P[0-9]+) columns\\]$\")\nREPR_HTML_PATTERN = re.compile(\n r\"\\n\\(?P[0-9]+) rows × (?P[0-9]+) columns\\<\\/p\\>\\n\\<\\/div\\>$\"\n)\n\n\n_flex_doc_FRAME = \"\"\"\nGet {desc} of dataframe and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``. With reverse version, `{reverse}`.\n\nAmong flexible wrappers (`add`, `sub`, `mul`, `div`) to\narithmetic operators: `+`, `-`, `*`, `/`, `//`.\n\nParameters\n----------\nother : scalar\n Any single data\n\nReturns\n-------\nDataFrame\n Result of the arithmetic operation.\n\nExamples\n--------\n>>> df = ps.DataFrame({{'angles': [0, 3, 4],\n... 'degrees': [360, 180, 360]}},\n... index=['circle', 'triangle', 'rectangle'],\n... columns=['angles', 'degrees'])\n>>> df\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\nAdd a scalar with operator version which return the same\nresults. 
Also reverse version.\n\n>>> df + 1\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\n>>> df.add(1)\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\n>>> df.add(df)\n angles degrees\ncircle 0 720\ntriangle 6 360\nrectangle 8 720\n\n>>> df + df + df\n angles degrees\ncircle 0 1080\ntriangle 9 540\nrectangle 12 1080\n\n>>> df.radd(1)\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\nDivide and true divide by constant with reverse version.\n\n>>> df / 10\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.div(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.rdiv(10)\n angles degrees\ncircle inf 0.027778\ntriangle 3.333333 0.055556\nrectangle 2.500000 0.027778\n\n>>> df.truediv(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.rtruediv(10)\n angles degrees\ncircle inf 0.027778\ntriangle 3.333333 0.055556\nrectangle 2.500000 0.027778\n\nSubtract by constant with reverse version.\n\n>>> df - 1\n angles degrees\ncircle -1 359\ntriangle 2 179\nrectangle 3 359\n\n>>> df.sub(1)\n angles degrees\ncircle -1 359\ntriangle 2 179\nrectangle 3 359\n\n>>> df.rsub(1)\n angles degrees\ncircle 1 -359\ntriangle -2 -179\nrectangle -3 -359\n\nMultiply by constant with reverse version.\n\n>>> df * 1\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\n>>> df.mul(1)\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\n>>> df.rmul(1)\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\nFloor Divide by constant with reverse version.\n\n>>> df // 10\n angles degrees\ncircle 0.0 36.0\ntriangle 0.0 18.0\nrectangle 0.0 36.0\n\n>>> df.floordiv(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.0 18.0\nrectangle 0.0 36.0\n\n>>> df.rfloordiv(10) # doctest: +SKIP\n angles degrees\ncircle inf 0.0\ntriangle 3.0 0.0\nrectangle 2.0 0.0\n\nMod by constant with reverse version.\n\n>>> df % 2\n angles degrees\ncircle 0 0\ntriangle 1 0\nrectangle 0 0\n\n>>> df.mod(2)\n angles degrees\ncircle 0 0\ntriangle 1 0\nrectangle 0 0\n\n>>> df.rmod(2)\n angles degrees\ncircle NaN 2\ntriangle 2.0 2\nrectangle 2.0 2\n\nPower by constant with reverse version.\n\n>>> df ** 2\n angles degrees\ncircle 0.0 129600.0\ntriangle 9.0 32400.0\nrectangle 16.0 129600.0\n\n>>> df.pow(2)\n angles degrees\ncircle 0.0 129600.0\ntriangle 9.0 32400.0\nrectangle 16.0 129600.0\n\n>>> df.rpow(2)\n angles degrees\ncircle 1.0 2.348543e+108\ntriangle 8.0 1.532496e+54\nrectangle 16.0 2.348543e+108\n\"\"\"\n\n\nclass DataFrame(Frame, Generic[T]):\n \"\"\"\n pandas-on-Spark DataFrame that corresponds to pandas DataFrame logically. This holds Spark\n DataFrame internally.\n\n :ivar _internal: an internal immutable Frame to manage metadata.\n :type _internal: InternalFrame\n\n Parameters\n ----------\n data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \\\n or pandas-on-Spark Series\n Dict can contain Series, arrays, constants, or list-like objects\n Note that if `data` is a pandas DataFrame, a Spark DataFrame, and a pandas-on-Spark Series,\n other arguments should not be used.\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided\n columns : Index or array-like\n Column labels to use for resulting frame. 
Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = ps.DataFrame(data=d, columns=['col1', 'col2'])\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Constructing DataFrame from pandas DataFrame\n\n >>> df = ps.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = ps.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = ps.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df2 # doctest: +SKIP\n a b c d e\n 0 3 1 4 9 8\n 1 4 8 4 8 4\n 2 7 6 5 6 7\n 3 8 7 9 1 0\n 4 2 5 4 3 9\n \"\"\"\n\n def __init__( # type: ignore[no-untyped-def]\n self, data=None, index=None, columns=None, dtype=None, copy=False\n ):\n if isinstance(data, InternalFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n internal = data\n elif isinstance(data, SparkDataFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n internal = InternalFrame(spark_frame=data, index_spark_columns=None)\n elif isinstance(data, ps.Series):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n data = data.to_frame()\n internal = data._internal\n else:\n if isinstance(data, pd.DataFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n pdf = data\n else:\n pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)\n internal = InternalFrame.from_pandas(pdf)\n\n object.__setattr__(self, \"_internal_frame\", internal)\n\n @property\n def _pssers(self) -> Dict[Label, \"Series\"]:\n \"\"\"Return a dict of column label -> Series which anchors `self`.\"\"\"\n from pyspark.pandas.series import Series\n\n if not hasattr(self, \"_psseries\"):\n object.__setattr__(\n self,\n \"_psseries\",\n {label: Series(data=self, index=label) for label in self._internal.column_labels},\n )\n else:\n psseries = cast(Dict[Label, Series], self._psseries) # type: ignore[has-type]\n assert len(self._internal.column_labels) == len(psseries), (\n len(self._internal.column_labels),\n len(psseries),\n )\n if any(self is not psser._psdf for psser in psseries.values()):\n # Refresh the dict to contain only Series anchoring `self`.\n self._psseries = {\n label: (\n psseries[label]\n if self is psseries[label]._psdf\n else Series(data=self, index=label)\n )\n for label in self._internal.column_labels\n }\n return self._psseries\n\n @property\n def _internal(self) -> InternalFrame:\n return cast(InternalFrame, self._internal_frame) # type: ignore[has-type]\n\n def _update_internal_frame(\n self, internal: InternalFrame, requires_same_anchor: bool = True\n ) -> None:\n \"\"\"\n Update InternalFrame with the given one.\n\n If the column_label is changed or the new InternalFrame is not the same `anchor`,\n disconnect the link to the Series and create a new one.\n\n If `requires_same_anchor` is `False`, checking whether or 
not the same anchor is ignored\n and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy,\n updating the underlying Spark DataFrame which need to combine a different Spark DataFrame.\n\n :param internal: the new InternalFrame\n :param requires_same_anchor: whether checking the same anchor\n \"\"\"\n from pyspark.pandas.series import Series\n\n if hasattr(self, \"_psseries\"):\n psseries = {}\n\n for old_label, new_label in zip_longest(\n self._internal.column_labels, internal.column_labels\n ):\n if old_label is not None:\n psser = self._pssers[old_label]\n\n renamed = old_label != new_label\n not_same_anchor = requires_same_anchor and not same_anchor(internal, psser)\n\n if renamed or not_same_anchor:\n psdf: DataFrame = DataFrame(self._internal.select_column(old_label))\n psser._update_anchor(psdf)\n psser = None\n else:\n psser = None\n if new_label is not None:\n if psser is None:\n psser = Series(data=self, index=new_label)\n psseries[new_label] = psser\n\n self._psseries = psseries\n\n self._internal_frame = internal\n\n if hasattr(self, \"_repr_pandas_cache\"):\n del self._repr_pandas_cache\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of array dimensions.\n\n return 2 for DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', None],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n NaN 7 8\n >>> df.ndim\n 2\n \"\"\"\n return 2\n\n @property\n def axes(self) -> List:\n \"\"\"\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]\n \"\"\"\n return [self.index, self.columns]\n\n def _reduce_for_stat_function(\n self,\n sfun: Callable[[\"Series\"], Column],\n name: str,\n axis: Optional[Axis] = None,\n numeric_only: bool = True,\n skipna: bool = True,\n **kwargs: Any,\n ) -> \"Series\":\n \"\"\"\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n Parameters\n ----------\n sfun : either an 1-arg function that takes a Column and returns a Column, or\n a 2-arg function that takes a Column and its DataType and returns a Column.\n axis: used only for sanity check because series only support index axis.\n name : original pandas API name.\n axis : axis to apply. 0 or 1, or 'index' or 'columns.\n numeric_only : bool, default True\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility. 
Only 'DataFrame.count' uses this parameter\n currently.\n skipna : bool, default True\n Exclude NA/null values when computing the result.\n \"\"\"\n from pyspark.pandas.series import Series, first_series\n\n axis = validate_axis(axis)\n if axis == 0:\n min_count = kwargs.get(\"min_count\", 0)\n\n exprs = [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]\n new_column_labels = []\n for label in self._internal.column_labels:\n psser = self._psser_for(label)\n\n is_numeric_or_boolean = isinstance(\n psser.spark.data_type, (NumericType, BooleanType)\n )\n keep_column = not numeric_only or is_numeric_or_boolean\n\n if keep_column:\n if not skipna and get_option(\"compute.eager_check\") and psser.hasnans:\n scol = F.first(F.lit(np.nan))\n else:\n scol = sfun(psser)\n\n if min_count > 0:\n scol = F.when(Frame._count_expr(psser) >= min_count, scol)\n\n exprs.append(scol.alias(name_like_string(label)))\n new_column_labels.append(label)\n\n if len(exprs) == 1:\n return Series([])\n\n sdf = self._internal.spark_frame.select(*exprs)\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.max_rows\", 1):\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n column_labels=new_column_labels,\n column_label_names=self._internal.column_label_names,\n )\n return first_series(DataFrame(internal).transpose())\n\n else:\n # Here we execute with the first 1000 to get the return type.\n # If the records were less than 1000, it uses pandas API directly for a shortcut.\n limit = get_option(\"compute.shortcut_limit\")\n pdf = self.head(limit + 1)._to_internal_pandas()\n pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs)\n if len(pdf) <= limit:\n return Series(pser)\n\n @pandas_udf(returnType=as_spark_type(pser.dtype.type)) # type: ignore[call-overload]\n def calculate_columns_axis(*cols: pd.Series) -> pd.Series:\n return getattr(pd.concat(cols, axis=1), name)(\n axis=axis, numeric_only=numeric_only, **kwargs\n )\n\n column_name = verify_temp_column_name(\n self._internal.spark_frame.select(self._internal.index_spark_columns),\n \"__calculate_columns_axis__\",\n )\n sdf = self._internal.spark_frame.select(\n self._internal.index_spark_columns\n + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)]\n )\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n return first_series(DataFrame(internal)).rename(pser.name)\n\n def _psser_for(self, label: Label) -> \"Series\":\n \"\"\"\n Create Series with a proper column label.\n\n The given label must be verified to exist in `InternalFrame.column_labels`.\n\n For example, in some method, self is like:\n\n >>> self = ps.range(3)\n\n `self._psser_for(label)` can be used with `InternalFrame.column_labels`:\n\n >>> self._psser_for(self._internal.column_labels[0])\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n\n `self._psser_for(label)` must not be used directly with user inputs.\n In that case, `self[label]` should be used instead, which checks the label exists or not:\n\n >>> self['id']\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n \"\"\"\n return self._pssers[label]\n\n def _apply_series_op(\n self, op: Callable[[\"Series\"], Union[\"Series\", Column]], should_resolve: bool = False\n ) -> \"DataFrame\":\n applied 
= []\n for label in self._internal.column_labels:\n applied.append(op(self._psser_for(label)))\n internal = self._internal.with_new_columns(applied)\n if should_resolve:\n internal = internal.resolved_copy\n return DataFrame(internal)\n\n # Arithmetic Operators\n def _map_series_op(self, op: str, other: Any) -> \"DataFrame\":\n from pyspark.pandas.base import IndexOpsMixin\n\n if not isinstance(other, DataFrame) and (\n isinstance(other, IndexOpsMixin) or is_sequence(other)\n ):\n raise TypeError(\n \"%s with a sequence is currently not supported; \"\n \"however, got %s.\" % (op, type(other).__name__)\n )\n\n if isinstance(other, DataFrame):\n if self._internal.column_labels_level != other._internal.column_labels_level:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n if not same_anchor(self, other):\n # Different DataFrames\n def apply_op(\n psdf: DataFrame,\n this_column_labels: List[Label],\n that_column_labels: List[Label],\n ) -> Iterator[Tuple[\"Series\", Label]]:\n for this_label, that_label in zip(this_column_labels, that_column_labels):\n yield (\n getattr(psdf._psser_for(this_label), op)(\n psdf._psser_for(that_label)\n ).rename(this_label),\n this_label,\n )\n\n return align_diff_frames(apply_op, self, other, fillna=True, how=\"full\")\n else:\n applied = []\n column_labels = []\n for label in self._internal.column_labels:\n if label in other._internal.column_labels:\n applied.append(getattr(self._psser_for(label), op)(other._psser_for(label)))\n else:\n applied.append(\n SF.lit(None)\n .cast(self._internal.spark_type_for(label))\n .alias(name_like_string(label))\n )\n column_labels.append(label)\n for label in other._internal.column_labels:\n if label not in column_labels:\n applied.append(\n SF.lit(None)\n .cast(other._internal.spark_type_for(label))\n .alias(name_like_string(label))\n )\n column_labels.append(label)\n internal = self._internal.with_new_columns(applied, column_labels=column_labels)\n return DataFrame(internal)\n else:\n return self._apply_series_op(lambda psser: getattr(psser, op)(other))\n\n def __add__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"add\", other)\n\n def __radd__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"radd\", other)\n\n def __truediv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"truediv\", other)\n\n def __rtruediv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rtruediv\", other)\n\n def __mul__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"mul\", other)\n\n def __rmul__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rmul\", other)\n\n def __sub__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"sub\", other)\n\n def __rsub__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rsub\", other)\n\n def __pow__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"pow\", other)\n\n def __rpow__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rpow\", other)\n\n def __mod__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"mod\", other)\n\n def __rmod__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rmod\", other)\n\n def __floordiv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"floordiv\", other)\n\n def __rfloordiv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rfloordiv\", other)\n\n def __abs__(self) -> \"DataFrame\":\n return 
self._apply_series_op(lambda psser: abs(psser))\n\n def __neg__(self) -> \"DataFrame\":\n return self._apply_series_op(lambda psser: -psser)\n\n def add(self, other: Any) -> \"DataFrame\":\n return self + other\n\n # create accessor for plot\n plot = CachedAccessor(\"plot\", PandasOnSparkPlotAccessor)\n\n # create accessor for Spark related methods.\n spark = CachedAccessor(\"spark\", SparkFrameMethods)\n\n # create accessor for pandas-on-Spark specific methods.\n pandas_on_spark = CachedAccessor(\"pandas_on_spark\", PandasOnSparkFrameMethods)\n\n # keep the name \"koalas\" for backward compatibility.\n koalas = CachedAccessor(\"koalas\", PandasOnSparkFrameMethods)\n\n @no_type_check\n def hist(self, bins=10, **kwds):\n return self.plot.hist(bins, **kwds)\n\n hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__\n\n @no_type_check\n def boxplot(self, **kwds):\n return self.plot.box(**kwds)\n\n boxplot.__doc__ = PandasOnSparkPlotAccessor.box.__doc__\n\n @no_type_check\n def kde(self, bw_method=None, ind=None, **kwds):\n return self.plot.kde(bw_method, ind, **kwds)\n\n kde.__doc__ = PandasOnSparkPlotAccessor.kde.__doc__\n\n add.__doc__ = _flex_doc_FRAME.format(\n desc=\"Addition\", op_name=\"+\", equiv=\"dataframe + other\", reverse=\"radd\"\n )\n\n def radd(self, other: Any) -> \"DataFrame\":\n return other + self\n\n radd.__doc__ = _flex_doc_FRAME.format(\n desc=\"Addition\", op_name=\"+\", equiv=\"other + dataframe\", reverse=\"add\"\n )\n\n def div(self, other: Any) -> \"DataFrame\":\n return self / other\n\n div.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"dataframe / other\", reverse=\"rdiv\"\n )\n\n divide = div\n\n def rdiv(self, other: Any) -> \"DataFrame\":\n return other / self\n\n rdiv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"other / dataframe\", reverse=\"div\"\n )\n\n def truediv(self, other: Any) -> \"DataFrame\":\n return self / other\n\n truediv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"dataframe / other\", reverse=\"rtruediv\"\n )\n\n def rtruediv(self, other: Any) -> \"DataFrame\":\n return other / self\n\n rtruediv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"other / dataframe\", reverse=\"truediv\"\n )\n\n def mul(self, other: Any) -> \"DataFrame\":\n return self * other\n\n mul.__doc__ = _flex_doc_FRAME.format(\n desc=\"Multiplication\", op_name=\"*\", equiv=\"dataframe * other\", reverse=\"rmul\"\n )\n\n multiply = mul\n\n def rmul(self, other: Any) -> \"DataFrame\":\n return other * self\n\n rmul.__doc__ = _flex_doc_FRAME.format(\n desc=\"Multiplication\", op_name=\"*\", equiv=\"other * dataframe\", reverse=\"mul\"\n )\n\n def sub(self, other: Any) -> \"DataFrame\":\n return self - other\n\n sub.__doc__ = _flex_doc_FRAME.format(\n desc=\"Subtraction\", op_name=\"-\", equiv=\"dataframe - other\", reverse=\"rsub\"\n )\n\n subtract = sub\n\n def rsub(self, other: Any) -> \"DataFrame\":\n return other - self\n\n rsub.__doc__ = _flex_doc_FRAME.format(\n desc=\"Subtraction\", op_name=\"-\", equiv=\"other - dataframe\", reverse=\"sub\"\n )\n\n def mod(self, other: Any) -> \"DataFrame\":\n return self % other\n\n mod.__doc__ = _flex_doc_FRAME.format(\n desc=\"Modulo\", op_name=\"%\", equiv=\"dataframe % other\", reverse=\"rmod\"\n )\n\n def rmod(self, other: Any) -> \"DataFrame\":\n return other % self\n\n rmod.__doc__ = _flex_doc_FRAME.format(\n desc=\"Modulo\", op_name=\"%\", equiv=\"other % 
dataframe\", reverse=\"mod\"\n )\n\n def pow(self, other: Any) -> \"DataFrame\":\n return self ** other\n\n pow.__doc__ = _flex_doc_FRAME.format(\n desc=\"Exponential power of series\", op_name=\"**\", equiv=\"dataframe ** other\", reverse=\"rpow\"\n )\n\n def rpow(self, other: Any) -> \"DataFrame\":\n return other ** self\n\n rpow.__doc__ = _flex_doc_FRAME.format(\n desc=\"Exponential power\", op_name=\"**\", equiv=\"other ** dataframe\", reverse=\"pow\"\n )\n\n def floordiv(self, other: Any) -> \"DataFrame\":\n return self // other\n\n floordiv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Integer division\", op_name=\"//\", equiv=\"dataframe // other\", reverse=\"rfloordiv\"\n )\n\n def rfloordiv(self, other: Any) -> \"DataFrame\":\n return other // self\n\n rfloordiv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Integer division\", op_name=\"//\", equiv=\"other // dataframe\", reverse=\"floordiv\"\n )\n\n # Comparison Operators\n def __eq__(self, other: Any) -> \"DataFrame\": # type: ignore[override]\n return self._map_series_op(\"eq\", other)\n\n def __ne__(self, other: Any) -> \"DataFrame\": # type: ignore[override]\n return self._map_series_op(\"ne\", other)\n\n def __lt__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"lt\", other)\n\n def __le__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"le\", other)\n\n def __ge__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"ge\", other)\n\n def __gt__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"gt\", other)\n\n def eq(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.eq(1)\n a b\n a True True\n b False False\n c False True\n d False False\n \"\"\"\n return self == other\n\n equals = eq\n\n def gt(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is greater than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.gt(2)\n a b\n a False False\n b False False\n c True False\n d True False\n \"\"\"\n return self > other\n\n def ge(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is greater than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ge(1)\n a b\n a True True\n b True False\n c True True\n d True False\n \"\"\"\n return self >= other\n\n def lt(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is less than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.lt(1)\n a b\n a False False\n b False False\n c False False\n d False False\n \"\"\"\n return self < other\n\n def le(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is less than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.le(2)\n a b\n a True True\n b True False\n c False True\n d False False\n \"\"\"\n return self <= other\n\n def ne(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is not equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ne(1)\n a b\n a False False\n b True True\n c True False\n d True True\n \"\"\"\n return self != other\n\n def applymap(self, func: Callable[[Any], Any]) -> \"DataFrame\":\n \"\"\"\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> np.int32:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> def str_len(x) -> int:\n ... return len(str(x))\n >>> df.applymap(str_len)\n 0 1\n 0 3 4\n 1 5 5\n\n >>> def power(x) -> float:\n ... return x ** 2\n >>> df.applymap(power)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.applymap(lambda x: x ** 2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n \"\"\"\n\n # TODO: We can implement shortcut theoretically since it creates new DataFrame\n # anyway and we don't have to worry about operations on different DataFrames.\n return self._apply_series_op(lambda psser: psser.apply(func))\n\n # TODO: not all arguments are implemented comparing to pandas' for now.\n def aggregate(self, func: Union[List[str], Dict[Name, List[str]]]) -> \"DataFrame\":\n \"\"\"Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func : dict or a list\n a dict mapping from column name (string) to\n aggregate functions (list of strings).\n If a list is given, the aggregation is performed against\n all columns.\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Invoke function on DataFrame.\n DataFrame.transform : Only perform transforming type operations.\n DataFrame.groupby : Perform operations over groups.\n Series.aggregate : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... 
columns=['A', 'B', 'C'])\n\n >>> df\n A B C\n 0 1.0 2.0 3.0\n 1 4.0 5.0 6.0\n 2 7.0 8.0 9.0\n 3 NaN NaN NaN\n\n Aggregate these functions over the rows.\n\n >>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index()\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n Different aggregations per column.\n\n >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index()\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n\n For multi-index columns:\n\n >>> df.columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\")])\n >>> df.agg(['sum', 'min'])[[(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\")]].sort_index()\n X Y\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n >>> aggregated = df.agg({(\"X\", \"A\") : ['sum', 'min'], (\"X\", \"B\") : ['min', 'max']})\n >>> aggregated[[(\"X\", \"A\"), (\"X\", \"B\")]].sort_index() # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n \"\"\"\n from pyspark.pandas.groupby import GroupBy\n\n if isinstance(func, list):\n if all((isinstance(f, str) for f in func)):\n func = dict([(column, func) for column in self.columns])\n else:\n raise ValueError(\n \"If the given function is a list, it \"\n \"should only contains function names as strings.\"\n )\n\n if not isinstance(func, dict) or not all(\n is_name_like_value(key)\n and (\n isinstance(value, str)\n or (isinstance(value, list) and all(isinstance(v, str) for v in value))\n )\n for key, value in func.items()\n ):\n raise ValueError(\n \"aggs must be a dict mapping from column name to aggregate \"\n \"functions (string or list of strings).\"\n )\n\n with option_context(\"compute.default_index_type\", \"distributed\"):\n psdf: DataFrame = DataFrame(GroupBy._spark_groupby(self, func))\n\n # The codes below basically converts:\n #\n # A B\n # sum min min max\n # 0 12.0 1.0 2.0 8.0\n #\n # to:\n # A B\n # max NaN 8.0\n # min 1.0 2.0\n # sum 12.0 NaN\n #\n # Aggregated output is usually pretty much small.\n\n return psdf.stack().droplevel(0)[list(func.keys())]\n\n agg = aggregate\n\n def corr(self, method: str = \"pearson\") -> \"DataFrame\":\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank correlation\n\n Returns\n -------\n y : DataFrame\n\n See Also\n --------\n Series.corr\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr('pearson')\n dogs cats\n dogs 1.000000 -0.851064\n cats -0.851064 1.000000\n\n >>> df.corr('spearman')\n dogs cats\n dogs 1.000000 -0.948683\n cats -0.948683 1.000000\n\n Notes\n -----\n There are behavior differences between pandas-on-Spark and pandas.\n\n * the `method` argument only accepts 'pearson', 'spearman'\n * the data should not contain NaNs. pandas-on-Spark will return an error.\n * pandas-on-Spark doesn't support the following argument(s).\n\n * `min_periods` argument is not supported\n \"\"\"\n return cast(DataFrame, ps.from_pandas(corr(self, method)))\n\n # TODO: add axis parameter and support more methods\n def corrwith(\n self, other: DataFrameOrSeries, drop: bool = False, method: str = \"pearson\"\n ) -> \"Series\":\n \"\"\"\n Compute pairwise correlation.\n\n Pairwise correlation is computed between rows or columns of\n DataFrame with rows or columns of Series or DataFrame. 
DataFrames\n are first aligned along both axes before computing the\n correlations.\n\n .. versionadded:: 3.4.0\n\n Parameters\n ----------\n other : DataFrame, Series\n Object with which to compute correlations.\n\n drop : bool, default False\n Drop missing indices from result.\n\n method : str, default 'pearson'\n Method of correlation, one of:\n\n * pearson : standard correlation coefficient\n\n Returns\n -------\n Series\n Pairwise correlations.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation of columns.\n\n Examples\n --------\n >>> df1 = ps.DataFrame({\n ... \"A\":[1, 5, 7, 8],\n ... \"X\":[5, 8, 4, 3],\n ... \"C\":[10, 4, 9, 3]})\n >>> df1.corrwith(df1[[\"X\", \"C\"]])\n X 1.0\n C 1.0\n A NaN\n dtype: float64\n\n >>> df2 = ps.DataFrame({\n ... \"A\":[5, 3, 6, 4],\n ... \"B\":[11, 2, 4, 3],\n ... \"C\":[4, 3, 8, 5]})\n\n >>> with ps.option_context(\"compute.ops_on_diff_frames\", True):\n ... df1.corrwith(df2)\n A -0.041703\n C 0.395437\n X NaN\n B NaN\n dtype: float64\n\n >>> with ps.option_context(\"compute.ops_on_diff_frames\", True):\n ... df2.corrwith(df1.X)\n A -0.597614\n B -0.151186\n C -0.642857\n dtype: float64\n \"\"\"\n from pyspark.pandas.series import Series, first_series\n\n if (method is not None) and (method not in [\"pearson\"]):\n raise NotImplementedError(\"corrwith currently works only for method='pearson'\")\n if not isinstance(other, (DataFrame, Series)):\n raise TypeError(\"unsupported type: {}\".format(type(other).__name__))\n\n right_is_series = isinstance(other, Series)\n\n if same_anchor(self, other):\n combined = self\n this = self\n that = other\n else:\n combined = combine_frames(self, other, how=\"inner\")\n this = combined[\"this\"]\n that = combined[\"that\"]\n\n this_numeric_column_labels: List[Label] = []\n for column_label in this._internal.column_labels:\n if isinstance(this._internal.spark_type_for(column_label), (NumericType, BooleanType)):\n this_numeric_column_labels.append(column_label)\n\n that_numeric_column_labels: List[Label] = []\n for column_label in that._internal.column_labels:\n if isinstance(that._internal.spark_type_for(column_label), (NumericType, BooleanType)):\n that_numeric_column_labels.append(column_label)\n\n intersect_numeric_column_labels: List[Label] = []\n diff_numeric_column_labels: List[Label] = []\n corr_scols = []\n if right_is_series:\n intersect_numeric_column_labels = this_numeric_column_labels\n that_scol = that._internal.spark_column_for(that_numeric_column_labels[0])\n for numeric_column_label in intersect_numeric_column_labels:\n this_scol = this._internal.spark_column_for(numeric_column_label)\n corr_scols.append(\n F.corr(this_scol.cast(\"double\"), that_scol.cast(\"double\")).alias(\n name_like_string(numeric_column_label)\n )\n )\n else:\n for numeric_column_label in this_numeric_column_labels:\n if numeric_column_label in that_numeric_column_labels:\n intersect_numeric_column_labels.append(numeric_column_label)\n else:\n diff_numeric_column_labels.append(numeric_column_label)\n for numeric_column_label in that_numeric_column_labels:\n if numeric_column_label not in this_numeric_column_labels:\n diff_numeric_column_labels.append(numeric_column_label)\n for numeric_column_label in intersect_numeric_column_labels:\n this_scol = this._internal.spark_column_for(numeric_column_label)\n that_scol = that._internal.spark_column_for(numeric_column_label)\n corr_scols.append(\n F.corr(this_scol.cast(\"double\"), that_scol.cast(\"double\")).alias(\n name_like_string(numeric_column_label)\n )\n 
)\n\n corr_labels: List[Label] = intersect_numeric_column_labels\n if not drop:\n for numeric_column_label in diff_numeric_column_labels:\n corr_scols.append(\n SF.lit(None).cast(\"double\").alias(name_like_string(numeric_column_label))\n )\n corr_labels.append(numeric_column_label)\n\n sdf = combined._internal.spark_frame.select(\n *[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *corr_scols\n ).limit(\n 1\n ) # limit(1) to avoid returning more than 1 row when intersection is empty\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.max_rows\", 1):\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n column_labels=corr_labels,\n column_label_names=self._internal.column_label_names,\n )\n return first_series(DataFrame(internal).transpose())\n\n def iteritems(self) -> Iterator[Tuple[Name, \"Series\"]]:\n \"\"\"\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'],\n ... columns=['species', 'population'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n\n >>> for label, content in df.iteritems():\n ... print('label:', label)\n ... print('content:', content.to_string())\n ...\n label: species\n content: panda bear\n polar bear\n koala marsupial\n label: population\n content: panda 1864\n polar 22000\n koala 80000\n \"\"\"\n return (\n (label if len(label) > 1 else label[0], self._psser_for(label))\n for label in self._internal.column_labels\n )\n\n def iterrows(self) -> Iterator[Tuple[Name, pd.Series]]:\n \"\"\"\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : pandas.Series\n The data of the row as a Series.\n\n it : generator\n A generator that iterates over the rows of the frame.\n\n Notes\n -----\n\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). For example,\n\n >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. 
Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n \"\"\"\n\n columns = self.columns\n internal_index_columns = self._internal.index_spark_column_names\n internal_data_columns = self._internal.data_spark_column_names\n\n def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:\n k = (\n row[internal_index_columns[0]]\n if len(internal_index_columns) == 1\n else tuple(row[c] for c in internal_index_columns)\n )\n v = [row[c] for c in internal_data_columns]\n return k, v\n\n for k, v in map(\n extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()\n ):\n s = pd.Series(v, index=columns, name=k)\n yield k, s\n\n def itertuples(\n self, index: bool = True, name: Optional[str] = \"PandasOnSpark\"\n ) -> Iterator[Tuple]:\n \"\"\"\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default \"PandasOnSpark\"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n\n >>> for row in df.itertuples():\n ... print(row)\n ...\n PandasOnSpark(Index='dog', num_legs=4, num_wings=0)\n PandasOnSpark(Index='hawk', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n PandasOnSpark(num_legs=4, num_wings=0)\n PandasOnSpark(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name='Animal'):\n ... 
print(row)\n ...\n Animal(Index='dog', num_legs=4, num_wings=0)\n Animal(Index='hawk', num_legs=2, num_wings=2)\n \"\"\"\n fields = list(self.columns)\n if index:\n fields.insert(0, \"Index\")\n\n index_spark_column_names = self._internal.index_spark_column_names\n data_spark_column_names = self._internal.data_spark_column_names\n\n def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:\n k = (\n row[index_spark_column_names[0]]\n if len(index_spark_column_names) == 1\n else tuple(row[c] for c in index_spark_column_names)\n )\n v = [row[c] for c in data_spark_column_names]\n return k, v\n\n can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255\n\n if name is not None and can_return_named_tuples:\n itertuple = namedtuple(name, fields, rename=True) # type: ignore[misc]\n for k, v in map(\n extract_kv_from_spark_row,\n self._internal.resolved_copy.spark_frame.toLocalIterator(),\n ):\n yield itertuple._make(([k] if index else []) + list(v))\n else:\n for k, v in map(\n extract_kv_from_spark_row,\n self._internal.resolved_copy.spark_frame.toLocalIterator(),\n ):\n yield tuple(([k] if index else []) + list(v))\n\n def items(self) -> Iterator[Tuple[Name, \"Series\"]]:\n \"\"\"This is an alias of ``iteritems``.\"\"\"\n return self.iteritems()\n\n def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:\n \"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n See Also\n --------\n read_clipboard : Read text from clipboard.\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n\n This function also works for Series:\n\n >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # 0, 1\n ... # 1, 2\n ... # 2, 3\n ... # 3, 4\n ... # 4, 5\n ... # 5, 6\n ... 
# 6, 7\n \"\"\"\n\n args = locals()\n psdf = self\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args\n )\n\n def to_html(\n self,\n buf: Optional[IO[str]] = None,\n columns: Optional[Sequence[Name]] = None,\n col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None,\n header: bool = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[\n Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]\n ] = None,\n float_format: Optional[Callable[[float], str]] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n bold_rows: bool = True,\n classes: Optional[Union[str, list, tuple]] = None,\n escape: bool = True,\n notebook: bool = False,\n border: Optional[int] = None,\n table_id: Optional[str] = None,\n render_links: bool = False,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame as an HTML table.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. 
',' in Europe.\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `` tag. Default ``pd.options.html.border``.\n table_id : str, optional\n A css id is included in the opening `
` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links (only works with pandas 0.24+).\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n psdf = self.head(max_rows)\n else:\n psdf = self\n\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args\n )\n\n def to_string(\n self,\n buf: Optional[IO[str]] = None,\n columns: Optional[Sequence[Name]] = None,\n col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None,\n header: bool = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[\n Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]\n ] = None,\n float_format: Optional[Callable[[float], str]] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n line_width: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. 
',' in Europe.\n line_width : int, optional\n Width to wrap a line in characters.\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> print(df.to_string(max_rows=2))\n col1 col2\n 0 1 4\n 1 2 5\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n psdf = self.head(max_rows)\n else:\n psdf = self\n\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args\n )\n\n def to_dict(self, orient: str = \"dict\", into: Type = dict) -> Union[List, Mapping]:\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... index=['row1', 'row2'],\n ... 
columns=['col1', 'col2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n\n >>> df_dict = df.to_dict()\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]\n\n You can specify the return orientation.\n\n >>> df_dict = df.to_dict('series')\n >>> sorted(df_dict.items())\n [('col1', row1 1\n row2 2\n Name: col1, dtype: int64), ('col2', row1 0.50\n row2 0.75\n Name: col2, dtype: float64)]\n\n >>> df_dict = df.to_dict('split')\n >>> sorted(df_dict.items()) # doctest: +ELLIPSIS\n [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]\n\n >>> df_dict = df.to_dict('records')\n >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS\n [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]\n\n >>> df_dict = df.to_dict('index')\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \\\n('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS\n [defaultdict(, {'col..., 'col...}), \\\ndefaultdict(, {'col..., 'col...})]\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n psdf = self\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args\n )\n\n def to_latex(\n self,\n buf: Optional[IO[str]] = None,\n columns: Optional[List[Name]] = None,\n col_space: Optional[int] = None,\n header: bool = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[\n Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]\n ] = None,\n float_format: Optional[Callable[[float], str]] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n bold_rows: bool = False,\n column_format: Optional[str] = None,\n longtable: Optional[bool] = None,\n escape: Optional[bool] = None,\n encoding: Optional[str] = None,\n decimal: str = \".\",\n multicolumn: Optional[bool] = None,\n multicolumn_format: Optional[str] = None,\n multirow: Optional[bool] = None,\n ) -> Optional[str]:\n r\"\"\"\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice this into a LaTeX\n document. Requires usepackage{booktabs}.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, consider alternative formats.\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. 
If a list of strings is given, it is assumed to be aliases\n for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default ‘NaN’\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns’ elements by position or name. The result of\n each function must be a unicode string. List must be of length equal to the number of\n columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print every multiindex key at\n each row. By default, the value will be read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By\n default, ‘l’ will be used for all columns except columns of numbers, which default\n to ‘r’.\n longtable : bool, optional\n By default, the value will be read from the pandas config module. Use a longtable\n environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX\n preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config module. When set to False\n prevents from escaping latex special characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file, defaults to ‘ascii’ on\n Python 2 and ‘utf-8’ on Python 3.\n decimal : str, default ‘.’\n Character recognized as decimal separator, e.g. ‘,’ in Europe.\n multicolumn : bool, default True\n Use multicolumn to enhance MultiIndex columns. The default will be read from the config\n module.\n multicolumn_format : str, default ‘l’\n The alignment for multicolumns, similar to column_format The default will be read from\n the config module.\n multirow : bool, default False\n Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your\n LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read from the pandas config\n module.\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n\n Examples\n --------\n >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']},\n ... columns=['name', 'mask', 'weapon'])\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n \n \"\"\"\n\n args = locals()\n psdf = self\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args\n )\n\n # TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic\n # when creating arrays)\n def transpose(self) -> \"DataFrame\":\n \"\"\"\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. 
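# A small sketch of the `buf` parameter of DataFrame.to_latex (illustrative only; the
# output path below is just an example). The frame is collected to the driver first,
# and None is returned because a buffer was supplied.
import pyspark.pandas as ps

latex_psdf = ps.DataFrame({"name": ["Raphael", "Donatello"], "mask": ["red", "purple"]})
with open("/tmp/turtles.tex", "w") as f:
    latex_psdf.to_latex(buf=f, index=False)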
The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).transpose()\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the coerced dtype. For instance, if int and float have\n to be placed in same column, it becomes float. If type coercion is not\n possible, it fails.\n\n Also, note that the values in index should be unique because they become\n unique column names.\n\n In addition, if Spark 2.3 is used, the types should always be exactly same.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2'])\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP\n >>> df1_transposed # doctest: +SKIP\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes # doctest: +SKIP\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'score': [9.5, 8],\n ... 'kids': [0, 0],\n ... 'age': [12, 22]}\n >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age'])\n >>> df2\n score kids age\n 0 9.5 0 12\n 1 8.0 0 22\n\n >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP\n >>> df2_transposed # doctest: +SKIP\n 0 1\n age 12.0 22.0\n kids 0.0 0.0\n score 9.5 8.0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the coerced dtype:\n\n >>> df2.dtypes\n score float64\n kids int64\n age int64\n dtype: object\n\n >>> df2_transposed.dtypes # doctest: +SKIP\n 0 float64\n 1 float64\n dtype: object\n \"\"\"\n max_compute_count = get_option(\"compute.max_rows\")\n if max_compute_count is not None:\n pdf = self.head(max_compute_count + 1)._to_internal_pandas()\n if len(pdf) > max_compute_count:\n raise ValueError(\n \"Current DataFrame has more then the given limit {0} rows. \"\n \"Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' \"\n \"to retrieve to retrieve more than {0} rows. 
Note that, before changing the \"\n \"'compute.max_rows', this operation is considerably expensive.\".format(\n max_compute_count\n )\n )\n return DataFrame(pdf.transpose())\n\n # Explode the data to be pairs.\n #\n # For instance, if the current input DataFrame is as below:\n #\n # +------+------+------+------+------+\n # |index1|index2|(a,x1)|(a,x2)|(b,x3)|\n # +------+------+------+------+------+\n # | y1| z1| 1| 0| 0|\n # | y2| z2| 0| 50| 0|\n # | y3| z3| 3| 2| 1|\n # +------+------+------+------+------+\n #\n # Output of `exploded_df` becomes as below:\n #\n # +-----------------+-----------------+-----------------+-----+\n # | index|__index_level_0__|__index_level_1__|value|\n # +-----------------+-----------------+-----------------+-----+\n # |{\"a\":[\"y1\",\"z1\"]}| a| x1| 1|\n # |{\"a\":[\"y1\",\"z1\"]}| a| x2| 0|\n # |{\"a\":[\"y1\",\"z1\"]}| b| x3| 0|\n # |{\"a\":[\"y2\",\"z2\"]}| a| x1| 0|\n # |{\"a\":[\"y2\",\"z2\"]}| a| x2| 50|\n # |{\"a\":[\"y2\",\"z2\"]}| b| x3| 0|\n # |{\"a\":[\"y3\",\"z3\"]}| a| x1| 3|\n # |{\"a\":[\"y3\",\"z3\"]}| a| x2| 2|\n # |{\"a\":[\"y3\",\"z3\"]}| b| x3| 1|\n # +-----------------+-----------------+-----------------+-----+\n pairs = F.explode(\n F.array(\n *[\n F.struct(\n *[\n SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i))\n for i, col in enumerate(label)\n ],\n *[self._internal.spark_column_for(label).alias(\"value\")],\n )\n for label in self._internal.column_labels\n ]\n )\n )\n\n exploded_df = self._internal.spark_frame.withColumn(\"pairs\", pairs).select(\n [\n F.to_json(\n F.struct(\n F.array(*[scol for scol in self._internal.index_spark_columns]).alias(\"a\")\n )\n ).alias(\"index\"),\n F.col(\"pairs.*\"),\n ]\n )\n\n # After that, executes pivot with key and its index column.\n # Note that index column should contain unique values since column names\n # should be unique.\n internal_index_columns = [\n SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)\n ]\n pivoted_df = exploded_df.groupBy(internal_index_columns).pivot(\"index\")\n\n transposed_df = pivoted_df.agg(F.first(F.col(\"value\")))\n\n new_data_columns = list(\n filter(lambda x: x not in internal_index_columns, transposed_df.columns)\n )\n\n column_labels = [\n None if len(label) == 1 and label[0] is None else label\n for label in (tuple(json.loads(col)[\"a\"]) for col in new_data_columns)\n ]\n\n internal = InternalFrame(\n spark_frame=transposed_df,\n index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns],\n index_names=self._internal.column_label_names,\n column_labels=column_labels,\n data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns],\n column_label_names=self._internal.index_names,\n )\n\n return DataFrame(internal)\n\n T = property(transpose)\n\n def apply(\n self, func: Callable, axis: Axis = 0, args: Sequence[Any] = (), **kwds: Any\n ) -> Union[\"Series\", \"DataFrame\", \"Index\"]:\n \"\"\"\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame's index (``axis=0``) or the DataFrame's columns\n (``axis=1``).\n\n See also `Transform and apply a function\n `_.\n\n .. note:: when `axis` is 0 or 'index', the `func` is unable to access\n to the whole input series. pandas-on-Spark internally splits the input series into\n multiple batches and calls `func` with each batch multiple times. Therefore, operations\n such as global aggregations are impossible. 
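# A simplified, standalone illustration of the explode-then-pivot idea that `transpose`
# above relies on, written against the plain Spark API with made-up column names:
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
plain_sdf = spark.createDataFrame([(0, 1, 4), (1, 2, 5)], ["idx", "a", "b"])

# 1. Explode every row into (row-index, column-name, value) pairs.
pairs_sdf = plain_sdf.select(
    "idx",
    F.explode(
        F.array(
            F.struct(F.lit("a").alias("name"), F.col("a").alias("value")),
            F.struct(F.lit("b").alias("name"), F.col("b").alias("value")),
        )
    ).alias("pair"),
).select("idx", "pair.name", "pair.value")

# 2. Group by the old column names and pivot the old row index into new columns.
transposed_sdf = pairs_sdf.groupBy("name").pivot("idx").agg(F.first("value"))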
See the example below.\n\n >>> # This case does not return the length of whole series but of the batch internally\n ... # used.\n ... def length(s) -> int:\n ... return len(s)\n ...\n >>> df = ps.DataFrame({'A': range(1000)})\n >>> df.apply(length, axis=0) # doctest: +SKIP\n 0 83\n 1 83\n 2 83\n ...\n 10 83\n 11 83\n dtype: int32\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify the return type as `Series` or scalar value in ``func``,\n for instance, as below:\n\n >>> def square(s) -> ps.Series[np.int32]:\n ... return s ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n In case when axis is 1, it requires to specify `DataFrame` or scalar value\n with type hints as below:\n\n >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]:\n ... return x + 1\n\n If the return type is specified as `DataFrame`, the output column names become\n `c0, c1, c2 ... cn`. These names are positionally mapped to the returned\n DataFrame in ``func``.\n\n To specify the column names, you can assign them in a pandas friendly style as below:\n\n >>> def plus_one(x) -> ps.DataFrame[(\"index\", int), [(\"a\", float), (\"b\", float)]]:\n ... return x + 1\n\n >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})\n >>> def plus_one(x) -> ps.DataFrame[\n ... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]:\n ... return x + 1\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the function is applied:\n\n * 0 or 'index': apply function to each column.\n * 1 or 'columns': apply function to each row.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap : For elementwise operations.\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.transform : Only perform transforming type operations.\n Series.apply : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> def sqrt(x) -> ps.Series[float]:\n ... return np.sqrt(x)\n ...\n >>> df.apply(sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n When `axis` is 1 or 'columns', it applies the function for each row.\n\n >>> def summation(x) -> np.int64:\n ... return np.sum(x)\n ...\n >>> df.apply(summation, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Likewise, you can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n >>> df.apply(max, axis=1)\n 0 9\n 1 9\n 2 9\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n In order to specify the types when `axis` is '1', it should use DataFrame[...]\n annotation. 
In this case, the column names are automatically generated.\n\n >>> def identify(x) -> ps.DataFrame[('index', int), [('A', np.int64), ('B', np.int64)]]:\n ... return x\n ...\n >>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE\n A B\n index\n 0 4 9\n 1 4 9\n 2 4 9\n\n You can also specify extra arguments.\n\n >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]:\n ... return a + b + c\n ...\n >>> df.apply(plus_two, axis=1, args=(1,), c=3)\n c0 c1\n 0 8 13\n 1 8 13\n 2 8 13\n \"\"\"\n from pyspark.pandas.groupby import GroupBy\n from pyspark.pandas.series import first_series\n\n if not isinstance(func, types.FunctionType):\n assert callable(func), \"the first argument should be a callable function.\"\n f = func\n # Note that the return type hint specified here affects actual return\n # type in Spark (e.g., infer_return_type). And, MyPy does not allow\n # redefinition of a function.\n func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731\n\n axis = validate_axis(axis)\n should_return_series = False\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n should_infer_schema = return_sig is None\n should_retain_index = should_infer_schema\n\n def apply_func(pdf: pd.DataFrame) -> pd.DataFrame:\n pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type]\n if isinstance(pdf_or_pser, pd.Series):\n return pdf_or_pser.to_frame()\n else:\n return pdf_or_pser\n\n self_applied: DataFrame = DataFrame(self._internal.resolved_copy)\n\n column_labels: Optional[List[Label]] = None\n if should_infer_schema:\n # Here we execute with the first 1000 to get the return type.\n # If the records were less than 1000, it uses pandas API directly for a shortcut.\n log_advice(\n \"If the type hints is not specified for `apply`, \"\n \"it is expensive to infer the data type internally.\"\n )\n limit = get_option(\"compute.shortcut_limit\")\n pdf = self_applied.head(limit + 1)._to_internal_pandas()\n applied = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type]\n psser_or_psdf = ps.from_pandas(applied)\n if len(pdf) <= limit:\n return psser_or_psdf\n\n psdf = psser_or_psdf\n if isinstance(psser_or_psdf, ps.Series):\n should_return_series = True\n psdf = psser_or_psdf._psdf\n\n index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields]\n data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields]\n\n return_schema = StructType([field.struct_field for field in index_fields + data_fields])\n\n output_func = GroupBy._make_pandas_df_builder_func(\n self_applied, apply_func, return_schema, retain_index=should_retain_index\n )\n sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(\n lambda iterator: map(output_func, iterator), schema=return_schema\n )\n\n # If schema is inferred, we can restore indexes too.\n internal = psdf._internal.with_new_sdf(\n spark_frame=sdf, index_fields=index_fields, data_fields=data_fields\n )\n else:\n return_type = infer_return_type(func)\n require_index_axis = isinstance(return_type, SeriesType)\n require_column_axis = isinstance(return_type, DataFrameType)\n index_fields = None\n\n if require_index_axis:\n if axis != 0:\n raise TypeError(\n \"The given function should specify a scalar or a series as its type \"\n \"hints when axis is 0 or 'index'; however, the return type \"\n \"was %s\" % return_sig\n )\n dtype = cast(SeriesType, return_type).dtype\n spark_type = cast(SeriesType, return_type).spark_type\n 
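# A hypothetical sketch of how `compute.shortcut_limit` interacts with `apply` when no
# return-type hint is given: only the first `limit + 1` rows are collected to infer the
# result schema, and frames at or below the limit take the pandas shortcut entirely.
import pyspark.pandas as ps
from pyspark.pandas.config import option_context

sample_psdf = ps.DataFrame({"a": range(10)})
with option_context("compute.shortcut_limit", 5):
    inferred = sample_psdf.apply(lambda col: col + 1)   # schema inferred from a 6-row sample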
data_fields = [\n InternalField(\n dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)\n )\n for name in self_applied.columns\n ]\n return_schema = StructType([field.struct_field for field in data_fields])\n elif require_column_axis:\n if axis != 1:\n raise TypeError(\n \"The given function should specify a scalar or a frame as its type \"\n \"hints when axis is 1 or 'column'; however, the return type \"\n \"was %s\" % return_sig\n )\n index_fields = cast(DataFrameType, return_type).index_fields\n should_retain_index = len(index_fields) > 0\n data_fields = cast(DataFrameType, return_type).data_fields\n return_schema = cast(DataFrameType, return_type).spark_type\n else:\n # any axis is fine.\n should_return_series = True\n spark_type = cast(ScalarType, return_type).spark_type\n dtype = cast(ScalarType, return_type).dtype\n data_fields = [\n InternalField(\n dtype=dtype,\n struct_field=StructField(\n name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type\n ),\n )\n ]\n return_schema = StructType([field.struct_field for field in data_fields])\n column_labels = [None]\n\n output_func = GroupBy._make_pandas_df_builder_func(\n self_applied, apply_func, return_schema, retain_index=should_retain_index\n )\n sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(\n lambda iterator: map(output_func, iterator), schema=return_schema\n )\n\n index_spark_columns = None\n index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None\n\n if should_retain_index:\n index_spark_columns = [\n scol_for(sdf, index_field.struct_field.name) for index_field in index_fields\n ]\n\n if not any(\n [\n SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name)\n for index_field in index_fields\n ]\n ):\n index_names = [(index_field.struct_field.name,) for index_field in index_fields]\n internal = InternalFrame(\n spark_frame=sdf,\n index_names=index_names,\n index_spark_columns=index_spark_columns,\n index_fields=index_fields,\n data_fields=data_fields,\n column_labels=column_labels,\n )\n\n result: DataFrame = DataFrame(internal)\n if should_return_series:\n return first_series(result)\n else:\n return result\n\n def transform(\n self, func: Callable[..., \"Series\"], axis: Axis = 0, *args: Any, **kwargs: Any\n ) -> \"DataFrame\":\n \"\"\"\n Call ``func`` on self producing a Series with transformed values\n and that has the same length as its input.\n\n See also `Transform and apply a function\n `_.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n .. note:: the series within ``func`` is actually multiple pandas series as the\n segments of the whole pandas-on-Spark series; therefore, the length of each series\n is not guaranteed. As an example, an aggregation against each series\n does work as a global aggregation but an aggregation of each segment. See\n below:\n\n >>> def func(x) -> ps.Series[np.int32]:\n ... return x + sum(x)\n\n Parameters\n ----------\n func : function\n Function to use for transforming the data. 
It must work when pandas Series\n is passed.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n *args\n Positional arguments to pass to func.\n **kwargs\n Keyword arguments to pass to func.\n\n Returns\n -------\n DataFrame\n A DataFrame that must have the same length as self.\n\n Raises\n ------\n Exception : If the returned DataFrame has a different length than self.\n\n See Also\n --------\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.apply : Invoke function on DataFrame.\n Series.transform : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n >>> df.transform(square)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.transform(lambda x: x ** 2)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n For multi-index columns:\n\n >>> df.columns = [('X', 'A'), ('X', 'B')]\n >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n You can also specify extra arguments.\n\n >>> def calculation(x, y, z) -> ps.Series[int]:\n ... return x ** y + z\n >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 20 21\n 1 21 1044\n 2 1044 59069\n \"\"\"\n if not isinstance(func, types.FunctionType):\n assert callable(func), \"the first argument should be a callable function.\"\n f = func\n # Note that the return type hint specified here affects actual return\n # type in Spark (e.g., infer_return_type). And, MyPy does not allow\n # redefinition of a function.\n func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n should_infer_schema = return_sig is None\n\n if should_infer_schema:\n # Here we execute with the first 1000 to get the return type.\n # If the records were less than 1000, it uses pandas API directly for a shortcut.\n log_advice(\n \"If the type hints is not specified for `transform`, \"\n \"it is expensive to infer the data type internally.\"\n )\n limit = get_option(\"compute.shortcut_limit\")\n pdf = self.head(limit + 1)._to_internal_pandas()\n transformed = pdf.transform(func, axis, *args, **kwargs) # type: ignore[arg-type]\n psdf: DataFrame = DataFrame(transformed)\n if len(pdf) <= limit:\n return psdf\n\n applied = []\n data_fields = []\n for input_label, output_label in zip(\n self._internal.column_labels, psdf._internal.column_labels\n ):\n psser = self._psser_for(input_label)\n\n field = psdf._internal.field_for(output_label).normalize_spark_type()\n data_fields.append(field)\n\n return_schema = field.spark_type\n applied.append(\n psser.pandas_on_spark._transform_batch(\n func=lambda c: func(c, *args, **kwargs),\n return_type=SeriesType(field.dtype, return_schema),\n )\n )\n\n internal = self._internal.with_new_columns(applied, data_fields=data_fields)\n return DataFrame(internal)\n else:\n return self._apply_series_op(\n lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs)\n )\n\n def pop(self, item: Name) -> \"DataFrame\":\n \"\"\"\n Return item and drop from frame. 
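# A small illustrative use of `transform` with an explicit return-type hint, so no schema
# inference pass is needed. The function is deliberately elementwise, since each call only
# sees one internal batch of the column rather than the whole series.
import pyspark.pandas as ps

temps = ps.DataFrame({"mon": [68, 70], "tue": [71, 75]})

def to_celsius(f) -> ps.Series[float]:
    return (f - 32) * 5.0 / 9.0

celsius = temps.transform(to_celsius)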
Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n\n Also support for MultiIndex\n\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df\n a b\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('a')\n name class\n 0 falcon bird\n 1 parrot bird\n 2 lion mammal\n 3 monkey mammal\n\n >>> df\n b\n max_speed\n 0 389.0\n 1 24.0\n 2 80.5\n 3 NaN\n \"\"\"\n result = self[item]\n self._update_internal_frame(self.drop(columns=item)._internal)\n return result\n\n # TODO: add axis parameter can work when '1' or 'columns'\n def xs(self, key: Name, axis: Axis = 0, level: Optional[int] = None) -> DataFrameOrSeries:\n \"\"\"\n Return cross-section from the DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : 0 or 'index', default 0\n Axis to retrieve cross-section on.\n currently only support 0 or 'index'\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n\n Returns\n -------\n DataFrame or Series\n Cross-section from the original DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 
'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = ps.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n locomotion\n walks 4 0\n\n >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE\n num_legs 4\n num_wings 0\n Name: (mammal, dog, walks), dtype: int64\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if not is_name_like_value(key):\n raise TypeError(\"'key' should be a scalar value or tuple that contains scalar values\")\n\n if level is not None and is_name_like_tuple(key):\n raise KeyError(key)\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n if not is_name_like_tuple(key):\n key = (key,)\n if len(key) > self._internal.index_level:\n raise KeyError(\n \"Key length ({}) exceeds index depth ({})\".format(\n len(key), self._internal.index_level\n )\n )\n if level is None:\n level = 0\n\n rows = [\n self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level)\n ]\n internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows))\n\n if len(key) == self._internal.index_level:\n psdf: DataFrame = DataFrame(internal)\n pdf = psdf.head(2)._to_internal_pandas()\n if len(pdf) == 0:\n raise KeyError(key)\n elif len(pdf) > 1:\n return psdf\n else:\n return first_series(DataFrame(pdf.transpose()))\n else:\n index_spark_columns = (\n internal.index_spark_columns[:level]\n + internal.index_spark_columns[level + len(key) :]\n )\n index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]\n index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]\n\n internal = internal.copy(\n index_spark_columns=index_spark_columns,\n index_names=index_names,\n index_fields=index_fields,\n ).resolved_copy\n return DataFrame(internal)\n\n def between_time(\n self,\n start_time: Union[datetime.time, str],\n end_time: Union[datetime.time, str],\n include_start: bool = True,\n include_end: bool = True,\n axis: Axis = 0,\n ) -> \"DataFrame\":\n \"\"\"\n Select values between particular times of the day (example: 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n Returns\n -------\n DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is 
not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> psdf.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> psdf.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n axis = validate_axis(axis)\n\n if axis != 0:\n raise NotImplementedError(\"between_time currently only works for axis=0\")\n\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"Index must be DatetimeIndex\")\n\n psdf = self.copy()\n psdf.index.name = verify_temp_column_name(psdf, \"__index_name__\")\n return_types = [psdf.index.dtype] + list(psdf.dtypes)\n\n def pandas_between_time( # type: ignore[no-untyped-def]\n pdf,\n ) -> ps.DataFrame[return_types]: # type: ignore[valid-type]\n return pdf.between_time(start_time, end_time, include_start, include_end).reset_index()\n\n # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach a\n # default index, which will never be used. So use \"distributed\" index as a dummy to\n # avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time)\n\n return DataFrame(\n self._internal.copy(\n spark_frame=psdf._internal.spark_frame,\n index_spark_columns=psdf._internal.data_spark_columns[:1],\n index_fields=psdf._internal.data_fields[:1],\n data_spark_columns=psdf._internal.data_spark_columns[1:],\n data_fields=psdf._internal.data_fields[1:],\n )\n )\n\n # TODO: implement axis=1\n def at_time(\n self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0\n ) -> \"DataFrame\":\n \"\"\"\n Select values at particular time of day (example: 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> psdf.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if asof:\n raise NotImplementedError(\"'asof' argument is not supported\")\n\n axis = validate_axis(axis)\n\n if axis != 0:\n raise NotImplementedError(\"at_time currently only works for axis=0\")\n\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"Index must be DatetimeIndex\")\n\n psdf = self.copy()\n psdf.index.name = verify_temp_column_name(psdf, \"__index_name__\")\n 
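# Illustrative sketch combining the two time filters; both require the frame to be indexed
# by a ps.DatetimeIndex (the data below is made up for the example).
import pandas as pd
import pyspark.pandas as ps

ts_idx = pd.date_range("2022-01-01", periods=6, freq="4H")
ts_psdf = ps.DataFrame({"v": range(6)}, index=ts_idx)

morning = ts_psdf.between_time("06:00", "12:00")   # 08:00 and 12:00 rows
noon = ts_psdf.at_time("12:00")                    # 12:00 row only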
return_types = [psdf.index.dtype] + list(psdf.dtypes)\n\n def pandas_at_time( # type: ignore[no-untyped-def]\n pdf,\n ) -> ps.DataFrame[return_types]: # type: ignore[valid-type]\n return pdf.at_time(time, asof, axis).reset_index()\n\n # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach\n # a default index, which will never be used. So use \"distributed\" index as a dummy\n # to avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time)\n\n return DataFrame(\n self._internal.copy(\n spark_frame=psdf._internal.spark_frame,\n index_spark_columns=psdf._internal.data_spark_columns[:1],\n index_fields=psdf._internal.data_fields[:1],\n data_spark_columns=psdf._internal.data_spark_columns[1:],\n data_fields=psdf._internal.data_fields[1:],\n )\n )\n\n def where(\n self,\n cond: DataFrameOrSeries,\n other: Union[DataFrameOrSeries, Any] = np.nan,\n axis: Axis = None,\n ) -> \"DataFrame\":\n \"\"\"\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is True, keep the original value. Where False,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is False are replaced with corresponding value from other.\n axis : int, default None\n Can only be set to 0 at the moment for compatibility with pandas.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.where(df1 > 0).sort_index()\n A B\n 0 NaN 100.0\n 1 1.0 200.0\n 2 2.0 300.0\n 3 3.0 400.0\n 4 4.0 500.0\n\n >>> df1.where(df1 > 1, 10).sort_index()\n A B\n 0 10 100\n 1 10 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df1 + 100).sort_index()\n A B\n 0 100 100\n 1 101 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df2).sort_index()\n A B\n 0 0 100\n 1 -1 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n When the column name of cond is different from self, it treats all values are False\n\n >>> cond = ps.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0\n >>> cond\n C D\n 0 True False\n 1 False True\n 2 False False\n 3 True False\n 4 False True\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n When the type of cond is Series, it just check boolean regardless of column name\n\n >>> cond = ps.Series([1, 2]) > 1\n >>> cond\n 0 False\n 1 True\n dtype: bool\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 1.0 200.0\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n from pyspark.pandas.series import Series\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n tmp_cond_col_name = \"__tmp_cond_col_{}__\".format\n tmp_other_col_name = \"__tmp_other_col_{}__\".format\n\n psdf = self.copy()\n\n tmp_cond_col_names = [\n tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels\n ]\n if isinstance(cond, DataFrame):\n cond = cond[\n [\n (\n 
cond._internal.spark_column_for(label)\n if label in cond._internal.column_labels\n else SF.lit(False)\n ).alias(name)\n for label, name in zip(self._internal.column_labels, tmp_cond_col_names)\n ]\n ]\n psdf[tmp_cond_col_names] = cond\n elif isinstance(cond, Series):\n cond = cond.to_frame()\n cond = cond[\n [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names]\n ]\n psdf[tmp_cond_col_names] = cond\n else:\n raise TypeError(\"type of cond must be a DataFrame or Series\")\n\n tmp_other_col_names = [\n tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels\n ]\n if isinstance(other, DataFrame):\n other = other[\n [\n (\n other._internal.spark_column_for(label)\n if label in other._internal.column_labels\n else SF.lit(np.nan)\n ).alias(name)\n for label, name in zip(self._internal.column_labels, tmp_other_col_names)\n ]\n ]\n psdf[tmp_other_col_names] = other\n elif isinstance(other, Series):\n other = other.to_frame()\n other = other[\n [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names]\n ]\n psdf[tmp_other_col_names] = other\n else:\n for label in self._internal.column_labels:\n psdf[tmp_other_col_name(name_like_string(label))] = other\n\n # above logic make spark dataframe looks like below:\n # +-----------------+---+---+------------------+-------------------+------------------+--...\n # |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__...\n # +-----------------+---+---+------------------+-------------------+------------------+--...\n # | 0| 0|100| true| 0| false| ...\n # | 1| 1|200| false| -1| false| ...\n # | 3| 3|400| true| -3| false| ...\n # | 2| 2|300| false| -2| true| ...\n # | 4| 4|500| false| -4| false| ...\n # +-----------------+---+---+------------------+-------------------+------------------+--...\n\n data_spark_columns = []\n for label in self._internal.column_labels:\n data_spark_columns.append(\n F.when(\n psdf[tmp_cond_col_name(name_like_string(label))].spark.column,\n psdf._internal.spark_column_for(label),\n )\n .otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column)\n .alias(psdf._internal.spark_column_name_for(label))\n )\n\n return DataFrame(\n psdf._internal.with_new_columns(\n data_spark_columns, column_labels=self._internal.column_labels # TODO: dtypes?\n )\n )\n\n def mask(\n self, cond: DataFrameOrSeries, other: Union[DataFrameOrSeries, Any] = np.nan\n ) -> \"DataFrame\":\n \"\"\"\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is False, keep the original value. 
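# A standalone sketch (plain Spark API, made-up names) of the when/otherwise rewrite that
# the temporary __tmp_cond_col_* / __tmp_other_col_* columns built by `where` feed into;
# here the replacement is simply NULL instead of a separate "other" column.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
cond_sdf = spark.createDataFrame([(1, True), (2, False)], ["A", "keep_A"])
rewritten = cond_sdf.select(
    F.when(F.col("keep_A"), F.col("A")).otherwise(F.lit(None)).alias("A")
)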
Where True,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is True are replaced with corresponding value from other.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.mask(df1 > 0).sort_index()\n A B\n 0 0.0 NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> df1.mask(df1 > 1, 10).sort_index()\n A B\n 0 0 10\n 1 1 10\n 2 10 10\n 3 10 10\n 4 10 10\n\n >>> df1.mask(df1 > 1, df1 + 100).sort_index()\n A B\n 0 0 200\n 1 1 300\n 2 102 400\n 3 103 500\n 4 104 600\n\n >>> df1.mask(df1 > 1, df2).sort_index()\n A B\n 0 0 -100\n 1 1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n from pyspark.pandas.series import Series\n\n if not isinstance(cond, (DataFrame, Series)):\n raise TypeError(\"type of cond must be a DataFrame or Series\")\n\n cond_inversed = cond._apply_series_op(lambda psser: ~psser)\n return self.where(cond_inversed, other)\n\n @property\n def index(self) -> \"Index\":\n \"\"\"The index (row labels) Column of the DataFrame.\n\n Currently not supported when the DataFrame has no index.\n\n See Also\n --------\n Index\n \"\"\"\n from pyspark.pandas.indexes.base import Index\n\n return Index._new_instance(self)\n\n @property\n def empty(self) -> bool:\n \"\"\"\n Returns true if the current DataFrame is empty. Otherwise, returns false.\n\n Examples\n --------\n >>> ps.range(10).empty\n False\n\n >>> ps.range(0).empty\n True\n\n >>> ps.DataFrame({}, index=list('abc')).empty\n True\n \"\"\"\n return (\n len(self._internal.column_labels) == 0\n or self._internal.resolved_copy.spark_frame.rdd.isEmpty()\n )\n\n @property\n def style(self) -> \"Styler\":\n \"\"\"\n Property returning a Styler object containing methods for\n building a styled HTML representation for the DataFrame.\n\n .. note:: currently it collects top 1000 rows and return its\n pandas `pandas.io.formats.style.Styler` instance.\n\n Examples\n --------\n >>> ps.range(1001).style # doctest: +SKIP\n \n \"\"\"\n max_results = get_option(\"compute.max_rows\")\n pdf = self.head(max_results + 1)._to_internal_pandas()\n if len(pdf) > max_results:\n warnings.warn(\"'style' property will only use top %s rows.\" % max_results, UserWarning)\n return pdf.head(max_results).style\n\n def set_index(\n self,\n keys: Union[Name, List[Name]],\n drop: bool = True,\n append: bool = False,\n inplace: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. 
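# Sketch: `mask` inverts the condition and delegates to `where`, so the two calls below
# produce the same frame. The option is toggled the same way the examples above do, since
# the boolean Series is assigned across frames internally.
import pyspark.pandas as ps
from pyspark.pandas.config import set_option, reset_option

set_option("compute.ops_on_diff_frames", True)
m_psdf = ps.DataFrame({"A": [0, 1, 2, 3]})
cond_series = m_psdf["A"] > 1
masked_a = m_psdf.mask(cond_series).sort_index()
masked_b = m_psdf.where(~cond_series).sort_index()
reset_option("compute.ops_on_diff_frames")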
Here, \"array\"\n encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n Changed row labels.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n\n Examples\n --------\n >>> df = ps.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]},\n ... columns=['month', 'year', 'sale'])\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n key_list: List[Label]\n if is_name_like_tuple(keys):\n key_list = [cast(Label, keys)]\n elif is_name_like_value(keys):\n key_list = [(keys,)]\n else:\n key_list = [key if is_name_like_tuple(key) else (key,) for key in keys]\n columns = set(self._internal.column_labels)\n for key in key_list:\n if key not in columns:\n raise KeyError(name_like_string(key))\n\n if drop:\n column_labels = [\n label for label in self._internal.column_labels if label not in key_list\n ]\n else:\n column_labels = self._internal.column_labels\n if append:\n index_spark_columns = self._internal.index_spark_columns + [\n self._internal.spark_column_for(label) for label in key_list\n ]\n index_names = self._internal.index_names + key_list\n index_fields = self._internal.index_fields + [\n self._internal.field_for(label) for label in key_list\n ]\n else:\n index_spark_columns = [self._internal.spark_column_for(label) for label in key_list]\n index_names = key_list\n index_fields = [self._internal.field_for(label) for label in key_list]\n\n internal = self._internal.copy(\n index_spark_columns=index_spark_columns,\n index_names=index_names,\n index_fields=index_fields,\n column_labels=column_labels,\n data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels],\n data_fields=[self._internal.field_for(label) for label in column_labels],\n )\n\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def reset_index(\n self,\n level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,\n drop: bool = False,\n inplace: bool = False,\n col_level: int = 0,\n col_fill: str = \"\",\n ) -> Optional[\"DataFrame\"]:\n \"\"\"Reset the index, or a level of it.\n\n For DataFrame with multi-level index, return new DataFrame with labeling information in\n the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.\n For a standard index, the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. 
This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame\n DataFrame with the new index.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n\n Examples\n --------\n >>> df = ps.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark\n does not automatically add a sequential index. The following 0, 1, 2, 3 are only\n there when we display the DataFrame.\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = ps.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1,\n ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1,\n ... 
col_fill='genus') # doctest: +NORMALIZE_WHITESPACE\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n multi_index = self._internal.index_level > 1\n\n def rename(index: int) -> Label:\n if multi_index:\n return (\"level_{}\".format(index),)\n else:\n if (\"index\",) not in self._internal.column_labels:\n return (\"index\",)\n else:\n return (\"level_{}\".format(index),)\n\n if level is None:\n new_column_labels = [\n name if name is not None else rename(i)\n for i, name in enumerate(self._internal.index_names)\n ]\n new_data_spark_columns = [\n scol.alias(name_like_string(label))\n for scol, label in zip(self._internal.index_spark_columns, new_column_labels)\n ]\n new_data_fields = self._internal.index_fields\n\n index_spark_columns = []\n index_names = []\n index_fields = []\n else:\n if is_list_like(level):\n level = list(cast(Sequence[Union[int, Name]], level))\n if isinstance(level, int) or is_name_like_tuple(level):\n level_list = [cast(Union[int, Label], level)]\n elif is_name_like_value(level):\n level_list = [(level,)]\n else:\n level_list = [\n lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,)\n for lvl in level\n ]\n\n if all(isinstance(lvl, int) for lvl in level_list):\n int_level_list = cast(List[int], level_list)\n for lev in int_level_list:\n if lev >= self._internal.index_level:\n raise IndexError(\n \"Too many levels: Index has only {} level, not {}\".format(\n self._internal.index_level, lev + 1\n )\n )\n idx = int_level_list\n elif all(is_name_like_tuple(lev) for lev in level_list):\n idx = []\n for label in cast(List[Label], level_list):\n try:\n i = self._internal.index_names.index(label)\n idx.append(i)\n except ValueError:\n if multi_index:\n raise KeyError(\"Level unknown not found\")\n else:\n raise KeyError(\n \"Level unknown must be same as name ({})\".format(\n name_like_string(self._internal.index_names[0])\n )\n )\n else:\n raise ValueError(\"Level should be all int or all string.\")\n idx.sort()\n\n new_column_labels = []\n new_data_spark_columns = []\n new_data_fields = []\n\n index_spark_columns = self._internal.index_spark_columns.copy()\n index_names = self._internal.index_names.copy()\n index_fields = self._internal.index_fields.copy()\n\n for i in idx[::-1]:\n name = index_names.pop(i)\n new_column_labels.insert(0, name if name is not None else rename(i))\n\n scol = index_spark_columns.pop(i)\n new_data_spark_columns.insert(0, scol.alias(name_like_string(name)))\n\n new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name)))\n\n if drop:\n new_data_spark_columns = []\n new_column_labels = []\n new_data_fields = []\n\n for label in new_column_labels:\n if label in self._internal.column_labels:\n raise ValueError(\"cannot insert {}, already exists\".format(name_like_string(label)))\n\n if self._internal.column_labels_level > 1:\n column_depth = len(self._internal.column_labels[0])\n if col_level >= column_depth:\n raise IndexError(\n \"Too many levels: Index has only {} levels, not {}\".format(\n column_depth, col_level + 1\n )\n )\n if any(col_level + len(label) > column_depth for label in new_column_labels):\n raise ValueError(\"Item must have length equal to number of levels.\")\n new_column_labels = [\n tuple(\n ([col_fill] * col_level)\n + list(label)\n + ([col_fill] * (column_depth - (len(label) + col_level)))\n )\n for label in new_column_labels\n ]\n\n 
internal = self._internal.copy(\n index_spark_columns=index_spark_columns,\n index_names=index_names,\n index_fields=index_fields,\n column_labels=new_column_labels + self._internal.column_labels,\n data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns,\n data_fields=new_data_fields + self._internal.data_fields,\n )\n\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def isnull(self) -> \"DataFrame\":\n \"\"\"\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\n See Also\n --------\n DataFrame.notnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.isnull()\n 0 1\n 0 False False\n 1 False True\n 2 False True\n 3 False False\n\n >>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])\n >>> df.isnull()\n 0 1 2\n 0 True False True\n 1 False True False\n \"\"\"\n return self._apply_series_op(lambda psser: psser.isnull())\n\n isna = isnull\n\n def notnull(self) -> \"DataFrame\":\n \"\"\"\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).\n\n See Also\n --------\n DataFrame.isnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.notnull()\n 0 1\n 0 True True\n 1 True False\n 2 True False\n 3 True True\n\n >>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df.notnull()\n 0 1 2\n 0 True True True\n 1 True False True\n \"\"\"\n return self._apply_series_op(lambda psser: psser.notnull())\n\n notna = notnull\n\n def insert(\n self,\n loc: int,\n column: Name,\n value: Union[Scalar, \"Series\", Iterable],\n allow_duplicates: bool = False,\n ) -> None:\n \"\"\"\n Insert column into DataFrame at specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. 
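# Illustrative: a common pattern built on `isnull` — counting missing values per column.
import numpy as np
import pyspark.pandas as ps

na_psdf = ps.DataFrame({"x": [1.0, np.nan, 3.0], "y": [None, "b", "c"]})
missing_per_column = na_psdf.isnull().sum()   # pandas-on-Spark Series of NA counts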
Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n\n Examples\n --------\n >>> psdf = ps.DataFrame([1, 2, 3])\n >>> psdf.sort_index()\n 0\n 0 1\n 1 2\n 2 3\n >>> psdf.insert(0, 'x', 4)\n >>> psdf.sort_index()\n x 0\n 0 4 1\n 1 4 2\n 2 4 3\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n\n >>> psdf.insert(1, 'y', [5, 6, 7])\n >>> psdf.sort_index()\n x y 0\n 0 4 5 1\n 1 4 6 2\n 2 4 7 3\n\n >>> psdf.insert(2, 'z', ps.Series([8, 9, 10]))\n >>> psdf.sort_index()\n x y z 0\n 0 4 5 8 1\n 1 4 6 9 2\n 2 4 7 10 3\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(loc, int):\n raise TypeError(\"loc must be int\")\n\n assert 0 <= loc <= len(self.columns)\n assert allow_duplicates is False\n\n if not is_name_like_value(column):\n raise TypeError(\n '\"column\" should be a scalar value or tuple that contains scalar values'\n )\n\n # TODO(SPARK-37723): Support tuple for non-MultiIndex column name.\n if is_name_like_tuple(column):\n if self._internal.column_labels_level > 1:\n if len(column) != len(self.columns.levels): # type: ignore[attr-defined]\n # To be consistent with pandas\n raise ValueError('\"column\" must have length equal to number of column levels.')\n else:\n raise NotImplementedError(\n \"Assigning column name as tuple is only supported for MultiIndex columns \"\n \"for now.\"\n )\n\n if column in self.columns:\n raise ValueError(\"cannot insert %s, already exists\" % str(column))\n\n psdf = self.copy()\n psdf[column] = value\n columns = psdf.columns[:-1].insert(loc, psdf.columns[-1])\n psdf = psdf[columns]\n self._update_internal_frame(psdf._internal)\n\n # TODO: add frep and axis parameter\n def shift(self, periods: int = 1, fill_value: Optional[Any] = None) -> \"DataFrame\":\n \"\"\"\n Shift DataFrame by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input DataFrame, shifted.\n\n Examples\n --------\n >>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n\n \"\"\"\n return self._apply_series_op(\n lambda psser: psser._shift(periods, fill_value), should_resolve=True\n )\n\n # TODO: axis should support 1 or 'columns' either at this moment\n def diff(self, periods: int = 1, axis: Axis = 0) -> \"DataFrame\":\n \"\"\"\n First discrete difference of element.\n\n Calculates the difference of a DataFrame element compared with another element in the\n DataFrame (default is the element in the same column of the previous row).\n\n .. 
note:: the current implementation of diff uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative values.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n diffed : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n return self._apply_series_op(lambda psser: psser._diff(periods), should_resolve=True)\n\n # TODO: axis should support 1 or 'columns' either at this moment\n def nunique(\n self,\n axis: Axis = 0,\n dropna: bool = True,\n approx: bool = False,\n rsd: float = 0.05,\n ) -> \"Series\":\n \"\"\"\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to pandas-on-Spark and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.\n\n Returns\n -------\n The number of unique values per column as a pandas-on-Spark Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(dropna=False)\n A 3\n B 2\n dtype: int64\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> df.nunique(approx=True)\n A 3\n B 1\n dtype: int64\n \"\"\"\n from pyspark.pandas.series import first_series\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n sdf = self._internal.spark_frame.select(\n [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]\n + [\n self._psser_for(label)._nunique(dropna, approx, rsd)\n for label in self._internal.column_labels\n ]\n )\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.max_rows\", 1):\n internal = self._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n 
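# Reset the index and field metadata here: the aggregated frame is a single row keyed by a\n # dummy null index, and the distinct counts no longer share the original column types, so\n # data_fields is left as None to be re-inferred.\n 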
index_names=[None],\n index_fields=[None],\n data_spark_columns=[\n scol_for(sdf, col) for col in self._internal.data_spark_column_names\n ],\n data_fields=None,\n )\n return first_series(DataFrame(internal).transpose())\n\n def round(self, decimals: Union[int, Dict[Name, int], \"Series\"] = 0) -> \"DataFrame\":\n \"\"\"\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. Elements\n of `decimals` which are not columns of the input will be\n ignored.\n\n .. note:: If `decimals` is a Series, it is expected to be small,\n as all the data is loaded into the driver's memory.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n Series.round\n\n Examples\n --------\n >>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076],\n ... 'B':[0.992815, 0.645646, 0.149370],\n ... 'C':[0.173891, 0.577595, 0.491027]},\n ... columns=['A', 'B', 'C'],\n ... index=['first', 'second', 'third'])\n >>> df\n A B C\n first 0.028208 0.992815 0.173891\n second 0.038683 0.645646 0.577595\n third 0.877076 0.149370 0.491027\n\n >>> df.round(2)\n A B C\n first 0.03 0.99 0.17\n second 0.04 0.65 0.58\n third 0.88 0.15 0.49\n\n >>> df.round({'A': 1, 'C': 2})\n A B C\n first 0.0 0.992815 0.17\n second 0.0 0.645646 0.58\n third 0.9 0.149370 0.49\n\n >>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C'])\n >>> df.round(decimals)\n A B C\n first 0.0 1.0 0.17\n second 0.0 1.0 0.58\n third 0.9 0.0 0.49\n \"\"\"\n if isinstance(decimals, ps.Series):\n decimals_dict = {\n k if isinstance(k, tuple) else (k,): v\n for k, v in decimals._to_internal_pandas().items()\n }\n elif isinstance(decimals, dict):\n decimals_dict = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()}\n elif isinstance(decimals, int):\n decimals_dict = {k: decimals for k in self._internal.column_labels}\n else:\n raise TypeError(\"decimals must be an integer, a dict-like or a Series\")\n\n def op(psser: ps.Series) -> Union[ps.Series, Column]:\n label = psser._column_label\n if label in decimals_dict:\n return F.round(psser.spark.column, decimals_dict[label])\n else:\n return psser\n\n return self._apply_series_op(op)\n\n def _mark_duplicates(\n self,\n subset: Optional[Union[Name, List[Name]]] = None,\n keep: Union[bool, str] = \"first\",\n ) -> Tuple[SparkDataFrame, str]:\n if subset is None:\n subset_list = self._internal.column_labels\n else:\n if is_name_like_tuple(subset):\n subset_list = [cast(Label, subset)]\n elif is_name_like_value(subset):\n subset_list = [(subset,)]\n else:\n subset_list = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset]\n diff = set(subset_list).difference(set(self._internal.column_labels))\n if len(diff) > 0:\n raise KeyError(\", \".join([name_like_string(d) for d in diff]))\n group_cols = [self._internal.spark_column_name_for(label) for label in subset_list]\n\n sdf = self._internal.resolved_copy.spark_frame\n\n column = verify_temp_column_name(sdf, \"__duplicated__\")\n\n if keep == \"first\" or keep == \"last\":\n if keep == \"first\":\n ord_func = F.asc\n else:\n ord_func = F.desc\n window = (\n Window.partitionBy(*group_cols)\n 
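# Within each group of identical `subset` values, order rows by their natural order\n # (ascending keeps the first occurrence, descending the last); rows with\n # row_number() > 1 are then flagged as duplicates below.\n 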
.orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME))\n .rowsBetween(Window.unboundedPreceding, Window.currentRow)\n )\n sdf = sdf.withColumn(column, F.row_number().over(window) > 1)\n elif not keep:\n window = Window.partitionBy(*group_cols).rowsBetween(\n Window.unboundedPreceding, Window.unboundedFollowing\n )\n sdf = sdf.withColumn(column, F.count(\"*\").over(window) > 1)\n else:\n raise ValueError(\"'keep' only supports 'first', 'last' and False\")\n return sdf, column\n\n def duplicated(\n self,\n subset: Optional[Union[Name, List[Name]]] = None,\n keep: Union[bool, str] = \"first\",\n ) -> \"Series\":\n \"\"\"\n Return boolean Series denoting duplicate rows, optionally only considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates,\n by default use all of the columns\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n duplicated : Series\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},\n ... columns = ['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 1 1 1\n 2 1 1 1\n 3 3 4 5\n\n >>> df.duplicated().sort_index()\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n Mark duplicates as ``True`` except for the last occurrence.\n\n >>> df.duplicated(keep='last').sort_index()\n 0 True\n 1 True\n 2 False\n 3 False\n dtype: bool\n\n Mark all duplicates as ``True``.\n\n >>> df.duplicated(keep=False).sort_index()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n \"\"\"\n from pyspark.pandas.series import first_series\n\n sdf, column = self._mark_duplicates(subset, keep)\n\n sdf = sdf.select(\n self._internal.index_spark_columns\n + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)]\n )\n return first_series(\n DataFrame(\n InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n column_labels=[None],\n data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)],\n )\n )\n )\n\n # TODO: support other as DataFrame or array-like\n def dot(self, other: \"Series\") -> \"Series\":\n \"\"\"\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context(\n ... 'compute.max_rows', 1000, \"compute.ops_on_diff_frames\", True\n ... ): # doctest: +NORMALIZE_WHITESPACE\n ... psdf = ps.DataFrame({'a': range(1001)})\n ... psser = ps.Series([2], index=['a'])\n ... 
psdf.dot(psser)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Parameters\n ----------\n other : Series\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series\n Return the matrix product between self and other as a Series.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> psser = ps.Series([1, 1, 2, 1])\n >>> psdf.dot(psser)\n 0 -4\n 1 5\n dtype: int64\n\n Note how shuffling of the objects does not change the result.\n\n >>> psser2 = psser.reindex([1, 0, 2, 3])\n >>> psdf.dot(psser2)\n 0 -4\n 1 5\n dtype: int64\n >>> psdf @ psser2\n 0 -4\n 1 5\n dtype: int64\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(other, ps.Series):\n raise TypeError(\"Unsupported type {}\".format(type(other).__name__))\n else:\n return cast(ps.Series, other.dot(self.transpose())).rename(None)\n\n def __matmul__(self, other: \"Series\") -> \"Series\":\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def to_table(\n self,\n name: str,\n format: Optional[str] = None,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any,\n ) -> None:\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_table`, \"\n \"the existing index is lost when converting to table.\"\n )\n mode = validate_mode(mode)\n return self.spark.to_table(name, format, mode, partition_cols, index_col, **options)\n\n to_table.__doc__ = SparkFrameMethods.to_table.__doc__\n\n def to_delta(\n self,\n path: str,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: \"OptionalPrimitiveType\",\n ) -> None:\n \"\"\"\n Write the DataFrame out as a Delta Lake table.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. 
By default, the index is always lost.\n options : dict\n All other options passed directly into Delta Lake.\n\n See Also\n --------\n read_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n Create a new Delta Lake table, partitioned by one column:\n\n >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') # doctest: +SKIP\n\n Partitioned by two columns:\n\n >>> df.to_delta('%s/to_delta/bar' % path,\n ... partition_cols=['date', 'country']) # doctest: +SKIP\n\n Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:\n\n >>> df.to_delta('%s/to_delta/bar' % path,\n ... mode='overwrite', replaceWhere='date >= \"2012-01-01\"') # doctest: +SKIP\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_delta`, \"\n \"the existing index is lost when converting to Delta.\"\n )\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\") # type: ignore[assignment]\n\n mode = validate_mode(mode)\n self.spark.to_spark_io(\n path=path,\n mode=mode,\n format=\"delta\",\n partition_cols=partition_cols,\n index_col=index_col,\n **options,\n )\n\n def to_parquet(\n self,\n path: str,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n compression: Optional[str] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any,\n ) -> None:\n \"\"\"\n Write the DataFrame out as a Parquet file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}\n Compression codec to use when saving to file. If None is set, it uses the\n value specified in `spark.sql.parquet.compression.codec`.\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_parquet\n DataFrame.to_delta\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')\n\n >>> df.to_parquet(\n ... '%s/to_parquet/foo.parquet' % path,\n ... mode = 'overwrite',\n ... partition_cols=['date', 'country'])\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_parquet`, \"\n \"the existing index is lost when converting to Parquet.\"\n )\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n mode = validate_mode(mode)\n builder = self.to_spark(index_col=index_col).write.mode(mode)\n if partition_cols is not None:\n builder.partitionBy(partition_cols)\n if compression is not None:\n builder.option(\"compression\", compression)\n builder.options(**options).format(\"parquet\").save(path)\n\n def to_orc(\n self,\n path: str,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: \"OptionalPrimitiveType\",\n ) -> None:\n \"\"\"\n Write the DataFrame out as a ORC file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_orc\n DataFrame.to_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date')\n\n >>> df.to_orc(\n ... '%s/to_orc/foo.orc' % path,\n ... mode = 'overwrite',\n ... 
partition_cols=['date', 'country'])\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_orc`, \"\n \"the existing index is lost when converting to ORC.\"\n )\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\") # type: ignore[assignment]\n\n mode = validate_mode(mode)\n self.spark.to_spark_io(\n path=path,\n mode=mode,\n format=\"orc\",\n partition_cols=partition_cols,\n index_col=index_col,\n **options,\n )\n\n def to_spark_io(\n self,\n path: Optional[str] = None,\n format: Optional[str] = None,\n mode: str = \"overwrite\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: \"OptionalPrimitiveType\",\n ) -> None:\n \"\"\"An alias for :func:`DataFrame.spark.to_spark_io`.\n See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`.\n\n .. deprecated:: 3.2.0\n Use :func:`DataFrame.spark.to_spark_io` instead.\n \"\"\"\n warnings.warn(\"Deprecated in 3.2, use DataFrame.spark.to_spark_io instead.\", FutureWarning)\n return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options)\n\n to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__\n\n def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_spark`, \"\n \"the existing index is lost when converting to Spark DataFrame.\"\n )\n return self._to_spark(index_col)\n\n to_spark.__doc__ = SparkFrameMethods.__doc__\n\n def _to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:\n \"\"\"\n Same as `to_spark()`, without issuing the advice log when `index_col` is not specified,\n for internal usage.\n \"\"\"\n return self.spark.frame(index_col)\n\n def to_pandas(self) -> pd.DataFrame:\n \"\"\"\n Return a pandas DataFrame.\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.to_pandas()\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.6\n 2 0.6 0.0\n 3 0.2 0.1\n \"\"\"\n log_advice(\n \"`to_pandas` loads all data into the driver's memory. \"\n \"It should only be used if the resulting pandas DataFrame is expected to be small.\"\n )\n return self._to_pandas()\n\n def _to_pandas(self) -> pd.DataFrame:\n \"\"\"\n Same as `to_pandas()`, without issuing the advice log, for internal usage.\n \"\"\"\n return self._internal.to_pandas_frame.copy()\n\n def assign(self, **kwargs: Any) -> \"DataFrame\":\n \"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable, Series or Index}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change the input DataFrame (though pandas-on-Spark doesn't check it).\n If the values are not callable (e.g. a Series or a literal),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Examples\n --------\n >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]},\n ... 
index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence and you can also\n create multiple columns within the same assign.\n\n >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,\n ... temp_k=df['temp_c'] + 273.15,\n ... temp_idx=df.index)\n >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']]\n temp_c temp_f temp_k temp_idx\n Portland 17.0 62.6 290.15 Portland\n Berkeley 25.0 77.0 298.15 Berkeley\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible\n but you cannot refer to newly created or modified columns. This\n feature is supported in pandas for Python 3.6 and later but not in\n pandas-on-Spark. In pandas-on-Spark, all items are computed first,\n and then assigned.\n \"\"\"\n return self._assign(kwargs)\n\n def _assign(self, kwargs: Any) -> \"DataFrame\":\n assert isinstance(kwargs, dict)\n from pyspark.pandas.indexes import MultiIndex\n from pyspark.pandas.series import IndexOpsMixin\n\n for k, v in kwargs.items():\n is_invalid_assignee = (\n not (isinstance(v, (IndexOpsMixin, Column)) or callable(v) or is_scalar(v))\n ) or isinstance(v, MultiIndex)\n if is_invalid_assignee:\n raise TypeError(\n \"Column assignment doesn't support type \" \"{0}\".format(type(v).__name__)\n )\n if callable(v):\n kwargs[k] = v(self)\n\n pairs = {\n (k if is_name_like_tuple(k) else (k,)): (\n (v.spark.column, v._internal.data_fields[0])\n if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex)\n else (v, None)\n if isinstance(v, Column)\n else (SF.lit(v), None)\n )\n for k, v in kwargs.items()\n }\n\n scols = []\n data_fields = []\n for label in self._internal.column_labels:\n for i in range(len(label)):\n if label[: len(label) - i] in pairs:\n scol, field = pairs[label[: len(label) - i]]\n\n name = self._internal.spark_column_name_for(label)\n scol = scol.alias(name)\n if field is not None:\n field = field.copy(name=name)\n break\n else:\n scol = self._internal.spark_column_for(label)\n field = self._internal.field_for(label)\n scols.append(scol)\n data_fields.append(field)\n\n column_labels = self._internal.column_labels.copy()\n for label, (scol, field) in pairs.items():\n if label not in set(i[: len(label)] for i in self._internal.column_labels):\n name = name_like_string(label)\n scols.append(scol.alias(name))\n if field is not None:\n field = field.copy(name=name)\n data_fields.append(field)\n\n column_labels.append(label)\n\n level = self._internal.column_labels_level\n column_labels = [\n tuple(list(label) + ([\"\"] * (level - len(label)))) for label in column_labels\n ]\n\n internal = self._internal.with_new_columns(\n scols, column_labels=column_labels, data_fields=data_fields\n )\n return DataFrame(internal)\n\n @staticmethod\n def from_records(\n data: Union[np.ndarray, List[tuple], dict, pd.DataFrame],\n index: Union[str, list, np.ndarray] = None,\n exclude: list = None,\n columns: list = None,\n coerce_float: bool = False,\n nrows: int = None,\n ) -> \"DataFrame\":\n \"\"\"\n Convert structured or record ndarray to DataFrame.\n\n Parameters\n ----------\n data : ndarray (structured dtype), list of tuples, dict, or DataFrame\n index : string, list of fields, array-like\n Field of array to use as the index, alternately a specific set of 
input labels to use\n exclude : sequence, default None\n Columns or fields to exclude\n columns : sequence, default None\n Column names to use. If the passed data do not have names associated with them, this\n argument provides names for the columns. Otherwise this argument indicates the order of\n the columns in the result (any names not found in the data will become all-NA columns)\n coerce_float : boolean, default False\n Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to\n floating point, useful for SQL result sets\n nrows : int, default None\n Number of rows to read if data is an iterator\n\n Returns\n -------\n df : DataFrame\n\n Examples\n --------\n Use dict as input\n\n >>> ps.DataFrame.from_records({'A': [1, 2, 3]})\n A\n 0 1\n 1 2\n 2 3\n\n Use list of tuples as input\n\n >>> ps.DataFrame.from_records([(1, 2), (3, 4)])\n 0 1\n 0 1 2\n 1 3 4\n\n Use NumPy array as input\n\n >>> ps.DataFrame.from_records(np.eye(3))\n 0 1 2\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n \"\"\"\n return DataFrame(\n pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows)\n )\n\n def to_records(\n self,\n index: bool = True,\n column_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None,\n index_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None,\n ) -> np.recarray:\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is\n expected to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n\n >>> df.to_records() # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False) # doctest: +SKIP\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"}) # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n Data types can also be specified for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\") # doctest: +SKIP\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n args = locals()\n psdf = self\n\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args\n )\n\n def copy(self, deep: bool = True) -> \"DataFrame\":\n \"\"\"\n Make a copy of this object's indices and data.\n\n Parameters\n ----------\n deep : bool, default True\n This parameter is not supported; it is just a dummy parameter to match pandas.\n\n Returns\n -------\n copy : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... 
columns=['x', 'y', 'z', 'w'])\n >>> df\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df_copy = df.copy()\n >>> df_copy\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n \"\"\"\n return DataFrame(self._internal)\n\n def dropna(\n self,\n axis: Axis = 0,\n how: str = \"any\",\n thresh: Optional[int] = None,\n subset: Optional[Union[Name, List[Name]]] = None,\n inplace: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries dropped from it.\n\n See Also\n --------\n DataFrame.drop : Drop specified labels from columns.\n DataFrame.isnull: Indicate missing values.\n DataFrame.notnull : Indicate existing (non-missing) values.\n\n Examples\n --------\n >>> df = ps.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [None, 'Batmobile', 'Bullwhip'],\n ... \"born\": [None, \"1940-04-25\", None]},\n ... columns=['name', 'toy', 'born'])\n >>> df\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'born'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n axis = validate_axis(axis)\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if thresh is None:\n if how is None:\n raise TypeError(\"must specify how or thresh\")\n elif how not in (\"any\", \"all\"):\n raise ValueError(\"invalid how option: {h}\".format(h=how))\n\n labels: Optional[List[Label]]\n if subset is not None:\n if isinstance(subset, str):\n labels = [(subset,)]\n elif isinstance(subset, tuple):\n labels = [subset]\n else:\n labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]\n else:\n labels = None\n\n if axis == 0:\n if labels is not None:\n invalids = [label for label in labels if label not in self._internal.column_labels]\n if len(invalids) > 0:\n raise KeyError(invalids)\n else:\n labels = self._internal.column_labels\n\n cnt = reduce(\n lambda x, y: x + y,\n [\n F.when(self._psser_for(label).notna().spark.column, 
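# 1 for a non-null value, else 0; the per-row sum is compared against `how`/`thresh`\n # below to decide which rows are kept\n 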
1).otherwise(0)\n for label in labels\n ],\n SF.lit(0),\n )\n if thresh is not None:\n pred = cnt >= SF.lit(int(thresh))\n elif how == \"any\":\n pred = cnt == SF.lit(len(labels))\n elif how == \"all\":\n pred = cnt > SF.lit(0)\n\n internal = self._internal.with_filter(pred)\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n else:\n assert axis == 1\n\n internal = self._internal.resolved_copy\n\n if labels is not None:\n if any(len(lbl) != internal.index_level for lbl in labels):\n raise ValueError(\n \"The length of each subset must be the same as the index size.\"\n )\n\n cond = reduce(\n lambda x, y: x | y,\n [\n reduce(\n lambda x, y: x & y,\n [\n scol == SF.lit(part)\n for part, scol in zip(lbl, internal.index_spark_columns)\n ],\n )\n for lbl in labels\n ],\n )\n\n internal = internal.with_filter(cond)\n\n psdf: DataFrame = DataFrame(internal)\n\n null_counts = []\n for label in internal.column_labels:\n psser = psdf._psser_for(label)\n cond = psser.isnull().spark.column\n null_counts.append(\n F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label))\n )\n\n counts = internal.spark_frame.select(null_counts + [F.count(\"*\")]).head()\n\n if thresh is not None:\n column_labels = [\n label\n for label, cnt in zip(internal.column_labels, counts)\n if (cnt or 0) >= int(thresh)\n ]\n elif how == \"any\":\n column_labels = [\n label\n for label, cnt in zip(internal.column_labels, counts)\n if (cnt or 0) == counts[-1]\n ]\n elif how == \"all\":\n column_labels = [\n label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0\n ]\n\n psdf = self[column_labels]\n if inplace:\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf\n\n # TODO: add 'limit' when value parameter exists\n def fillna(\n self,\n value: Optional[Union[Any, Dict[Name, Any]]] = None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"Fill NA/NaN values.\n\n .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series pad / ffill: propagate last valid\n observation forward to next valid backfill / bfill:\n use NEXT valid observation to fill gap\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 
'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 1.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 1.0 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 1.0 4\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError(\"fillna currently only works for axis=0 or axis='index'\")\n\n if value is not None:\n if not isinstance(value, (float, int, str, bool, dict, pd.Series)):\n raise TypeError(\"Unsupported type %s\" % type(value).__name__)\n if limit is not None:\n raise ValueError(\"limit parameter for value is not support now\")\n if isinstance(value, pd.Series):\n value = value.to_dict()\n if isinstance(value, dict):\n for v in value.values():\n if not isinstance(v, (float, int, str, bool)):\n raise TypeError(\"Unsupported type %s\" % type(v).__name__)\n value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()}\n\n def op(psser: ps.Series) -> ps.Series:\n label = psser._column_label\n for k, v in value.items():\n if k == label[: len(k)]:\n return psser._fillna(\n value=value[k], method=method, axis=axis, limit=limit\n )\n else:\n return psser\n\n else:\n\n def op(psser: ps.Series) -> ps.Series:\n return psser._fillna(value=value, method=method, axis=axis, limit=limit)\n\n elif method is not None:\n\n def op(psser: ps.Series) -> ps.Series:\n return psser._fillna(value=value, method=method, axis=axis, limit=limit)\n\n else:\n raise ValueError(\"Must specify a fillna 'value' or 'method' parameter.\")\n\n psdf = self._apply_series_op(op, should_resolve=(method is not None))\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._update_internal_frame(psdf._internal, requires_same_anchor=False)\n return None\n else:\n return psdf\n\n def interpolate(\n self,\n method: str = \"linear\",\n limit: Optional[int] = None,\n limit_direction: Optional[str] = None,\n limit_area: Optional[str] = None,\n ) -> \"DataFrame\":\n if method not in [\"linear\"]:\n raise NotImplementedError(\"interpolate currently works only for method='linear'\")\n if (limit is not None) and (not limit > 0):\n raise ValueError(\"limit must be > 0.\")\n if (limit_direction is not None) and (\n limit_direction not in [\"forward\", \"backward\", \"both\"]\n ):\n raise ValueError(\"invalid limit_direction: '{}'\".format(limit_direction))\n if (limit_area is not None) and (limit_area not in [\"inside\", \"outside\"]):\n raise ValueError(\"invalid limit_area: '{}'\".format(limit_area))\n\n numeric_col_names = []\n for label in self._internal.column_labels:\n psser = self._psser_for(label)\n if isinstance(psser.spark.data_type, (NumericType, BooleanType)):\n numeric_col_names.append(psser.name)\n\n psdf = self[numeric_col_names]\n return psdf._apply_series_op(\n lambda psser: psser._interpolate(\n method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area\n ),\n should_resolve=True,\n )\n\n def replace(\n self,\n to_replace: 
Optional[Union[Any, List, Tuple, Dict]] = None,\n value: Optional[Any] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n regex: bool = False,\n method: str = \"pad\",\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Returns a new DataFrame replacing a value with another value.\n\n Parameters\n ----------\n to_replace : int, float, string, list, tuple or dict\n Value to be replaced.\n value : int, float, string, list or tuple\n Value to use to replace holes. The replacement value must be an int, float,\n or string.\n If value is a list or tuple, value should be of the same length as to_replace.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n\n Returns\n -------\n DataFrame\n Object after replacement.\n\n Examples\n --------\n >>> df = ps.DataFrame({\"name\": ['Ironman', 'Captain America', 'Thor', 'Hulk'],\n ... \"weapon\": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},\n ... columns=['name', 'weapon'])\n >>> df\n name weapon\n 0 Ironman Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Scalar `to_replace` and `value`\n\n >>> df.replace('Ironman', 'War-Machine')\n name weapon\n 0 War-Machine Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n List-like `to_replace` and `value`\n\n >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)\n >>> df\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Dicts can be used to specify different replacement values for different existing values.\n To use a dict in this way, the value parameter should be None\n\n >>> df.replace({'Mjolnir': 'Stormbuster'})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n A dict can specify that different values should be replaced in different columns.\n The value parameter should not be None in this case\n\n >>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster')\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n Nested dictionaries\n The value parameter should be None to use a nested dict in this way\n\n >>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n \"\"\"\n if method != \"pad\":\n raise NotImplementedError(\"replace currently works only for method='pad'\")\n if limit is not None:\n raise NotImplementedError(\"replace currently works only when limit=None\")\n if regex is not False:\n raise NotImplementedError(\"replace currently doesn't support regex\")\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)):\n raise TypeError(\"Unsupported type {}\".format(type(value).__name__))\n if to_replace is not None and not isinstance(\n to_replace, (int, float, str, list, tuple, dict)\n ):\n raise TypeError(\"Unsupported type {}\".format(type(to_replace).__name__))\n\n if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)):\n if len(value) != len(to_replace):\n raise ValueError(\"Length of to_replace and value must be the same\")\n\n if isinstance(to_replace, dict) and (\n value is not None or all(isinstance(i, dict) for i in to_replace.values())\n ):\n to_replace_dict = to_replace\n\n def op(psser: ps.Series) -> ps.Series:\n if psser.name in to_replace_dict:\n return psser.replace(\n to_replace=to_replace_dict[psser.name], value=value, regex=regex\n )\n else:\n return psser\n\n else:\n\n def op(psser: 
ps.Series) -> ps.Series:\n return psser.replace(to_replace=to_replace, value=value, regex=regex)\n\n psdf = self._apply_series_op(op)\n if inplace:\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf\n\n def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> \"DataFrame\":\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum threshold value. All values below this threshold will be set to it.\n upper : float or int, default None\n Maximum threshold value. All values above this threshold will be set to it.\n\n Returns\n -------\n DataFrame\n DataFrame with the values outside the clip boundaries replaced.\n\n Examples\n --------\n >>> ps.DataFrame({'A': [0, 2, 4]}).clip(1, 3)\n A\n 0 1\n 1 2\n 2 3\n\n Notes\n -----\n One difference between this implementation and pandas is that running\n pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with \"TypeError: '<=' not supported\n between instances of 'str' and 'int'\" while ps.DataFrame({'A': ['a', 'b']}).clip(0, 1)\n will output the original DataFrame, simply ignoring the incompatible types.\n \"\"\"\n if is_list_like(lower) or is_list_like(upper):\n raise TypeError(\n \"List-like value are not supported for 'lower' and 'upper' at the \" + \"moment\"\n )\n\n if lower is None and upper is None:\n return self\n\n return self._apply_series_op(lambda psser: psser.clip(lower=lower, upper=upper))\n\n def head(self, n: int = 5) -> \"DataFrame\":\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n \"\"\"\n if n < 0:\n n = len(self) + n\n if n <= 0:\n return DataFrame(self._internal.with_filter(SF.lit(False)))\n else:\n sdf = self._internal.resolved_copy.spark_frame\n if get_option(\"compute.ordered_head\"):\n sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME)\n return DataFrame(self._internal.with_new_sdf(sdf.limit(n)))\n\n def last(self, offset: Union[str, DateOffset]) -> \"DataFrame\":\n \"\"\"\n Select final periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the last few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. 
For instance,\n '3D' will display all the rows having their index within the last 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n # Check index type should be format DateTime\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex\")\n\n offset_: Optional[DateOffset] = to_offset(offset)\n assert offset_ is not None\n\n from_date = cast(datetime.datetime, self.index.max()) - offset_ # type: ignore[operator]\n\n return cast(DataFrame, self.loc[from_date:])\n\n def first(self, offset: Union[str, DateOffset]) -> \"DataFrame\":\n \"\"\"\n Select first periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the first few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. For instance,\n '3D' will display all the rows having their index within the first 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calendar days were returned, not the first\n 3 observed days in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n # Check index type should be format DatetimeIndex\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex\")\n\n offset_: Optional[DateOffset] = to_offset(offset)\n assert offset_ is not None\n\n to_date = cast(datetime.datetime, self.index.min()) + offset_ # type: ignore[operator]\n\n return cast(DataFrame, self.loc[:to_date]) # type: ignore[misc]\n\n def pivot_table(\n self,\n values: Optional[Union[Name, List[Name]]] = None,\n index: Optional[List[Name]] = None,\n columns: Optional[Name] = None,\n aggfunc: Union[str, Dict[Name, str]] = \"mean\",\n fill_value: Optional[Any] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. The levels in\n the pivot table will be stored in MultiIndex objects (hierarchical\n indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------\n values : column to aggregate.\n They should be either a list less than three or a string.\n index : column (string) or list of columns\n If an array is passed, it must be the same length as the data.\n The list should contain string.\n columns : column\n Columns used in the pivot operation. 
Only one column is supported and\n it should be a string.\n aggfunc : function (string), dict, default mean\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with.\n\n Returns\n -------\n table : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\",\n ... \"bar\", \"bar\", \"bar\", \"bar\"],\n ... \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\",\n ... \"one\", \"one\", \"two\", \"two\"],\n ... \"C\": [\"small\", \"large\", \"large\", \"small\",\n ... \"small\", \"large\", \"small\", \"small\",\n ... \"large\"],\n ... \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9]},\n ... columns=['A', 'B', 'C', 'D', 'E'])\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = df.pivot_table(values='D', index=['A', 'B'],\n ... columns='C', aggfunc='sum')\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4.0 5\n two 7.0 6\n foo one 4.0 1\n two NaN 6\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = df.pivot_table(values='D', index=['A', 'B'],\n ... columns='C', aggfunc='sum', fill_value=0)\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = df.pivot_table(values=['D'], index =['C'],\n ... columns=\"A\", aggfunc={'D': 'mean'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D\n A bar foo\n C\n large 5.5 2.000000\n small 5.5 2.333333\n\n The next example aggregates on multiple values.\n\n >>> table = df.pivot_table(index=['C'], columns=\"A\", values=['D', 'E'],\n ... 
aggfunc={'D': 'mean', 'E': 'sum'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D E\n A bar foo bar foo\n C\n large 5.5 2.000000 15 9\n small 5.5 2.333333 17 13\n \"\"\"\n if not is_name_like_value(columns):\n raise TypeError(\"columns should be one column name.\")\n\n if not is_name_like_value(values) and not (\n isinstance(values, list) and all(is_name_like_value(v) for v in values)\n ):\n raise TypeError(\"values should be one column or list of columns.\")\n\n if not isinstance(aggfunc, str) and (\n not isinstance(aggfunc, dict)\n or not all(\n is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items()\n )\n ):\n raise TypeError(\n \"aggfunc must be a dict mapping from column name \"\n \"to aggregate functions (string).\"\n )\n\n if isinstance(aggfunc, dict) and index is None:\n raise NotImplementedError(\n \"pivot_table doesn't support aggfunc\" \" as dict and without index.\"\n )\n if isinstance(values, list) and index is None:\n raise NotImplementedError(\"values can't be a list without index.\")\n\n if columns not in self.columns:\n raise ValueError(\"Wrong columns {}.\".format(name_like_string(columns)))\n if not is_name_like_tuple(columns):\n columns = (columns,)\n\n if isinstance(values, list):\n values = [col if is_name_like_tuple(col) else (col,) for col in values]\n if not all(\n isinstance(self._internal.spark_type_for(col), NumericType) for col in values\n ):\n raise TypeError(\"values should be a numeric type.\")\n else:\n values = values if is_name_like_tuple(values) else (values,)\n if not isinstance(self._internal.spark_type_for(values), NumericType):\n raise TypeError(\"values should be a numeric type.\")\n\n if isinstance(aggfunc, str):\n if isinstance(values, list):\n agg_cols = [\n F.expr(\n \"{1}(`{0}`) as `{0}`\".format(\n self._internal.spark_column_name_for(value), aggfunc\n )\n )\n for value in values\n ]\n else:\n agg_cols = [\n F.expr(\n \"{1}(`{0}`) as `{0}`\".format(\n self._internal.spark_column_name_for(values), aggfunc\n )\n )\n ]\n elif isinstance(aggfunc, dict):\n aggfunc = {\n key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items()\n }\n agg_cols = [\n F.expr(\n \"{1}(`{0}`) as `{0}`\".format(self._internal.spark_column_name_for(key), value)\n )\n for key, value in aggfunc.items()\n ]\n agg_columns = [key for key, _ in aggfunc.items()]\n\n if set(agg_columns) != set(values):\n raise ValueError(\"Columns in aggfunc must be the same as values.\")\n\n sdf = self._internal.resolved_copy.spark_frame\n if index is None:\n sdf = (\n sdf.groupBy()\n .pivot(pivot_col=self._internal.spark_column_name_for(columns))\n .agg(*agg_cols)\n )\n\n elif isinstance(index, list):\n index = [label if is_name_like_tuple(label) else (label,) for label in index]\n sdf = (\n sdf.groupBy([self._internal.spark_column_name_for(label) for label in index])\n .pivot(pivot_col=self._internal.spark_column_name_for(columns))\n .agg(*agg_cols)\n )\n else:\n raise TypeError(\"index should be a None or a list of columns.\")\n\n if fill_value is not None and isinstance(fill_value, (int, float)):\n sdf = sdf.fillna(fill_value)\n\n psdf: DataFrame\n if index is not None:\n index_columns = [self._internal.spark_column_name_for(label) for label in index]\n index_fields = [self._internal.field_for(label) for label in index]\n\n if isinstance(values, list):\n data_columns = [column for column in sdf.columns if column not in index_columns]\n\n if len(values) > 1:\n # If we have two values, Spark will return column's name\n # in 
this format: column_values, where column contains\n # their values in the DataFrame and values is\n # the column list passed to the pivot_table().\n # E.g. if column is b and values is ['b','e'],\n # then ['2_b', '2_e', '3_b', '3_e'].\n\n # We sort the columns of Spark DataFrame by values.\n data_columns.sort(key=lambda x: x.split(\"_\", 1)[1])\n sdf = sdf.select(index_columns + data_columns)\n\n column_name_to_index = dict(\n zip(self._internal.data_spark_column_names, self._internal.column_labels)\n )\n column_labels = [\n tuple(list(column_name_to_index[name.split(\"_\")[1]]) + [name.split(\"_\")[0]])\n for name in data_columns\n ]\n column_label_names = (\n [cast(Optional[Name], None)] * column_labels_level(values)\n ) + [columns]\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_columns],\n index_names=index,\n index_fields=index_fields,\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n column_label_names=column_label_names,\n )\n psdf = DataFrame(internal)\n else:\n column_labels = [tuple(list(values[0]) + [column]) for column in data_columns]\n column_label_names = ([cast(Optional[Name], None)] * len(values[0])) + [columns]\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_columns],\n index_names=index,\n index_fields=index_fields,\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n column_label_names=column_label_names,\n )\n psdf = DataFrame(internal)\n else:\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_columns],\n index_names=index,\n index_fields=index_fields,\n column_label_names=[columns],\n )\n psdf = DataFrame(internal)\n else:\n if isinstance(values, list):\n index_values = values[-1]\n else:\n index_values = values\n index_map: Dict[str, Optional[Label]] = {}\n for i, index_value in enumerate(index_values):\n colname = SPARK_INDEX_NAME_FORMAT(i)\n sdf = sdf.withColumn(colname, SF.lit(index_value))\n index_map[colname] = None\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()],\n index_names=list(index_map.values()),\n column_label_names=[columns],\n )\n psdf = DataFrame(internal)\n\n psdf_columns = psdf.columns\n if isinstance(psdf_columns, pd.MultiIndex):\n psdf.columns = psdf_columns.set_levels(\n psdf_columns.levels[-1].astype( # type: ignore[index]\n spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)\n ),\n level=-1,\n )\n else:\n psdf.columns = psdf_columns.astype(\n spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)\n )\n\n return psdf\n\n def pivot(\n self,\n index: Optional[Name] = None,\n columns: Optional[Name] = None,\n values: Optional[Name] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation.\n\n Parameters\n ----------\n index : string, optional\n Column to use to make new frame's index. 
If None, uses\n existing index.\n columns : string\n Column to use to make new frame's columns.\n values : string, object or a list of the previous\n Column(s) to use for populating new frame's values.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n\n Examples\n --------\n >>> df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',\n ... 'two'],\n ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n ... 'baz': [1, 2, 3, 4, 5, 6],\n ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},\n ... columns=['foo', 'bar', 'baz', 'zoo'])\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n 0 1.0 NaN NaN\n 1 NaN 2.0 NaN\n 2 NaN NaN 3.0\n 3 4.0 NaN NaN\n 4 NaN 5.0 NaN\n 5 NaN NaN 6.0\n\n Notice that, unlike pandas raises an ValueError when duplicated values are found,\n pandas-on-Spark's pivot still works with its first value it meets during operation because\n pivot is an expensive operation and it is preferred to permissively execute over failing\n fast when processing large data.\n\n >>> df = ps.DataFrame({\"foo\": ['one', 'one', 'two', 'two'],\n ... \"bar\": ['A', 'A', 'B', 'C'],\n ... \"baz\": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1.0 NaN NaN\n two NaN 3.0 4.0\n\n It also support multi-index and multi-index column.\n >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')])\n\n >>> df = df.set_index(('a', 'bar'), append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n foo baz\n (a, bar)\n 0 A one 1\n 1 A one 2\n 2 B two 3\n 3 C two 4\n\n >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index()\n ... 
# doctest: +NORMALIZE_WHITESPACE\n ('a', 'foo') one two\n (a, bar)\n 0 A 1.0 NaN\n 1 A 2.0 NaN\n 2 B NaN 3.0\n 3 C NaN 4.0\n\n \"\"\"\n if columns is None:\n raise ValueError(\"columns should be set.\")\n\n if values is None:\n raise ValueError(\"values should be set.\")\n\n should_use_existing_index = index is not None\n if should_use_existing_index:\n df = self\n index_labels = [index]\n else:\n # The index after `reset_index()` will never be used, so use \"distributed\" index\n # as a dummy to avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n df = self.reset_index()\n index_labels = df._internal.column_labels[: self._internal.index_level]\n\n df = df.pivot_table(index=index_labels, columns=columns, values=values, aggfunc=\"first\")\n\n if should_use_existing_index:\n return df\n else:\n internal = df._internal.copy(index_names=self._internal.index_names)\n return DataFrame(internal)\n\n @property\n def columns(self) -> pd.Index:\n \"\"\"The column labels of the DataFrame.\"\"\"\n names = [\n name if name is None or len(name) > 1 else name[0]\n for name in self._internal.column_label_names\n ]\n if self._internal.column_labels_level > 1:\n columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names)\n else:\n columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0])\n return columns\n\n @columns.setter\n def columns(self, columns: Union[pd.Index, List[Name]]) -> None:\n if isinstance(columns, pd.MultiIndex):\n column_labels = columns.tolist()\n else:\n column_labels = [\n col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns\n ]\n\n if len(self._internal.column_labels) != len(column_labels):\n raise ValueError(\n \"Length mismatch: Expected axis has {} elements, \"\n \"new values have {} elements\".format(\n len(self._internal.column_labels), len(column_labels)\n )\n )\n\n column_label_names: Optional[List]\n if isinstance(columns, pd.Index):\n column_label_names = [\n name if is_name_like_tuple(name) else (name,) for name in columns.names\n ]\n else:\n column_label_names = None\n\n pssers = [\n self._psser_for(label).rename(name)\n for label, name in zip(self._internal.column_labels, column_labels)\n ]\n self._update_internal_frame(\n self._internal.with_new_columns(pssers, column_label_names=column_label_names)\n )\n\n @property\n def dtypes(self) -> pd.Series:\n \"\"\"Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n pd.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': list('abc'),\n ... 'b': list(range(1, 4)),\n ... 'c': np.arange(3, 6).astype('i1'),\n ... 'd': np.arange(4.0, 7.0, dtype='float64'),\n ... 'e': [True, False, True],\n ... 'f': pd.date_range('20130101', periods=3)},\n ... 
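# The pivot() implementation above delegates to pivot_table(..., aggfunc="first"), so a
# duplicated index/column pair keeps the first value it meets instead of raising the way
# pandas does. A small sketch under that reading (the frame is hypothetical; assumes
# `import pyspark.pandas as ps` starts a Spark session implicitly):
import pyspark.pandas as ps

dup = ps.DataFrame({"foo": ["one", "one", "two"], "bar": ["A", "A", "B"],
                    "baz": [1, 2, 3]})

# These two calls should go through the same code path and match.
p1 = dup.pivot(index="foo", columns="bar", values="baz")
p2 = dup.pivot_table(index=["foo"], columns="bar", values="baz", aggfunc="first")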
columns=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> df.dtypes\n a object\n b int64\n c int8\n d float64\n e bool\n f datetime64[ns]\n dtype: object\n \"\"\"\n return pd.Series(\n [self._psser_for(label).dtype for label in self._internal.column_labels],\n index=pd.Index(\n [label if len(label) > 1 else label[0] for label in self._internal.column_labels]\n ),\n )\n\n def select_dtypes(\n self,\n include: Optional[Union[str, List[str]]] = None,\n exclude: Optional[Union[str, List[str]]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. At least\n one of these parameters must be supplied. It also takes Spark SQL\n DDL type strings, for instance, 'string' and 'date'.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes()\n Traceback (most recent call last):\n ...\n ValueError: at least one of include or exclude must be nonempty\n\n * If ``include`` and ``exclude`` have overlapping elements\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes(include='a', exclude='a')\n Traceback (most recent call last):\n ...\n ValueError: include and exclude overlap on {'a'}\n\n Notes\n -----\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3,\n ... 
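# A small sketch of the dtypes property defined above: it builds a pandas Series on the
# driver from each column's stored dtype, indexed by the column labels. The frame below
# is hypothetical; assumes `import pyspark.pandas as ps`.
import pyspark.pandas as ps

tiny = ps.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [1.0, 2.0]})
print(tiny.dtypes)        # a: object, b: int64, c: float64
print(tiny.dtypes["b"])   # int64 -- indexable like any pandas Series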
'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])\n >>> df\n a b c d\n 0 1 True 1.0 a\n 1 2 False 2.0 b\n 2 1 True 1.0 a\n 3 2 False 2.0 b\n 4 1 True 1.0 a\n 5 2 False 2.0 b\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'], exclude=['int'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int'])\n b c d\n 0 True 1.0 a\n 1 False 2.0 b\n 2 True 1.0 a\n 3 False 2.0 b\n 4 True 1.0 a\n 5 False 2.0 b\n\n Spark SQL DDL type strings can be used as well.\n\n >>> df.select_dtypes(exclude=['string'])\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n \"\"\"\n from pyspark.sql.types import _parse_datatype_string\n\n include_list: List[str]\n if not is_list_like(include):\n include_list = [cast(str, include)] if include is not None else []\n else:\n include_list = list(include)\n exclude_list: List[str]\n if not is_list_like(exclude):\n exclude_list = [cast(str, exclude)] if exclude is not None else []\n else:\n exclude_list = list(exclude)\n\n if not any((include_list, exclude_list)):\n raise ValueError(\"at least one of include or exclude must be \" \"nonempty\")\n\n # can't both include AND exclude!\n if set(include_list).intersection(set(exclude_list)):\n raise ValueError(\n \"include and exclude overlap on {inc_ex}\".format(\n inc_ex=set(include_list).intersection(set(exclude_list))\n )\n )\n\n # Handle Spark types\n include_spark_type = []\n for inc in include_list:\n try:\n include_spark_type.append(_parse_datatype_string(inc))\n except BaseException:\n pass\n\n exclude_spark_type = []\n for exc in exclude_list:\n try:\n exclude_spark_type.append(_parse_datatype_string(exc))\n except BaseException:\n pass\n\n # Handle pandas types\n include_numpy_type = []\n for inc in include_list:\n try:\n include_numpy_type.append(infer_dtype_from_object(inc))\n except BaseException:\n pass\n\n exclude_numpy_type = []\n for exc in exclude_list:\n try:\n exclude_numpy_type.append(infer_dtype_from_object(exc))\n except BaseException:\n pass\n\n column_labels = []\n for label in self._internal.column_labels:\n if len(include_list) > 0:\n should_include = (\n infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type\n or self._internal.spark_type_for(label) in include_spark_type\n )\n else:\n should_include = not (\n infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type\n or self._internal.spark_type_for(label) in exclude_spark_type\n )\n\n if should_include:\n column_labels.append(label)\n\n return DataFrame(\n self._internal.with_new_columns([self._psser_for(label) for label in column_labels])\n )\n\n def droplevel(\n self, level: Union[int, Name, List[Union[int, Name]]], axis: Axis = 0\n ) -> \"DataFrame\":\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n Parameters\n ----------\n level: int, str, or list-like\n If a string is given, must be the name of a level If list-like, elements must\n be names or positional indexes of levels.\n\n axis: {0 or ‘index’, 1 or ‘columns’}, default 0\n\n Returns\n -------\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[3, 4], [7, 8], [11, 12]],\n ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=[\"a\", \"b\"]),\n ... )\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... 
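# A minimal sketch of select_dtypes as implemented above: each include/exclude entry is
# tried both as a NumPy/pandas dtype and as a Spark SQL DDL type string, and entries
# that parse as neither are silently skipped. Hypothetical frame; assumes
# `import pyspark.pandas as ps`.
import pyspark.pandas as ps

mixed = ps.DataFrame({"i": [1, 2], "f": [1.0, 2.0], "s": ["a", "b"], "b": [True, False]})

mixed.select_dtypes(include=["int64", "bool"])   # NumPy-style dtype names
mixed.select_dtypes(exclude=["string"])          # Spark SQL DDL type string
# Overlapping include/exclude entries raise ValueError, as shown in the docstring above.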
], names=['level_1', 'level_2'])\n\n >>> df # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n axis = validate_axis(axis)\n if axis == 0:\n if not isinstance(level, (tuple, list)): # huh?\n level = [level]\n\n names = self.index.names\n nlevels = self._internal.index_level\n\n int_level = set()\n for n in level:\n if isinstance(n, int):\n if n < 0:\n n = n + nlevels\n if n < 0:\n raise IndexError(\n \"Too many levels: Index has only {} levels, \"\n \"{} is not a valid level number\".format(nlevels, (n - nlevels))\n )\n if n >= nlevels:\n raise IndexError(\n \"Too many levels: Index has only {} levels, not {}\".format(\n nlevels, (n + 1)\n )\n )\n else:\n if n not in names:\n raise KeyError(\"Level {} not found\".format(n))\n n = names.index(n)\n int_level.add(n)\n\n if len(level) >= nlevels:\n raise ValueError(\n \"Cannot remove {} levels from an index with {} levels: \"\n \"at least one level must be left.\".format(len(level), nlevels)\n )\n\n index_spark_columns, index_names, index_fields = zip(\n *[\n item\n for i, item in enumerate(\n zip(\n self._internal.index_spark_columns,\n self._internal.index_names,\n self._internal.index_fields,\n )\n )\n if i not in int_level\n ]\n )\n\n internal = self._internal.copy(\n index_spark_columns=list(index_spark_columns),\n index_names=list(index_names),\n index_fields=list(index_fields),\n )\n return DataFrame(internal)\n else:\n psdf = self.copy()\n psdf.columns = psdf.columns.droplevel(level) # type: ignore[arg-type]\n return psdf\n\n def drop(\n self,\n labels: Optional[Union[Name, List[Name]]] = None,\n axis: Optional[Axis] = 0,\n index: Union[Name, List[Name]] = None,\n columns: Union[Name, List[Name]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Drop specified labels from columns.\n\n Remove rows and/or columns by specifying label names and corresponding axis,\n or by specifying directly index and/or column names.\n Drop rows of a MultiIndex DataFrame is not supported yet.\n\n Parameters\n ----------\n labels : single label or list-like\n Column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionchanged:: 3.3\n Set dropping by index by default.\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is quivalent to ``index=columns``).\n\n .. versionchanged:: 3.3\n Added dropping rows by 'index'.\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n\n Returns\n -------\n dropped : DataFrame\n\n See Also\n --------\n Series.dropna\n\n Examples\n --------\n >>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n >>> df.drop(index=[0, 1], columns='A')\n B C D\n 2 9 10 11\n\n Also support dropping columns for MultiIndex\n\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... 
columns=['x', 'y', 'z', 'w'])\n >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE\n b\n z w\n 0 5 7\n 1 6 8\n\n Notes\n -----\n Currently, dropping rows of a MultiIndex DataFrame is not supported yet.\n \"\"\"\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n axis = validate_axis(axis)\n if axis == 1:\n return self.drop(index=index, columns=labels)\n else:\n return self.drop(index=labels, columns=columns)\n else:\n if index is None and columns is None:\n raise ValueError(\"Need to specify at least one of 'labels' or 'columns' or 'index'\")\n\n internal = self._internal\n if index is not None:\n if is_name_like_tuple(index) or is_name_like_value(index):\n index = [index]\n\n if len(index) > 0:\n if internal.index_level == 1:\n internal = internal.resolved_copy\n\n if len(index) <= ps.get_option(\"compute.isin_limit\"):\n self_index_type = self.index.spark.data_type\n cond = ~internal.index_spark_columns[0].isin(\n [SF.lit(label).cast(self_index_type) for label in index]\n )\n internal = internal.with_filter(cond)\n else:\n index_sdf_col = \"__index\"\n index_sdf = default_session().createDataFrame(\n pd.DataFrame({index_sdf_col: index})\n )\n joined_sdf = internal.spark_frame.join(\n other=F.broadcast(index_sdf),\n on=(\n internal.index_spark_columns[0]\n == scol_for(index_sdf, index_sdf_col)\n ),\n how=\"anti\",\n )\n internal = internal.with_new_sdf(joined_sdf)\n else:\n raise NotImplementedError(\n \"Drop rows of MultiIndex DataFrame is not supported yet\"\n )\n if columns is not None:\n if is_name_like_tuple(columns):\n columns = [columns]\n elif is_name_like_value(columns):\n columns = [(columns,)]\n else:\n columns = [col if is_name_like_tuple(col) else (col,) for col in columns]\n\n if len(columns) > 0:\n drop_column_labels = set(\n label\n for label in internal.column_labels\n for col in columns\n if label[: len(col)] == col\n )\n if len(drop_column_labels) == 0:\n raise KeyError(columns)\n\n keep_columns_and_labels = [\n (column, label)\n for column, label in zip(\n self._internal.data_spark_column_names, self._internal.column_labels\n )\n if label not in drop_column_labels\n ]\n\n cols, labels = (\n zip(*keep_columns_and_labels)\n if len(keep_columns_and_labels) > 0\n else ([], [])\n )\n internal = internal.with_new_columns(\n [self._psser_for(label) for label in labels]\n )\n return DataFrame(internal)\n\n def _prepare_sort_by_scols(self, by: Union[Name, List[Name]]) -> List[Column]:\n if is_name_like_value(by):\n by = [by]\n else:\n assert is_list_like(by), type(by)\n new_by = []\n for colname in by:\n ser = self[colname]\n if not isinstance(ser, ps.Series):\n raise ValueError(\n \"The column %s is not unique. 
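# A minimal sketch of drop() as implemented above: rows can be dropped by index label
# and columns by name in one call; when more labels are passed than the
# "compute.isin_limit" option referenced above, the row filter switches from an isin()
# predicate to a broadcast anti-join. Hypothetical frame; assumes
# `import pyspark.pandas as ps`.
import pyspark.pandas as ps

grid = ps.DataFrame({"A": range(4), "B": range(4), "C": range(4)})

grid.drop(index=[0, 1])              # drop rows by label (single-level index only)
grid.drop(index=[0], columns=["B"])  # drop rows and columns together
grid.drop(columns="C")               # drop a single column

# The isin/anti-join cutoff is the option read via ps.get_option above.
ps.set_option("compute.isin_limit", 1000)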
For a multi-index, the label must be a tuple \"\n \"with elements corresponding to each level.\" % name_like_string(colname)\n )\n new_by.append(ser.spark.column)\n return new_by\n\n def _sort(\n self,\n by: List[Column],\n ascending: Union[bool, List[bool]],\n na_position: str,\n keep: str = \"first\",\n ) -> \"DataFrame\":\n if isinstance(ascending, bool):\n ascending = [ascending] * len(by)\n if len(ascending) != len(by):\n raise ValueError(\n \"Length of ascending ({}) != length of by ({})\".format(len(ascending), len(by))\n )\n if na_position not in (\"first\", \"last\"):\n raise ValueError(\"invalid na_position: '{}'\".format(na_position))\n\n # Mapper: Get a spark column function for (ascending, na_position) combination\n mapper = {\n (True, \"first\"): Column.asc_nulls_first,\n (True, \"last\"): Column.asc_nulls_last,\n (False, \"first\"): Column.desc_nulls_first,\n (False, \"last\"): Column.desc_nulls_last,\n }\n by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]\n\n natural_order_scol = F.col(NATURAL_ORDER_COLUMN_NAME)\n\n if keep == \"last\":\n natural_order_scol = Column.desc(natural_order_scol)\n elif keep == \"all\":\n raise NotImplementedError(\"`keep`=all is not implemented yet.\")\n elif keep != \"first\":\n raise ValueError('keep must be either \"first\", \"last\" or \"all\".')\n sdf = self._internal.resolved_copy.spark_frame.sort(*by, natural_order_scol)\n return DataFrame(self._internal.with_new_sdf(sdf))\n\n def sort_values(\n self,\n by: Union[Name, List[Name]],\n ascending: Union[bool, List[bool]] = True,\n inplace: bool = False,\n na_position: str = \"last\",\n ignore_index: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of str\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n if True, perform operation in-place\n na_position : {'first', 'last'}, default 'last'\n `first` puts NaNs at the beginning, `last` puts NaNs at the end\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 9, 8, 7, 4],\n ... 'col3': [0, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'],\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> df\n col1 col2 col3\n a A 2 0\n b B 9 9\n c None 8 4\n d D 7 2\n e C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n a A 2 0\n b B 9 9\n e C 4 3\n d D 7 2\n c None 8 4\n\n Ignore index for the resulting axis\n\n >>> df.sort_values(by=['col1'], ignore_index=True)\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 2 C 4 3\n 3 D 7 2\n 4 None 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n d D 7 2\n e C 4 3\n b B 9 9\n a A 2 0\n c None 8 4\n\n Sort by multiple columns\n\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... },\n ... 
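# A minimal sketch of the (ascending, na_position) mapping used by _sort above: each
# combination selects one of Spark's asc_nulls_first / asc_nulls_last /
# desc_nulls_first / desc_nulls_last orderings, and keep='all' is not implemented.
# Hypothetical frame; assumes `import pyspark.pandas as ps`.
import pyspark.pandas as ps

scores = ps.DataFrame({"name": ["a", "b", None, "d"], "score": [3, 1, 2, 1]})

# One ascending flag per sort column; missing values placed first.
scores.sort_values(by=["name", "score"], ascending=[True, False], na_position="first")

# Descending sort with missing values last (the default na_position).
scores.sort_values(by="score", ascending=False)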
columns=['col1', 'col2', 'col3'])\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 None 8 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n new_by = self._prepare_sort_by_scols(by)\n\n psdf = self._sort(by=new_by, ascending=ascending, na_position=na_position)\n\n if inplace:\n if ignore_index:\n psdf.reset_index(drop=True, inplace=inplace)\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf.reset_index(drop=True) if ignore_index else psdf\n\n def sort_index(\n self,\n axis: Axis = 0,\n level: Optional[Union[int, List[int]]] = None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = None,\n na_position: str = \"last\",\n ignore_index: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n axis : index, columns to direct sorting. Currently, only axis = 0 is supported.\n level : int or level name or list of ints or list of level names\n if not None, sort on values in specified index level(s)\n ascending : boolean, default True\n Sort ascending vs. descending\n inplace : bool, default False\n if True, perform operation in-place\n kind : str, default None\n pandas-on-Spark does not allow specifying the sorting algorithm at the moment,\n default None\n na_position : {‘first’, ‘last’}, default ‘last’\n first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for\n MultiIndex.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 3.4.0\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])\n\n >>> df.sort_index()\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df.sort_index(ascending=False)\n A\n b 2.0\n a 1.0\n NaN NaN\n\n >>> df.sort_index(na_position='first')\n A\n NaN NaN\n a 1.0\n b 2.0\n\n >>> df.sort_index(ignore_index=True)\n A\n 0 1.0\n 1 2.0\n 2 NaN\n\n >>> df.sort_index(inplace=True)\n >>> df\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]},\n ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],\n ... 
columns=['A', 'B'])\n\n >>> df.sort_index()\n A B\n a 0 3 0\n 1 2 1\n b 0 1 2\n 1 0 3\n\n >>> df.sort_index(level=1) # doctest: +SKIP\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n\n >>> df.sort_index(level=[1, 0])\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n\n >>> df.sort_index(ignore_index=True)\n A B\n 0 3 0\n 1 2 1\n 2 1 2\n 3 0 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError(\"No other axis than 0 are supported at the moment\")\n if kind is not None:\n raise NotImplementedError(\n \"Specifying the sorting algorithm is not supported at the moment.\"\n )\n\n if level is None or (is_list_like(level) and len(level) == 0): # type: ignore[arg-type]\n by = self._internal.index_spark_columns\n elif is_list_like(level):\n by = [\n self._internal.index_spark_columns[lvl] for lvl in level # type: ignore[union-attr]\n ]\n else:\n by = [self._internal.index_spark_columns[level]] # type: ignore[index]\n\n psdf = self._sort(by=by, ascending=ascending, na_position=na_position)\n if inplace:\n if ignore_index:\n psdf.reset_index(drop=True, inplace=inplace)\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf.reset_index(drop=True) if ignore_index else psdf\n\n def swaplevel(\n self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, axis: Axis = 0\n ) -> \"DataFrame\":\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n DataFrame with levels swapped in MultiIndex.\n\n Examples\n --------\n >>> midx = pd.MultiIndex.from_arrays(\n ... 
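# A minimal sketch of the sort_index() level handling above: `level` may be a single
# position or a list of positions into the index levels; axis=1 and the `kind` argument
# are not supported. Hypothetical frame; assumes `import pyspark.pandas as ps` and
# pandas as pd.
import pandas as pd
import pyspark.pandas as ps

mi = ps.DataFrame({"v": [1, 2, 3, 4]},
                  index=pd.MultiIndex.from_tuples([("b", 1), ("b", 0), ("a", 1), ("a", 0)]))

mi.sort_index()              # sort by all index levels
mi.sort_index(level=[1, 0])  # sort by the second level, then the first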
[['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size'])\n >>> midx # doctest: +SKIP\n MultiIndex([( 'red', 1, 's'),\n ('blue', 2, 'm')],\n names=['color', 'number', 'size'])\n\n Swap levels in a MultiIndex on index.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx)\n >>> psdf # doctest: +NORMALIZE_WHITESPACE\n x y\n color number size\n red 1 s 5 5\n blue 2 m 6 6\n\n >>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n >>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE\n x y\n number color size\n 1 red s 5 5\n 2 blue m 6 6\n\n >>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n Swap levels in a MultiIndex on columns.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]})\n >>> psdf.columns = midx\n >>> psdf\n color red blue\n number 1 2\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(0, 1, axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel('number', 'color', axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n \"\"\"\n axis = validate_axis(axis)\n if axis == 0:\n internal = self._swaplevel_index(i, j)\n else:\n assert axis == 1\n internal = self._swaplevel_columns(i, j)\n\n return DataFrame(internal)\n\n def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> \"DataFrame\":\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Parameters\n ----------\n i: {0 or 'index', 1 or 'columns'}. The axis to swap.\n j: {0 or 'index', 1 or 'columns'}. The axis to swap.\n copy : bool, default True.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> psdf = ps.DataFrame(\n ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c']\n ... 
)\n >>> psdf\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n >>> psdf.swapaxes(i=1, j=0)\n x y z\n a 1 4 7\n b 2 5 8\n c 3 6 9\n >>> psdf.swapaxes(i=1, j=1)\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n \"\"\"\n assert copy is True\n\n i = validate_axis(i)\n j = validate_axis(j)\n\n return self.copy() if i == j else self.transpose()\n\n def _swaplevel_columns(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame:\n assert isinstance(self.columns, pd.MultiIndex)\n for index in (i, j):\n if not isinstance(index, int) and index not in self.columns.names:\n raise KeyError(\"Level %s not found\" % index)\n\n i = i if isinstance(i, int) else self.columns.names.index(i)\n j = j if isinstance(j, int) else self.columns.names.index(j)\n for index in (i, j):\n if index >= len(self.columns) or index < -len(self.columns):\n raise IndexError(\n \"Too many levels: Columns have only %s levels, \"\n \"%s is not a valid level number\" % (self._internal.index_level, index)\n )\n\n column_label_names = self._internal.column_label_names.copy()\n column_label_names[i], column_label_names[j], = (\n column_label_names[j],\n column_label_names[i],\n )\n column_labels = self._internal._column_labels\n column_label_list = [list(label) for label in column_labels]\n for label_list in column_label_list:\n label_list[i], label_list[j] = label_list[j], label_list[i]\n column_labels = [tuple(x) for x in column_label_list]\n internal = self._internal.copy(\n column_label_names=list(column_label_names), column_labels=list(column_labels)\n )\n return internal\n\n def _swaplevel_index(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame:\n assert isinstance(self.index, ps.MultiIndex)\n for index in (i, j):\n if not isinstance(index, int) and index not in self.index.names:\n raise KeyError(\"Level %s not found\" % index)\n\n i = i if isinstance(i, int) else self.index.names.index(i)\n j = j if isinstance(j, int) else self.index.names.index(j)\n for index in (i, j):\n if index >= self._internal.index_level or index < -self._internal.index_level:\n raise IndexError(\n \"Too many levels: Index has only %s levels, \"\n \"%s is not a valid level number\" % (self._internal.index_level, index)\n )\n\n index_map = list(\n zip(\n self._internal.index_spark_columns,\n self._internal.index_names,\n self._internal.index_fields,\n )\n )\n index_map[i], index_map[j] = index_map[j], index_map[i]\n index_spark_columns, index_names, index_fields = zip(*index_map)\n internal = self._internal.copy(\n index_spark_columns=list(index_spark_columns),\n index_names=list(index_names),\n index_fields=list(index_fields),\n )\n return internal\n\n def nlargest(\n self, n: int, columns: Union[Name, List[Name]], keep: str = \"first\"\n ) -> \"DataFrame\":\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant in pandas.\n In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {'first', 'last'}, default 'first'. 
'all' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"X\".\n\n >>> df.nlargest(n=3, columns='X')\n X Y\n 5 7.0 11\n 4 6.0 10\n 3 5.0 9\n\n To order by the largest values in column \"Y\" and then \"X\", we can\n specify multiple columns like in the next example.\n\n >>> df.nlargest(n=3, columns=['Y', 'X'])\n X Y\n 6 NaN 12\n 5 7.0 11\n 4 6.0 10\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({'X': [1, 2, 2, 3, 3]}, index=['a', 'b', 'c', 'd', 'e'])\n >>> tied_df\n X\n a 1\n b 2\n c 2\n d 3\n e 3\n\n When using keep='first' (by default), ties are resolved in order:\n\n >>> tied_df.nlargest(3, 'X')\n X\n d 3\n e 3\n b 2\n\n >>> tied_df.nlargest(3, 'X', keep='first')\n X\n d 3\n e 3\n b 2\n\n When using keep='last', ties are resolved in reverse order:\n\n >>> tied_df.nlargest(3, 'X', keep='last')\n X\n e 3\n d 3\n c 2\n \"\"\"\n by_scols = self._prepare_sort_by_scols(columns)\n return self._sort(by=by_scols, ascending=False, na_position=\"last\", keep=keep).head(n=n)\n\n def nsmallest(\n self, n: int, columns: Union[Name, List[Name]], keep: str = \"first\"\n ) -> \"DataFrame\":\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,\n but more performant. In pandas-on-Spark, thanks to Spark's lazy execution and query\n optimizer, the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 
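# A minimal sketch of nlargest/nsmallest as implemented above: both are a sort on the
# requested columns followed by head(n), with `keep` deciding how ties are ordered
# ('all' is not implemented). Hypothetical frame; assumes `import pyspark.pandas as ps`.
import pyspark.pandas as ps

vals = ps.DataFrame({"X": [1, 2, 2, 3, 3], "Y": [10, 20, 30, 40, 50]})

vals.nlargest(2, columns="X")                # ties resolved in natural row order
vals.nlargest(2, columns="X", keep="last")   # ties resolved in reverse order
vals.nsmallest(2, columns=["X", "Y"])        # order by X, then Y, ascending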
'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"X\".\n\n >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n To order by the smallest values in column \"Y\" and then \"X\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({'X': [1, 1, 2, 2, 3]}, index=['a', 'b', 'c', 'd', 'e'])\n >>> tied_df\n X\n a 1\n b 1\n c 2\n d 2\n e 3\n\n When using keep='first' (by default), ties are resolved in order:\n\n >>> tied_df.nsmallest(3, 'X')\n X\n a 1\n b 1\n c 2\n\n >>> tied_df.nsmallest(3, 'X', keep='first')\n X\n a 1\n b 1\n c 2\n\n When using keep='last', ties are resolved in reverse order:\n\n >>> tied_df.nsmallest(3, 'X', keep='last')\n X\n b 1\n a 1\n d 2\n \"\"\"\n by_scols = self._prepare_sort_by_scols(columns)\n return self._sort(by=by_scols, ascending=True, na_position=\"last\", keep=keep).head(n=n)\n\n def isin(self, values: Union[List, Dict]) -> \"DataFrame\":\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column names, which must match.\n Series and DataFrame are not supported.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'],\n ... 
columns=['num_legs', 'num_wings'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n \"\"\"\n if isinstance(values, (pd.DataFrame, pd.Series)):\n raise NotImplementedError(\"DataFrame and Series are not supported\")\n if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):\n raise AttributeError(\n \"'DataFrame' object has no attribute %s\"\n % (set(values.keys()).difference(self.columns))\n )\n\n data_spark_columns = []\n if isinstance(values, dict):\n for i, col in enumerate(self.columns):\n if col in values:\n item = values[col]\n item = item.tolist() if isinstance(item, np.ndarray) else list(item)\n\n scol = self._internal.spark_column_for(self._internal.column_labels[i]).isin(\n [SF.lit(v) for v in item]\n )\n scol = F.coalesce(scol, F.lit(False))\n else:\n scol = SF.lit(False)\n data_spark_columns.append(scol.alias(self._internal.data_spark_column_names[i]))\n elif is_list_like(values):\n values = (\n cast(np.ndarray, values).tolist()\n if isinstance(values, np.ndarray)\n else list(values)\n )\n\n for label in self._internal.column_labels:\n scol = self._internal.spark_column_for(label).isin([SF.lit(v) for v in values])\n scol = F.coalesce(scol, F.lit(False))\n data_spark_columns.append(scol.alias(self._internal.spark_column_name_for(label)))\n else:\n raise TypeError(\"Values should be iterable, Series, DataFrame or dict.\")\n\n return DataFrame(\n self._internal.with_new_columns(\n data_spark_columns,\n data_fields=[\n field.copy(dtype=np.dtype(\"bool\"), spark_type=BooleanType(), nullable=False)\n for field in self._internal.data_fields\n ],\n )\n )\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self), len(self.columns)\n\n def merge(\n self,\n right: \"DataFrame\",\n how: str = \"inner\",\n on: Optional[Union[Name, List[Name]]] = None,\n left_on: Optional[Union[Name, List[Name]]] = None,\n right_on: Optional[Union[Name, List[Name]]] = None,\n left_index: bool = False,\n right_index: bool = False,\n suffixes: Tuple[str, str] = (\"_x\", \"_y\"),\n ) -> \"DataFrame\":\n \"\"\"\n Merge DataFrame objects with a database-style join.\n\n The index of the resulting DataFrame will be one of the following:\n - 0...n if no index is used for merging\n - Index of the left DataFrame if merged only on the index of the right DataFrame\n - Index of the right DataFrame if merged only on the index of the left DataFrame\n - All involved indices if merged using the indices of both DataFrames\n e.g. 
if `left` with indices (a, x) and `right` with indices (b, x), the result will\n be an index (x, a, b)\n\n Parameters\n ----------\n right: Object to merge with.\n how: Type of merge to be performed.\n {'left', 'right', 'outer', 'inner'}, default 'inner'\n\n left: use only keys from left frame, similar to a SQL left outer join; not preserve\n key order unlike pandas.\n right: use only keys from right frame, similar to a SQL right outer join; not preserve\n key order unlike pandas.\n outer: use union of keys from both frames, similar to a SQL full outer join; sort keys\n lexicographically.\n inner: use intersection of keys from both frames, similar to a SQL inner join;\n not preserve the order of the left keys unlike pandas.\n on: Column or index level names to join on. These must be found in both DataFrames. If on\n is None and not merging on indexes then this defaults to the intersection of the\n columns in both DataFrames.\n left_on: Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\n right_on: Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\n left_index: Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index or a number of\n columns) must match the number of levels.\n right_index: Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\n suffixes: Suffix to apply to overlapping column names in the left and right side,\n respectively.\n\n Returns\n -------\n DataFrame\n A DataFrame of the two merged objects.\n\n See Also\n --------\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [1, 2, 3, 5]},\n ... columns=['lkey', 'value'])\n >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [5, 6, 7, 8]},\n ... columns=['rkey', 'value'])\n >>> df1\n lkey value\n 0 foo 1\n 1 bar 2\n 2 baz 3\n 3 foo 5\n >>> df2\n rkey value\n 0 foo 5\n 1 bar 6\n 2 baz 7\n 3 foo 8\n\n Merge df1 and df2 on the lkey and rkey columns. 
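# A minimal sketch of the merge() parameters described above: join keys via `on` or
# `left_on`/`right_on`, the join type via `how`, and `suffixes` applied to overlapping
# non-key column names. Hypothetical frames; assumes `import pyspark.pandas as ps`.
import pyspark.pandas as ps

left = ps.DataFrame({"key": ["a", "b", "c"], "v": [1, 2, 3]})
right = ps.DataFrame({"key": ["b", "c", "d"], "v": [20, 30, 40]})

left.merge(right, on="key", how="inner", suffixes=("_l", "_r"))  # v_l / v_r columns
left.merge(right, left_on="key", right_on="key", how="outer")    # union of keys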
The value columns have\n the default suffixes, _x and _y, appended.\n\n >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')\n >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS\n lkey value_x rkey value_y\n ...bar 2 bar 6\n ...baz 3 baz 7\n ...foo 1 foo 5\n ...foo 1 foo 8\n ...foo 5 foo 5\n ...foo 5 foo 8\n\n >>> left_psdf = ps.DataFrame({'A': [1, 2]})\n >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index()\n A B\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index()\n A B\n 0 1 None\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index()\n A B\n 1 2.0 x\n 2 NaN y\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index()\n A B\n 0 1.0 None\n 1 2.0 x\n 2 NaN y\n\n Notes\n -----\n As described in #263, joining string columns currently returns None for missing values\n instead of NaN.\n \"\"\"\n\n def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]:\n if os is None:\n return []\n elif is_name_like_tuple(os):\n return [cast(Label, os)]\n elif is_name_like_value(os):\n return [(os,)]\n else:\n return [o if is_name_like_tuple(o) else (o,) for o in os]\n\n if isinstance(right, ps.Series):\n right = right.to_frame()\n\n if on:\n if left_on or right_on:\n raise ValueError(\n 'Can only pass argument \"on\" OR \"left_on\" and \"right_on\", '\n \"not a combination of both.\"\n )\n left_key_names = list(map(self._internal.spark_column_name_for, to_list(on)))\n right_key_names = list(map(right._internal.spark_column_name_for, to_list(on)))\n else:\n # TODO: need special handling for multi-index.\n if left_index:\n left_key_names = self._internal.index_spark_column_names\n else:\n left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on)))\n if right_index:\n right_key_names = right._internal.index_spark_column_names\n else:\n right_key_names = list(\n map(right._internal.spark_column_name_for, to_list(right_on))\n )\n\n if left_key_names and not right_key_names:\n raise ValueError(\"Must pass right_on or right_index=True\")\n if right_key_names and not left_key_names:\n raise ValueError(\"Must pass left_on or left_index=True\")\n if not left_key_names and not right_key_names:\n common = list(self.columns.intersection(right.columns))\n if len(common) == 0:\n raise ValueError(\n \"No common columns to perform merge on. 
Merge options: \"\n \"left_on=None, right_on=None, left_index=False, right_index=False\"\n )\n left_key_names = list(map(self._internal.spark_column_name_for, to_list(common)))\n right_key_names = list(map(right._internal.spark_column_name_for, to_list(common)))\n if len(left_key_names) != len(right_key_names):\n raise ValueError(\"len(left_keys) must equal len(right_keys)\")\n\n # We should distinguish the name to avoid ambiguous column name after merging.\n right_prefix = \"__right_\"\n right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names]\n\n how = validate_how(how)\n\n def resolve(internal: InternalFrame, side: str) -> InternalFrame:\n def rename(col: str) -> str:\n return \"__{}_{}\".format(side, col)\n\n internal = internal.resolved_copy\n sdf = internal.spark_frame\n sdf = sdf.select(\n *[\n scol_for(sdf, col).alias(rename(col))\n for col in sdf.columns\n if col not in HIDDEN_COLUMNS\n ],\n *HIDDEN_COLUMNS,\n )\n return internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, rename(col)) for col in internal.index_spark_column_names\n ],\n index_fields=[\n field.copy(name=rename(field.name)) for field in internal.index_fields\n ],\n data_spark_columns=[\n scol_for(sdf, rename(col)) for col in internal.data_spark_column_names\n ],\n data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],\n )\n\n left_internal = self._internal.resolved_copy\n right_internal = resolve(right._internal, \"right\")\n\n left_table = left_internal.spark_frame.alias(\"left_table\")\n right_table = right_internal.spark_frame.alias(\"right_table\")\n\n left_key_columns = [scol_for(left_table, label) for label in left_key_names]\n right_key_columns = [scol_for(right_table, label) for label in right_key_names]\n\n join_condition = reduce(\n lambda x, y: x & y,\n [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)],\n )\n\n joined_table = left_table.join(right_table, join_condition, how=how)\n\n # Unpack suffixes tuple for convenience\n left_suffix = suffixes[0]\n right_suffix = suffixes[1]\n\n # Append suffixes to columns with the same name to avoid conflicts later\n duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels)\n\n exprs = []\n data_columns = []\n column_labels = []\n\n def left_scol_for(label: Label) -> Column:\n return scol_for(left_table, left_internal.spark_column_name_for(label))\n\n def right_scol_for(label: Label) -> Column:\n return scol_for(right_table, right_internal.spark_column_name_for(label))\n\n for label in left_internal.column_labels:\n col = left_internal.spark_column_name_for(label)\n scol = left_scol_for(label)\n if label in duplicate_columns:\n spark_column_name = left_internal.spark_column_name_for(label)\n if (\n spark_column_name in left_key_names\n and (right_prefix + spark_column_name) in right_key_names\n ):\n right_scol = right_scol_for(label)\n if how == \"right\":\n scol = right_scol.alias(col)\n elif how == \"full\":\n scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)\n else:\n pass\n else:\n col = col + left_suffix\n scol = scol.alias(col)\n label = tuple([str(label[0]) + left_suffix] + list(label[1:]))\n exprs.append(scol)\n data_columns.append(col)\n column_labels.append(label)\n for label in right_internal.column_labels:\n # recover `right_prefix` here.\n col = right_internal.spark_column_name_for(label)[len(right_prefix) :]\n scol = right_scol_for(label).alias(col)\n if label in duplicate_columns:\n 
spark_column_name = left_internal.spark_column_name_for(label)\n if (\n spark_column_name in left_key_names\n and (right_prefix + spark_column_name) in right_key_names\n ):\n continue\n else:\n col = col + right_suffix\n scol = scol.alias(col)\n label = tuple([str(label[0]) + right_suffix] + list(label[1:]))\n exprs.append(scol)\n data_columns.append(col)\n column_labels.append(label)\n\n left_index_scols = left_internal.index_spark_columns\n right_index_scols = right_internal.index_spark_columns\n\n # Retain indices if they are used for joining\n if left_index:\n if right_index:\n if how in (\"inner\", \"left\"):\n exprs.extend(left_index_scols)\n index_spark_column_names = left_internal.index_spark_column_names\n index_names = left_internal.index_names\n elif how == \"right\":\n exprs.extend(right_index_scols)\n index_spark_column_names = right_internal.index_spark_column_names\n index_names = right_internal.index_names\n else:\n index_spark_column_names = left_internal.index_spark_column_names\n index_names = left_internal.index_names\n for col, left_scol, right_scol in zip(\n index_spark_column_names, left_index_scols, right_index_scols\n ):\n scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)\n exprs.append(scol.alias(col))\n else:\n exprs.extend(right_index_scols)\n index_spark_column_names = right_internal.index_spark_column_names\n index_names = right_internal.index_names\n elif right_index:\n exprs.extend(left_index_scols)\n index_spark_column_names = left_internal.index_spark_column_names\n index_names = left_internal.index_names\n else:\n index_spark_column_names = []\n index_names = []\n\n selected_columns = joined_table.select(*exprs)\n\n internal = InternalFrame(\n spark_frame=selected_columns,\n index_spark_columns=[\n scol_for(selected_columns, col) for col in index_spark_column_names\n ],\n index_names=index_names,\n column_labels=column_labels,\n data_spark_columns=[scol_for(selected_columns, col) for col in data_columns],\n )\n return DataFrame(internal)\n\n def join(\n self,\n right: \"DataFrame\",\n on: Optional[Union[Name, List[Name]]] = None,\n how: str = \"left\",\n lsuffix: str = \"\",\n rsuffix: str = \"\",\n ) -> \"DataFrame\":\n \"\"\"\n Join columns of another DataFrame.\n\n Join columns with `right` DataFrame either on index or on a key column. Efficiently join\n multiple DataFrame objects by index at once by passing a list.\n\n Parameters\n ----------\n right: DataFrame, Series\n on: str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index in `right`, otherwise\n joins index-on-index. If multiple values given, the `right` DataFrame must have a\n MultiIndex. Can pass an array as the join key if it is not already contained in the\n calling DataFrame. Like an Excel VLOOKUP operation.\n how: {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use `left` frame’s index (or column if on is specified).\n * right: use `right`’s index.\n * outer: form union of `left` frame’s index (or column if on is specified) with\n right’s index, and sort it. 
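# A minimal sketch of the index handling implemented just above: when both left_index
# and right_index are used, 'inner'/'left' keep the left index, 'right' keeps the right
# index, and 'outer' coalesces the two; merging on only one side's index keeps the other
# side's index. Hypothetical frames; assumes `import pyspark.pandas as ps`.
import pyspark.pandas as ps

a = ps.DataFrame({"A": [1, 2]}, index=[0, 1])
b = ps.DataFrame({"B": ["x", "y"]}, index=[1, 2])

a.merge(b, left_index=True, right_index=True, how="outer")  # index = coalesced union
a.merge(b, left_on="A", right_index=True, how="left")       # keeps the left frame's index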
lexicographically.\n * inner: form intersection of `left` frame’s index (or column if on is specified)\n with `right`’s index, preserving the order of the `left`’s one.\n lsuffix : str, default ''\n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default ''\n Suffix to use from `right` frame's overlapping columns.\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the `left` and `right`.\n\n See Also\n --------\n DataFrame.merge: For column(s)-on-columns(s) operations.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Notes\n -----\n Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame\n objects.\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n ... 'A': ['A0', 'A1', 'A2', 'A3']},\n ... columns=['key', 'A'])\n >>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']},\n ... columns=['key', 'B'])\n >>> psdf1\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n >>> psdf2\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right')\n >>> join_psdf.sort_values(by=join_psdf.columns)\n key_left A key_right B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 None None\n\n If we want to join using the key columns, we need to set key to be the index in both df and\n right. The joined DataFrame will have key as its index.\n\n >>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key'))\n >>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 None\n\n Another option to join using the key columns is to use the on parameter. DataFrame.join\n always uses right’s index but we can use any column in df. This method not preserve the\n original DataFrame’s index in the result unlike pandas.\n\n >>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key')\n >>> join_psdf.index\n Int64Index([0, 1, 2, 3], dtype='int64')\n \"\"\"\n if isinstance(right, ps.Series):\n common = list(self.columns.intersection([right.name]))\n else:\n common = list(self.columns.intersection(right.columns))\n if len(common) > 0 and not lsuffix and not rsuffix:\n raise ValueError(\n \"columns overlap but no suffix specified: \" \"{rename}\".format(rename=common)\n )\n\n need_set_index = False\n if on:\n if not is_list_like(on):\n on = [on]\n if len(on) != right._internal.index_level:\n raise ValueError(\n 'len(left_on) must equal the number of levels in the index of \"right\"'\n )\n\n need_set_index = len(set(on) & set(self.index.names)) == 0\n if need_set_index:\n self = self.set_index(on)\n join_psdf = self.merge(\n right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix)\n )\n return join_psdf.reset_index() if need_set_index else join_psdf\n\n def combine_first(self, other: \"DataFrame\") -> \"DataFrame\":\n \"\"\"\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n .. 
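# A minimal sketch of join() as implemented above: overlapping column names require
# lsuffix/rsuffix, and passing `on=` joins that column against `right`'s index (the
# result's index is then reset rather than preserved, unlike pandas). Hypothetical
# frames; assumes `import pyspark.pandas as ps`.
import pyspark.pandas as ps

d1 = ps.DataFrame({"key": ["K0", "K1"], "A": ["A0", "A1"]})
d2 = ps.DataFrame({"key": ["K0", "K2"], "B": ["B0", "B2"]})

d1.join(d2, lsuffix="_l", rsuffix="_r")   # index-on-index join, 'key' gets suffixes
d1.join(d2.set_index("key"), on="key")    # column-on-index join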
versionadded:: 3.3.0\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> ps.set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [None, 4]})\n >>> df2 = ps.DataFrame({'A': [1, 1], 'B': [3, 3]})\n\n >>> df1.combine_first(df2).sort_index()\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value does not exist in other\n\n >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [4, None]})\n >>> df2 = ps.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])\n\n >>> df1.combine_first(df2).sort_index()\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n >>> ps.reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(other, DataFrame):\n raise TypeError(\"`combine_first` only allows `DataFrame` for parameter `other`\")\n if same_anchor(self, other):\n combined = self\n this = self\n that = other\n else:\n combined = combine_frames(self, other)\n this = combined[\"this\"]\n that = combined[\"that\"]\n\n intersect_column_labels = set(self._internal.column_labels).intersection(\n set(other._internal.column_labels)\n )\n\n column_labels, data_spark_columns = [], []\n for column_label in this._internal.column_labels:\n this_scol = this._internal.spark_column_for(column_label)\n if column_label in intersect_column_labels:\n that_scol = that._internal.spark_column_for(column_label)\n this_scol_name = this._internal.spark_column_name_for(column_label)\n combined_scol = (\n F.when(this_scol.isNull(), that_scol).otherwise(this_scol).alias(this_scol_name)\n )\n data_spark_columns.append(combined_scol)\n else:\n data_spark_columns.append(this_scol)\n column_labels.append(column_label)\n\n for column_label in that._internal.column_labels:\n if column_label not in intersect_column_labels:\n that_scol = that._internal.spark_column_for(column_label)\n data_spark_columns.append(that_scol)\n column_labels.append(column_label)\n\n internal = combined._internal.copy(\n column_labels=column_labels,\n data_spark_columns=data_spark_columns,\n data_fields=None, # TODO: dtype?\n column_label_names=self._internal.column_label_names,\n )\n return DataFrame(internal)\n\n def append(\n self,\n other: \"DataFrame\",\n ignore_index: bool = False,\n verify_integrity: bool = False,\n sort: bool = False,\n ) -> \"DataFrame\":\n \"\"\"\n Append rows of other to the end of caller, returning a new object.\n\n Columns in other that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n\n ignore_index : boolean, default False\n If True, do not use the index labels.\n\n verify_integrity : boolean, default False\n If True, raise ValueError on creating index with duplicates.\n\n sort : boolean, default False\n Currently not supported.\n\n Returns\n -------\n appended : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n\n >>> df.append(df)\n A B\n 0 1 2\n 1 3 4\n 0 1 2\n 1 3 4\n\n >>> df.append(df, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 1 2\n 3 3 4\n \"\"\"\n if isinstance(other, ps.Series):\n raise TypeError(\"DataFrames.append() does not support appending Series to DataFrames\")\n if sort:\n raise NotImplementedError(\"The 'sort' parameter is currently not supported\")\n\n if not ignore_index:\n index_scols = self._internal.index_spark_columns\n if len(index_scols) != 
other._internal.index_level:\n raise ValueError(\"Both DataFrames have to have the same number of index levels\")\n\n if verify_integrity and len(index_scols) > 0:\n if (\n self._internal.spark_frame.select(index_scols)\n .intersect(\n other._internal.spark_frame.select(other._internal.index_spark_columns)\n )\n .count()\n ) > 0:\n raise ValueError(\"Indices have overlapping values\")\n\n # Lazy import to avoid circular dependency issues\n from pyspark.pandas.namespace import concat\n\n return cast(DataFrame, concat([self, other], ignore_index=ignore_index))\n\n # TODO: add 'filter_func' and 'errors' parameter\n def update(self, other: \"DataFrame\", join: str = \"left\", overwrite: bool = True) -> None:\n \"\"\"\n Modify in place using non-NA values from another DataFrame.\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or Series\n join : 'left', default 'left'\n Only left join is implemented, keeping the index and columns of the original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values with values from `other`.\n * False: only update values that are NA in the original DataFrame.\n\n Returns\n -------\n None : method directly changes calling object\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-columns(s) operations.\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, it's name attribute must be set.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b y\n 2 c e\n\n If `other` contains None the corresponding values are not updated in the original dataframe.\n\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n \"\"\"\n if join != \"left\":\n raise NotImplementedError(\"Only left join is supported\")\n\n if isinstance(other, ps.Series):\n other = other.to_frame()\n\n update_columns = list(\n set(self._internal.column_labels).intersection(set(other._internal.column_labels))\n )\n update_sdf = self.join(\n other[update_columns], rsuffix=\"_new\"\n )._internal.resolved_copy.spark_frame\n\n data_fields = self._internal.data_fields.copy()\n for column_labels in update_columns:\n column_name = self._internal.spark_column_name_for(column_labels)\n old_col = scol_for(update_sdf, column_name)\n new_col = scol_for(\n update_sdf, other._internal.spark_column_name_for(column_labels) + \"_new\"\n )\n if 
overwrite:\n update_sdf = update_sdf.withColumn(\n column_name, F.when(new_col.isNull(), old_col).otherwise(new_col)\n )\n else:\n update_sdf = update_sdf.withColumn(\n column_name, F.when(old_col.isNull(), new_col).otherwise(old_col)\n )\n data_fields[self._internal.column_labels.index(column_labels)] = None\n sdf = update_sdf.select(\n *[scol_for(update_sdf, col) for col in self._internal.spark_column_names],\n *HIDDEN_COLUMNS,\n )\n internal = self._internal.with_new_sdf(sdf, data_fields=data_fields)\n self._update_internal_frame(internal, requires_same_anchor=False)\n\n # TODO: ddof should be implemented.\n def cov(self, min_periods: Optional[int] = None) -> \"DataFrame\":\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n `__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n .. versionadded:: 3.3.0\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = ps.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... 
columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> sdf = ps.from_pandas(df)\n >>> sdf.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n \"\"\"\n min_periods = 1 if min_periods is None else min_periods\n\n # Only compute covariance for Boolean and Numeric except Decimal\n psdf = self[\n [\n col\n for col in self.columns\n if isinstance(self[col].spark.data_type, BooleanType)\n or (\n isinstance(self[col].spark.data_type, NumericType)\n and not isinstance(self[col].spark.data_type, DecimalType)\n )\n ]\n ]\n\n num_cols = len(psdf.columns)\n cov = np.zeros([num_cols, num_cols])\n\n if num_cols == 0:\n return DataFrame()\n\n if len(psdf) < min_periods:\n cov.fill(np.nan)\n return DataFrame(cov, columns=psdf.columns, index=psdf.columns)\n\n data_cols = psdf._internal.data_spark_column_names\n cov_scols = []\n count_not_null_scols = []\n\n # Count number of null row between two columns\n # Example:\n # a b c\n # 0 1 1 1\n # 1 NaN 2 2\n # 2 3 NaN 3\n # 3 4 4 4\n #\n # a b c\n # a count(a, a) count(a, b) count(a, c)\n # b count(b, b) count(b, c)\n # c count(c, c)\n #\n # count_not_null_scols =\n # [F.count(a, a), F.count(a, b), F.count(a, c), F.count(b, b), F.count(b, c), F.count(c, c)]\n for r in range(0, num_cols):\n for c in range(r, num_cols):\n count_not_null_scols.append(\n F.count(\n F.when(F.col(data_cols[r]).isNotNull() & F.col(data_cols[c]).isNotNull(), 1)\n )\n )\n\n count_not_null = (\n psdf._internal.spark_frame.replace(float(\"nan\"), None)\n .select(*count_not_null_scols)\n .head(1)[0]\n )\n\n # Calculate covariance between two columns\n # Example:\n # with min_periods = 3\n # a b c\n # 0 1 1 1\n # 1 NaN 2 2\n # 2 3 NaN 3\n # 3 4 4 4\n #\n # a b c\n # a cov(a, a) None cov(a, c)\n # b cov(b, b) cov(b, c)\n # c cov(c, c)\n #\n # cov_scols = [F.cov(a, a), None, F.cov(a, c), F.cov(b, b), F.cov(b, c), F.cov(c, c)]\n step = 0\n for r in range(0, num_cols):\n step += r\n for c in range(r, num_cols):\n cov_scols.append(\n F.covar_samp(\n F.col(data_cols[r]).cast(\"double\"), F.col(data_cols[c]).cast(\"double\")\n )\n if count_not_null[r * num_cols + c - step] >= min_periods\n else F.lit(None)\n )\n\n pair_cov = psdf._internal.spark_frame.select(*cov_scols).head(1)[0]\n\n # Convert from row to 2D array\n # Example:\n # pair_cov = [cov(a, a), None, cov(a, c), cov(b, b), cov(b, c), cov(c, c)]\n #\n # cov =\n #\n # a b c\n # a cov(a, a) None cov(a, c)\n # b cov(b, b) cov(b, c)\n # c cov(c, c)\n step = 0\n for r in range(0, num_cols):\n step += r\n for c in range(r, num_cols):\n cov[r][c] = pair_cov[r * num_cols + c - step]\n\n # Copy values\n # Example:\n # cov =\n # a b c\n # a cov(a, a) None cov(a, c)\n # b None cov(b, b) cov(b, c)\n # c cov(a, c) cov(b, c) cov(c, c)\n cov = cov + cov.T - np.diag(np.diag(cov))\n return DataFrame(cov, columns=psdf.columns, index=psdf.columns)\n\n def sample(\n self,\n n: Optional[int] = None,\n frac: Optional[float] = None,\n replace: bool = False,\n random_state: Optional[int] = None,\n ignore_index: bool = False,\n ) -> \"DataFrame\":\n \"\"\"\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifying the ``frac`` argument.\n\n You can use `random_state` for reproducibility. However, note that different from pandas,\n specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will\n be fixed. 
The result set depends on not only the seed, but also how the data is distributed\n across machines and to some extent network randomness when shuffle operations are involved.\n Even in the simplest case, the result set will depend on the system's CPU core count.\n\n Parameters\n ----------\n n : int, optional\n Number of items to return. This is currently NOT supported. Use frac instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : bool, default False\n Sample with or without replacement.\n random_state : int, optional\n Seed for the random number generator (if int).\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 3.4.0\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing the sampled items.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'],\n ... columns=['num_legs', 'num_wings', 'num_specimen_seen'])\n >>> df # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n A random 25% sample of the ``DataFrame``.\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n\n A random 50% sample of the ``DataFrame``, while ignoring the index.\n\n >>> df.sample(frac=0.5, random_state=1, ignore_index=True) # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n 0 4 0 2\n 1 8 0 1\n 2 0 0 8\n\n Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,\n so the same items could appear more than once.\n\n >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP\n falcon 2\n spider 8\n spider 8\n Name: num_legs, dtype: int64\n\n Specifying the exact number of items to return is not supported at the moment.\n\n >>> df.sample(n=5) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n NotImplementedError: Function sample currently does not support specifying ...\n \"\"\"\n # Note: we don't run any of the doctests because the result can change depending on the\n # system's core count.\n if n is not None:\n raise NotImplementedError(\n \"Function sample currently does not support specifying \"\n \"exact number of items to return. Use frac instead.\"\n )\n\n if frac is None:\n raise ValueError(\"frac must be specified.\")\n\n sdf = self._internal.resolved_copy.spark_frame.sample(\n withReplacement=replace, fraction=frac, seed=random_state\n )\n if ignore_index:\n return DataFrame(sdf.drop(*self._internal.index_spark_column_names))\n else:\n return DataFrame(self._internal.with_new_sdf(sdf))\n\n def astype(self, dtype: Union[str, Dtype, Dict[Name, Union[str, Dtype]]]) -> \"DataFrame\":\n \"\"\"\n Cast a pandas-on-Spark object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to\n the same type. 
Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')\n >>> df\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert to float type:\n\n >>> df.astype('float')\n a b\n 0 1.0 1.0\n 1 2.0 2.0\n 2 3.0 3.0\n\n Convert to int64 type back:\n\n >>> df.astype('int64')\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert column a to float type:\n\n >>> df.astype({'a': float})\n a b\n 0 1.0 1\n 1 2.0 2\n 2 3.0 3\n\n \"\"\"\n applied = []\n if is_dict_like(dtype):\n dtype_dict = cast(Dict[Name, Union[str, Dtype]], dtype)\n for col_name in dtype_dict.keys():\n if col_name not in self.columns:\n raise KeyError(\n \"Only a column name can be used for the \"\n \"key in a dtype mappings argument.\"\n )\n for col_name, col in self.items():\n if col_name in dtype_dict:\n applied.append(col.astype(dtype=dtype_dict[col_name]))\n else:\n applied.append(col)\n else:\n for col_name, col in self.items():\n applied.append(col.astype(dtype=cast(Union[str, Dtype], dtype)))\n return DataFrame(self._internal.with_new_columns(applied))\n\n def add_prefix(self, prefix: str) -> \"DataFrame\":\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n assert isinstance(prefix, str)\n return self._apply_series_op(\n lambda psser: psser.rename(tuple([prefix + i for i in psser._column_label]))\n )\n\n def add_suffix(self, suffix: str) -> \"DataFrame\":\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n assert isinstance(suffix, str)\n return self._apply_series_op(\n lambda psser: psser.rename(tuple([i + suffix for i in psser._column_label]))\n )\n\n # TODO: include, and exclude should be implemented.\n def describe(self, percentiles: Optional[List[float]] = None) -> \"DataFrame\":\n \"\"\"\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n 
Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]\n A list of percentiles to be computed.\n\n Returns\n -------\n DataFrame\n Summary statistics of the Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.\n\n For object data (e.g. strings or timestamps), the result’s index will include\n ``count``, ``unique``, ``top``, and ``freq``.\n The ``top`` is the most common value. The ``freq`` is the most common value’s frequency.\n Timestamps also include the ``first`` and ``last`` items.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = ps.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n dtype: float64\n\n Describing a ``DataFrame``. Only numeric fields are returned.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0],\n ... 'object': ['a', 'b', 'c']\n ... },\n ... columns=['numeric1', 'numeric2', 'object'])\n >>> df.describe()\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n For multi-index columns:\n\n >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]\n >>> df.describe() # doctest: +NORMALIZE_WHITESPACE\n num\n a b\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n >>> df[('num', 'b')].describe()\n count 3.0\n mean 5.0\n std 1.0\n min 4.0\n 25% 4.0\n 50% 5.0\n 75% 6.0\n max 6.0\n Name: (num, b), dtype: float64\n\n Describing a ``DataFrame`` and selecting custom percentiles.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0]\n ... },\n ... 
columns=['numeric1', 'numeric2'])\n >>> df.describe(percentiles = [0.85, 0.15])\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 15% 1.0 4.0\n 50% 2.0 5.0\n 85% 3.0 6.0\n max 3.0 6.0\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric1.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute and selecting custom percentiles.\n\n >>> df.numeric1.describe(percentiles = [0.85, 0.15])\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 15% 1.0\n 50% 2.0\n 85% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n \"\"\"\n psser_numeric: List[Series] = []\n psser_string: List[Series] = []\n psser_timestamp: List[Series] = []\n spark_data_types: List[DataType] = []\n column_labels: Optional[List[Label]] = []\n column_names: List[str] = []\n for label in self._internal.column_labels:\n psser = self._psser_for(label)\n spark_data_type = psser.spark.data_type\n if isinstance(spark_data_type, NumericType):\n psser_numeric.append(psser)\n column_labels.append(label)\n spark_data_types.append(spark_data_type)\n elif isinstance(spark_data_type, (TimestampType, TimestampNTZType)):\n psser_timestamp.append(psser)\n column_labels.append(label)\n spark_data_types.append(spark_data_type)\n else:\n psser_string.append(psser)\n column_names.append(self._internal.spark_column_name_for(label))\n\n if percentiles is not None:\n if any((p < 0.0) or (p > 1.0) for p in percentiles):\n raise ValueError(\"Percentiles should all be in the interval [0, 1]\")\n # appending 50% if not in percentiles already\n percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles\n else:\n percentiles = [0.25, 0.5, 0.75]\n\n # Identify the cases\n is_all_string_type = (\n len(psser_numeric) == 0 and len(psser_timestamp) == 0 and len(psser_string) > 0\n )\n is_all_numeric_type = len(psser_numeric) > 0 and len(psser_timestamp) == 0\n has_timestamp_type = len(psser_timestamp) > 0\n has_numeric_type = len(psser_numeric) > 0\n\n if is_all_string_type:\n # Handling string type columns\n # We will retrive the `count`, `unique`, `top` and `freq`.\n internal = self._internal.resolved_copy\n exprs_string = [\n internal.spark_column_for(psser._column_label) for psser in psser_string\n ]\n sdf = internal.spark_frame.select(*exprs_string)\n\n # Get `count` & `unique` for each columns\n counts, uniques = map(lambda x: x[1:], sdf.summary(\"count\", \"count_distinct\").take(2))\n # Handling Empty DataFrame\n if len(counts) == 0 or counts[0] == \"0\":\n data = dict()\n for psser in psser_string:\n data[psser.name] = [0, 0, np.nan, np.nan]\n return DataFrame(data, index=[\"count\", \"unique\", \"top\", \"freq\"])\n\n # Get `top` & `freq` for each columns\n tops = []\n freqs = []\n # TODO(SPARK-37711): We should do it in single pass since invoking Spark job\n # for every columns is too expensive.\n for column in exprs_string:\n top, freq = sdf.groupby(column).count().sort(\"count\", ascending=False).first()\n tops.append(str(top))\n freqs.append(str(freq))\n\n stats = [counts, uniques, tops, freqs]\n stats_names = [\"count\", \"unique\", \"top\", \"freq\"]\n\n result: DataFrame = DataFrame(\n data=stats,\n index=stats_names,\n columns=column_names,\n )\n elif is_all_numeric_type:\n # Handling numeric columns\n exprs_numeric = [\n psser._dtype_op.nan_to_null(psser).spark.column for psser in psser_numeric\n ]\n 
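            # The fractional `percentiles` are rendered below as the percent-style strings that
            # Spark's DataFrame.summary() expects (for example 0.25 -> "25%"), so they can be
            # requested in one pass together with "count", "mean", "stddev", "min" and "max";
            # "stddev" is then renamed to "std" to match the pandas output.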
formatted_perc = [\"{:.0%}\".format(p) for p in sorted(percentiles)]\n stats = [\"count\", \"mean\", \"stddev\", \"min\", *formatted_perc, \"max\"]\n\n # In this case, we can simply use `summary` to calculate the stats.\n sdf = self._internal.spark_frame.select(*exprs_numeric).summary(*stats)\n sdf = sdf.replace(\"stddev\", \"std\", subset=[\"summary\"])\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, \"summary\")],\n column_labels=column_labels,\n data_spark_columns=[\n scol_for(sdf, self._internal.spark_column_name_for(label))\n for label in column_labels\n ],\n )\n result = DataFrame(internal).astype(\"float64\")\n elif has_timestamp_type:\n internal = self._internal.resolved_copy\n column_names = [\n internal.spark_column_name_for(column_label) for column_label in column_labels\n ]\n column_length = len(column_labels)\n\n # Apply stat functions for each column.\n count_exprs = map(F.count, column_names)\n min_exprs = map(F.min, column_names)\n # Here we try to flat the multiple map into single list that contains each calculated\n # percentile using `chain`.\n # e.g. flat the `[, ]`\n # to `[Column<'percentile_approx(A, 0.2, 10000)'>,\n # Column<'percentile_approx(B, 0.2, 10000)'>,\n # Column<'percentile_approx(A, 0.5, 10000)'>,\n # Column<'percentile_approx(B, 0.5, 10000)'>]`\n perc_exprs = chain(\n *[\n map(F.percentile_approx, column_names, [percentile] * column_length)\n for percentile in percentiles\n ]\n )\n max_exprs = map(F.max, column_names)\n mean_exprs = []\n for column_name, spark_data_type in zip(column_names, spark_data_types):\n mean_exprs.append(F.mean(column_name).astype(spark_data_type))\n exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs]\n\n formatted_perc = [\"{:.0%}\".format(p) for p in sorted(percentiles)]\n stats_names = [\"count\", \"mean\", \"min\", *formatted_perc, \"max\"]\n\n # If not all columns are timestamp type,\n # we also need to calculate the `std` for numeric columns\n if has_numeric_type:\n std_exprs = []\n for label, spark_data_type in zip(column_labels, spark_data_types):\n column_name = label[0]\n if isinstance(spark_data_type, (TimestampType, TimestampNTZType)):\n std_exprs.append(F.lit(None).alias(\"stddev_samp({})\".format(column_name)))\n else:\n std_exprs.append(F.stddev(column_name))\n exprs.extend(std_exprs)\n stats_names.append(\"std\")\n\n # Select stats for all columns at once.\n sdf = internal.spark_frame.select(exprs)\n stat_values = sdf.first()\n\n num_stats = int(len(exprs) / column_length)\n # `column_name_stats_kv` is key-value store that has column name as key, and\n # the stats as values e.g. 
{\"A\": [{count_value}, {min_value}, ...],\n # \"B\": [{count_value}, {min_value} ...]}\n column_name_stats_kv: Dict[str, List[str]] = defaultdict(list)\n for i, column_name in enumerate(column_names):\n for first_stat_idx in range(num_stats):\n column_name_stats_kv[column_name].append(\n stat_values[(first_stat_idx * column_length) + i]\n )\n\n # For timestamp type columns, we should cast the column type to string.\n for key, spark_data_type in zip(column_name_stats_kv, spark_data_types):\n if isinstance(spark_data_type, (TimestampType, TimestampNTZType)):\n column_name_stats_kv[key] = [str(value) for value in column_name_stats_kv[key]]\n\n result: DataFrame = DataFrame( # type: ignore[no-redef]\n data=column_name_stats_kv,\n index=stats_names,\n columns=column_names,\n )\n else:\n # Empty DataFrame without column\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n return result\n\n def drop_duplicates(\n self,\n subset: Optional[Union[Name, List[Name]]] = None,\n keep: Union[bool, str] = \"first\",\n inplace: bool = False,\n ignore_index: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Return DataFrame with duplicate rows removed, optionally only\n considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy.\n ignore_index : boolean, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n Returns\n -------\n DataFrame\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n >>> df = ps.DataFrame(\n ... 
{'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])\n >>> df\n a b\n 0 1 a\n 1 2 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates().sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(ignore_index=True).sort_index()\n a b\n 0 1 a\n 1 2 a\n 2 2 c\n 3 3 d\n\n >>> df.drop_duplicates('a').sort_index()\n a b\n 0 1 a\n 1 2 a\n 4 3 d\n\n >>> df.drop_duplicates(['a', 'b']).sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep='last').sort_index()\n a b\n 0 1 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep=False).sort_index()\n a b\n 0 1 a\n 3 2 c\n 4 3 d\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n sdf, column = self._mark_duplicates(subset, keep)\n\n sdf = sdf.where(~scol_for(sdf, column)).drop(column)\n internal = self._internal.with_new_sdf(sdf)\n psdf: DataFrame = DataFrame(internal)\n\n if inplace:\n if ignore_index:\n psdf.reset_index(drop=True, inplace=inplace)\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf.reset_index(drop=True) if ignore_index else psdf\n\n def reindex(\n self,\n labels: Optional[Sequence[Any]] = None,\n index: Optional[Union[\"Index\", Sequence[Any]]] = None,\n columns: Optional[Union[pd.Index, Sequence[Any]]] = None,\n axis: Optional[Axis] = None,\n copy: Optional[bool] = True,\n fill_value: Optional[Any] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Conform DataFrame to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n labels: array-like, optional\n New labels / index to conform the axis specified by ‘axis’ to.\n index, columns: array-like, optional\n New labels / index to conform to, should be specified using keywords.\n Preferably an Index object to avoid duplicating data\n axis: int or str, optional\n Axis to target. Can be either the axis name (‘index’, ‘columns’) or\n number (0, 1).\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n\n Returns\n -------\n DataFrame with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = ps.DataFrame({\n ... 'http_status': [200, 200, 404, 404, 301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index,\n ... columns=['http_status', 'response_time'])\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 
'Chrome']\n >>> df.reindex(new_index).sort_index()\n http_status response_time\n Chrome 200.0 0.02\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Iceweasel NaN NaN\n Safari 404.0 0.07\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``.\n\n >>> df.reindex(new_index, fill_value=0, copy=False).sort_index()\n http_status response_time\n Chrome 200 0.02\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Iceweasel 0 0.00\n Safari 404 0.07\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent']).sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\").sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = ps.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2.sort_index()\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2).sort_index()\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n \"\"\"\n if axis is not None and (index is not None or columns is not None):\n raise TypeError(\"Cannot specify both 'axis' and any of 'index' or 'columns'.\")\n\n if labels is not None:\n axis = validate_axis(axis)\n if axis == 0:\n index = labels\n elif axis == 1:\n columns = labels\n\n if index is not None and not is_list_like(index):\n raise TypeError(\n \"Index must be called with a collection of some kind, \"\n \"%s was passed\" % type(index)\n )\n\n if columns is not None and not is_list_like(columns):\n raise TypeError(\n \"Columns must be called with a collection of some kind, \"\n \"%s was passed\" % type(columns)\n )\n\n df = self\n\n if index is not None:\n df = df._reindex_index(index, fill_value)\n\n if columns is not None:\n df = df._reindex_columns(columns, fill_value)\n\n # Copy\n if copy and df is self:\n return df.copy()\n else:\n return df\n\n def _reindex_index(\n self, index: Optional[Union[\"Index\", Sequence[Any]]], fill_value: Optional[Any]\n ) -> \"DataFrame\":\n # When axis is index, we can mimic pandas' by a right outer join.\n nlevels = self._internal.index_level\n assert nlevels <= 1 or (\n isinstance(index, ps.MultiIndex) and nlevels == index.nlevels\n ), \"MultiIndex DataFrame can only be reindexed with a similar pandas-on-Spark MultiIndex.\"\n\n index_columns = self._internal.index_spark_column_names\n frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)\n\n if isinstance(index, ps.Index):\n if nlevels != index.nlevels:\n return DataFrame(index._internal.with_new_columns([])).reindex(\n columns=self.columns, fill_value=fill_value\n )\n\n index_names = index._internal.index_names\n scols = index._internal.index_spark_columns\n labels = index._internal.spark_frame.select(\n 
[scol.alias(index_column) for scol, index_column in zip(scols, index_columns)]\n )\n else:\n index = ps.Index(list(index))\n labels = index._internal.spark_frame.select(index.spark.column.alias(index_columns[0]))\n index_names = self._internal.index_names\n\n if fill_value is not None:\n frame_index_columns = [\n verify_temp_column_name(frame, \"__frame_index_column_{}__\".format(i))\n for i in range(nlevels)\n ]\n index_scols = [\n scol_for(frame, index_col).alias(frame_index_col)\n for index_col, frame_index_col in zip(index_columns, frame_index_columns)\n ]\n scols = self._internal.resolved_copy.data_spark_columns\n frame = frame.select(index_scols + scols)\n\n temp_fill_value = verify_temp_column_name(frame, \"__fill_value__\")\n labels = labels.withColumn(temp_fill_value, SF.lit(fill_value))\n\n frame_index_scols = [scol_for(frame, col) for col in frame_index_columns]\n labels_index_scols = [scol_for(labels, col) for col in index_columns]\n\n joined_df = frame.join(\n labels,\n on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)],\n how=\"right\",\n )\n\n joined_df = joined_df.select(\n *labels_index_scols,\n *[\n F.when(\n reduce(\n lambda c1, c2: c1 & c2,\n [\n fcol.isNull() & lcol.isNotNull()\n for fcol, lcol in zip(frame_index_scols, labels_index_scols)\n ],\n ),\n scol_for(joined_df, temp_fill_value),\n )\n .otherwise(scol_for(joined_df, col))\n .alias(col)\n for col in self._internal.data_spark_column_names\n ],\n )\n data_fields = None\n else:\n joined_df = frame.join(labels, on=index_columns, how=\"right\")\n data_fields = [field.copy(nullable=True) for field in self._internal.data_fields]\n\n sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME)\n internal = self._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=index_names,\n index_fields=[\n field.copy(name=name)\n for field, name in zip(\n index._internal.index_fields, self._internal.index_spark_column_names\n )\n ],\n data_spark_columns=[\n scol_for(sdf, col) for col in self._internal.data_spark_column_names\n ],\n data_fields=data_fields,\n )\n return DataFrame(internal)\n\n def _reindex_columns(\n self, columns: Optional[Union[pd.Index, Sequence[Any]]], fill_value: Optional[Any]\n ) -> \"DataFrame\":\n level = self._internal.column_labels_level\n if level > 1:\n label_columns = list(columns)\n for col in label_columns:\n if not isinstance(col, tuple):\n raise TypeError(\"Expected tuple, got {}\".format(type(col).__name__))\n else:\n label_columns = [(col,) for col in columns]\n for col in label_columns:\n if len(col) != level:\n raise ValueError(\n \"shape (1,{}) doesn't match the shape (1,{})\".format(len(col), level)\n )\n fill_value = np.nan if fill_value is None else fill_value\n scols_or_pssers: List[Union[Series, Column]] = []\n labels = []\n for label in label_columns:\n if label in self._internal.column_labels:\n scols_or_pssers.append(self._psser_for(label))\n else:\n scols_or_pssers.append(SF.lit(fill_value).alias(name_like_string(label)))\n labels.append(label)\n\n if isinstance(columns, pd.Index):\n column_label_names = [\n name if is_name_like_tuple(name) else (name,) for name in columns.names\n ]\n internal = self._internal.with_new_columns(\n scols_or_pssers, column_labels=labels, column_label_names=column_label_names\n )\n else:\n internal = self._internal.with_new_columns(scols_or_pssers, column_labels=labels)\n\n return DataFrame(internal)\n\n def reindex_like(self, other: 
\"DataFrame\", copy: bool = True) -> \"DataFrame\":\n \"\"\"\n Return a DataFrame with matching indices as other object.\n\n Conform the object to the same index on all axes. Places NA/NaN in locations\n having no value in the previous index. A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : DataFrame\n Its row and column indices are used to define the new indices\n of this object.\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n\n Returns\n -------\n DataFrame\n DataFrame with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n\n >>> df1 = ps.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = ps.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... '2014-02-15']))\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN None\n 2014-02-15 35.1 NaN medium\n \"\"\"\n\n if isinstance(other, DataFrame):\n return self.reindex(index=other.index, columns=other.columns, copy=copy)\n else:\n raise TypeError(\"other must be a pandas-on-Spark DataFrame\")\n\n def melt(\n self,\n id_vars: Optional[Union[Name, List[Name]]] = None,\n value_vars: Optional[Union[Name, List[Name]]] = None,\n var_name: Optional[Union[str, List[str]]] = None,\n value_name: str = \"value\",\n ) -> \"DataFrame\":\n \"\"\"\n Unpivot a DataFrame from wide format to long format, optionally\n leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one\n or more columns are identifier variables (`id_vars`), while all other\n columns, considered measured variables (`value_vars`), are \"unpivoted\" to\n the row axis, leaving just two non-identifier columns, 'variable' and\n 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar, default 'variable'\n Name to use for the 'variable' column. If None it uses `frame.columns.name` or\n ‘variable’.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},\n ... 'B': {0: 1, 1: 3, 2: 5},\n ... 'C': {0: 2, 1: 4, 2: 6}},\n ... 
columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> ps.melt(df)\n variable value\n 0 A a\n 1 B 1\n 2 C 2\n 3 A b\n 4 B 3\n 5 C 4\n 6 A c\n 7 B 5\n 8 C 6\n\n >>> df.melt(id_vars='A')\n A variable value\n 0 a B 1\n 1 a C 2\n 2 b B 3\n 3 b C 4\n 4 c B 5\n 5 c C 6\n\n >>> df.melt(value_vars='A')\n variable value\n 0 A a\n 1 A b\n 2 A c\n\n >>> ps.melt(df, id_vars=['A', 'B'])\n A B variable value\n 0 a 1 C 2\n 1 b 3 C 4\n 2 c 5 C 6\n\n >>> df.melt(id_vars=['A'], value_vars=['C'])\n A variable value\n 0 a C 2\n 1 b C 4\n 2 c C 6\n\n The names of 'variable' and 'value' columns can be customized:\n\n >>> ps.melt(df, id_vars=['A'], value_vars=['B'],\n ... var_name='myVarname', value_name='myValname')\n A myVarname myValname\n 0 a B 1\n 1 b B 3\n 2 c B 5\n \"\"\"\n column_labels = self._internal.column_labels\n\n if id_vars is None:\n id_vars = []\n else:\n if isinstance(id_vars, tuple):\n if self._internal.column_labels_level == 1:\n id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]\n else:\n raise ValueError(\n \"id_vars must be a list of tuples\" \" when columns are a MultiIndex\"\n )\n elif is_name_like_value(id_vars):\n id_vars = [(id_vars,)]\n else:\n id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]\n\n non_existence_col = [idv for idv in id_vars if idv not in column_labels]\n if len(non_existence_col) != 0:\n raveled_column_labels = np.ravel(column_labels)\n missing = [\n nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels\n ]\n if len(missing) != 0:\n raise KeyError(\n \"The following 'id_vars' are not present\"\n \" in the DataFrame: {}\".format(missing)\n )\n else:\n raise KeyError(\n \"None of {} are in the {}\".format(non_existence_col, column_labels)\n )\n\n if value_vars is None:\n value_vars = []\n else:\n if isinstance(value_vars, tuple):\n if self._internal.column_labels_level == 1:\n value_vars = [\n valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars\n ]\n else:\n raise ValueError(\n \"value_vars must be a list of tuples\" \" when columns are a MultiIndex\"\n )\n elif is_name_like_value(value_vars):\n value_vars = [(value_vars,)]\n else:\n value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars]\n\n non_existence_col = [valv for valv in value_vars if valv not in column_labels]\n if len(non_existence_col) != 0:\n raveled_column_labels = np.ravel(column_labels)\n missing = [\n nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels\n ]\n if len(missing) != 0:\n raise KeyError(\n \"The following 'value_vars' are not present\"\n \" in the DataFrame: {}\".format(missing)\n )\n else:\n raise KeyError(\n \"None of {} are in the {}\".format(non_existence_col, column_labels)\n )\n\n if len(value_vars) == 0:\n value_vars = column_labels\n\n column_labels = [label for label in column_labels if label not in id_vars]\n\n sdf = self._internal.spark_frame\n\n if var_name is None:\n if (\n self._internal.column_labels_level == 1\n and self._internal.column_label_names[0] is None\n ):\n var_name = [\"variable\"]\n else:\n var_name = [\n name_like_string(name) if name is not None else \"variable_{}\".format(i)\n for i, name in enumerate(self._internal.column_label_names)\n ]\n elif isinstance(var_name, str):\n var_name = [var_name]\n\n pairs = F.explode(\n F.array(\n *[\n F.struct(\n *[SF.lit(c).alias(name) for c, name in zip(label, var_name)],\n *[self._internal.spark_column_for(label).alias(value_name)],\n )\n for label in 
column_labels\n if label in value_vars\n ]\n )\n )\n\n columns = (\n [\n self._internal.spark_column_for(label).alias(name_like_string(label))\n for label in id_vars\n ]\n + [F.col(\"pairs.`%s`\" % name) for name in var_name]\n + [F.col(\"pairs.`%s`\" % value_name)]\n )\n exploded_df = sdf.withColumn(\"pairs\", pairs).select(columns)\n\n return DataFrame(\n InternalFrame(\n spark_frame=exploded_df,\n index_spark_columns=None,\n column_labels=(\n [label if len(label) == 1 else (name_like_string(label),) for label in id_vars]\n + [(name,) for name in var_name]\n + [(value_name,)]\n ),\n )\n )\n\n def stack(self) -> DataFrameOrSeries:\n \"\"\"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n The new index levels are sorted.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack().sort_index()\n cat height 1\n weight 0\n dog height 3\n weight 2\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack().sort_index()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. 
Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if len(self._internal.column_labels) == 0:\n return DataFrame(\n self._internal.copy(\n column_label_names=self._internal.column_label_names[:-1]\n ).with_filter(SF.lit(False))\n )\n\n column_labels: Dict[Label, Dict[Any, Column]] = defaultdict(dict)\n index_values = set()\n should_returns_series = False\n for label in self._internal.column_labels:\n new_label = label[:-1]\n if len(new_label) == 0:\n new_label = None\n should_returns_series = True\n value = label[-1]\n\n scol = self._internal.spark_column_for(label)\n column_labels[new_label][value] = scol\n\n index_values.add(value)\n\n column_labels = dict(sorted(column_labels.items(), key=lambda x: x[0]))\n\n index_name = self._internal.column_label_names[-1]\n column_label_names = self._internal.column_label_names[:-1]\n if len(column_label_names) == 0:\n column_label_names = [None]\n\n index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level)\n data_columns = [name_like_string(label) for label in column_labels]\n\n structs = [\n F.struct(\n *[SF.lit(value).alias(index_column)],\n *[\n (\n column_labels[label][value]\n if value in column_labels[label]\n else SF.lit(None)\n ).alias(name)\n for label, name in zip(column_labels, data_columns)\n ],\n ).alias(value)\n for value in index_values\n ]\n\n pairs = F.explode(F.array(*structs))\n\n sdf = self._internal.spark_frame.withColumn(\"pairs\", pairs)\n sdf = sdf.select(\n self._internal.index_spark_columns\n + [sdf[\"pairs\"][index_column].alias(index_column)]\n + [sdf[\"pairs\"][name].alias(name) for name in data_columns]\n )\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col)\n for col in (self._internal.index_spark_column_names + [index_column])\n ],\n index_names=self._internal.index_names + [index_name],\n index_fields=self._internal.index_fields + [None],\n column_labels=list(column_labels),\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n column_label_names=column_label_names,\n )\n psdf: DataFrame = DataFrame(internal)\n\n if should_returns_series:\n return first_series(psdf)\n else:\n return psdf\n\n def unstack(self) -> DataFrameOrSeries:\n \"\"\"\n Pivot the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series.\n\n .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and\n it could cause a serious performance degradation since Spark partitions it row based.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": {\"0\": \"a\", \"1\": \"b\", \"2\": \"c\"},\n ... \"B\": {\"0\": \"1\", \"1\": \"3\", \"2\": \"5\"},\n ... \"C\": {\"0\": \"2\", \"1\": \"4\", \"2\": \"6\"}},\n ... 
columns=[\"A\", \"B\", \"C\"])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> df.unstack().sort_index()\n A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n C 0 2\n 1 4\n 2 6\n dtype: object\n\n >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')])\n >>> df.unstack().sort_index()\n X A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n Y C 0 2\n 1 4\n 2 6\n dtype: object\n\n For MultiIndex case:\n\n >>> df = ps.DataFrame({\"A\": [\"a\", \"b\", \"c\"],\n ... \"B\": [1, 3, 5],\n ... \"C\": [2, 4, 6]},\n ... columns=[\"A\", \"B\", \"C\"])\n >>> df = df.set_index('A', append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n B C\n A\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n A a b c a b c\n 0 1.0 NaN NaN 2.0 NaN NaN\n 1 NaN 3.0 NaN NaN 4.0 NaN\n 2 NaN NaN 5.0 NaN NaN 6.0\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if self._internal.index_level > 1:\n # The index after `reset_index()` will never be used, so use \"distributed\" index\n # as a dummy to avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n df = self.reset_index()\n index = df._internal.column_labels[: self._internal.index_level - 1]\n columns = df.columns[self._internal.index_level - 1]\n df = df.pivot_table(\n index=index, columns=columns, values=self._internal.column_labels, aggfunc=\"first\"\n )\n internal = df._internal.copy(\n index_names=self._internal.index_names[:-1],\n index_fields=df._internal.index_fields[: self._internal.index_level - 1],\n column_label_names=(\n df._internal.column_label_names[:-1]\n + [\n None\n if self._internal.index_names[-1] is None\n else df._internal.column_label_names[-1]\n ]\n ),\n )\n return DataFrame(internal)\n\n # TODO: Codes here are similar with melt. 
Should we deduplicate?\n column_labels = self._internal.column_labels\n ser_name = SPARK_DEFAULT_SERIES_NAME\n sdf = self._internal.spark_frame\n new_index_columns = [\n SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)\n ]\n\n new_index_map = list(zip_longest(new_index_columns, self._internal.column_label_names, []))\n\n pairs = F.explode(\n F.array(\n *[\n F.struct(\n *[SF.lit(c).alias(name) for c, name in zip(idx, new_index_columns)],\n *[self._internal.spark_column_for(idx).alias(ser_name)],\n )\n for idx in column_labels\n ]\n )\n )\n\n columns = [\n F.col(\"pairs.%s\" % name)\n for name in new_index_columns[: self._internal.column_labels_level]\n ] + [F.col(\"pairs.%s\" % ser_name)]\n\n new_index_len = len(new_index_columns)\n existing_index_columns = []\n for i, (index_name, index_field) in enumerate(\n zip(self._internal.index_names, self._internal.index_fields)\n ):\n name = SPARK_INDEX_NAME_FORMAT(i + new_index_len)\n new_index_map.append((name, index_name, index_field.copy(name=name)))\n existing_index_columns.append(self._internal.index_spark_columns[i].alias(name))\n\n exploded_df = sdf.withColumn(\"pairs\", pairs).select(existing_index_columns + columns)\n\n index_spark_column_names, index_names, index_fields = zip(*new_index_map)\n return first_series(\n DataFrame(\n InternalFrame(\n exploded_df,\n index_spark_columns=[\n scol_for(exploded_df, col) for col in index_spark_column_names\n ],\n index_names=list(index_names),\n index_fields=list(index_fields),\n column_labels=[None],\n )\n )\n )\n\n # TODO: axis, level and **kwargs should be implemented.\n def all(\n self, axis: Axis = 0, bool_only: Optional[bool] = None, skipna: bool = True\n ) -> \"Series\":\n \"\"\"\n Return whether all elements are True.\n\n Returns True unless there is at least one element within a series that is\n False or equivalent (e.g. zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n skipna : boolean, default True\n Exclude NA values, such as None or numpy.NaN.\n If an entire row/column is NA values and `skipna` is True,\n then the result will be True, as for an empty row/column.\n If `skipna` is False, numpy.NaNs are treated as True because these are\n not equal to zero, Nones are treated as False.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [True, True, True],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 0],\n ... 'col4': [1, 2, 3],\n ... 'col5': [True, True, None],\n ... 'col6': [True, False, None]},\n ... 
columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.all()\n col1 True\n col2 False\n col3 False\n col4 True\n col5 True\n col6 False\n dtype: bool\n\n Include NA values when set `skipna=False`.\n\n >>> df[['col5', 'col6']].all(skipna=False)\n col5 False\n col6 False\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.all(bool_only=True)\n col1 True\n col2 False\n dtype: bool\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n column_labels = self._internal.column_labels\n if bool_only:\n column_labels = self._bool_column_labels(column_labels)\n if len(column_labels) == 0:\n return ps.Series([], dtype=bool)\n\n applied = []\n for label in column_labels:\n scol = self._internal.spark_column_for(label)\n\n if isinstance(self._internal.spark_type_for(label), NumericType) or skipna:\n # np.nan takes no effect to the result; None takes no effect if `skipna`\n all_col = F.min(F.coalesce(scol.cast(\"boolean\"), SF.lit(True)))\n else:\n # Take None as False when not `skipna`\n all_col = F.min(\n F.when(scol.isNull(), SF.lit(False)).otherwise(scol.cast(\"boolean\"))\n )\n applied.append(F.when(all_col.isNull(), True).otherwise(all_col))\n\n return self._result_aggregated(column_labels, applied)\n\n # TODO: axis, skipna, level and **kwargs should be implemented.\n def any(self, axis: Axis = 0, bool_only: Optional[bool] = None) -> \"Series\":\n \"\"\"\n Return whether any element is True.\n\n Returns False unless there is at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [False, False, False],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 1],\n ... 'col4': [0, 1, 2],\n ... 'col5': [False, False, None],\n ... 'col6': [True, False, None]},\n ... 
columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.any()\n col1 False\n col2 True\n col3 True\n col4 True\n col5 False\n col6 True\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.any(bool_only=True)\n col1 False\n col2 True\n dtype: bool\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n column_labels = self._internal.column_labels\n if bool_only:\n column_labels = self._bool_column_labels(column_labels)\n if len(column_labels) == 0:\n return ps.Series([], dtype=bool)\n\n applied = []\n for label in column_labels:\n scol = self._internal.spark_column_for(label)\n any_col = F.max(F.coalesce(scol.cast(\"boolean\"), SF.lit(False)))\n applied.append(F.when(any_col.isNull(), False).otherwise(any_col))\n\n return self._result_aggregated(column_labels, applied)\n\n def _bool_column_labels(self, column_labels: List[Label]) -> List[Label]:\n \"\"\"\n Filter column labels of boolean columns (without None).\n \"\"\"\n bool_column_labels = []\n for label in column_labels:\n psser = self._psser_for(label)\n if is_bool_dtype(psser):\n # Rely on dtype rather than spark type because\n # columns that consist of bools and Nones should be excluded\n # if bool_only is True\n bool_column_labels.append(label)\n return bool_column_labels\n\n def _result_aggregated(self, column_labels: List[Label], scols: List[Column]) -> \"Series\":\n \"\"\"\n Given aggregated Spark columns and respective column labels from the original\n pandas-on-Spark DataFrame, construct the result Series.\n \"\"\"\n from pyspark.pandas.series import first_series\n\n cols = []\n result_scol_name = \"value\"\n for label, applied_col in zip(column_labels, scols):\n cols.append(\n F.struct(\n *[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)],\n *[applied_col.alias(result_scol_name)],\n )\n )\n # Statements under this comment implement spark frame transformations as below:\n # From:\n # +-------------------------------------------------------------------------------------+\n # |arrays |\n # +-------------------------------------------------------------------------------------+\n # |[{col1, true}, {col2, true}, {col3, false}, {col4, true}]|\n # +-------------------------------------------------------------------------------------+\n # To:\n # +-------------+\n # |col |\n # +-------------+\n # |{col1, true} |\n # |{col2, true} |\n # |{col3, false}|\n # |{col4, true} |\n # +-------------+\n # To:\n # +-----------------+-----+\n # |__index_level_0__|value|\n # +-----------------+-----+\n # |col1 |true |\n # |col2 |true |\n # |col3 |false|\n # |col4 |true |\n # +-----------------+-----+\n sdf = self._internal.spark_frame.select(F.array(*cols).alias(\"arrays\")).select(\n F.explode(F.col(\"arrays\"))\n )\n sdf = sdf.selectExpr(\"col.*\")\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i))\n for i in range(self._internal.column_labels_level)\n ],\n index_names=self._internal.column_label_names,\n column_labels=[None],\n data_spark_columns=[scol_for(sdf, result_scol_name)],\n )\n\n # (cont.) 
The result Series should look as below:\n # col1 False\n # col2 True\n # col3 True\n # col4 True\n # dtype: bool\n return first_series(DataFrame(internal))\n\n # TODO: add axis, pct, na_option parameter\n def rank(\n self, method: str = \"average\", ascending: bool = True, numeric_only: Optional[bool] = None\n ) -> \"DataFrame\":\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n .. note:: the current implementation of rank uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n\n Returns\n -------\n ranks : same type as caller\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 4\n 1 2 3\n 2 2 2\n 3 3 1\n\n >>> df.rank().sort_index()\n A B\n 0 1.0 4.0\n 1 2.5 3.0\n 2 2.5 2.0\n 3 4.0 1.0\n\n If method is set to 'min', it use lowest rank in group.\n\n >>> df.rank(method='min').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 4.0 1.0\n\n If method is set to 'max', it use highest rank in group.\n\n >>> df.rank(method='max').sort_index()\n A B\n 0 1.0 4.0\n 1 3.0 3.0\n 2 3.0 2.0\n 3 4.0 1.0\n\n If method is set to 'dense', it leaves no gaps in group.\n\n >>> df.rank(method='dense').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 3.0 1.0\n\n If numeric_only is set to 'True', rank only numeric columns.\n\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns= ['A', 'B'])\n >>> df\n A B\n 0 1 a\n 1 2 b\n 2 2 d\n 3 3 c\n >>> df.rank(numeric_only=True)\n A\n 0 1.0\n 1 2.5\n 2 2.5\n 3 4.0\n \"\"\"\n if numeric_only:\n numeric_col_names = []\n for label in self._internal.column_labels:\n psser = self._psser_for(label)\n if isinstance(psser.spark.data_type, (NumericType, BooleanType)):\n numeric_col_names.append(psser.name)\n\n psdf = self[numeric_col_names] if numeric_only else self\n return psdf._apply_series_op(\n lambda psser: psser._rank(method=method, ascending=ascending), should_resolve=True\n )\n\n def filter(\n self,\n items: Optional[Sequence[Any]] = None,\n like: Optional[str] = None,\n regex: Optional[str] = None,\n axis: Optional[Axis] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : string\n Keep labels from axis for which \"like in label == True\".\n regex : string (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : int or string axis name\n The axis to filter on. 
By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n\n For a Series,\n\n >>> # select rows by name\n >>> df.one.filter(items=['rabbit'])\n rabbit 4\n Name: one, dtype: int64\n\n >>> # select rows by regular expression\n >>> df.one.filter(regex='e$')\n mouse 1\n Name: one, dtype: int64\n\n >>> # select rows containing 'bbi'\n >>> df.one.filter(like='bbi')\n rabbit 4\n Name: one, dtype: int64\n \"\"\"\n if sum(x is not None for x in (items, like, regex)) > 1:\n raise TypeError(\n \"Keyword arguments `items`, `like`, or `regex` \" \"are mutually exclusive\"\n )\n\n axis = validate_axis(axis, none_axis=1)\n\n index_scols = self._internal.index_spark_columns\n\n if items is not None:\n if is_list_like(items):\n items = list(items)\n else:\n raise ValueError(\"items should be a list-like object.\")\n if axis == 0:\n if len(index_scols) == 1:\n if len(items) <= ps.get_option(\"compute.isin_limit\"):\n col = index_scols[0].isin([SF.lit(item) for item in items])\n return DataFrame(self._internal.with_filter(col))\n else:\n item_sdf_col = verify_temp_column_name(\n self._internal.spark_frame, \"__item__\"\n )\n item_sdf = default_session().createDataFrame(\n pd.DataFrame({item_sdf_col: items})\n )\n joined_sdf = self._internal.spark_frame.join(\n other=F.broadcast(item_sdf),\n on=(index_scols[0] == scol_for(item_sdf, item_sdf_col)),\n how=\"semi\",\n )\n\n return DataFrame(self._internal.with_new_sdf(joined_sdf))\n\n else:\n # for multi-index\n col = None\n for item in items:\n if not isinstance(item, tuple):\n raise TypeError(\"Unsupported type {}\".format(type(item).__name__))\n if not item:\n raise ValueError(\"The item should not be empty.\")\n midx_col = None\n for i, element in enumerate(item):\n if midx_col is None:\n midx_col = index_scols[i] == SF.lit(element)\n else:\n midx_col = midx_col & (index_scols[i] == SF.lit(element))\n if col is None:\n col = midx_col\n else:\n col = col | midx_col\n return DataFrame(self._internal.with_filter(col))\n else:\n return self[items]\n elif like is not None:\n if axis == 0:\n col = None\n for index_scol in index_scols:\n if col is None:\n col = index_scol.contains(like)\n else:\n col = col | index_scol.contains(like)\n return DataFrame(self._internal.with_filter(col))\n else:\n column_labels = self._internal.column_labels\n output_labels = [label for label in column_labels if any(like in i for i in label)]\n return self[output_labels]\n elif regex is not None:\n if axis == 0:\n col = None\n for index_scol in index_scols:\n if col is None:\n col = index_scol.rlike(regex)\n else:\n col = col | index_scol.rlike(regex)\n return DataFrame(self._internal.with_filter(col))\n else:\n column_labels = self._internal.column_labels\n matcher = re.compile(regex)\n output_labels = 
[\n label\n for label in column_labels\n if any(matcher.search(i) is not None for i in label)\n ]\n return self[output_labels]\n else:\n raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n\n def rename(\n self,\n mapper: Optional[Union[Dict, Callable[[Any], Any]]] = None,\n index: Optional[Union[Dict, Callable[[Any], Any]]] = None,\n columns: Optional[Union[Dict, Callable[[Any], Any]]] = None,\n axis: Axis = \"index\",\n inplace: bool = False,\n level: Optional[int] = None,\n errors: str = \"ignore\",\n ) -> Optional[\"DataFrame\"]:\n\n \"\"\"\n Alter axes labels.\n Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series\n will be left as-is. Extra labels listed don’t throw an error.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or functions transformations to apply to that axis’ values.\n Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`\n and `columns`.\n index : dict-like or function\n Alternative to specifying axis (\"mapper, axis=0\" is equivalent to \"index=mapper\").\n columns : dict-like or function\n Alternative to specifying axis (\"mapper, axis=1\" is equivalent to \"columns=mapper\").\n axis : int or str, default 'index'\n Axis to target with mapper. Can be either the axis name ('index', 'columns') or\n number (0, 1).\n inplace : bool, default False\n Whether to return a new DataFrame.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified level.\n errors : {'ignore', 'raise}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`\n contains labels that are not present in the Index being transformed. If 'ignore',\n existing keys will be renamed and extra keys will be ignored.\n\n Returns\n -------\n DataFrame with the renamed axis labels.\n\n Raises\n ------\n `KeyError`\n If any of the labels is not found in the selected axis and \"errors='raise'\".\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> psdf1.rename(columns={\"A\": \"a\", \"B\": \"c\"}) # doctest: +NORMALIZE_WHITESPACE\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> def str_lower(s) -> str:\n ... return str.lower(s)\n >>> psdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> def mul10(x) -> int:\n ... 
return x * 10\n >>> psdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])\n >>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)\n >>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE\n x y\n A B C D\n 0 1 2 3 4\n 1 5 6 7 8\n\n >>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))\n >>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE\n a b\n x a 1 2\n b 3 4\n y c 5 6\n d 7 8\n \"\"\"\n\n def gen_mapper_fn(\n mapper: Union[Dict, Callable[[Any], Any]], skip_return_type: bool = False\n ) -> Tuple[Callable[[Any], Any], Dtype, DataType]:\n if isinstance(mapper, dict):\n mapper_dict = mapper\n\n type_set = set(map(lambda x: type(x), mapper_dict.values()))\n if len(type_set) > 1:\n raise ValueError(\"Mapper dict should have the same value type.\")\n dtype, spark_return_type = pandas_on_spark_type(list(type_set)[0])\n\n def mapper_fn(x: Any) -> Any:\n if x in mapper_dict:\n return mapper_dict[x]\n else:\n if errors == \"raise\":\n raise KeyError(\"Index include value which is not in the `mapper`\")\n return x\n\n return mapper_fn, dtype, spark_return_type\n elif callable(mapper):\n mapper_callable = cast(Callable, mapper)\n\n def mapper_fn(x: Any) -> Any:\n return mapper_callable(x)\n\n if skip_return_type:\n return mapper_fn, None, None\n else:\n return_type = cast(ScalarType, infer_return_type(mapper))\n dtype = return_type.dtype\n spark_return_type = return_type.spark_type\n return mapper_fn, dtype, spark_return_type\n else:\n raise ValueError(\n \"`mapper` or `index` or `columns` should be \"\n \"either dict-like or function type.\"\n )\n\n index_mapper_fn = None\n index_mapper_ret_stype = None\n columns_mapper_fn = None\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if mapper:\n axis = validate_axis(axis)\n if axis == 0:\n index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(\n mapper\n )\n elif axis == 1:\n columns_mapper_fn, _, _ = gen_mapper_fn(mapper)\n else:\n if index:\n index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(\n index\n )\n if columns:\n columns_mapper_fn, _, _ = gen_mapper_fn(columns, skip_return_type=True)\n\n if not index and not columns:\n raise ValueError(\"Either `index` or `columns` should be provided.\")\n\n psdf = self.copy()\n if index_mapper_fn:\n # rename index labels, if `level` is None, rename all index columns, otherwise only\n # rename the corresponding level index.\n # implement this by transform the underlying spark dataframe,\n # Example:\n # suppose the psdf index column in underlying spark dataframe is \"index_0\", \"index_1\",\n # if rename level 0 index labels, will do:\n # ``psdf._sdf.withColumn(\"index_0\", mapper_fn_udf(col(\"index_0\"))``\n # if rename all index labels (`level` is None), then will do:\n # ```\n # psdf._sdf.withColumn(\"index_0\", mapper_fn_udf(col(\"index_0\"))\n # .withColumn(\"index_1\", mapper_fn_udf(col(\"index_1\"))\n # ```\n\n index_columns = psdf._internal.index_spark_column_names\n num_indices = len(index_columns)\n if level:\n if level < 0 or level >= num_indices:\n raise ValueError(\"level should be an integer between [0, num_indices)\")\n\n @pandas_udf(returnType=index_mapper_ret_stype) # type: ignore[call-overload]\n def index_mapper_udf(s: pd.Series) -> pd.Series:\n return s.map(index_mapper_fn)\n\n index_spark_columns = 
psdf._internal.index_spark_columns.copy()\n index_fields = psdf._internal.index_fields.copy()\n if level is None:\n for i in range(num_indices):\n index_spark_columns[i] = index_mapper_udf(index_spark_columns[i]).alias(\n index_columns[i]\n )\n index_fields[i] = index_fields[i].copy(\n dtype=index_mapper_ret_dtype,\n spark_type=index_mapper_ret_stype,\n nullable=True,\n )\n else:\n index_spark_columns[level] = index_mapper_udf(index_spark_columns[level]).alias(\n index_columns[level]\n )\n index_fields[level] = index_fields[level].copy(\n dtype=index_mapper_ret_dtype,\n spark_type=index_mapper_ret_stype,\n nullable=True,\n )\n psdf = DataFrame(\n psdf._internal.copy(\n index_spark_columns=index_spark_columns, index_fields=index_fields\n )\n )\n if columns_mapper_fn:\n # rename column name.\n # Will modify the `_internal._column_labels` and transform underlying spark dataframe\n # to the same column name with `_internal._column_labels`.\n if level:\n if level < 0 or level >= psdf._internal.column_labels_level:\n raise ValueError(\"level should be an integer between [0, column_labels_level)\")\n\n def gen_new_column_labels_entry(column_labels_entry: Label) -> Label:\n if level is None:\n # rename all level columns\n return tuple(map(columns_mapper_fn, column_labels_entry))\n else:\n # only rename specified level column\n entry_list = list(column_labels_entry)\n entry_list[level] = columns_mapper_fn(entry_list[level])\n return tuple(entry_list)\n\n new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels))\n\n new_data_pssers = [\n psdf._psser_for(old_label).rename(new_label)\n for old_label, new_label in zip(psdf._internal.column_labels, new_column_labels)\n ]\n psdf = DataFrame(psdf._internal.with_new_columns(new_data_pssers))\n if inplace:\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf\n\n def rename_axis(\n self,\n mapper: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,\n index: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,\n columns: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,\n axis: Optional[Axis] = 0,\n inplace: Optional[bool] = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new DataFrame.\n\n Returns\n -------\n DataFrame, or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n\n The second calling convention will 
modify the names of the\n corresponding index specified by axis.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n >>> df = ps.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... index=[\"dog\", \"cat\", \"monkey\"],\n ... columns=[\"num_legs\", \"num_arms\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df = df.rename_axis(\"animal\").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n limbs num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... names=['type', 'name'])\n >>> df = ps.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... index=index,\n ... columns=[\"num_legs\", \"num_arms\"])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n class name\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n TYPE NAME\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n \"\"\"\n\n def gen_names(\n v: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]],\n curnames: List[Name],\n ) -> List[Label]:\n newnames: List[Name]\n if is_scalar(v):\n newnames = [cast(Name, v)]\n elif is_list_like(v) and not is_dict_like(v):\n newnames = list(cast(Sequence[Name], v))\n elif is_dict_like(v):\n v_dict = cast(Dict[Name, Name], v)\n newnames = [v_dict[name] if name in v_dict else name for name in curnames]\n elif callable(v):\n v_callable = cast(Callable[[Name], Name], v)\n newnames = [v_callable(name) for name in curnames]\n else:\n raise ValueError(\n \"`mapper` or `index` or `columns` should be \"\n \"either dict-like or function type.\"\n )\n\n if len(newnames) != len(curnames):\n raise ValueError(\n \"Length of new names must be {}, got {}\".format(len(curnames), len(newnames))\n )\n\n return [name if is_name_like_tuple(name) else (name,) for name in newnames]\n\n if mapper is not None and (index is not None or columns is not None):\n raise TypeError(\"Cannot specify both 'mapper' and any of 'index' or 'columns'.\")\n\n if mapper is not None:\n axis = validate_axis(axis)\n if axis == 0:\n index = mapper\n elif axis == 1:\n columns = mapper\n\n column_label_names = (\n gen_names(columns, self.columns.names)\n if columns is not None\n else self._internal.column_label_names\n )\n index_names = (\n gen_names(index, self.index.names) if index is not None else self._internal.index_names\n )\n\n internal = self._internal.copy(\n index_names=index_names, column_label_names=column_label_names\n )\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def keys(self) -> pd.Index:\n \"\"\"\n Return alias for columns.\n\n Returns\n -------\n Index\n Columns of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', 'sidewinder'],\n ... 
columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n sidewinder 7 8\n\n >>> df.keys()\n Index(['max_speed', 'shield'], dtype='object')\n \"\"\"\n return self.columns\n\n def pct_change(self, periods: int = 1) -> \"DataFrame\":\n \"\"\"\n Percentage change between the current and a prior element.\n\n .. note:: the current implementation of this API uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n Percentage change in French franc, Deutsche Mark, and Italian lira\n from 1980-01-01 to 1980-03-01.\n\n >>> df = ps.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n You can set periods to shift for forming percent change\n\n >>> df.pct_change(2)\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 NaN NaN NaN\n 1980-03-01 0.067912 0.073814 0.06883\n \"\"\"\n window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)\n\n def op(psser: ps.Series) -> Column:\n prev_row = F.lag(psser.spark.column, periods).over(window)\n return ((psser.spark.column - prev_row) / prev_row).alias(\n psser._internal.data_spark_column_names[0]\n )\n\n return self._apply_series_op(op, should_resolve=True)\n\n # TODO: axis = 1\n def idxmax(self, axis: Axis = 0) -> \"Series\":\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n NA/null values are excluded.\n\n .. note:: This API collect all rows with maximum value using `to_pandas()`\n because we suppose the number of rows with max values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmax\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a 2\n b 0\n c 2\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 
'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a x 2\n b y 0\n c z 2\n dtype: int64\n \"\"\"\n max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns)\n sdf_max = self._internal.spark_frame.select(*max_cols).head()\n # `sdf_max` looks like below\n # +------+------+------+\n # |(a, x)|(b, y)|(c, z)|\n # +------+------+------+\n # | 3| 4.0| 400|\n # +------+------+------+\n\n conds = (\n scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max)\n )\n cond = reduce(lambda x, y: x | y, conds)\n\n psdf: DataFrame = DataFrame(self._internal.with_filter(cond))\n\n return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax()))\n\n # TODO: axis = 1\n def idxmin(self, axis: Axis = 0) -> \"Series\":\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n NA/null values are excluded.\n\n .. note:: This API collect all rows with minimum value using `to_pandas()`\n because we suppose the number of rows with min values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmin\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a 0\n b 3\n c 1\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a x 0\n b y 3\n c z 1\n dtype: int64\n \"\"\"\n min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns)\n sdf_min = self._internal.spark_frame.select(*min_cols).head()\n\n conds = (\n scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min)\n )\n cond = reduce(lambda x, y: x | y, conds)\n\n psdf: DataFrame = DataFrame(self._internal.with_filter(cond))\n\n return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin()))\n\n def info(\n self,\n verbose: Optional[bool] = None,\n buf: Optional[IO[str]] = None,\n max_cols: Optional[int] = None,\n null_counts: Optional[bool] = None,\n ) -> None:\n \"\"\"\n Print a concise summary of a DataFrame.\n\n This method prints information about a DataFrame including\n the index dtype and column dtypes, non-null values and memory usage.\n\n Parameters\n ----------\n verbose : bool, optional\n Whether to print the full summary.\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. Pass a writable buffer if you need to further process\n the output.\n max_cols : int, optional\n When to switch from the verbose to the truncated output. 
If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used.\n null_counts : bool, optional\n Whether to show the non-null counts.\n\n Returns\n -------\n None\n This method prints a summary of a DataFrame and returns None.\n\n See Also\n --------\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n\n Examples\n --------\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = ps.DataFrame(\n ... {\"int_col\": int_values, \"text_col\": text_values, \"float_col\": float_values},\n ... columns=['int_col', 'text_col', 'float_col'])\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True) # doctest: +SKIP\n \n Index: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False) # doctest: +SKIP\n \n Index: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open('%s/info.txt' % path, \"w\",\n ... encoding=\"utf-8\") as f:\n ... _ = f.write(s)\n >>> with open('%s/info.txt' % path) as f:\n ... f.readlines() # doctest: +SKIP\n [\"\\\\n\",\n 'Index: 5 entries, 0 to 4\\\\n',\n 'Data columns (total 3 columns):\\\\n',\n ' # Column Non-Null Count Dtype \\\\n',\n '--- ------ -------------- ----- \\\\n',\n ' 0 int_col 5 non-null int64 \\\\n',\n ' 1 text_col 5 non-null object \\\\n',\n ' 2 float_col 5 non-null float64\\\\n',\n 'dtypes: float64(1), int64(1), object(1)']\n \"\"\"\n # To avoid pandas' existing config affects pandas-on-Spark.\n # TODO: should we have corresponding pandas-on-Spark configs?\n with pd.option_context(\n \"display.max_info_columns\", sys.maxsize, \"display.max_info_rows\", sys.maxsize\n ):\n try:\n # hack to use pandas' info as is.\n object.__setattr__(self, \"_data\", self)\n count_func = self.count\n self.count = ( # type: ignore[assignment]\n lambda: count_func()._to_pandas() # type: ignore[assignment, misc, union-attr]\n )\n return pd.DataFrame.info(\n self, # type: ignore[arg-type]\n verbose=verbose,\n buf=buf,\n max_cols=max_cols,\n memory_usage=False,\n null_counts=null_counts,\n )\n finally:\n del self._data\n self.count = count_func # type: ignore[assignment]\n\n # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas'\n def quantile(\n self,\n q: Union[float, Iterable[float]] = 0.5,\n axis: Axis = 0,\n numeric_only: bool = True,\n accuracy: int = 10000,\n ) -> DataFrameOrSeries:\n \"\"\"\n Return value at the given quantile.\n\n .. 
note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile\n based upon approximate percentile computation because computing quantile across a\n large dataset is extremely expensive.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute.\n axis : int or str, default 0 or 'index'\n Can only be set to 0 at the moment.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be computed as well.\n Can only be set to True at the moment.\n accuracy : int, optional\n Default accuracy of approximation. Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n Series or DataFrame\n If q is an array, a DataFrame will be returned where the\n index is q, the columns are the columns of self, and the values are the quantiles.\n If q is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})\n >>> psdf\n a b\n 0 1 6\n 1 2 7\n 2 3 8\n 3 4 9\n 4 5 0\n\n >>> psdf.quantile(.5)\n a 3.0\n b 7.0\n Name: 0.5, dtype: float64\n\n >>> psdf.quantile([.25, .5, .75])\n a b\n 0.25 2.0 6.0\n 0.50 3.0 7.0\n 0.75 4.0 8.0\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n if not isinstance(accuracy, int):\n raise TypeError(\n \"accuracy must be an integer; however, got [%s]\" % type(accuracy).__name__\n )\n\n qq: Union[float, List[float]] = list(q) if isinstance(q, Iterable) else q\n\n for v in qq if isinstance(qq, list) else [qq]:\n if not isinstance(v, float):\n raise TypeError(\n \"q must be a float or an array of floats; however, [%s] found.\" % type(v)\n )\n if v < 0.0 or v > 1.0:\n raise ValueError(\"percentiles should all be in the interval [0, 1].\")\n\n def quantile(psser: \"Series\") -> Column:\n spark_type = psser.spark.data_type\n spark_column = psser.spark.column\n if isinstance(spark_type, (BooleanType, NumericType)):\n return F.percentile_approx(spark_column.cast(DoubleType()), qq, accuracy)\n else:\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n\n if isinstance(qq, list):\n # First calculate the percentiles from all columns and map it to each `quantiles`\n # by creating each entry as a struct. 
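# Standalone sketch (assumed toy data, not part of this module) of the approximate-quantile
# primitive the surrounding code builds on: pyspark.sql.functions.percentile_approx returns
# an array of quantiles per column when given a list of probabilities, which is then
# reshaped into the result frame below. Data mirrors the docstring example above.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([(1, 6), (2, 7), (3, 8), (4, 9), (5, 0)], ["a", "b"])
sdf.select(
    F.percentile_approx("a", [0.25, 0.5, 0.75], 10000).alias("a"),
    F.percentile_approx("b", [0.25, 0.5, 0.75], 10000).alias("b"),
).show()  # a single row holding an array of approximate quantiles for each column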
So, it becomes an array of structs as below:\n #\n # +-----------------------------------------+\n # | arrays|\n # +-----------------------------------------+\n # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]|\n # +-----------------------------------------+\n\n percentile_cols: List[Column] = []\n percentile_col_names: List[str] = []\n column_labels: List[Label] = []\n for label, column in zip(\n self._internal.column_labels, self._internal.data_spark_column_names\n ):\n psser = self._psser_for(label)\n\n is_numeric_or_boolean = isinstance(\n psser.spark.data_type, (NumericType, BooleanType)\n )\n keep_column = not numeric_only or is_numeric_or_boolean\n\n if keep_column:\n percentile_col = quantile(psser)\n percentile_cols.append(percentile_col.alias(column))\n percentile_col_names.append(column)\n column_labels.append(label)\n\n if len(percentile_cols) == 0:\n return DataFrame(index=qq)\n\n sdf = self._internal.spark_frame.select(percentile_cols)\n # Here, after select percentile cols, a spark_frame looks like below:\n # +---------+---------+\n # | a| b|\n # +---------+---------+\n # |[2, 3, 4]|[6, 7, 8]|\n # +---------+---------+\n\n cols_dict: Dict[str, List[Column]] = {}\n for column in percentile_col_names:\n cols_dict[column] = list()\n for i in range(len(qq)):\n cols_dict[column].append(scol_for(sdf, column)[i].alias(column))\n\n internal_index_column = SPARK_DEFAULT_INDEX_NAME\n cols = []\n for i, col in enumerate(zip(*cols_dict.values())):\n cols.append(F.struct(SF.lit(qq[i]).alias(internal_index_column), *col))\n sdf = sdf.select(F.array(*cols).alias(\"arrays\"))\n\n # And then, explode it and manually set the index.\n # +-----------------+---+---+\n # |__index_level_0__| a| b|\n # +-----------------+---+---+\n # | 0.25| 2| 6|\n # | 0.5| 3| 7|\n # | 0.75| 4| 8|\n # +-----------------+---+---+\n sdf = sdf.select(F.explode(F.col(\"arrays\"))).selectExpr(\"col.*\")\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, internal_index_column)],\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names],\n )\n return DataFrame(internal)\n else:\n return self._reduce_for_stat_function(\n quantile, name=\"quantile\", numeric_only=numeric_only\n ).rename(qq)\n\n def query(self, expr: str, inplace: bool = False) -> Optional[\"DataFrame\"]:\n \"\"\"\n Query the columns of a DataFrame with a boolean expression.\n\n .. note:: Internal columns that starting with a '__' prefix are able to access, however,\n they are not supposed to be accessed.\n\n .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the\n pandas specific syntax such as `@` is not supported. If you want the pandas syntax,\n you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should\n be aware that `query_func` will be executed at different nodes in a distributed manner.\n So, for example, to use `@` syntax, make sure the variable is serialized by, for\n example, putting it within the closure as below.\n\n >>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)})\n >>> def query_func(pdf):\n ... num = 1995\n ... 
return pdf.query('A > @num')\n >>> df.pandas_on_spark.apply_batch(query_func)\n A B\n 1996 1996 1996\n 1997 1997 1997\n 1998 1998 1998\n 1999 1999 1999\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to column names that contain spaces by surrounding\n them in backticks.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n\n Returns\n -------\n DataFrame\n DataFrame resulting from the provided query expression.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n \"\"\"\n if isinstance(self.columns, pd.MultiIndex):\n raise TypeError(\"Doesn't support for MultiIndex columns\")\n if not isinstance(expr, str):\n raise TypeError(\n \"expr must be a string to be evaluated, {} given\".format(type(expr).__name__)\n )\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n data_columns = [label[0] for label in self._internal.column_labels]\n sdf = self._internal.spark_frame.select(\n self._internal.index_spark_columns\n + [\n scol.alias(col)\n for scol, col in zip(self._internal.data_spark_columns, data_columns)\n ]\n ).filter(expr)\n internal = self._internal.with_new_sdf(sdf, data_columns=data_columns)\n\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def take(self, indices: List[int], axis: Axis = 0, **kwargs: Any) -> \"DataFrame\":\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. 
That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3]).sort_index()\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2]).sort_index()\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n axis = validate_axis(axis)\n if not is_list_like(indices) or isinstance(indices, (dict, set)):\n raise TypeError(\"`indices` must be a list-like except dict or set\")\n if axis == 0:\n return cast(DataFrame, self.iloc[indices, :])\n else:\n return cast(DataFrame, self.iloc[:, indices])\n\n def eval(self, expr: str, inplace: bool = False) -> Optional[DataFrameOrSeries]:\n \"\"\"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. Otherwise,\n a new DataFrame is returned.\n\n Returns\n -------\n The result of the evaluation.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if isinstance(self.columns, pd.MultiIndex):\n raise TypeError(\"`eval` is not supported for multi-index columns\")\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n should_return_series = False\n series_name = None\n should_return_scalar = False\n\n # Since `eval_func` doesn't have a type hint, inferring the schema is always preformed\n # in the `apply_batch`. 
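# Pure-pandas sketch (toy data, not part of this module) of why the closure flags below
# work: pandas' eval may return a Series, a DataFrame (for assignments), or a scalar, and
# running the function once on a small sample is enough to record which shape should be
# restored afterwards. The flag and helper names here are made up for illustration.
import pandas as pd

pdf = pd.DataFrame({"A": [1, 2, 3], "B": [10, 8, 6]})
returned_series = False

def sample_eval(sample: pd.DataFrame) -> pd.DataFrame:
    global returned_series  # the real code uses nonlocal flags inside the method
    result = sample.eval("A + B")
    if isinstance(result, pd.Series):
        returned_series = True
        result = result.to_frame()
    return result

print(sample_eval(pdf.head(1)), returned_series)  # one-column frame, True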
Hence, the variables `should_return_series`, `series_name`,\n # and `should_return_scalar` can be updated.\n def eval_func(pdf): # type: ignore[no-untyped-def]\n nonlocal should_return_series\n nonlocal series_name\n nonlocal should_return_scalar\n result_inner = pdf.eval(expr, inplace=inplace)\n if inplace:\n result_inner = pdf\n if isinstance(result_inner, pd.Series):\n should_return_series = True\n series_name = result_inner.name\n result_inner = result_inner.to_frame()\n elif is_scalar(result_inner):\n should_return_scalar = True\n result_inner = pd.Series(result_inner).to_frame()\n return result_inner\n\n result = self.pandas_on_spark.apply_batch(eval_func)\n if inplace:\n # Here, the result is always a frame because the error is thrown during schema inference\n # from pandas.\n self._update_internal_frame(result._internal, requires_same_anchor=False)\n return None\n elif should_return_series:\n return first_series(result).rename(series_name)\n elif should_return_scalar:\n return first_series(result)[0]\n else:\n # Returns a frame\n return result\n\n def explode(self, column: Name, ignore_index: bool = False) -> \"DataFrame\":\n \"\"\"\n Transform each element of a list-like to a row, replicating index values.\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 [] 1\n 2 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1.0 1\n 0 2.0 1\n 0 3.0 1\n 1 NaN 1\n 2 3.0 1\n 2 4.0 1\n\n >>> df.explode('A', ignore_index=True)\n A B\n 0 1.0 1\n 1 2.0 1\n 2 3.0 1\n 3 NaN 1\n 4 3.0 1\n 5 4.0 1\n \"\"\"\n from pyspark.pandas.series import Series\n\n if not is_name_like_value(column):\n raise TypeError(\"column must be a scalar\")\n\n psdf: DataFrame = DataFrame(self._internal.resolved_copy)\n psser = psdf[column]\n if not isinstance(psser, Series):\n raise ValueError(\n \"The column %s is not unique. For a multi-index, the label must be a tuple \"\n \"with elements corresponding to each level.\" % name_like_string(column)\n )\n if not isinstance(psser.spark.data_type, ArrayType):\n return self.copy()\n\n sdf = psdf._internal.spark_frame.withColumn(\n psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column)\n )\n\n data_fields = psdf._internal.data_fields.copy()\n idx = psdf._internal.column_labels.index(psser._column_label)\n field = data_fields[idx]\n spark_type = cast(ArrayType, field.spark_type).elementType\n dtype = spark_type_to_pandas_dtype(spark_type)\n data_fields[idx] = field.copy(dtype=dtype, spark_type=spark_type, nullable=True)\n\n internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields)\n result_df: DataFrame = DataFrame(internal)\n return result_df.reset_index(drop=True) if ignore_index else result_df\n\n def mad(self, axis: Axis = 0) -> \"Series\":\n \"\"\"\n Return the mean absolute deviation of values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... 
columns=['a', 'b'])\n\n >>> df.mad()\n a 0.666667\n b 0.066667\n dtype: float64\n\n >>> df.mad(axis=1)\n 0 0.45\n 1 0.90\n 2 1.35\n 3 NaN\n dtype: float64\n \"\"\"\n from pyspark.pandas.series import first_series\n\n axis = validate_axis(axis)\n\n if axis == 0:\n\n def get_spark_column(psdf: DataFrame, label: Label) -> Column:\n scol = psdf._internal.spark_column_for(label)\n col_type = psdf._internal.spark_type_for(label)\n\n if isinstance(col_type, BooleanType):\n scol = scol.cast(\"integer\")\n\n return scol\n\n new_column_labels: List[Label] = []\n for label in self._internal.column_labels:\n # Filtering out only columns of numeric and boolean type column.\n dtype = self._psser_for(label).spark.data_type\n if isinstance(dtype, (NumericType, BooleanType)):\n new_column_labels.append(label)\n\n new_columns = [\n F.avg(get_spark_column(self, label)).alias(name_like_string(label))\n for label in new_column_labels\n ]\n\n mean_data = self._internal.spark_frame.select(*new_columns).first()\n\n new_columns = [\n F.avg(\n F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)])\n ).alias(name_like_string(label))\n for label in new_column_labels\n ]\n\n sdf = self._internal.spark_frame.select(\n *[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *new_columns\n )\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.max_rows\", 1):\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n column_labels=new_column_labels,\n column_label_names=self._internal.column_label_names,\n )\n return first_series(DataFrame(internal).transpose())\n\n else:\n\n @pandas_udf(returnType=DoubleType()) # type: ignore[call-overload]\n def calculate_columns_axis(*cols: pd.Series) -> pd.Series:\n return pd.concat(cols, axis=1).mad(axis=1)\n\n internal = self._internal.copy(\n column_labels=[None],\n data_spark_columns=[\n calculate_columns_axis(*self._internal.data_spark_columns).alias(\n SPARK_DEFAULT_SERIES_NAME\n )\n ],\n data_fields=[None],\n column_label_names=None,\n )\n return first_series(DataFrame(internal))\n\n def tail(self, n: int = 5) -> \"DataFrame\":\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail() # doctest: +SKIP\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3) # doctest: +SKIP\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3) # doctest: +SKIP\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n if not isinstance(n, int):\n raise TypeError(\"bad operand type for unary -: '{}'\".format(type(n).__name__))\n if n < 0:\n n = len(self) + n\n if n <= 0:\n return ps.DataFrame(self._internal.with_filter(SF.lit(False)))\n # Should use `resolved_copy` here for the case like `(psdf + 1).tail()`\n sdf = self._internal.resolved_copy.spark_frame\n rows = sdf.tail(n)\n new_sdf = default_session().createDataFrame(rows, sdf.schema)\n\n return DataFrame(self._internal.with_new_sdf(new_sdf))\n\n def align(\n self,\n other: DataFrameOrSeries,\n join: str = \"outer\",\n axis: Optional[Axis] = None,\n copy: bool = True,\n ) -> Tuple[\"DataFrame\", DataFrameOrSeries]:\n \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n\n Returns\n -------\n (left, right) : (DataFrame, type of other)\n Aligned objects.\n\n Examples\n --------\n >>> ps.set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({\"a\": [1, 2, 3], \"b\": [\"a\", \"b\", \"c\"]}, index=[10, 20, 30])\n >>> df2 = ps.DataFrame({\"a\": [4, 5, 6], \"c\": [\"d\", \"e\", \"f\"]}, index=[10, 11, 12])\n\n Align both axis:\n\n >>> aligned_l, aligned_r = df1.align(df2)\n >>> aligned_l.sort_index()\n a b c\n 10 1.0 a NaN\n 11 NaN None NaN\n 12 NaN None NaN\n 20 2.0 b NaN\n 30 3.0 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4.0 NaN d\n 11 5.0 NaN e\n 12 6.0 NaN f\n 20 NaN NaN None\n 30 NaN NaN None\n\n Align only axis=0 (index):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n a c\n 10 4.0 d\n 11 5.0 e\n 12 6.0 f\n 20 NaN None\n 30 NaN None\n\n Align only axis=1 (column):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=1)\n >>> aligned_l.sort_index()\n a b c\n 10 1 a NaN\n 20 2 b NaN\n 30 3 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4 NaN d\n 11 5 NaN e\n 12 6 NaN f\n\n Align with the join type \"inner\":\n\n >>> aligned_l, aligned_r = df1.align(df2, join=\"inner\")\n >>> aligned_l.sort_index()\n a\n 10 1\n >>> aligned_r.sort_index()\n a\n 10 4\n\n Align with a Series:\n\n >>> s = ps.Series([7, 8, 9], index=[10, 11, 12])\n >>> aligned_l, aligned_r = df1.align(s, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n 10 7.0\n 11 8.0\n 12 9.0\n 20 NaN\n 30 NaN\n dtype: float64\n\n >>> ps.reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n from pyspark.pandas.series import Series, first_series\n\n if not isinstance(other, (DataFrame, 
Series)):\n raise TypeError(\"unsupported type: {}\".format(type(other).__name__))\n\n how = validate_how(join)\n axis = validate_axis(axis, None)\n\n right_is_series = isinstance(other, Series)\n if right_is_series:\n if axis is None:\n raise ValueError(\"Must specify axis=0 or 1\")\n elif axis != 0:\n raise NotImplementedError(\n \"align currently only works for axis=0 when right is Series\"\n )\n\n left = self\n right = other\n\n if (axis is None or axis == 0) and not same_anchor(left, right):\n combined = combine_frames(left, right, how=how)\n left = combined[\"this\"]\n right = combined[\"that\"]\n\n if right_is_series:\n right = first_series(cast(DataFrame[Any], right)).rename(other.name)\n\n if (\n axis is None or axis == 1\n ) and left._internal.column_labels != right._internal.column_labels:\n\n if left._internal.column_labels_level != right._internal.column_labels_level:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n left = left.copy()\n right = right.copy()\n\n if how == \"full\":\n column_labels = sorted(\n list(set(left._internal.column_labels) | set(right._internal.column_labels))\n )\n elif how == \"inner\":\n column_labels = sorted(\n list(set(left._internal.column_labels) & set(right._internal.column_labels))\n )\n elif how == \"left\":\n column_labels = left._internal.column_labels\n else:\n column_labels = right._internal.column_labels\n\n for label in column_labels:\n if label not in left._internal.column_labels:\n left[label] = SF.lit(None).cast(DoubleType())\n left = left[column_labels]\n for label in column_labels:\n if label not in right._internal.column_labels:\n right[label] = SF.lit(None).cast(DoubleType())\n right = right[column_labels]\n\n return (left.copy(), right.copy()) if copy else (left, right)\n\n @staticmethod\n def from_dict(\n data: Dict[Name, Sequence[Any]],\n orient: str = \"columns\",\n dtype: Union[str, Dtype] = None,\n columns: Optional[List[Name]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). Otherwise if the keys should be rows, pass 'index'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient='index'``. 
Raises a ValueError\n if used with ``orient='columns'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 10\n 1 2 20\n 2 1 30\n 3 0 40\n\n Specify ``orient='index'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data, orient='index').sort_index()\n 0 1 2 3\n row_1 3 2 1 0\n row_2 10 20 30 40\n\n When using the 'index' orientation, the column names can be\n specified manually:\n\n >>> ps.DataFrame.from_dict(data, orient='index',\n ... columns=['A', 'B', 'C', 'D']).sort_index()\n A B C D\n row_1 3 2 1 0\n row_2 10 20 30 40\n \"\"\"\n return DataFrame(\n pd.DataFrame.from_dict(\n data, orient=orient, dtype=dtype, columns=columns # type: ignore[arg-type]\n )\n )\n\n # Override the `groupby` to specify the actual return type annotation.\n def groupby(\n self,\n by: Union[Name, \"Series\", List[Union[Name, \"Series\"]]],\n axis: Axis = 0,\n as_index: bool = True,\n dropna: bool = True,\n ) -> \"DataFrameGroupBy\":\n return cast(\n \"DataFrameGroupBy\", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)\n )\n\n groupby.__doc__ = Frame.groupby.__doc__\n\n def _build_groupby(\n self, by: List[Union[\"Series\", Label]], as_index: bool, dropna: bool\n ) -> \"DataFrameGroupBy\":\n from pyspark.pandas.groupby import DataFrameGroupBy\n\n return DataFrameGroupBy._build(self, by, as_index=as_index, dropna=dropna)\n\n def resample(\n self,\n rule: str,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n on: Optional[\"Series\"] = None,\n ) -> \"DataFrameResampler\":\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time series.\n The object must have a datetime-like index (only support `DatetimeIndex` for now),\n or the caller must pass the label of a datetime-like\n series/index to the ``on`` keyword parameter.\n\n .. versionadded:: 3.4.0\n\n Parameters\n ----------\n rule : str\n The offset string or object representing target conversion.\n Currently, supported units are {'Y', 'A', 'M', 'D', 'H',\n 'T', 'MIN', 'S'}.\n closed : {{'right', 'left'}}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'A', 'Y' and 'M' which all\n have a default of 'right'.\n label : {{'right', 'left'}}, default None\n Which bin edge label to label bucket with. 
The default is 'left'\n for all frequency offsets except for 'A', 'Y' and 'M' which all\n have a default of 'right'.\n on : Series, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n Returns\n -------\n DataFrameResampler\n\n See Also\n --------\n Series.resample : Resample a Series.\n groupby : Group by mapping, function, label, or list of labels.\n \"\"\"\n from pyspark.pandas.indexes import DatetimeIndex\n from pyspark.pandas.resample import DataFrameResampler\n\n if on is None and not isinstance(self.index, DatetimeIndex):\n raise NotImplementedError(\"resample currently works only for DatetimeIndex\")\n if on is not None and not isinstance(as_spark_type(on.dtype), TimestampType):\n raise NotImplementedError(\"resample currently works only for TimestampType\")\n\n agg_columns: List[ps.Series] = []\n for column_label in self._internal.column_labels:\n if isinstance(self._internal.spark_type_for(column_label), (NumericType, BooleanType)):\n agg_columns.append(self._psser_for(column_label))\n\n if len(agg_columns) == 0:\n raise ValueError(\"No available aggregation columns!\")\n\n return DataFrameResampler(\n psdf=self,\n resamplekey=on,\n rule=rule,\n closed=closed,\n label=label,\n agg_columns=agg_columns,\n )\n\n def _to_internal_pandas(self) -> pd.DataFrame:\n \"\"\"\n Return a pandas DataFrame directly from _internal to avoid overhead of copy.\n\n This method is for internal use only.\n \"\"\"\n return self._internal.to_pandas_frame\n\n def _get_or_create_repr_pandas_cache(self, n: int) -> Union[pd.DataFrame, pd.Series]:\n if not hasattr(self, \"_repr_pandas_cache\") or n not in self._repr_pandas_cache:\n object.__setattr__(\n self, \"_repr_pandas_cache\", {n: self.head(n + 1)._to_internal_pandas()}\n )\n return self._repr_pandas_cache[n]\n\n def __repr__(self) -> str:\n max_display_count = get_option(\"display.max_rows\")\n if max_display_count is None:\n return self._to_internal_pandas().to_string()\n\n pdf = cast(\"DataFrame\", self._get_or_create_repr_pandas_cache(max_display_count))\n pdf_length = len(pdf)\n pdf = cast(\"DataFrame\", pdf.iloc[:max_display_count])\n if pdf_length > max_display_count:\n repr_string = pdf.to_string(show_dimensions=True)\n match = REPR_PATTERN.search(repr_string)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n footer = \"\\n\\n[Showing only the first {nrows} rows x {ncols} columns]\".format(\n nrows=nrows, ncols=ncols\n )\n return REPR_PATTERN.sub(footer, repr_string)\n return pdf.to_string()\n\n def _repr_html_(self) -> str:\n max_display_count = get_option(\"display.max_rows\")\n if max_display_count is None:\n return self._to_internal_pandas().to_html(notebook=True)\n\n pdf = self._get_or_create_repr_pandas_cache(max_display_count)\n pdf_length = len(pdf)\n pdf = pdf.iloc[:max_display_count]\n if pdf_length > max_display_count:\n repr_html = pdf.to_html(show_dimensions=True, notebook=True)\n match = REPR_HTML_PATTERN.search(repr_html)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n by = chr(215)\n footer = (\n \"\\n
<p>Showing only the first {rows} rows \"\n                        \"{by} {cols} columns</p>
\\n\".format(rows=nrows, by=by, cols=ncols)\n )\n return REPR_HTML_PATTERN.sub(footer, repr_html)\n return pdf.to_html(notebook=True)\n\n def __getitem__(self, key: Any) -> Any:\n from pyspark.pandas.series import Series\n\n if key is None:\n raise KeyError(\"none key\")\n elif isinstance(key, Series):\n return self.loc[key.astype(bool)]\n elif isinstance(key, slice):\n if any(type(n) == int or None for n in [key.start, key.stop]):\n # Seems like pandas Frame always uses int as positional search when slicing\n # with ints.\n return self.iloc[key]\n return self.loc[key]\n elif is_name_like_value(key):\n return self.loc[:, key]\n elif is_list_like(key):\n return self.loc[:, list(key)]\n raise NotImplementedError(key)\n\n def __setitem__(self, key: Any, value: Any) -> None:\n from pyspark.pandas.series import Series\n\n if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self):\n # Different Series or DataFrames\n level = self._internal.column_labels_level\n key = DataFrame._index_normalized_label(level, key)\n value = DataFrame._index_normalized_frame(level, value)\n\n def assign_columns(\n psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]\n ) -> Iterator[Tuple[\"Series\", Label]]:\n assert len(key) == len(that_column_labels)\n # Note that here intentionally uses `zip_longest` that combine\n # that_columns.\n for k, this_label, that_label in zip_longest(\n key, this_column_labels, that_column_labels\n ):\n yield (psdf._psser_for(that_label), tuple([\"that\", *k]))\n if this_label is not None and this_label[1:] != k:\n yield (psdf._psser_for(this_label), this_label)\n\n psdf = align_diff_frames(assign_columns, self, value, fillna=False, how=\"left\")\n elif isinstance(value, list):\n if len(self) != len(value):\n raise ValueError(\"Length of values does not match length of index\")\n\n # TODO: avoid using default index?\n with option_context(\n \"compute.default_index_type\",\n \"distributed-sequence\",\n \"compute.ops_on_diff_frames\",\n True,\n ):\n psdf = self.reset_index()\n psdf[key] = ps.DataFrame(value)\n psdf = psdf.set_index(psdf.columns[: self._internal.index_level])\n psdf.index.names = self.index.names\n\n elif isinstance(key, list):\n assert isinstance(value, DataFrame)\n # Same DataFrames.\n field_names = value.columns\n psdf = self._assign({k: value[c] for k, c in zip(key, field_names)})\n else:\n # Same Series.\n psdf = self._assign({key: value})\n\n self._update_internal_frame(psdf._internal)\n\n @staticmethod\n def _index_normalized_label(level: int, labels: Union[Name, Sequence[Name]]) -> List[Label]:\n \"\"\"\n Returns a label that is normalized against the current column index level.\n For example, the key \"abc\" can be (\"abc\", \"\", \"\") if the current Frame has\n a multi-index for its column\n \"\"\"\n if is_name_like_tuple(labels):\n labels = [labels]\n elif is_name_like_value(labels):\n labels = [(labels,)]\n else:\n labels = [k if is_name_like_tuple(k) else (k,) for k in labels]\n\n if any(len(label) > level for label in labels):\n raise KeyError(\n \"Key length ({}) exceeds index depth ({})\".format(\n max(len(label) for label in labels), level\n )\n )\n return [tuple(list(label) + ([\"\"] * (level - len(label)))) for label in labels]\n\n @staticmethod\n def _index_normalized_frame(level: int, psser_or_psdf: DataFrameOrSeries) -> \"DataFrame\":\n \"\"\"\n Returns a frame that is normalized against the current column index level.\n For example, the name in `pd.Series([...], name=\"abc\")` can be can be\n (\"abc\", 
\"\", \"\") if the current DataFrame has a multi-index for its column\n \"\"\"\n from pyspark.pandas.series import Series\n\n if isinstance(psser_or_psdf, Series):\n psdf = psser_or_psdf.to_frame()\n else:\n assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf)\n psdf = psser_or_psdf.copy()\n\n psdf.columns = pd.MultiIndex.from_tuples(\n [\n tuple([name_like_string(label)] + ([\"\"] * (level - 1)))\n for label in psdf._internal.column_labels\n ],\n )\n\n return psdf\n\n def __getattr__(self, key: str) -> Any:\n if key.startswith(\"__\"):\n raise AttributeError(key)\n if hasattr(_MissingPandasLikeDataFrame, key):\n property_or_func = getattr(_MissingPandasLikeDataFrame, key)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self)\n else:\n return partial(property_or_func, self)\n\n try:\n return self.loc[:, key]\n except KeyError:\n raise AttributeError(\n \"'%s' object has no attribute '%s'\" % (self.__class__.__name__, key)\n )\n\n def __setattr__(self, key: str, value: Any) -> None:\n try:\n object.__getattribute__(self, key)\n return object.__setattr__(self, key, value)\n except AttributeError:\n pass\n\n if (key,) in self._internal.column_labels:\n self[key] = value\n else:\n msg = \"pandas-on-Spark doesn't allow columns to be created via a new attribute name\"\n if is_testing():\n raise AssertionError(msg)\n else:\n warnings.warn(msg, UserWarning)\n\n def __len__(self) -> int:\n return self._internal.resolved_copy.spark_frame.count()\n\n def __dir__(self) -> Iterable[str]:\n fields = [\n f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if \" \" not in f\n ]\n return list(super().__dir__()) + fields\n\n def __iter__(self) -> Iterator[Name]:\n return iter(self.columns)\n\n # NDArray Compat\n def __array_ufunc__(\n self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any\n ) -> \"DataFrame\":\n # TODO: is it possible to deduplicate it with '_map_series_op'?\n if all(isinstance(inp, DataFrame) for inp in inputs) and any(\n not same_anchor(inp, inputs[0]) for inp in inputs\n ):\n # binary only\n assert len(inputs) == 2\n this = inputs[0]\n that = inputs[1]\n if this._internal.column_labels_level != that._internal.column_labels_level:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n # Different DataFrames\n def apply_op(\n psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]\n ) -> Iterator[Tuple[\"Series\", Label]]:\n for this_label, that_label in zip(this_column_labels, that_column_labels):\n yield (\n ufunc(\n psdf._psser_for(this_label), psdf._psser_for(that_label), **kwargs\n ).rename(this_label),\n this_label,\n )\n\n return align_diff_frames(apply_op, this, that, fillna=True, how=\"full\")\n else:\n # DataFrame and Series\n applied = []\n this = inputs[0]\n assert all(inp is this for inp in inputs if isinstance(inp, DataFrame))\n\n for label in this._internal.column_labels:\n arguments = []\n for inp in inputs:\n arguments.append(inp[label] if isinstance(inp, DataFrame) else inp)\n # both binary and unary.\n applied.append(ufunc(*arguments, **kwargs).rename(label))\n\n internal = this._internal.with_new_columns(applied)\n return DataFrame(internal)\n\n def __class_getitem__(cls, params: Any) -> object:\n # This is a workaround to support variadic generic in DataFrame in Python 3.7.\n # See https://github.com/python/typing/issues/193\n # we always wraps the given type hints by a tuple to mimic the variadic generic.\n return create_tuple_for_frame_type(params)\n\n\ndef 
_reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any:\n \"\"\"\n Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions.\n \"\"\"\n assert isinstance(sdf, SparkDataFrame)\n sdf0 = sdf.agg(*aggs)\n lst = sdf0.limit(2).toPandas()\n assert len(lst) == 1, (sdf, lst)\n row = lst.iloc[0]\n lst2 = list(row)\n assert len(lst2) == len(aggs), (row, lst2)\n return lst2\n\n\nclass CachedDataFrame(DataFrame):\n \"\"\"\n Cached pandas-on-Spark DataFrame, which corresponds to pandas DataFrame logically, but\n internally it caches the corresponding Spark DataFrame.\n \"\"\"\n\n def __init__(self, internal: InternalFrame, storage_level: Optional[StorageLevel] = None):\n if storage_level is None:\n object.__setattr__(self, \"_cached\", internal.spark_frame.cache())\n elif isinstance(storage_level, StorageLevel):\n object.__setattr__(self, \"_cached\", internal.spark_frame.persist(storage_level))\n else:\n raise TypeError(\n \"Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`\"\n )\n super().__init__(internal)\n\n def __enter__(self) -> \"CachedDataFrame\":\n return self\n\n def __exit__(\n self,\n exception_type: Optional[Type[BaseException]],\n exception_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> Optional[bool]:\n self.spark.unpersist()\n return None\n\n # create accessor for Spark related methods.\n spark = CachedAccessor(\"spark\", CachedSparkFrameMethods)\n\n\ndef _test() -> None:\n import os\n import doctest\n import shutil\n import sys\n import tempfile\n import uuid\n from pyspark.sql import SparkSession\n import pyspark.pandas.frame\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.frame.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\").appName(\"pyspark.pandas.frame tests\").getOrCreate()\n )\n\n db_name = \"db%s\" % str(uuid.uuid4()).replace(\"-\", \"\")\n spark.sql(\"CREATE DATABASE %s\" % db_name)\n globs[\"db\"] = db_name\n\n path = tempfile.mkdtemp()\n globs[\"path\"] = path\n\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.frame,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n\n shutil.rmtree(path, ignore_errors=True)\n spark.sql(\"DROP DATABASE IF EXISTS %s CASCADE\" % db_name)\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"}}},{"rowIdx":542636,"cells":{"filename":{"kind":"string","value":"the-stack_106_30946"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 21 23:47:11 2021\n\n@author: bruzewskis\n\"\"\"\n\nfrom dataclasses import dataclass, field\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom typing import Union\n\n@dataclass\nclass Resource:\n '''\n This class theoretically describes a VLA resource. 
\n '''\n name: str\n library: str = field(init=False)\n \n def __post_init__(self):\n default_resources = {'Lx32'}\n if self.name in default_resources:\n self.library = 'NRAO_Default'\n else:\n self.library = 'MyResources'\n\n@dataclass\nclass Source:\n '''\n This class theoretically describes a source to be pointed at\n '''\n \n name: str\n coord: SkyCoord\n resource: Resource\n dur: u.Quantity = 30*u.s\n \n def __str__(self):\n return self.pretty()\n \n def __repr__(self):\n pretty_pos = self.coord.to_string()\n return f''\n \n def pretty(self, depth=1, indent=0):\n pretty_time = self.dur.to_string()\n return '\\t'*indent + f'- {self.name} ({pretty_time})\\n'\n \n def separation(self, other):\n return self.coord.separation(other.coord).deg\n \n@dataclass\nclass Loop:\n '''\n This class theoretically describes a loop which contains sources\n '''\n name: str\n repeat: int\n scans: list[Source]\n \n def __str__(self):\n return self.pretty()\n \n def __repr__(self):\n pretty_scans = [ s.name for s in self.scans ]\n return f''\n \n def pretty(self, depth=1, indent=0):\n out = '\\t'*indent + f'o {self.name} [x{self.repeat}]\\n'\n for item in self.scans:\n if depth>0:\n out += item.pretty(depth-1, indent+1)\n return out\n \n@dataclass\nclass Block:\n '''\n This class theoretically describes a block which contains loops or sources\n '''\n name: str\n start_time: str\n scans: list[Union[Source, Loop]]\n \n def __str__(self):\n return self.pretty()\n \n def __repr__(self):\n pretty_scans = [ s.name for s in self.scans ]\n return f''\n \n def pretty(self, depth : int = 1, indent : int = 0) -> str:\n out = '\\t'*indent + f'> {self.name}\\n'\n for item in self.scans:\n if depth>0:\n out += item.pretty(depth-1, indent+1)\n return out\n \n @classmethod\n def from_file(cls, filename : str, stype : str = 'fits'):\n '''\n This method should read a block from a file. It should be flexible \n enough to handle a csv or fits table with the right columns \n (Name,RA,DEC,Intent,etc...) or just a file right from the OPT. 
Can \n either have the user entry fits/csv/opt or we can guess it\n '''\n return cls()\n \n@dataclass\nclass Project:\n '''\n This class theoretically describes a project which contains blocks\n '''\n name: str = 'Default'\n blocks: list[Block] = field(default_factory=list)\n \n def __str__(self):\n return self.pretty()\n \n def __repr__(self):\n pretty_blocks = [ b.name for b in self.blocks ]\n return f''\n \n def pretty(self, depth=1, indent=0):\n out = '\\t'*indent + self.name + '\\n'\n for item in self.blocks:\n if depth>0:\n out += item.pretty(depth-1, indent+1)\n return out\n \n @classmethod\n def from_xml(cls, filename : str):\n '''\n Not implemented yet, will eventually return a full constructed project\n which one can then edit as they like\n '''\n return cls()\n \n def write(self, filename : str, style : str = 'xml', \n clobber : bool = False) -> bool:\n '''\n Not implemented yet, will eventually write out the file either as XML\n or as all the relevant text files one would need\n '''\n return True\n \n def simulate(self) -> float:\n '''\n Simple implementation of timing, assuming no slew time, just adds up\n the time\n '''\n time = 0\n for block in self.blocks:\n for scan in block.scans:\n if isinstance(scan, Loop):\n loop_time = 0\n for sub_scan in scan.scans:\n loop_time += sub_scan.dur\n time += scan.repeat * loop_time\n else:\n time += scan.dur\n \n return time\n \ndef make_test_project():\n Lx32 = Resource('Lx32')\n \n s1 = Source('Source1', SkyCoord('01h01m01s','01d01\\'01\"'), Lx32, 5*u.min)\n s2 = Source('Source2', SkyCoord('02h02m02s','02d02\\'02\"'), Lx32)\n s3 = Source('Source3', SkyCoord('03h03m03s','03d03\\'03\"'), Lx32)\n l1 = Loop('Loop1', 2, [s2,s3])\n \n b1 = Block('Block1', '00:00:00.00', [s1,l1])\n \n p1 = Project('Project1', [b1,b1])\n print(p1.pretty(3))\n print(p1)\n print(p1.simulate())\n return p1\n\nif __name__=='__main__':\n test_project = make_test_project()"}}},{"rowIdx":542637,"cells":{"filename":{"kind":"string","value":"the-stack_106_30947"},"text":{"kind":"string","value":"# coding: utf-8\nfrom __future__ import division\nimport unicodedata, math, re, sys, string, os, ntpath, numpy as np\nfrom time import gmtime, strftime\nfrom io import open, StringIO\nfrom imp import reload\nfrom difflib import SequenceMatcher\ntry:\n from itertools import izip\nexcept ImportError:\n izip = zip\n\nWORD = re.compile(r'\\w+')\n\ndef getCP(ali, w = 6):\n l = len(ali)\n if l == 0:\n l = 1\n \n result = 0.0\n \n for ali_i in ali:\n s = sum(ali_i)\n \n pen = 1/ (1 + (abs(1 - s))**w)\n \n result += math.log(pen)\n return result / l\n\ndef getEnt(ali):\n l = len(ali)\n if l == 0:\n l = 1\n \n res = 0.0\n \n for pd in ali:\n norm = sum(pd)\n if norm > 0:\n normPd = [p / norm for p in pd]\n entr = -sum([(p * math.log(p) if p else 0) for p in normPd])\n res -= entr\n else:\n res = 0\n \n return res / l\n\ndef getRevEnt(ali, w = 0.1):\n return getEnt(list(zip(*ali)))\n\ndef printHelp():\n print ('process_alignments.py -i [-o ] [-f ] [-s ] [-t ]')\n print ('input_file is the file with alignment weights (required)')\n print ('source_sentence_file and target_sentence_file are required only for NeuralMonkey')\n print ('output_type can be web (default), block, block2 or color')\n print ('from_system can be Nematus, Marian, Sockeye, OpenNMT or NeuralMonkey (default)')\n\ndef printColor(value):\n colors = [\n '\u001b[48;5;232m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;233m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;234m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;235m\u001b[K 
\u001b[m\u001b[K',\n '\u001b[48;5;236m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;237m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;238m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;239m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;240m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;240m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;241m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;242m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;243m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;244m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;245m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;246m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;247m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;248m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;249m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;250m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;251m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;252m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;253m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;254m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;255m\u001b[K \u001b[m\u001b[K',\n '\u001b[48;5;255m\u001b[K \u001b[m\u001b[K',\n ]\n num = int(math.floor((value-0.01)*25))\n if num<0: num = 0\n sys.stdout.write(colors[num])\n\ndef printBlock2(value):\n blocks2 = ['██', '▉▉', '▊▊', '▋▋', '▌▌', '▍▍', '▎▎', '▏▏', ' ',]\n num = int(math.floor((value-0.01)*8))\n if num<0: num = 0\n sys.stdout.write(blocks2[num])\n \ndef printBlock(value):\n blocks = ['██', '▓▓', '▒▒', '░░', ' ',]\n num = int(math.floor((value-0.01)*4))\n if num<0: num = 0\n sys.stdout.write(blocks[num])\n \ndef readSnts(filename):\n with open(filename, 'r', encoding='utf-8') as fh:\n return [escape(line).strip().split() for line in fh]\n \ndef deBPE(srcs, tgts, ali, sources, targets):\n slen = len(sources)\n for i in range(slen):\n if i > len(sources)-1:\n break;\n while len(sources[i]) > 2 and sources[i][-2:] == \"@@\":\n sources[i] = sources[i].replace(\"@@\",\"\") + sources[i+1]\n del sources[i+1]\n slen = len(sources)\n \n #Now sum the alignments\n newLength = ali.shape[1]-1\n result = np.zeros((ali.shape[0],newLength))\n for x in range(newLength):\n if x == i:\n result[:,x] = np.sum(ali[:,x:x+2],axis=1)\n ali = np.delete(ali, x+1, 1)\n else:\n result[:,x] = ali[:,x]\n ali = result\n srcs[-1] = sources\n tlen = len(targets)\n for i in range(tlen):\n if i > len(targets)-1:\n break;\n n = 0\n while len(targets[i]) > 2 and targets[i][-2:] == \"@@\":\n n+=1\n targets[i] = targets[i].replace(\"@@\",\"\") + targets[i+1]\n del targets[i+1]\n tlen = len(targets)\n \n if n>0:\n #Now average the alignments\n newLength = ali.shape[0]-n\n result = np.zeros((newLength, ali.shape[1]))\n for x in range(newLength):\n if x == i:\n result[x,:] = np.average(ali[x:x+n+1,:],axis=0)\n for c in range(x+n, x, -1):\n ali = np.delete(ali, c, 0)\n else:\n result[x,:] = ali[x,:]\n ali = result\n tgts[-1] = targets\n \n return srcs, tgts, ali\n\ndef readNematus(filename, from_system = \"Nematus\", de_bpe = False):\n with open(filename, 'r', encoding='utf-8') as fh:\n alis = []\n tgts = []\n srcs = []\n wasNew = True\n aliTXT = ''\n for line in fh:\n # Reads the first line that contains a translation and it's source sentence\n if wasNew:\n if len(aliTXT) > 0:\n c = StringIO(aliTXT)\n ali = np.loadtxt(c)\n \n # Now we probably have source and target tokens + attentions\n if de_bpe == True:\n # In case we want to combine subword units and the respective attentions (by summing columns and averaging rows)\n sources = escape(lineparts[3]).strip().split()\n targets = escape(lineparts[1]).strip().split()\n (srcs, tgts, ali) = deBPE(srcs, tgts, ali, 
sources, targets)\n \n if from_system == \"Nematus\" or from_system == \"OpenNMT\" or from_system == \"Marian-Dev\":\n ali = ali.transpose()\n alis.append(ali)\n aliTXT = ''\n lineparts = line.split(' ||| ')\n if from_system == \"Nematus\":\n lineparts[1] += ' '\n lineparts[3] += ' '\n tgts.append(escape(lineparts[1]).strip().split())\n srcs.append(escape(lineparts[3]).strip().split())\n wasNew = False\n continue\n # Reads the attention matrix into \"aliTXT\"\n if line != '\\n' and line != '\\r\\n':\n aliTXT += line\n else:\n wasNew = True\n if len(aliTXT) > 0:\n c = StringIO(aliTXT)\n ali = np.loadtxt(c)\n if de_bpe == True:\n # In case we want to combine subword units and the respective attentions (by summing columns and averaging rows)\n sources = escape(lineparts[3]).strip().split()\n targets = escape(lineparts[1]).strip().split()\n (srcs, tgts, ali) = deBPE(srcs, tgts, ali, sources, targets)\n if from_system == \"Nematus\" or from_system == \"Sockeye\" or from_system == \"Marian-Dev\":\n ali = ali.transpose()\n alis.append(ali)\n aliTXT = ''\n return srcs, tgts, alis\n \ndef escape(string):\n return string.replace('\"','&quot;').replace(\"'\",\"&apos;\")\n \ndef readAmu(in_file, src_file):\n with open(src_file, 'r', encoding='utf-8') as fi:\n with open(in_file, 'r', encoding='utf-8') as fh:\n alis = []\n tgts = []\n srcs = []\n aliTXT = ''\n for src_line, out_line in izip(fi, fh):\n lineparts = out_line.split(' ||| ')\n src_line = src_line.strip() + ' '\n tgts.append(escape(lineparts[0]).strip().split())\n srcs.append(escape(src_line).split())\n #alignment weights\n weightparts = lineparts[1].split(' ')\n for weightpart in weightparts:\n aliTXT += weightpart.replace(',',' ') + '\\n'\n if len(aliTXT) > 0:\n c = StringIO(aliTXT)\n ali = np.loadtxt(c)\n ali = ali.transpose()\n alis.append(ali)\n aliTXT = ''\n return srcs, tgts, alis\n \ndef compare(srcs1, srcs2):\n for i in range(0, len(srcs1)):\n if srcs1[i][len(srcs1[i])-1] != '':\n srcs1[i].append('')\n if srcs2[i][len(srcs2[i])-1] != '':\n srcs2[i].append('')\n return srcs1==srcs2\n \ndef synchData(data1,data2):\n addEOS1 = False\n addEOS2 = False\n for i in range(0, len(data1)):\n diff1 = len(data1[i][1]) - len(data2[i][1])\n diff2 = len(data2[i][1]) - len(data1[i][1])\n \n if(diff1 > 0):\n for j in range(0, diff1):\n data2[i][1].append(u'')\n if(diff2 > 0):\n for j in range(0, diff2):\n data1[i][1].append(u'')\n return data1, data2\n \ndef longestCommonSubstring(s1, s2):\n m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]\n longest, x_longest = 0, 0\n for x in range(1, 1 + len(s1)):\n for y in range(1, 1 + len(s2)):\n if s1[x - 1] == s2[y - 1]:\n m[x][y] = m[x - 1][y - 1] + 1\n if m[x][y] > longest:\n longest = m[x][y]\n x_longest = x\n else:\n m[x][y] = 0\n return s1[x_longest - longest: x_longest]\n \ndef processAlignments(data, folder, inputfile, outputType, num, refs=False):\n with open(folder + \"/\" + ntpath.basename(inputfile) + '.ali.js', 'w', encoding='utf-8') as out_a_js:\n with open(folder + \"/\" + ntpath.basename(inputfile) + '.src.js', 'w', encoding='utf-8') as out_s_js:\n with open(folder + \"/\" + ntpath.basename(inputfile) + '.trg.js', 'w', encoding='utf-8') as out_t_js:\n with open(folder + \"/\" + ntpath.basename(inputfile) + '.con.js', 'w', encoding='utf-8') as out_c_js:\n with open(folder + \"/\" + ntpath.basename(inputfile) + '.sc.js', 'w', encoding='utf-8') as out_sc_js:\n out_a_js.write(u'var alignments = [\\n')\n out_s_js.write(u'var sources = [\\n')\n out_t_js.write(u'var targets = [\\n')\n 
out_c_js.write(u'var confidences = [\\n')\n out_sc_js.write(u'var sentence_confidences = [\\n')\n num = int(num) - 1\n if num > -1 and (num < len(data)):\n data = [data[num]]\n elif num >= len(data):\n print ('The selected sentence number is higher than the sentence count!\\n')\n printHelp()\n sys.exit()\n for i in range(0, len(data)):\n (src, tgt, rawAli) = data[i]\n #In case the source string is empty\n if rawAli.ndim == 1:\n rawAli = np.array([rawAli])\n ali = [l[:len(list(filter(None, tgt)))] for l in rawAli[:len(src)]]\n \n srcTotal = []\n trgTotal = []\n tali = np.array(ali).transpose()\n for a in range(0, len(ali)):\n srcTotal.append(str(math.pow(math.e, -0.05 * math.pow((getCP([ali[a]]) + getEnt([ali[a]]) + getRevEnt([ali[a]])), 2))))\n for a in range(0, len(tali)):\n trgTotal.append(str(math.pow(math.e, -0.05 * math.pow((getCP([tali[a]]) + getEnt([tali[a]]) + getRevEnt([tali[a]])), 2))))\n \n JoinedSource = \" \".join(src)\n JoinedTarget = \" \".join(tgt)\n StrippedSource = ''.join(c for c in JoinedSource if unicodedata.category(c).startswith('L')).replace('EOS','').replace('quot','').replace('apos','')\n StrippedTarget = ''.join(c for c in JoinedTarget if unicodedata.category(c).startswith('L')).replace('EOS','').replace('quot','').replace('apos','')\n \n #Get the confidence metrics\n CDP = round(getCP(ali), 10)\n APout = round(getEnt(ali), 10)\n APin = round(getRevEnt(ali), 10)\n Total = round(CDP + APout + APin, 10)\n \n #Can we calculate BLEU?\n bleuNumber = -1\n if(refs):\n try:\n #NLTK requires Python versions 3.5, 3.6, 3.7, or 3.8\n version = sys.version_info\n if version.major == 3 and version.minor > 4:\n from nltk.translate import bleu\n from nltk.translate.bleu_score import SmoothingFunction\n sm = SmoothingFunction()\n refNumber = i if num < 0 else num\n deBpeRef = \" \".join(refs[refNumber]).replace('@@ ','')\n deBpeHyp = JoinedTarget.replace('@@ ','').replace('','').strip()\n bleuNumber = round(bleu([deBpeRef.split()], deBpeHyp.split(), smoothing_function=sm.method3)*100, 2)\n bleuScore = u', ' + repr(bleuNumber)\n else:\n refs = False\n bleuScore = u''\n except ImportError:\n sys.stdout.write('NLTK not found! 
BLEU will not be calculated\\n')\n refs = False\n bleuScore = u''\n else:\n bleuScore = u''\n \n jls = JoinedSource.replace('@@ ','').replace('','').replace('&quot;','\"').replace(\"&apos;\",\"'\").replace(\"&amp;\",\"&\").replace(\"@-@\",\"-\").strip()\n jlt = JoinedTarget.replace('@@ ','').replace('','').replace('&quot;','\"').replace(\"&apos;\",\"'\").replace(\"&amp;\",\"&\").replace(\"@-@\",\"-\").strip()\n longest = longestCommonSubstring(jls, jlt).strip()\n similarity = len(longest)/len(jlt)\n \n #Penalize sentences with more than 4 tokens\n if (len(tgt) > 4) and (similarity > 0.3):\n #The more similar, the higher penalty\n #It's worse to have more words with a higher similarity\n #Let's make it between 0.7 and about 1.5 for veeeery long sentences\n multiplier = ((0.8+(len(tgt)*0.01)) * (3-((1-similarity)*5)) * (0.7 + similarity) * math.tan(similarity))\n Total = round(CDP + APout + APin - multiplier, 10)\n \n # e^(-1(x^2))\n CDP_pr = round(math.pow(math.e, -1 * math.pow(CDP, 2)) * 100, 2)\n # e^(-0.05(x^2))\n APout_pr = round(math.pow(math.e, -0.05 * math.pow(APout, 2)) * 100, 2)\n APin_pr = round(math.pow(math.e, -0.05 * math.pow(APin, 2)) * 100, 2)\n Total_pr = round(math.pow(math.e, -0.05 * math.pow(Total, 2)) * 100, 2)\n # 1-e^(-0.0001(x^2))\n Len = round((1-math.pow(math.e, -0.0001 * math.pow(len(JoinedSource), 2))) * 100, 2)\n \n \n out_s_js.write('[\"'+ JoinedSource.replace(' ','\", \"') +'\"], \\n')\n out_t_js.write('[\"'+ JoinedTarget.replace(' ','\", \"') +'\"], \\n')\n out_c_js.write(u'['+ repr(CDP_pr) + u', '+ repr(APout_pr) + u', '+ repr(APin_pr) + u', '+ repr(Total_pr) \n + u', '+ repr(Len) + u', '+ repr(len(JoinedSource)) + u', '\n + repr(round(similarity*100, 2)) \n + bleuScore \n + u'], \\n')\n out_sc_js.write(u'[[' + \", \".join(srcTotal) + u'], ' + u'[' + \", \".join(trgTotal) + u'], ' + u'], \\n')\n \n word = 0\n out_a_js.write(u'[')\n for ali_i in ali:\n linePartC=0\n for ali_j in ali_i:\n # Maybe worth playing around with this for transformer (and convolutional) NMT output\n # if ali_j < 0.15:\n # ali_j = 0\n out_a_js.write(u'['+repr(word)+u', ' + str(np.round(ali_j, 8)) + u', '+repr(linePartC)+u'], ')\n linePartC+=1\n if outputType == 'color':\n printColor(ali_j)\n elif outputType == 'block':\n printBlock(ali_j)\n elif outputType == 'block2':\n printBlock2(ali_j)\n if outputType != 'web' and outputType != 'compare':\n sys.stdout.write(src[word].encode('utf-8', errors='replace').decode('utf-8'))\n word+=1\n if outputType != 'web' and outputType != 'compare':\n sys.stdout.write('\\n')\n \n # write target sentences\n #build 2d array\n occupied_to = []\n outchars = []\n outchars.append([])\n tw = 0\n for tword in tgt:\n columns = len(tgt)\n # Some characters use multiple symbols. 
Need to decode and then encode...\n twchars = list(tword)\n twlen = len(twchars)\n xpos = tw * 2\n emptyline = 0\n \n for el in range(0, len(occupied_to)):\n # if occupied, move to a new line!\n if occupied_to[el] < xpos:\n emptyline = el\n if len(outchars) < emptyline+1:\n # add a new row\n outchars.append([])\n break\n if el == len(occupied_to)-1:\n emptyline=el+1\n if len(outchars) < emptyline+1:\n outchars.append([])\n \n for column in range(0, xpos):\n if len(outchars[emptyline]) <= column:\n outchars[emptyline].append(' ')\n\n for charindex in range(0, twlen):\n if xpos+charindex == len(outchars[emptyline]):\n outchars[emptyline].append(twchars[charindex])\n else:\n outchars[emptyline][charindex] = twchars[charindex]\n \n if len(occupied_to) <= emptyline:\n occupied_to.append(xpos+twlen+1)\n else:\n occupied_to[emptyline]=xpos+twlen+1;\n tw+=1\n\n #print 2d array\n if outputType != 'web' and outputType != 'compare':\n for liline in outchars:\n sys.stdout.write(''.join(liline).encode('utf-8', errors='replace').decode('utf-8') + '\\n')\n # print scores\n sys.stdout.write('\\nCoverage Deviation Penalty: \\t\\t' + repr(round(CDP, 8)) + ' (' + repr(CDP_pr) + '%)' + '\\n')\n sys.stdout.write('Input Absentmindedness Penalty: \\t' + repr(round(APin, 8)) + ' (' + repr(APin_pr) + '%)' + '\\n')\n sys.stdout.write('Output Absentmindedness Penalty: \\t' + repr(round(APout, 8)) + ' (' + repr(APout_pr) + '%)' + '\\n')\n sys.stdout.write('Confidence: \\t\\t\\t\\t' + repr(round(Total, 8)) + ' (' + repr(Total_pr) + '%)' + '\\n')\n sys.stdout.write('Similarity: \\t\\t\\t\\t' + repr(round(similarity*100, 2)) + '%' + '\\n')\n if bleuNumber > -1:\n sys.stdout.write('BLEU: \\t\\t\\t\\t\\t' + repr(bleuNumber) + '\\n')\n \n # write target sentences\n word = 0\n out_a_js.write(u'], \\n')\n if outputType != 'web' and outputType != 'compare':\n sys.stdout.write('\\n')\n out_a_js.write(u'\\n]')\n out_s_js.write(u']')\n out_t_js.write(u']')\n out_c_js.write(u']')\n out_sc_js.write(u']')"}}},{"rowIdx":542638,"cells":{"filename":{"kind":"string","value":"the-stack_106_30948"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n# Copyright (c) 2016-2017 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test various net timeouts.\n\n- Create three sparkd nodes:\n\n no_verack_node - we never send a verack in response to their version\n no_version_node - we never send a version (only a ping)\n no_send_node - we never send any P2P message.\n\n- Start all three nodes\n- Wait 1 second\n- Assert that we're connected\n- Send a ping to no_verack_node and no_version_node\n- Wait 30 seconds\n- Assert that we're still connected\n- Send a ping to no_verack_node and no_version_node\n- Wait 31 seconds\n- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)\n\"\"\"\n\nfrom time import sleep\n\nfrom test_framework.messages import msg_ping\nfrom test_framework.mininode import P2PInterface\nfrom test_framework.test_framework import SparkTestFramework\n\nclass TestP2PConn(P2PInterface):\n def on_version(self, message):\n # Don't send a verack in response\n pass\n\nclass TimeoutsTest(SparkTestFramework):\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 1\n\n def run_test(self):\n # Setup the p2p connections\n no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn())\n no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), 
send_version=False)\n no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False)\n\n sleep(1)\n\n assert no_verack_node.is_connected\n assert no_version_node.is_connected\n assert no_send_node.is_connected\n\n no_verack_node.send_message(msg_ping())\n no_version_node.send_message(msg_ping())\n\n sleep(30)\n\n assert \"version\" in no_verack_node.last_message\n\n assert no_verack_node.is_connected\n assert no_version_node.is_connected\n assert no_send_node.is_connected\n\n no_verack_node.send_message(msg_ping())\n no_version_node.send_message(msg_ping())\n\n sleep(31)\n\n assert not no_verack_node.is_connected\n assert not no_version_node.is_connected\n assert not no_send_node.is_connected\n\nif __name__ == '__main__':\n TimeoutsTest().main()\n"}}},{"rowIdx":542639,"cells":{"filename":{"kind":"string","value":"the-stack_106_30950"},"text":{"kind":"string","value":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module is to support text processing for nlp. It includes two parts:\ntransforms and utils. transforms is a high performance\nnlp text processing module which is developed with icu4c and cppjieba.\nutils provides some general methods for nlp text processing.\n\"\"\"\nimport platform\nfrom .transforms import Lookup, JiebaTokenizer, UnicodeCharTokenizer, Ngram, WordpieceTokenizer, TruncateSequencePair, \\\n ToNumber\nfrom .utils import to_str, to_bytes, JiebaMode, Vocab, NormalizeForm\n\n__all__ = [\n \"Lookup\", \"JiebaTokenizer\", \"UnicodeCharTokenizer\", \"Ngram\",\n \"to_str\", \"to_bytes\", \"Vocab\", \"WordpieceTokenizer\", \"TruncateSequencePair\", \"ToNumber\",\n \"PythonTokenizer\"\n]\n\nif platform.system().lower() != 'windows':\n from .transforms import UnicodeScriptTokenizer, WhitespaceTokenizer, CaseFold, NormalizeUTF8, \\\n RegexReplace, RegexTokenizer, BasicTokenizer, BertTokenizer, PythonTokenizer\n\n __all__.append([\"UnicodeScriptTokenizer\", \"WhitespaceTokenizer\", \"CaseFold\", \"NormalizeUTF8\",\n \"RegexReplace\", \"RegexTokenizer\", \"BasicTokenizer\", \"BertTokenizer\"])\n"}}},{"rowIdx":542640,"cells":{"filename":{"kind":"string","value":"the-stack_106_30951"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFunctions for converting RCNN-derived zircon segmentation masks to polygons\nviewable and editable in GUI and vice-versa.\n\"\"\"\n\nimport numpy as np\nfrom skimage import draw\nimport skimage.measure as measure\n\n__all__ = ['mask_to_poly',\n 'poly_to_mask',\n 'vertex_dict_to_list',\n 'poly_dicts_to_arrays']\n\n# code for fxn below significantly modified from: \\\n# https://github.com/waspinator/pycococreator (covered by Apache-2.0 License)\ndef mask_to_poly(mask_for_conversion, tolerance = 1, scale_factor = 1.0):\n \"\"\"Convert a numpy mask array to polygon suitable for GUI display, editing.\n\n Parameters\n ----------\n mask_for_conversion : np array\n A numpy binary array representing the central zircon mask for an image,\n as returned by 
(successfully) running mos_proc.get_central_mask().\n tolerance : Int, optional\n Tolerance in microns for polygon converted from input mask; resulting\n polygon will approximate the mask within *tolerance* microns.\n The default is 1.\n scale_factor : float, optional\n Scale factor for the current mosaic image. Used to adjust polygon\n tolerance to microns. The default is 1.0.\n\n Returns\n -------\n export_polygon\n An ordered list of dicts {x:, y:} representing vertices in a polygon.\n Point coordinates are x = x/image width, y = y/image height.\n Suitable for display/editing in manual adjustment/annotation GUI.\n\n \"\"\"\n #print('Input shape:', mask_for_conversion.shape)\n\n #closes contour\n def close_contour(contour):\n if not np.array_equal(contour[0], contour[-1]):\n contour = np.vstack((contour, contour[0]))\n return contour\n\n export_polygon = []\n\n full_mask_h, full_mask_w = mask_for_conversion.shape #size of original mask\n\n #adjust tolerance to image size so that polygons are consistent during processing\n adj_tolerance = tolerance / scale_factor\n\n # padding of mask is apparently necessary for contour closure. /\n # This line also converts mask to binary.\n padded_mask = np.pad(mask_for_conversion.astype(int), pad_width = 1,\n mode='constant', constant_values = 0)\n\n mask_labels, labels_nnum = measure.label(padded_mask, return_num=True)\n\n main_region_label = 1\n\n if labels_nnum > 1:\n #selects largest region in case central zircon mask has multiple disconnected regions\n regions = measure.regionprops(mask_labels)\n area_list = [props.area for props in regions]\n main_region_label = regions[area_list.index(max(area_list))].label\n\n #gets contours of mask\n mask_contours = measure.find_contours(mask_labels == main_region_label, 0.5)[0]\n\n mask_contours = np.subtract(mask_contours, 1)\n mask_contours = close_contour(mask_contours)\n poly_pts = measure.approximate_polygon(mask_contours, adj_tolerance) #converts contours to mask\n\n #flip ensures that polygons load properly (rather than mirrored) in GUI\n poly_pts = np.flip(poly_pts, axis=1)\n\n #converts to list of {x:, y:} dicts for JS annotation tool\n for each_pt in poly_pts:\n pt_dict = {'x': 0.0, 'y': 0.0}\n\n if each_pt[0] >= 0:\n pt_dict['x'] = round(each_pt[0]/full_mask_w, 3)\n\n if each_pt[1] >= 0:\n pt_dict['y'] = round(each_pt[1]/full_mask_h, 3)\n export_polygon.append(pt_dict)\n\n return export_polygon\n\n\ndef poly_to_mask(poly_for_conversion, original_image):\n \"\"\"Converts polygons exported by JS annotation tool to masks for automated measurement.\n\n Parameters\n ----------\n poly_for_conversion : list of np 2d arrays\n An ordered list of arrays [x, y] representing vertices in a polygon.\n original_image : np array\n Numpy array representing the original image from which polygon was derived.\n\n Returns\n -------\n success_bool : Boolean\n Boolean indicating whether the polygon was successfully converted.\n Will be False if input polygon didn't exist, had under three points, or\n had no area.\n mask_output : np array or list\n If conversion successful, a numpy binary array representing the input\n polygon. 
Otherwise, an empty list.\n\n \"\"\"\n\n success_bool = False\n if poly_for_conversion is None:\n return(success_bool, [])\n #polygon must have at least 3 points to have any area\n if np.shape(poly_for_conversion)[0] < 3:\n return(success_bool, [])\n\n poly_pts = np.clip(poly_for_conversion, 0, 1)\n\n original_image_shape = original_image.shape[:2]\n\n rescaled_poly = poly_pts * np.asarray(original_image_shape)\n\n mask_output = draw.polygon2mask(original_image_shape, rescaled_poly)\n\n #if polygon has no area, do not send it for measurements!\n if len(np.column_stack(np.where(mask_output > 0))) < 10:\n return(success_bool, [])\n success_bool = True\n\n return success_bool, mask_output\n\ndef vertex_dict_to_list(input_poly):\n \"\"\"Convert polygon vertices from {x:, y:} to [x, y].\n\n Parameters\n ----------\n input_poly : dict\n Dict with position of x, y polygon vertex {x:, y:}.\n\n Returns\n -------\n Type: any\n X coordinate of vertex.\n Type: any\n Y coordinate of vertex.\n\n \"\"\"\n\n return (input_poly['y'], input_poly['x'])\n\ndef poly_dicts_to_arrays(input_list):\n \"\"\"Convert a list of lists of dicts {x:, y:} with polygon vertices to a list\n of arrays for same vertices.\n\n Parameters\n ----------\n input_list : list of lists of dicts\n List of lists (1 per polygon, 1 polygon per image) of dicts containing\n polygon vertex locations.\n\n Returns\n -------\n arr_list : list[arr]\n List of np arrays representing polygon vertices (1 per image).\n\n \"\"\"\n arr_list = []\n for vertices_per_img in input_list:\n poly_as_array = [vertex_dict_to_list(vertex)\n for vertex in vertices_per_img]\n if poly_as_array:\n arr_list.append(np.stack(poly_as_array))\n else:\n arr_list.append(None)\n return arr_list\n"}}},{"rowIdx":542641,"cells":{"filename":{"kind":"string","value":"the-stack_106_30952"},"text":{"kind":"string","value":"#/usr/bin/env python\nimport io\nimport re\nfrom setuptools import setup, find_packages\nimport sys\n\n\nif sys.version_info[:3] < (3, 4):\n raise SystemExit(\"Toga requires Python 3.4+.\")\n\n\nwith io.open('src/core/toga/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M)\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\n\nwith io.open('src/android/README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\n\nsetup(\n name='toga-android',\n version=version,\n description='An Android backend for the Toga widget toolkit.',\n long_description=long_description,\n author='Russell Keith-Magee',\n author_email='russell@keith-magee.com',\n url='http://pybee.org/toga',\n packages=find_packages('src/android'),\n package_dir={'': 'src/android'},\n install_requires=[\n 'toga-core>=%s' % version,\n ],\n license='New BSD',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Environment :: Handhelds/PDA\\'s',\n 'Operating System :: Android',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: User Interfaces',\n 'Topic :: Software Development :: Widget Sets',\n ],\n 
test_suite='tests',\n)\n"}}},{"rowIdx":542642,"cells":{"filename":{"kind":"string","value":"the-stack_106_30953"},"text":{"kind":"string","value":"from graphnet.data.extractors.i3extractor import I3Extractor\nfrom graphnet.data.extractors.utilities import (\n frame_is_montecarlo,\n frame_is_noise,\n)\n\n\nclass I3RetroExtractor(I3Extractor):\n def __init__(self, name=\"retro\"):\n super().__init__(name)\n\n def __call__(self, frame) -> dict:\n \"\"\"Extracts RETRO reco. and associated quantities if available.\"\"\"\n output = {}\n\n if self._frame_contains_retro(frame):\n output.update(\n {\n \"azimuth_retro\": frame[\"L7_reconstructed_azimuth\"].value,\n \"time_retro\": frame[\"L7_reconstructed_time\"].value,\n \"energy_retro\": frame[\n \"L7_reconstructed_total_energy\"\n ].value,\n \"position_x_retro\": frame[\n \"L7_reconstructed_vertex_x\"\n ].value,\n \"position_y_retro\": frame[\n \"L7_reconstructed_vertex_y\"\n ].value,\n \"position_z_retro\": frame[\n \"L7_reconstructed_vertex_z\"\n ].value,\n \"zenith_retro\": frame[\"L7_reconstructed_zenith\"].value,\n \"azimuth_sigma\": frame[\n \"L7_retro_crs_prefit__azimuth_sigma_tot\"\n ].value,\n \"position_x_sigma\": frame[\n \"L7_retro_crs_prefit__x_sigma_tot\"\n ].value,\n \"position_y_sigma\": frame[\n \"L7_retro_crs_prefit__y_sigma_tot\"\n ].value,\n \"position_z_sigma\": frame[\n \"L7_retro_crs_prefit__z_sigma_tot\"\n ].value,\n \"time_sigma\": frame[\n \"L7_retro_crs_prefit__time_sigma_tot\"\n ].value,\n \"zenith_sigma\": frame[\n \"L7_retro_crs_prefit__zenith_sigma_tot\"\n ].value,\n \"energy_sigma\": frame[\n \"L7_retro_crs_prefit__energy_sigma_tot\"\n ].value,\n \"cascade_energy_retro\": frame[\n \"L7_reconstructed_cascade_energy\"\n ].value,\n \"track_energy_retro\": frame[\n \"L7_reconstructed_track_energy\"\n ].value,\n \"track_length_retro\": frame[\n \"L7_reconstructed_track_length\"\n ].value,\n }\n )\n\n if self._frame_contains_classifiers(frame):\n classifiers = [\n \"L7_MuonClassifier_FullSky_ProbNu\",\n \"L4_MuonClassifier_Data_ProbNu\",\n \"L4_NoiseClassifier_ProbNu\",\n \"L7_PIDClassifier_FullSky_ProbTrack\",\n ]\n for classifier in classifiers:\n if classifier in frame:\n output.update({classifier: frame[classifier].value})\n\n if frame_is_montecarlo(frame):\n if frame_is_noise(frame):\n output.update(\n {\n \"osc_weight\": frame[\"noise_weight\"][\"weight\"],\n }\n )\n else:\n output[\"osc_weight\"] = self._try_get_key(\n frame[\"I3MCWeightDict\"], \"weight\", default_value=-1\n )\n\n return output\n\n def _frame_contains_retro(self, frame):\n return \"L7_reconstructed_zenith\" in frame\n\n def _frame_contains_classifiers(self, frame):\n return \"L4_MuonClassifier_Data_ProbNu\" in frame\n\n def _try_get_key(self, frame, key, default_value=-1):\n \"\"\"Return `key` in `frame` if it exists; otherwise return `default_value.\"\"\"\n try:\n return frame[key]\n except KeyError:\n return default_value\n"}}},{"rowIdx":542643,"cells":{"filename":{"kind":"string","value":"the-stack_106_30954"},"text":{"kind":"string","value":"from __future__ import annotations\n\nimport asyncio\nimport functools\nimport os\nimport random\nimport shutil\nimport signal\nfrom typing import Any, Dict, List, Optional\n\nimport backoff\nimport colorama\nimport devtools\nimport httpx\nimport pydantic\nimport pyfiglet\nimport typer\n\nimport servo\nimport servo.api\nimport servo.configuration\nimport servo.utilities.key_paths\nimport servo.utilities.strings\nfrom servo.servo import _set_current_servo\nfrom servo.types import Adjustment, Control, 
Description, Duration, Measurement\n\n\nclass ServoRunner(pydantic.BaseModel, servo.logging.Mixin, servo.api.Mixin):\n interactive: bool = False\n _servo: servo.Servo = pydantic.PrivateAttr(None)\n _connected: bool = pydantic.PrivateAttr(False)\n _running: bool = pydantic.PrivateAttr(False)\n _main_loop_task: Optional[asyncio.Task] = pydantic.PrivateAttr(None)\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, servo_: servo, **kwargs) -> None: # noqa: D10\n super().__init__(**kwargs)\n self._servo = servo_\n\n # initialize default servo options if not configured\n if self.config.settings is None:\n self.config.settings = servo.CommonConfiguration()\n\n @property\n def servo(self) -> servo.Servo:\n return self._servo\n\n @property\n def running(self) -> bool:\n return self._running\n\n @property\n def connected(self) -> bool:\n return self._connected\n\n @property\n def optimizer(self) -> servo.Optimizer:\n return self.servo.optimizer\n\n @property\n def config(self) -> servo.BaseServoConfiguration:\n return self.servo.config\n\n @property\n def api_client_options(self) -> Dict[str, Any]:\n # Adopt the servo config for driving the API mixin\n return self.servo.api_client_options\n\n async def describe(self, control: Control) -> Description:\n self.logger.info(\"Describing...\")\n\n aggregate_description = Description.construct()\n results: List[servo.EventResult] = await self.servo.dispatch_event(servo.Events.describe, control=control)\n for result in results:\n description = result.value\n aggregate_description.components.extend(description.components)\n aggregate_description.metrics.extend(description.metrics)\n\n return aggregate_description\n\n async def measure(self, param: servo.MeasureParams) -> Measurement:\n if isinstance(param, dict):\n # required parsing has failed in api.Mixin._post_event(), run parse_obj to surface the validation errors\n servo.api.MeasureParams.parse_obj(param)\n servo.logger.info(f\"Measuring... [metrics={', '.join(param.metrics)}]\")\n servo.logger.trace(devtools.pformat(param))\n\n aggregate_measurement = Measurement.construct()\n results: List[servo.EventResult] = await self.servo.dispatch_event(\n servo.Events.measure, metrics=param.metrics, control=param.control\n )\n for result in results:\n measurement = result.value\n aggregate_measurement.readings.extend(measurement.readings)\n aggregate_measurement.annotations.update(measurement.annotations)\n\n return aggregate_measurement\n\n async def adjust(\n self, adjustments: List[Adjustment], control: Control\n ) -> Description:\n summary = f\"[{', '.join(list(map(str, adjustments)))}]\"\n self.logger.info(f\"Adjusting... 
{summary}\")\n self.logger.trace(devtools.pformat(adjustments))\n self.logger.trace(devtools.pformat(control))\n\n aggregate_description = Description.construct()\n results = await self.servo.dispatch_event(\n servo.Events.adjust, adjustments=adjustments, control=control\n )\n for result in results:\n description = result.value\n aggregate_description.components.extend(description.components)\n aggregate_description.metrics.extend(description.metrics)\n\n self.logger.success(f\"Adjustment completed {summary}\")\n return aggregate_description\n\n @backoff.on_exception(\n backoff.expo,\n (httpx.HTTPError, pydantic.ValidationError),\n max_time=lambda: servo.current_servo().config.settings.backoff.max_time(),\n max_tries=lambda: servo.current_servo().config.settings.backoff.max_tries(),\n )\n async def exec_command(self) -> servo.api.Status:\n cmd_response = await self._post_event(servo.api.Events.whats_next, None)\n self.logger.info(f\"What's Next? => {cmd_response.command}\")\n self.logger.trace(devtools.pformat(cmd_response))\n\n if cmd_response.command == servo.api.Commands.describe:\n description = await self.describe(Control(**cmd_response.param.get(\"control\", {})))\n self.logger.success(\n f\"Described: {len(description.components)} components, {len(description.metrics)} metrics\"\n )\n self.logger.debug(devtools.pformat(description))\n\n status = servo.api.Status.ok(descriptor=description.__opsani_repr__())\n return await self._post_event(servo.api.Events.describe, status.dict())\n\n elif cmd_response.command == servo.api.Commands.measure:\n try:\n measurement = await self.measure(cmd_response.param)\n self.logger.success(\n f\"Measured: {len(measurement.readings)} readings, {len(measurement.annotations)} annotations\"\n )\n self.logger.trace(devtools.pformat(measurement))\n param = measurement.__opsani_repr__()\n except servo.errors.EventError as error:\n self.logger.error(f\"Measurement failed: {error}\")\n param = servo.api.Status.from_error(error).dict()\n self.logger.error(f\"Responding with {param}\")\n self.logger.opt(exception=error).debug(\"Measure failure details\")\n\n return await self._post_event(servo.api.Events.measure, param)\n\n elif cmd_response.command == servo.api.Commands.adjust:\n adjustments = servo.api.descriptor_to_adjustments(cmd_response.param[\"state\"])\n control = Control(**cmd_response.param.get(\"control\", {}))\n\n try:\n description = await self.adjust(adjustments, control)\n status = servo.api.Status.ok(state=description.__opsani_repr__())\n\n components_count = len(description.components)\n settings_count = sum(\n len(component.settings) for component in description.components\n )\n self.logger.success(\n f\"Adjusted: {components_count} components, {settings_count} settings\"\n )\n except servo.EventError as error:\n self.logger.error(f\"Adjustment failed: {error}\")\n status = servo.api.Status.from_error(error)\n self.logger.error(f\"Responding with {status.dict()}\")\n self.logger.opt(exception=error).debug(\"Adjust failure details\")\n\n return await self._post_event(servo.api.Events.adjust, status.dict())\n\n elif cmd_response.command == servo.api.Commands.sleep:\n # TODO: Model this\n duration = Duration(cmd_response.param.get(\"duration\", 120))\n status = servo.utilities.key_paths.value_for_key_path(cmd_response.param, \"data.status\", None)\n reason = servo.utilities.key_paths.value_for_key_path(\n cmd_response.param, \"data.reason\", \"unknown reason\"\n )\n msg = f\"{status}: {reason}\" if status else f\"{reason}\"\n 
self.logger.info(f\"Sleeping for {duration} ({msg}).\")\n await asyncio.sleep(duration.total_seconds())\n\n # Return a status so we have a simple API contract\n return servo.api.Status(status=\"ok\", message=msg)\n else:\n raise ValueError(f\"Unknown command '{cmd_response.command.value}'\")\n\n # Main run loop for processing commands from the optimizer\n async def main_loop(self) -> None:\n # FIXME: We have seen exceptions from using `with self.servo.current()` crossing contexts\n _set_current_servo(self.servo)\n\n while self._running:\n try:\n if self.interactive:\n if not typer.confirm(\"Poll for next command?\"):\n typer.echo(\"Sleeping for 1m\")\n await asyncio.sleep(60)\n continue\n\n status = await self.exec_command()\n if status.status == servo.api.OptimizerStatuses.unexpected_event:\n self.logger.warning(\n f\"server reported unexpected event: {status.reason}\"\n )\n\n except (httpx.TimeoutException, httpx.HTTPStatusError) as error:\n self.logger.warning(f\"command execution failed HTTP client error: {error}\")\n\n except pydantic.ValidationError as error:\n self.logger.warning(f\"command execution failed with model validation error: {error}\")\n self.logger.opt(exception=error).debug(\"Pydantic model failed validation\")\n\n except Exception as error:\n self.logger.exception(f\"failed with unrecoverable error: {error}\")\n raise error\n\n def run_main_loop(self) -> None:\n if self._main_loop_task:\n self._main_loop_task.cancel()\n\n def _reraise_if_necessary(task: asyncio.Task) -> None:\n try:\n if not task.cancelled():\n task.result()\n except Exception as error: # pylint: disable=broad-except\n self.logger.error(f\"Exiting from servo main loop do to error: {error} (task={task})\")\n self.logger.opt(exception=error).trace(f\"Exception raised by task {task}\")\n raise error # Ensure that we surface the error for handling\n\n self._main_loop_task = asyncio.create_task(self.main_loop(), name=f\"main loop for servo {self.optimizer.id}\")\n self._main_loop_task.add_done_callback(_reraise_if_necessary)\n\n async def run(self, *, poll: bool = True) -> None:\n self._running = True\n\n _set_current_servo(self.servo)\n await self.servo.startup()\n self.logger.info(\n f\"Servo started with {len(self.servo.connectors)} active connectors [{self.optimizer.id} @ {self.optimizer.url or self.optimizer.base_url}]\"\n )\n\n async def giveup(details) -> None:\n loop = asyncio.get_event_loop()\n self.logger.critical(\"retries exhausted, giving up\")\n asyncio.create_task(self.shutdown(loop))\n\n try:\n @backoff.on_exception(\n backoff.expo,\n httpx.HTTPError,\n max_time=lambda: self.config.settings.backoff.max_time(),\n max_tries=lambda: self.config.settings.backoff.max_tries(),\n on_giveup=giveup,\n )\n async def connect() -> None:\n self.logger.info(\"Saying HELLO.\", end=\" \")\n await self._post_event(servo.api.Events.hello, dict(\n agent=servo.api.user_agent(),\n telemetry=self.servo.telemetry.values\n ))\n self._connected = True\n\n\n self.logger.info(f\"Connecting to Opsani Optimizer @ {self.optimizer.url}...\")\n if self.interactive:\n typer.confirm(\"Connect to the optimizer?\", abort=True)\n\n await connect()\n except typer.Abort:\n # Rescue abort and notify user\n servo.logger.warning(\"Operation aborted. 
Use Control-C to exit\")\n except asyncio.CancelledError as error:\n self.logger.trace(\"task cancelled, aborting servo runner\")\n raise error\n except:\n self.logger.exception(\"exception encountered during connect\")\n\n if poll:\n self.run_main_loop()\n else:\n self.logger.warning(f\"Servo runner initialized with polling disabled -- command loop is not running\")\n\n async def shutdown(self, *, reason: Optional[str] = None) -> None:\n \"\"\"Shutdown the running servo.\"\"\"\n try:\n self._running = False\n if self.connected:\n await self._post_event(servo.api.Events.goodbye, dict(reason=reason))\n except Exception:\n self.logger.exception(f\"Exception occurred during GOODBYE request\")\n\nclass AssemblyRunner(pydantic.BaseModel, servo.logging.Mixin):\n assembly: servo.Assembly\n runners: List[ServoRunner] = []\n progress_handler: Optional[servo.logging.ProgressHandler] = None\n progress_handler_id: Optional[int] = None\n _running: bool = pydantic.PrivateAttr(False)\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, assembly: servo.Assembly, **kwargs) -> None:\n super().__init__(assembly=assembly, **kwargs)\n\n def _runner_for_servo(self, servo: servo.Servo) -> ServoRunner:\n for runner in self.runners:\n if runner.servo == servo:\n return runner\n\n raise KeyError(f\"no runner was found for the servo: \\\"{servo}\\\"\")\n\n @property\n def running(self) -> bool:\n return self._running\n\n def run(self, *, poll: bool = True, interactive: bool = False) -> None:\n \"\"\"Asynchronously run all servos active within the assembly.\n\n Running the assembly takes over the current event loop and schedules a `ServoRunner` instance for each servo active in the assembly.\n \"\"\"\n if self.running:\n raise RuntimeError(\"Cannot run an assembly that is already running\")\n\n self._running = True\n loop = asyncio.get_event_loop()\n\n # Setup signal handling\n signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT, signal.SIGUSR1)\n for s in signals:\n loop.add_signal_handler(\n s, lambda s=s: asyncio.create_task(self._shutdown(loop, signal=s))\n )\n\n loop.set_exception_handler(self._handle_exception)\n\n # Setup logging\n async def _report_progress(**kwargs) -> None:\n # Forward to the active servo...\n if servo_ := servo.current_servo():\n await servo_.report_progress(**kwargs)\n else:\n self.logger.warning(\n f\"failed progress reporting -- no current servo context is established (kwargs={devtools.pformat(kwargs)})\"\n )\n\n async def handle_progress_exception(progress: Dict[str, Any], error: Exception) -> None:\n # FIXME: This needs to be made multi-servo aware\n # Restart the main event loop if we get out of sync with the server\n if isinstance(error, (servo.errors.UnexpectedEventError, servo.errors.EventCancelledError)):\n if isinstance(error, servo.errors.UnexpectedEventError):\n self.logger.error(\n \"servo has lost synchronization with the optimizer: restarting\"\n )\n elif isinstance(error, servo.errors.EventCancelledError):\n self.logger.error(\n \"optimizer has cancelled operation in progress: cancelling and restarting loop\"\n )\n\n # Post a status to resolve the operation\n operation = progress['operation']\n status = servo.api.Status.from_error(error)\n self.logger.error(f\"Responding with {status.dict()}\")\n runner = self._runner_for_servo(servo.current_servo())\n await runner._post_event(operation, status.dict())\n\n tasks = [\n t for t in asyncio.all_tasks() if t is not asyncio.current_task()\n ]\n self.logger.info(f\"Cancelling {len(tasks)} outstanding 
tasks\")\n [task.cancel() for task in tasks]\n\n await asyncio.gather(*tasks, return_exceptions=True)\n\n # Restart a fresh main loop\n if poll:\n runner = self._runner_for_servo(servo.current_servo())\n runner.run_main_loop()\n else:\n self.logger.error(\n f\"unrecognized exception passed to progress exception handler: {error}\"\n )\n\n\n self.progress_handler = servo.logging.ProgressHandler(\n _report_progress, self.logger.warning, handle_progress_exception\n )\n self.progress_handler_id = self.logger.add(self.progress_handler.sink)\n\n self._display_banner()\n\n try:\n for servo_ in self.assembly.servos:\n servo_runner = ServoRunner(servo_, interactive=interactive)\n loop.create_task(servo_runner.run(poll=poll))\n self.runners.append(servo_runner)\n\n loop.run_forever()\n\n finally:\n loop.close()\n\n def _display_banner(self) -> None:\n fonts = ['slant', 'banner3', 'bigchief', 'cosmic', 'speed', 'nancyj', 'fourtops',\n 'contessa', 'doom', 'broadway', 'acrobatic', 'trek', 'eftirobot', 'roman']\n color_map = {'RED': colorama.Fore.RED, 'GREEN': colorama.Fore.GREEN,\n 'YELLOW': colorama.Fore.YELLOW, 'BLUE': colorama.Fore.BLUE,\n 'MAGENTA': colorama.Fore.MAGENTA, 'CYAN': colorama.Fore.CYAN,\n 'RAINBOW': colorama.Fore.MAGENTA}\n terminal_size = shutil.get_terminal_size()\n\n # Generate an awesome banner for this launch\n font = os.getenv('SERVO_BANNER_FONT', random.choice(fonts))\n color_name = os.getenv('SERVO_BANNER_COLOR')\n # coinflip unless we have been directly configured from the env\n rainbow = (\n bool(random.getrandbits(1)) if color_name is None\n else (color_name.upper() == 'RAINBOW')\n )\n\n figlet = pyfiglet.Figlet(font=font, width=terminal_size.columns)\n banner = figlet.renderText('ServoX').rstrip()\n\n if rainbow:\n # Rainbow it\n colored_banner = [random.choice(list(color_map.values())) + char for char in banner]\n typer.echo(''.join(colored_banner), color=True)\n else:\n # Flat single color\n color = (color_map[color_name.upper()] if color_name else random.choice(list(color_map.values())))\n typer.echo(f'{color}{banner}', color=True)\n\n secho = functools.partial(typer.secho, color=True)\n types = servo.Assembly.all_connector_types()\n types.remove(servo.Servo)\n\n names = []\n for c in types:\n name = typer.style(\n servo.utilities.strings.commandify(c.__default_name__), fg=typer.colors.CYAN, bold=False\n )\n version = typer.style(str(c.version), fg=typer.colors.WHITE, bold=True)\n names.append(f\"{name}-{version}\")\n\n version = typer.style(f\"v{servo.__version__}\", fg=typer.colors.WHITE, bold=True)\n codename = typer.style(servo.__cryptonym__, fg=typer.colors.MAGENTA, bold=False)\n initialized = typer.style(\n \"initialized\", fg=typer.colors.BRIGHT_GREEN, bold=True\n )\n version = typer.style(f\"v{servo.__version__}\", fg=typer.colors.WHITE, bold=True)\n\n secho(f'{version} \"{codename}\" {initialized}')\n secho(reset=True)\n secho(f\"connectors: {', '.join(sorted(names))}\")\n secho(\n f\"config file: {typer.style(str(self.assembly.config_file), bold=True, fg=typer.colors.YELLOW)}\"\n )\n\n if len(self.assembly.servos) == 1:\n servo_ = self.assembly.servos[0]\n optimizer = servo_.optimizer\n\n id = typer.style(optimizer.id, bold=True, fg=typer.colors.WHITE)\n secho(f\"optimizer: {id}\")\n if optimizer.base_url != \"https://api.opsani.com/\":\n base_url = typer.style(\n f\"{optimizer.base_url}\", bold=True, fg=typer.colors.RED\n )\n secho(f\"base url: {base_url}\")\n\n if servo_.config.settings and servo_.config.settings.proxies:\n proxies = typer.style(\n 
f\"{devtools.pformat(servo_.config.settings.proxies)}\",\n bold=True,\n fg=typer.colors.CYAN,\n )\n secho(f\"proxies: {proxies}\")\n else:\n servo_count = typer.style(str(len(self.assembly.servos)), bold=True, fg=typer.colors.WHITE)\n secho(f\"servos: {servo_count}\")\n\n secho(reset=True)\n\n async def _shutdown(self, loop, signal=None):\n if not self.running:\n raise RuntimeError(\"Cannot shutdown an assembly that is not running\")\n\n if signal:\n self.logger.info(f\"Received exit signal {signal.name}...\")\n\n reason = signal.name if signal else \"shutdown\"\n\n # Shut down the servo runners, breaking active control loops\n if len(self.runners) == 1:\n self.logger.info(f\"Shutting down servo...\")\n else:\n self.logger.info(f\"Shutting down {len(self.runners)} running servos...\")\n for fut in asyncio.as_completed(list(map(lambda r: r.shutdown(reason=reason), self.runners)), timeout=30.0):\n try:\n await fut\n except Exception as error:\n self.logger.critical(f\"Failed servo runner shutdown with error: {error}\")\n\n # Shutdown the assembly and the servos it contains\n self.logger.debug(\"Dispatching shutdown event...\")\n try:\n await self.assembly.shutdown()\n except Exception as error:\n self.logger.critical(f\"Failed assembly shutdown with error: {error}\")\n\n await asyncio.gather(self.progress_handler.shutdown(), return_exceptions=True)\n self.logger.remove(self.progress_handler_id)\n\n # Cancel any outstanding tasks -- under a clean, graceful shutdown this list will be empty\n # The shutdown of the assembly and the servo should clean up its tasks\n tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]\n if len(tasks):\n [task.cancel() for task in tasks]\n\n self.logger.info(f\"Cancelling {len(tasks)} outstanding tasks\")\n self.logger.debug(f\"Outstanding tasks: {devtools.pformat(tasks)}\")\n await asyncio.gather(*tasks, return_exceptions=True)\n\n self.logger.info(\"Servo shutdown complete.\")\n await asyncio.gather(self.logger.complete(), return_exceptions=True)\n\n self._running = False\n\n loop.stop()\n\n def _handle_exception(self, loop: asyncio.AbstractEventLoop, context: dict) -> None:\n self.logger.debug(f\"asyncio exception handler triggered with context: {context}\")\n\n exception = context.get(\"exception\", None)\n logger = self.logger.opt(exception=exception)\n\n if isinstance(exception, asyncio.CancelledError):\n logger.warning(f\"ignoring asyncio.CancelledError exception\")\n pass\n elif loop.is_closed():\n logger.critical(\n \"Ignoring exception -- the event loop is closed.\"\n )\n elif self.running:\n logger.critical(\n \"Shutting down due to unhandled exception in asyncio event loop...\"\n )\n loop.create_task(self._shutdown(loop))\n else:\n logger.critical(\n \"Ignoring exception -- the assembly is not running\"\n )\n"}}},{"rowIdx":542644,"cells":{"filename":{"kind":"string","value":"the-stack_106_30955"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n# Copyright (c) 2018-2019 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\nfrom test_framework.test_framework import AgroCoinTestFramework\nfrom test_framework.util import (\n assert_equal,\n assert_raises_rpc_error,\n)\nfrom test_framework.blocktools import (\n TIME_GENESIS_BLOCK,\n)\n\n\nclass CreateTxWalletTest(AgroCoinTestFramework):\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 1\n\n def skip_test_if_missing_module(self):\n 
self.skip_if_no_wallet()\n\n def run_test(self):\n self.log.info('Create some old blocks')\n self.nodes[0].setmocktime(TIME_GENESIS_BLOCK)\n self.nodes[0].generate(200)\n self.nodes[0].setmocktime(0)\n\n self.test_anti_fee_sniping()\n self.test_tx_size_too_large()\n\n def test_anti_fee_sniping(self):\n self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')\n assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)\n txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)\n tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])\n assert_equal(tx['locktime'], 0)\n\n self.log.info('Check that anti-fee-sniping is enabled when we mine a recent block')\n self.nodes[0].generate(1)\n txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)\n tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])\n assert 0 < tx['locktime'] <= 201\n\n def test_tx_size_too_large(self):\n # More than 10kB of outputs, so that we hit -maxtxfee with a high feerate\n outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for _ in range(400)}\n raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs)\n\n for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']:\n self.log.info('Check maxtxfee in combination with {}'.format(fee_setting))\n self.restart_node(0, extra_args=[fee_setting])\n assert_raises_rpc_error(\n -6,\n \"Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)\",\n lambda: self.nodes[0].sendmany(dummy=\"\", amounts=outputs),\n )\n assert_raises_rpc_error(\n -4,\n \"Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)\",\n lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),\n )\n\n self.log.info('Check maxtxfee in combination with settxfee')\n self.restart_node(0)\n self.nodes[0].settxfee(0.01)\n assert_raises_rpc_error(\n -6,\n \"Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)\",\n lambda: self.nodes[0].sendmany(dummy=\"\", amounts=outputs),\n )\n assert_raises_rpc_error(\n -4,\n \"Fee exceeds maximum configured by user (e.g. 
-maxtxfee, maxfeerate)\",\n lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),\n )\n self.nodes[0].settxfee(0)\n\n\nif __name__ == '__main__':\n CreateTxWalletTest().main()\n"}}},{"rowIdx":542645,"cells":{"filename":{"kind":"string","value":"the-stack_106_30956"},"text":{"kind":"string","value":"import copy\n\n__author__ = 'rolandh'\n\n\nclass UserInfo(object):\n \"\"\" Read only interface to a user info store \"\"\"\n\n def __init__(self, db=None):\n self.db = db\n\n def filter(self, userinfo, user_info_claims=None):\n \"\"\"\n Return only those claims that are asked for.\n It's a best effort task; if essential claims are not present\n no error is flagged.\n\n :param userinfo: A dictionary containing the available user info.\n :param user_info_claims: A dictionary specifying the asked for claims\n :return: A dictionary of filtered claims.\n \"\"\"\n\n if user_info_claims is None:\n return copy.copy(userinfo)\n else:\n result = {}\n missing = []\n optional = []\n for key, restr in user_info_claims.items():\n try:\n result[key] = userinfo[key]\n except KeyError:\n if restr == {\"essential\": True}:\n missing.append(key)\n else:\n optional.append(key)\n return result\n\n def __call__(self, userid, client_id, user_info_claims=None, **kwargs):\n try:\n return self.filter(self.db[userid], user_info_claims)\n except KeyError:\n return {}\n"}}},{"rowIdx":542646,"cells":{"filename":{"kind":"string","value":"the-stack_106_30958"},"text":{"kind":"string","value":"\"\"\"Marks all fixed errors #34 on ruwiki's CheckWikipedia.\"\"\"\nimport re\nimport pywikibot\nfrom checkwiki import load_page_list, mark_error_done, log\n\nNUMBER = \"34\"\nREGEXP = r\"{{{[^!]|#if:|#ifeq:|#switch:|#ifexist:|{{fullpagename}}|{{sitename}}|{{namespace}}|{{basepagename}}|{{pagename}}|{{subpagename}}|{{talkpagename}}|{{подст:|{{subst:\"\nFLAGS = re.I\n\ndef main():\n \"\"\"Main script function.\"\"\"\n site = pywikibot.Site()\n for line in load_page_list(NUMBER):\n page = pywikibot.Page(site, line)\n if re.search(REGEXP, page.text, flags=FLAGS) is None:\n mark_error_done(NUMBER, page.title())\n log(line, success=True)\n else:\n log(line, success=False)\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":542647,"cells":{"filename":{"kind":"string","value":"the-stack_106_30959"},"text":{"kind":"string","value":"from main.gui import Draw\nfrom main.model import Service\n\n\ndef main():\n # Configuring microservice structure\n proxy = Service(5, 100, 'proxy')\n aggregate = Service(5, 100, 'aggregate')\n app = Service(5, 100, 'crud')\n another_app = Service(5, 100, 'another_crud')\n database = Service(5, 100, 'database')\n another_app_db = Service(5, 100, 'database')\n cache = Service(5, 100, 'cache')\n proxy.add_dependency(aggregate)\n aggregate.add_dependency(app)\n aggregate.add_dependency(another_app)\n app.add_dependency(database)\n app.add_dependency(cache)\n another_app.add_dependency(cache)\n another_app.add_dependency(another_app_db)\n # Simulating calls in cycles\n cycles = 100000\n for _ in range(cycles):\n proxy.call()\n # Drawing from root\n draw = Draw()\n draw.draw_tree(proxy)\n\n\nif __name__ == '__main__':\n main()\n"}}},{"rowIdx":542648,"cells":{"filename":{"kind":"string","value":"the-stack_106_30960"},"text":{"kind":"string","value":"import requests\nfrom legitindicators import atrpips\n\nBINANCE_URL = \"https://api.binance.com/api/v3/klines\"\nSYMBOL = \"BTCUSDT\"\nINTERVAL = \"5m\"\nPARAMS = {\"symbol\":SYMBOL, \"interval\":INTERVAL}\n\ndef test_atrpips():\n response = 
requests.get(url=BINANCE_URL, params=PARAMS)\n data = response.json()\n open = [float(o[1]) for o in data]\n high = [float(h[2]) for h in data]\n low = [float(l[3]) for l in data]\n close = [float(c[4]) for c in data]\n\n input_data = []\n for i in range(0, len(data)):\n ohlc = [open[i], high[i], low[i], close[i]]\n input_data.append(ohlc)\n apips = atrpips(input_data, 14)\n print(apips)\n assert len(apips) == len(close)\n"}}},{"rowIdx":542649,"cells":{"filename":{"kind":"string","value":"the-stack_106_30961"},"text":{"kind":"string","value":"\"\"\" Advent of code 2021 day 10 / 2 \"\"\"\n\nfrom os import path\nfrom collections import deque\n\np = {\n \"()\": 1,\n \"[]\": 2,\n \"kk\": 3,\n \"<>\": 4,\n}\nm = {\n \"(\": \"()\",\n \"[\": \"[]\",\n \"{\": \"kk\",\n \"<\": \"<>\",\n \")\": \"()\",\n \"]\": \"[]\",\n \"}\": \"kk\",\n \">\": \"<>\",\n}\n\nopening = set([\"(\", \"[\", \"{\", \"<\"])\nclosing = set([\")\", \"]\", \"}\", \">\"])\n\nclass Code(object):\n def __init__(self, lines):\n self.lines = lines\n\n def solve(self):\n # print(self.lines)\n scores = deque()\n for line in self.lines:\n s = 0\n check_stack = deque()\n for c in line:\n if c in opening:\n check_stack.append(c)\n elif c in closing:\n shouldclose = check_stack.pop()\n if m[shouldclose] != m[c]:\n # ignore corrupted line\n break\n else:\n # calc incomplete line\n while len(check_stack) > 0:\n next_char = check_stack.pop()\n s *= 5\n s += p[m[next_char]]\n scores.append(s)\n return sorted(scores)[len(scores)//2]\n\n\ndef preprocess(raw_data):\n # pattern = re.compile(r'(\\w+) (\\d+)')\n processed_data = []\n for line in raw_data.split(\"\\n\"):\n # match = re.match(pattern, line)\n # data = [match.group(1), match.group(2)]\n data = line\n processed_data.append(data)\n return processed_data\n\n\ndef solution(data):\n \"\"\" Solution to the problem \"\"\"\n lines = preprocess(data)\n solver = Code(lines)\n return solver.solve()\n\n\nif __name__ == \"__main__\":\n with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:\n print(solution(input_file.read()))\n"}}},{"rowIdx":542650,"cells":{"filename":{"kind":"string","value":"the-stack_106_30964"},"text":{"kind":"string","value":"# coding:utf-8\n# --------------------------------------------------------\n# Pytorch multi-GPU Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport pprint\nimport pdb\nimport time\nimport _init_paths\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\n\n\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list\nfrom model.utils.net_utils import (\n adjust_learning_rate,\n save_checkpoint,\n get_dataloader,\n setup_seed,\n)\nfrom model.ema.optim_weight_ema import WeightEMA\nfrom model.utils.parser_func import parse_args, set_dataset_args\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\n\nfrom prettytimer import PrettyTimer\n\n\ndef get_cfg():\n args = parse_args()\n\n print(\"Called with args:\")\n print(args)\n args = set_dataset_args(args)\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print(\"Using config:\")\n 
pprint.pprint(cfg)\n # np.random.seed(cfg.RNG_SEED)\n setup_seed(cfg.RNG_SEED)\n return args\n\n\nif __name__ == \"__main__\":\n args = get_cfg()\n\n output_dir = f\"{args.save_dir}/{args.net}/{args.dataset}\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if args.dataset_t == \"water\":\n args.aug = False\n\n if args.dataset_t == \"foggy_cityscape\":\n # initilize the network here.\n from model.umt_faster_rcnn_truncate.umt_vgg16 import vgg16\n from model.umt_faster_rcnn_truncate.umt_resnet import resnet\n else:\n from model.umt_faster_rcnn.umt_vgg16 import vgg16\n from model.umt_faster_rcnn.umt_resnet import resnet\n\n student_save_name = os.path.join(\n output_dir,\n \"conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_thresh_{}\"\n \"_lambda_{}_student_target_{}\".format(\n args.conf,\n args.conf_gamma,\n args.source_like,\n args.aug,\n args.target_like,\n args.pretrained_epoch,\n args.pl,\n args.threshold,\n args.lam,\n args.dataset_t,\n ),\n )\n print(\"Model will be saved to: \")\n print(student_save_name)\n # torch.backends.cudnn.benchmark = True\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n # train set\n # -- Note: Use validation set and disable the flipped to enable faster loading.\n cfg.TRAIN.USE_FLIPPED = True\n cfg.USE_GPU_NMS = args.cuda\n\n # source train set\n s_imdb, s_train_size, s_dataloader = get_dataloader(args.imdb_name, args)\n\n # source-like/fake-source train set data loader\n if args.source_like:\n s_fake_imdb, s_fake_train_size, s_fake_dataloader = get_dataloader(\n args.imdb_name_fake_source, args, sequential=True, augment=args.aug\n )\n else:\n s_fake_imdb, s_fake_train_size, s_fake_dataloader = get_dataloader(\n args.imdb_name_target, args, sequential=True, augment=args.aug\n )\n # target train set\n t_imdb, t_train_size, t_dataloader = get_dataloader(\n args.imdb_name_target, args, sequential=True, augment=args.aug\n )\n # target-like/fake-target train set\n t_fake_imdb, t_fake_train_size, t_fake_dataloader = get_dataloader(\n args.imdb_name_fake_target, args\n )\n\n print(\"{:d} source roidb entries\".format(s_train_size))\n print(\"{:d} source like roidb entries\".format(s_fake_train_size))\n print(\"{:d} target roidb entries\".format(t_train_size))\n print(\"{:d} target like roidb entries\".format(t_fake_train_size))\n\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data)\n im_info = Variable(im_info)\n num_boxes = Variable(num_boxes)\n gt_boxes = Variable(gt_boxes)\n\n if args.cuda:\n cfg.CUDA = True\n\n imdb = s_imdb\n\n if args.net == \"vgg16\":\n student_fasterRCNN = vgg16(\n imdb.classes,\n pretrained=True,\n class_agnostic=args.class_agnostic,\n conf=args.conf,\n )\n teacher_fasterRCNN = vgg16(\n imdb.classes,\n pretrained=True,\n class_agnostic=args.class_agnostic,\n conf=args.conf,\n )\n elif args.net == \"res101\":\n student_fasterRCNN = resnet(\n imdb.classes,\n 101,\n pretrained=True,\n class_agnostic=args.class_agnostic,\n conf=args.conf,\n )\n teacher_fasterRCNN = resnet(\n imdb.classes,\n 101,\n pretrained=True,\n class_agnostic=args.class_agnostic,\n conf=args.conf,\n )\n elif args.net == \"res50\":\n 
student_fasterRCNN = resnet(\n imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic\n )\n teacher_fasterRCNN = resnet(\n imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic\n )\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n student_fasterRCNN.create_architecture()\n teacher_fasterRCNN.create_architecture()\n\n lr = cfg.TRAIN.LEARNING_RATE\n lr = args.lr\n\n student_detection_params = []\n params = []\n for key, value in dict(student_fasterRCNN.named_parameters()).items():\n if value.requires_grad:\n if \"bias\" in key:\n params += [\n {\n \"params\": [value],\n \"lr\": lr * (cfg.TRAIN.DOUBLE_BIAS + 1),\n \"weight_decay\": cfg.TRAIN.BIAS_DECAY\n and cfg.TRAIN.WEIGHT_DECAY\n or 0,\n }\n ]\n else:\n params += [\n {\n \"params\": [value],\n \"lr\": lr,\n \"weight_decay\": cfg.TRAIN.WEIGHT_DECAY,\n }\n ]\n student_detection_params += [value]\n\n teacher_detection_params = []\n for key, value in dict(teacher_fasterRCNN.named_parameters()).items():\n if value.requires_grad:\n teacher_detection_params += [value]\n value.requires_grad = False\n\n if args.optimizer == \"adam\":\n lr = lr * 0.1\n student_optimizer = torch.optim.Adam(params)\n elif args.optimizer == \"sgd\":\n student_optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)\n teacher_optimizer = WeightEMA(\n teacher_detection_params, student_detection_params, alpha=args.teacher_alpha\n )\n\n if args.cuda:\n student_fasterRCNN.cuda()\n teacher_fasterRCNN.cuda()\n\n if args.resume:\n student_checkpoint = torch.load(args.student_load_name)\n args.session = student_checkpoint[\"session\"]\n args.start_epoch = student_checkpoint[\"epoch\"]\n student_fasterRCNN.load_state_dict(student_checkpoint[\"model\"])\n student_optimizer.load_state_dict(student_checkpoint[\"optimizer\"])\n lr = student_optimizer.param_groups[0][\"lr\"]\n if \"pooling_mode\" in student_checkpoint.keys():\n cfg.POOLING_MODE = student_checkpoint[\"pooling_mode\"]\n print(\"loaded checkpoint %s\" % (args.student_load_name))\n\n teacher_checkpoint = torch.load(args.teacher_load_name)\n teacher_fasterRCNN.load_state_dict(teacher_checkpoint[\"model\"])\n if \"pooling_mode\" in teacher_checkpoint.keys():\n cfg.POOLING_MODE = teacher_checkpoint[\"pooling_mode\"]\n print(\"loaded checkpoint %s\" % (args.teacher_load_name))\n\n if args.mGPUs:\n student_fasterRCNN = nn.DataParallel(student_fasterRCNN)\n teacher_fasterRCNN = nn.DataParallel(teacher_fasterRCNN)\n iters_per_epoch = int(10000 / args.batch_size)\n\n if args.use_tfboard:\n from tensorboardX import SummaryWriter\n\n logger = SummaryWriter(\"logs\")\n\n count_iter = 0\n conf_gamma = args.conf_gamma\n pretrained_epoch = args.pretrained_epoch\n timer = PrettyTimer()\n for epoch in range(args.start_epoch, args.max_epochs + 1):\n # setting to train mode\n student_fasterRCNN.train()\n teacher_fasterRCNN.train()\n loss_temp = 0\n\n start = time.time()\n epoch_start = time.time()\n if epoch % (args.lr_decay_step + 1) == 0:\n adjust_learning_rate(student_optimizer, args.lr_decay_gamma)\n lr *= args.lr_decay_gamma\n\n data_iter_s = iter(s_dataloader)\n data_iter_t = iter(t_dataloader)\n data_iter_s_fake = iter(s_fake_dataloader)\n data_iter_t_fake = iter(t_fake_dataloader)\n for step in range(1, iters_per_epoch + 1):\n timer.start(\"iter\")\n try:\n data_s = next(data_iter_s)\n except:\n data_iter_s = iter(s_dataloader)\n data_s = next(data_iter_s)\n\n try:\n data_s_fake = next(data_iter_s_fake)\n except:\n data_iter_s_fake = iter(s_fake_dataloader)\n data_s_fake = 
next(data_iter_s_fake)\n\n try:\n data_t = next(data_iter_t)\n except:\n data_iter_t = iter(t_dataloader)\n data_t = next(data_iter_t)\n\n assert (\n data_s_fake[0].size() == data_t[0].size()\n ), \"The size should be same between source fake and target\"\n assert (\n data_s_fake[1] == data_t[1]\n ).all(), \"The image info should be same between source fake and target\"\n try:\n data_t_fake = next(data_iter_t_fake)\n except:\n data_iter_t_fake = iter(t_fake_dataloader)\n data_t_fake = next(data_iter_t_fake)\n\n # eta = 1.0\n count_iter += 1\n\n # put source data into variable\n im_data.data.resize_(data_s[0].size()).copy_(data_s[0])\n im_info.data.resize_(data_s[1].size()).copy_(data_s[1])\n gt_boxes.data.resize_(data_s[2].size()).copy_(data_s[2])\n num_boxes.data.resize_(data_s[3].size()).copy_(data_s[3])\n\n student_fasterRCNN.zero_grad()\n (\n rois,\n cls_prob,\n bbox_pred,\n rpn_loss_cls,\n rpn_loss_box,\n RCNN_loss_cls,\n RCNN_loss_bbox,\n rois_label,\n out_d_pixel,\n out_d,\n confidence_loss,\n _,\n ) = student_fasterRCNN(im_data, im_info, gt_boxes, num_boxes, hints=True)\n loss = (\n rpn_loss_cls.mean()\n + rpn_loss_box.mean()\n + RCNN_loss_cls.mean()\n + RCNN_loss_bbox.mean()\n )\n if args.conf:\n conf_loss = confidence_loss.mean()\n\n if args.target_like:\n # put fake target data into variable\n im_data.data.resize_(data_t_fake[0].size()).copy_(data_t_fake[0])\n im_info.data.resize_(data_t_fake[1].size()).copy_(data_t_fake[1])\n # gt is empty\n gt_boxes.data.resize_(data_t_fake[2].size()).copy_(data_t_fake[2])\n num_boxes.data.resize_(data_t_fake[3].size()).copy_(data_t_fake[3])\n\n (\n rois,\n cls_prob,\n bbox_pred,\n rpn_loss_cls_t_fake,\n rpn_loss_box_t_fake,\n RCNN_loss_cls_t_fake,\n RCNN_loss_bbox_t_fake,\n rois_label_t_fake,\n out_d_pixel,\n out_d,\n _,\n _,\n ) = student_fasterRCNN(\n im_data, im_info, gt_boxes, num_boxes, hints=False\n ) # --------------------------------\n loss += (\n rpn_loss_cls_t_fake.mean()\n + rpn_loss_box_t_fake.mean()\n + RCNN_loss_cls_t_fake.mean()\n + RCNN_loss_bbox_t_fake.mean()\n )\n\n if epoch > pretrained_epoch and args.pl:\n teacher_fasterRCNN.eval()\n\n im_data.data.resize_(data_s_fake[0].size()).copy_(data_s_fake[0])\n im_info.data.resize_(data_s_fake[1].size()).copy_(data_s_fake[1])\n # gt is emqpty\n gt_boxes.data.resize_(1, 1, 5).zero_()\n num_boxes.data.resize_(1).zero_()\n (\n rois,\n cls_prob,\n bbox_pred,\n rpn_loss_cls_,\n rpn_loss_box_,\n RCNN_loss_cls_,\n RCNN_loss_bbox_,\n rois_label_,\n d_pred_,\n _,\n _,\n confidence_s_fake,\n ) = teacher_fasterRCNN(im_data, im_info, gt_boxes, num_boxes, test=True)\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = (\n box_deltas.view(-1, 4)\n * torch.FloatTensor(\n cfg.TRAIN.BBOX_NORMALIZE_STDS\n ).cuda()\n + torch.FloatTensor(\n cfg.TRAIN.BBOX_NORMALIZE_MEANS\n ).cuda()\n )\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = (\n box_deltas.view(-1, 4)\n * torch.FloatTensor(\n cfg.TRAIN.BBOX_NORMALIZE_STDS\n ).cuda()\n + torch.FloatTensor(\n cfg.TRAIN.BBOX_NORMALIZE_MEANS\n ).cuda()\n )\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n 
pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n scores = scores.squeeze()\n if args.conf:\n scores = torch.sqrt(\n scores * confidence_s_fake\n ) # using confidence score to adjust scores\n pred_boxes = pred_boxes.squeeze()\n gt_boxes_target = []\n pre_thresh = 0.0\n thresh = args.threshold\n empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))\n for j in range(1, len(imdb.classes)):\n inds = torch.nonzero(scores[:, j] > pre_thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:, j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4 : (j + 1) * 4]\n\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n # all_boxes[j][i] = cls_dets.cpu().numpy()\n cls_dets_numpy = cls_dets.cpu().numpy()\n for i in range(np.minimum(10, cls_dets_numpy.shape[0])):\n bbox = tuple(\n int(np.round(x)) for x in cls_dets_numpy[i, :4]\n )\n score = cls_dets_numpy[i, -1]\n if score > thresh:\n gt_boxes_target.append(list(bbox[0:4]) + [j])\n\n gt_boxes_padding = torch.FloatTensor(cfg.MAX_NUM_GT_BOXES, 5).zero_()\n if len(gt_boxes_target) != 0:\n gt_boxes_numpy = torch.FloatTensor(gt_boxes_target)\n num_boxes_cpu = torch.LongTensor(\n [min(gt_boxes_numpy.size(0), cfg.MAX_NUM_GT_BOXES)]\n )\n gt_boxes_padding[:num_boxes_cpu, :] = gt_boxes_numpy[:num_boxes_cpu]\n else:\n num_boxes_cpu = torch.LongTensor([0])\n\n # teacher_fasterRCNN.train()\n # put source-like data into variable\n im_data.data.resize_(data_t[0].size()).copy_(data_t[0])\n im_info.data.resize_(data_t[1].size()).copy_(data_t[1])\n gt_boxes_padding = torch.unsqueeze(gt_boxes_padding, 0)\n gt_boxes.data.resize_(gt_boxes_padding.size()).copy_(gt_boxes_padding)\n num_boxes.data.resize_(num_boxes_cpu.size()).copy_(num_boxes_cpu)\n\n (\n rois,\n cls_prob,\n bbox_pred,\n rpn_loss_cls_s_fake,\n rpn_loss_box_s_fake,\n RCNN_loss_cls_s_fake,\n RCNN_loss_bbox_s_fake,\n rois_label_s_fake,\n out_d_pixel,\n out_d,\n _,\n _,\n ) = student_fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n\n loss += args.lam * (\n rpn_loss_cls_s_fake.mean()\n + rpn_loss_box_s_fake.mean()\n + RCNN_loss_cls_s_fake.mean()\n + RCNN_loss_bbox_s_fake.mean()\n )\n\n if args.conf:\n loss += conf_gamma * conf_loss\n\n loss_temp += loss.item()\n student_optimizer.zero_grad()\n loss.backward()\n student_optimizer.step()\n teacher_fasterRCNN.zero_grad()\n teacher_optimizer.step()\n timer.end(\"iter\")\n estimate_time = timer.eta(\n \"iter\", count_iter, args.max_epochs * iters_per_epoch\n )\n if step % args.disp_interval == 0:\n end = time.time()\n if step > 0:\n loss_temp /= args.disp_interval\n\n if args.mGPUs:\n loss_rpn_cls = rpn_loss_cls.mean().item()\n loss_rpn_box = rpn_loss_box.mean().item()\n loss_rcnn_cls = RCNN_loss_cls.mean().item()\n loss_rcnn_box = RCNN_loss_bbox.mean().item()\n fg_cnt = torch.sum(rois_label.data.ne(0))\n bg_cnt = rois_label.data.numel() - fg_cnt\n if args.pl and epoch > pretrained_epoch:\n loss_rpn_cls_s_fake = rpn_loss_cls_s_fake.mean().item()\n loss_rpn_box_s_fake = rpn_loss_box_s_fake.mean().item()\n loss_rcnn_cls_s_fake = RCNN_loss_cls_s_fake.mean().item()\n loss_rcnn_box_s_fake = RCNN_loss_bbox_s_fake.mean().item()\n fg_cnt_s_fake = torch.sum(rois_label_s_fake.data.ne(0))\n bg_cnt_s_fake = rois_label_s_fake.data.numel() - fg_cnt_s_fake\n 
if args.target_like:\n loss_rpn_cls_t_fake = rpn_loss_cls_t_fake.mean().item()\n loss_rpn_box_t_fake = rpn_loss_box_t_fake.mean().item()\n loss_rcnn_cls_t_fake = RCNN_loss_cls_t_fake.mean().item()\n loss_rcnn_box_t_fake = RCNN_loss_bbox_t_fake.mean().item()\n fg_cnt_t_fake = torch.sum(rois_label_t_fake.data.ne(0))\n bg_cnt_t_fake = rois_label_t_fake.data.numel() - fg_cnt_t_fake\n\n # dloss_s_fake = dloss_s_fake.mean().item()\n # dloss_t_fake = dloss_t_fake.mean().item()\n # dloss_s_p_fake = dloss_s_p_fake.mean().item()\n # dloss_t_p_fake = dloss_t_p_fake.mean().item()\n else:\n loss_rpn_cls = rpn_loss_cls.item()\n loss_rpn_box = rpn_loss_box.item()\n loss_rcnn_cls = RCNN_loss_cls.item()\n loss_rcnn_box = RCNN_loss_bbox.item()\n fg_cnt = torch.sum(rois_label.data.ne(0))\n bg_cnt = rois_label.data.numel() - fg_cnt\n\n if args.conf:\n loss_conf = conf_loss.item()\n\n if args.pl and epoch > pretrained_epoch:\n loss_rpn_cls_s_fake = rpn_loss_cls_s_fake.item()\n loss_rpn_box_s_fake = rpn_loss_box_s_fake.item()\n loss_rcnn_cls_s_fake = RCNN_loss_cls_s_fake.item()\n loss_rcnn_box_s_fake = RCNN_loss_bbox_s_fake.item()\n fg_cnt_s_fake = torch.sum(rois_label_s_fake.data.ne(0))\n bg_cnt_s_fake = rois_label_s_fake.data.numel() - fg_cnt\n\n if args.target_like:\n loss_rpn_cls_t_fake = rpn_loss_cls_t_fake.item()\n loss_rpn_box_t_fake = rpn_loss_box_t_fake.item()\n loss_rcnn_cls_t_fake = RCNN_loss_cls_t_fake.item()\n loss_rcnn_box_t_fake = RCNN_loss_bbox_t_fake.item()\n fg_cnt_t_fake = torch.sum(rois_label_t_fake.data.ne(0))\n bg_cnt_t_fake = rois_label_t_fake.data.numel() - fg_cnt_t_fake\n\n print(\n \"[session %d][epoch %2d][iter %4d/%4d] lr: %.2e, loss: %.4f, eta: %s\"\n % (\n args.session,\n epoch,\n step,\n iters_per_epoch,\n lr,\n loss_temp,\n estimate_time,\n )\n )\n print(\n \"\\t\\t\\tfg/bg=(%d/%d), time cost: %f\" % (fg_cnt, bg_cnt, end - start)\n )\n print(\n \"\\t\\t\\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f\"\n % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box)\n )\n if args.pl and epoch > pretrained_epoch:\n print(\"\\t\\t\\tfg/bg=(%d/%d)\" % (fg_cnt_s_fake, bg_cnt_s_fake))\n print(\n \"\\t\\t\\trpn_cls_s_fake: %.4f, rpn_box_s_fake: %.4f, rcnn_cls_s_fake: %.4f, rcnn_box_s_fake %.4f\"\n % (\n loss_rpn_cls_s_fake,\n loss_rpn_box_s_fake,\n loss_rcnn_cls_s_fake,\n loss_rcnn_box_s_fake,\n )\n )\n\n if args.target_like:\n print(\"\\t\\t\\tfg/bg=(%d/%d)\" % (fg_cnt_t_fake, bg_cnt_t_fake))\n print(\n \"\\t\\t\\trpn_cls_t_fake: %.4f, rpn_box_t_fake: %.4f, rcnn_cls_t_fake: %.4f, rcnn_box_t_fake %.4f\"\n % (\n loss_rpn_cls_t_fake,\n loss_rpn_box_t_fake,\n loss_rcnn_cls_t_fake,\n loss_rcnn_box_t_fake,\n )\n )\n if args.conf is True:\n print(f\"\\t\\t\\tconf loss: {loss_conf:.4}\")\n\n if args.use_tfboard:\n info = {\n \"loss\": loss_temp,\n \"loss_rpn_cls\": loss_rpn_cls,\n \"loss_rpn_box\": loss_rpn_box,\n \"loss_rcnn_cls\": loss_rcnn_cls,\n \"loss_rcnn_box\": loss_rcnn_box,\n \"loss_rpn_cls_s_fake\": loss_rpn_cls_s_fake,\n \"loss_rpn_box_s_fake\": loss_rpn_box_s_fake,\n \"loss_rcnn_cls_s_fake\": loss_rcnn_cls_s_fake,\n \"loss_rcnn_box_s_fake\": loss_rcnn_box_s_fake,\n \"loss_rpn_cls_t_fake\": loss_rpn_cls_t_fake\n if args.target_like is True\n else 0,\n \"loss_rpn_box_t_fake\": loss_rpn_box_t_fake\n if args.target_like is True\n else 0,\n \"loss_rcnn_cls_t_fake\": loss_rcnn_cls_t_fake\n if args.target_like is True\n else 0,\n \"loss_rcnn_box_t_fake\": loss_rcnn_box_t_fake\n if args.target_like is True\n else 0,\n \"loss_conf\": loss_conf if args.conf is True else 0,\n 
\"conf_gamma\": conf_gamma,\n }\n logger.add_scalars(\n \"logs_s_{}/losses\".format(args.session),\n info,\n (epoch - 1) * iters_per_epoch + step,\n )\n\n loss_temp = 0\n\n start = time.time()\n\n student_save_name = os.path.join(\n output_dir,\n \"conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_\"\n \"thresh_{}_lambda_{}_lam2_{}_student_target_{}_session_{}_epoch_{}_step_{}.pth\".format(\n args.conf,\n args.conf_gamma,\n args.source_like,\n args.aug,\n args.target_like,\n args.pretrained_epoch,\n args.pl,\n args.threshold,\n args.lam,\n args.lam2,\n args.dataset_t,\n args.session,\n epoch,\n step,\n ),\n )\n save_checkpoint(\n {\n \"session\": args.session,\n \"epoch\": epoch + 1,\n \"model\": student_fasterRCNN.mumt_train.pyodule.state_dict()\n if args.mGPUs\n else student_fasterRCNN.state_dict(),\n \"optimizer\": student_optimizer.state_dict(),\n \"pooling_mode\": cfg.POOLING_MODE,\n \"class_agnostic\": args.class_agnostic,\n },\n student_save_name,\n )\n print(\"save student model: {}\".format(student_save_name))\n\n teacher_save_name = os.path.join(\n output_dir,\n \"conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_\"\n \"thresh_{}_lambda_{}_lam2_{}_teacher_target_{}_session_{}_epoch_{}_step_{}.pth\".format(\n args.conf,\n args.conf_gamma,\n args.source_like,\n args.aug,\n args.target_like,\n args.pretrained_epoch,\n args.pl,\n args.threshold,\n args.lam,\n args.lam2,\n args.dataset_t,\n args.session,\n epoch,\n step,\n ),\n )\n save_checkpoint(\n {\n \"session\": args.session,\n \"epoch\": epoch + 1,\n \"model\": teacher_fasterRCNN.mumt_train.pyodule.state_dict()\n if args.mGPUs\n else teacher_fasterRCNN.state_dict(),\n \"pooling_mode\": cfg.POOLING_MODE,\n \"class_agnostic\": args.class_agnostic,\n },\n teacher_save_name,\n )\n print(\"save teacher model: {}\".format(teacher_save_name))\n epoch_end = time.time()\n print(\"epoch cost time: {} min\".format((epoch_end - epoch_start) / 60.0))\n\n # cmd = (\n # f\"python test_net_global_local.py --dataset {args.dataset_t} --net {args.net}\"\n # f\" --load_name {student_save_name}\"\n # )\n # print(\"cmd: \", cmd)\n # cmd = [i.strip() for i in cmd.split(\" \") if len(i.strip()) > 0]\n # try:\n # proc = subprocess.Popen(cmd)\n # proc.wait()\n # except (KeyboardInterrupt, SystemExit):\n # pass\n\n # cmd = (\n # f\"python test_net_global_local.py --dataset {args.dataset_t} --net {args.net}\"\n # f\" --load_name {teacher_save_name}\"\n # )\n # print(\"cmd: \", cmd)\n # cmd = [i.strip() for i in cmd.split(\" \") if len(i.strip()) > 0]\n # try:\n # proc = subprocess.Popen(cmd)\n # proc.wait()\n # except (KeyboardInterrupt, SystemExit):\n # pass\n\n if args.use_tfboard:\n logger.close()\n"}}},{"rowIdx":542651,"cells":{"filename":{"kind":"string","value":"the-stack_106_30966"},"text":{"kind":"string","value":"import sys\ndone = []\ntotalrmed = 0\nfname = sys.argv[1]\nlines = open(fname).read().split(\"\\n\")\noutput = open(fname,\"w\")\nfor line in lines:\n if line.startswith(\"#\") or line.startswith(\"!\") or line == \"\":\n output.write(\"{}\\n\".format(line))\n else:\n if line in done:\n totalrmed += 1\n continue\n done.append(line)\n output.write(\"{}\\n\".format(line))\noutput.close()\nprint(\"----- All entries removed -----\")\nprint(\"Scanned file {}\".format(fname))\nprint(\"{} total non-redundant entries\".format(len(done)))\nprint(\"{} entries removed\".format(totalrmed))\ninput()\n 
\n"}}},{"rowIdx":542652,"cells":{"filename":{"kind":"string","value":"the-stack_106_30967"},"text":{"kind":"string","value":"# Copyright 2013-2015 ARM Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport sys\nimport warnings\nfrom itertools import chain\n\ntry:\n from setuptools import setup\n from setuptools.command.sdist import sdist as orig_sdist\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.sdist import sdist as orig_sdist\n\n\nwa_dir = os.path.join(os.path.dirname(__file__), 'wa')\n\nsys.path.insert(0, os.path.join(wa_dir, 'framework'))\nfrom version import get_wa_version, get_wa_version_with_commit\n\n# happens if falling back to distutils\nwarnings.filterwarnings('ignore', \"Unknown distribution option: 'install_requires'\")\nwarnings.filterwarnings('ignore', \"Unknown distribution option: 'extras_require'\")\n\ntry:\n os.remove('MANIFEST')\nexcept OSError:\n pass\n\npackages = []\ndata_files = {'': [os.path.join(wa_dir, 'commands', 'postgres_schema.sql')]}\nsource_dir = os.path.dirname(__file__)\nfor root, dirs, files in os.walk(wa_dir):\n rel_dir = os.path.relpath(root, source_dir)\n data = []\n if '__init__.py' in files:\n for f in files:\n if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:\n data.append(f)\n package_name = rel_dir.replace(os.sep, '.')\n package_dir = root\n packages.append(package_name)\n data_files[package_name] = data\n else:\n # use previous package name\n filepaths = [os.path.join(root, f) for f in files]\n data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])\n\nscripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]\n\nparams = dict(\n name='wlauto',\n description='A framework for automating workload execution and measurement collection on ARM devices.',\n version=get_wa_version_with_commit(),\n packages=packages,\n package_data=data_files,\n include_package_data=True,\n scripts=scripts,\n url='https://github.com/ARM-software/workload-automation',\n license='Apache v2',\n maintainer='ARM Architecture & Technology Device Lab',\n maintainer_email='workload-automation@arm.com',\n setup_requires=[\n 'numpy'\n ],\n install_requires=[\n 'python-dateutil', # converting between UTC and local time.\n 'pexpect>=3.3', # Send/receive to/from device\n 'pyserial', # Serial port interface\n 'colorama', # Printing with colors\n 'pyYAML', # YAML-formatted agenda parsing\n 'requests', # Fetch assets over HTTP\n 'devlib>=1.1.dev1', # Interacting with devices\n 'louie-latest', # callbacks dispatch\n 'wrapt', # better decorators\n 'pandas>=0.23.0', # Data analysis and manipulation\n 'future', # Python 2-3 compatiblity\n ],\n dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-1.1.dev1'],\n extras_require={\n 'other': ['jinja2'],\n 'test': ['nose', 'mock'],\n 'mongodb': ['pymongo'],\n 'notify': ['notify2'],\n 'doc': ['sphinx'],\n },\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 
- Beta',\n 'Environment :: Console',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n ],\n)\n\nall_extras = list(chain(iter(params['extras_require'].values())))\nparams['extras_require']['everything'] = all_extras\n\n\nclass sdist(orig_sdist):\n\n user_options = orig_sdist.user_options + [\n ('strip-commit', 's',\n \"Strip git commit hash from package version \")\n ]\n\n def initialize_options(self):\n orig_sdist.initialize_options(self)\n self.strip_commit = False\n\n\n def run(self):\n if self.strip_commit:\n self.distribution.get_version = get_wa_version\n orig_sdist.run(self)\n\n\nparams['cmdclass'] = {'sdist': sdist}\n\nsetup(**params)\n"}}},{"rowIdx":542653,"cells":{"filename":{"kind":"string","value":"the-stack_106_30968"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# pylint: disable=E1101, C0330, C0103\n# E1101: Module X has no Y member\n# C0330: Wrong continued indentation\n# C0103: Invalid attribute/variable/method name\n\"\"\"\nutils.py\n=========\n\nThis is a collection of utilities used by the :mod:`wx.lib.plot` package.\n\n\"\"\"\n__docformat__ = \"restructuredtext en\"\n\n# Standard Library\nimport functools\nimport inspect\nimport itertools\nfrom warnings import warn as _warn\n\n# Third Party\nimport numpy as np\n\n\nclass DisplaySide(object):\n \"\"\"\n Generic class for describing which sides of a box are displayed.\n\n Used for fine-tuning the axis, ticks, and values of a graph.\n\n This class somewhat mimics a collections.namedtuple factory function in\n that it is an iterable and can have indiviual elements accessible by name.\n It differs from a namedtuple in a few ways:\n\n - it's mutable\n - it's not a factory function but a full-fledged class\n - it contains type checking, only allowing boolean values\n - it contains name checking, only allowing valid_names as attributes\n\n :param bottom: Display the bottom side\n :type bottom: bool\n :param left: Display the left side\n :type left: bool\n :param top: Display the top side\n :type top: bool\n :param right: Display the right side\n :type right: bool\n \"\"\"\n # TODO: Do I want to replace with __slots__?\n # Not much memory gain because this class is only called a small\n # number of times, but it would remove the need for part of\n # __setattr__...\n valid_names = (\"bottom\", \"left\", \"right\", \"top\")\n\n def __init__(self, bottom, left, top, right):\n if not all([isinstance(x, bool) for x in [bottom, left, top, right]]):\n raise TypeError(\"All args must be bools\")\n self.bottom = bottom\n self.left = left\n self.top = top\n self.right = right\n\n def __str__(self):\n s = \"{}(bottom={}, left={}, top={}, right={})\"\n s = s.format(self.__class__.__name__,\n self.bottom,\n self.left,\n self.top,\n self.right,\n )\n return s\n\n def __repr__(self):\n # for now, just return the str representation\n return self.__str__()\n\n def __setattr__(self, name, value):\n \"\"\"\n Override __setattr__ to implement some type checking and prevent\n other attributes from being created.\n \"\"\"\n if name not in self.valid_names:\n err_str = \"attribute must be one of {}\"\n raise NameError(err_str.format(self.valid_names))\n if not isinstance(value, bool):\n raise TypeError(\"'{}' must be a boolean\".format(name))\n self.__dict__[name] = value\n\n def __len__(self):\n return 4\n\n def __hash__(self):\n return hash(tuple(self))\n\n def __getitem__(self, key):\n return (self.bottom, 
self.left, self.top, self.right)[key]\n\n def __setitem__(self, key, value):\n if key == 0:\n self.bottom = value\n elif key == 1:\n self.left = value\n elif key == 2:\n self.top = value\n elif key == 3:\n self.right = value\n else:\n raise IndexError(\"list index out of range\")\n\n def __iter__(self):\n return iter([self.bottom, self.left, self.top, self.right])\n\n\n# TODO: replace with wx.DCPenChanger/wx.DCBrushChanger, etc.\n# Alternatively, replace those with this function...\nclass TempStyle(object):\n \"\"\"\n Decorator / Context Manager to revert pen or brush changes.\n\n Will revert pen, brush, or both to their previous values after a method\n call or block finish.\n\n :param which: The item to save and revert after execution. Can be\n one of ``{'both', 'pen', 'brush'}``.\n :type which: str\n :param dc: The DC to get brush/pen info from.\n :type dc: :class:`wx.DC`\n\n ::\n\n # Using as a method decorator:\n @TempStyle() # same as @TempStyle('both')\n def func(self, dc, a, b, c): # dc must be 1st arg (beside self)\n # edit pen and brush here\n\n # Or as a context manager:\n with TempStyle('both', dc):\n # do stuff\n\n .. Note::\n\n As of 2016-06-15, this can only be used as a decorator for **class\n methods**, not standard functions. There is a plan to try and remove\n this restriction, but I don't know when that will happen...\n\n .. epigraph::\n\n *Combination Decorator and Context Manager! Also makes Julienne fries!\n Will not break! Will not... It broke!*\n\n -- The Genie\n \"\"\"\n _valid_types = {'both', 'pen', 'brush'}\n _err_str = (\n \"No DC provided and unable to determine DC from context for function \"\n \"`{func_name}`. When `{cls_name}` is used as a decorator, the \"\n \"decorated function must have a wx.DC as a keyword arg 'dc=' or \"\n \"as the first arg.\"\n )\n\n def __init__(self, which='both', dc=None):\n if which not in self._valid_types:\n raise ValueError(\n \"`which` must be one of {}\".format(self._valid_types)\n )\n self.which = which\n self.dc = dc\n self.prevPen = None\n self.prevBrush = None\n\n def __call__(self, func):\n\n @functools.wraps(func)\n def wrapper(instance, dc, *args, **kwargs):\n # fake the 'with' block. This solves:\n # 1. plots only being shown on 2nd menu selection in demo\n # 2. self.dc compalaining about not having a super called when\n # trying to get or set the pen/brush values in __enter__ and\n # __exit__:\n # RuntimeError: super-class __init__() of type\n # BufferedDC was never called\n self._save_items(dc)\n func(instance, dc, *args, **kwargs)\n self._revert_items(dc)\n\n #import copy # copy solves issue #1 above, but\n #self.dc = copy.copy(dc) # possibly causes issue #2.\n\n #with self:\n # print('in with')\n # func(instance, dc, *args, **kwargs)\n\n return wrapper\n\n def __enter__(self):\n self._save_items(self.dc)\n return self\n\n def __exit__(self, *exc):\n self._revert_items(self.dc)\n return False # True means exceptions *are* suppressed.\n\n def _save_items(self, dc):\n if self.which == 'both':\n self._save_pen(dc)\n self._save_brush(dc)\n elif self.which == 'pen':\n self._save_pen(dc)\n elif self.which == 'brush':\n self._save_brush(dc)\n else:\n err_str = (\"How did you even get here?? 
This class forces \"\n \"correct values for `which` at instancing...\"\n )\n raise ValueError(err_str)\n\n def _revert_items(self, dc):\n if self.which == 'both':\n self._revert_pen(dc)\n self._revert_brush(dc)\n elif self.which == 'pen':\n self._revert_pen(dc)\n elif self.which == 'brush':\n self._revert_brush(dc)\n else:\n err_str = (\"How did you even get here?? This class forces \"\n \"correct values for `which` at instancing...\")\n raise ValueError(err_str)\n\n def _save_pen(self, dc):\n self.prevPen = dc.GetPen()\n\n def _save_brush(self, dc):\n self.prevBrush = dc.GetBrush()\n\n def _revert_pen(self, dc):\n dc.SetPen(self.prevPen)\n\n def _revert_brush(self, dc):\n dc.SetBrush(self.prevBrush)\n\n\ndef pendingDeprecation(new_func):\n \"\"\"\n Raise `PendingDeprecationWarning` and display a message.\n\n Uses inspect.stack() to determine the name of the item that this\n is called from.\n\n :param new_func: The name of the function that should be used instead.\n :type new_func: string.\n \"\"\"\n warn_txt = \"`{}` is pending deprecation. Please use `{}` instead.\"\n _warn(warn_txt.format(inspect.stack()[1][3], new_func),\n PendingDeprecationWarning)\n\n\ndef scale_and_shift_point(x, y, scale=1, shift=0):\n \"\"\"\n Creates a scaled and shifted 2x1 numpy array of [x, y] values.\n\n The shift value must be in the scaled units.\n\n :param float `x`: The x value of the unscaled, unshifted point\n :param float `y`: The y valye of the unscaled, unshifted point\n :param np.array `scale`: The scale factor to use ``[x_sacle, y_scale]``\n :param np.array `shift`: The offset to apply ``[x_shift, y_shift]``.\n Must be in scaled units\n\n :returns: a numpy array of 2 elements\n :rtype: np.array\n\n .. note::\n\n :math:`new = (scale * old) + shift`\n \"\"\"\n point = scale * np.array([x, y]) + shift\n return point\n\n\ndef set_displayside(value):\n \"\"\"\n Wrapper around :class:`~wx.lib.plot._DisplaySide` that allows for \"overloaded\" calls.\n\n If ``value`` is a boolean: all 4 sides are set to ``value``\n\n If ``value`` is a 2-tuple: the bottom and left sides are set to ``value``\n and the other sides are set to False.\n\n If ``value`` is a 4-tuple, then each item is set individually: ``(bottom,\n left, top, right)``\n\n :param value: Which sides to display.\n :type value: bool, 2-tuple of bool, or 4-tuple of bool\n :raises: `TypeError` if setting an invalid value.\n :raises: `ValueError` if the tuple has incorrect length.\n :rtype: :class:`~wx.lib.plot._DisplaySide`\n \"\"\"\n err_txt = (\"value must be a bool or a 2- or 4-tuple of bool\")\n\n # TODO: for 2-tuple, do not change other sides? 
rather than set to False.\n if isinstance(value, bool):\n # turns on or off all axes\n _value = (value, value, value, value)\n elif isinstance(value, tuple):\n if len(value) == 2:\n _value = (value[0], value[1], False, False)\n elif len(value) == 4:\n _value = value\n else:\n raise ValueError(err_txt)\n else:\n raise TypeError(err_txt)\n return DisplaySide(*_value)\n\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n\nif __name__ == \"__main__\":\n raise RuntimeError(\"This module is not intended to be run by itself.\")\n"}}},{"rowIdx":542654,"cells":{"filename":{"kind":"string","value":"the-stack_106_30970"},"text":{"kind":"string","value":"import rmgpy.quantity as quantity\nimport logging\nfrom rmgpy.species import Species\nfrom rmgpy.data.solvation import SolventData, SoluteData, SoluteGroups, SolvationDatabase\nfrom rmgpy.reaction import Reaction\n\n\nclass DiffusionLimited():\n\n def __init__(self):\n # default is false, enabled if there is a solvent\n self.enabled = False\n\n def enable(self, solventData, solvationDatabase, comment=''):\n # diffusionLimiter is enabled if a solvent has been added to the RMG object.\n logging.info(\"Enabling diffusion-limited kinetics...\")\n diffusionLimiter.enabled = True\n diffusionLimiter.database = solvationDatabase\n diffusionLimiter.solventData = solventData\n\n def getSolventViscosity(self, T):\n return self.solventData.getSolventViscosity(T)\n \n def getEffectiveRate(self, reaction, T):\n \"\"\"\n Return the ratio of k_eff to k_intrinsic, which is between 0 and 1.\n \n It is 1.0 if diffusion has no effect.\n \n For 1<=>2 reactions, the reverse rate is limited.\n For 2<=>2 reactions, the faster direction is limited.\n For 2<=>1 or 2<=>3 reactions, the forward rate is limited.\n \"\"\"\n intrinsicKinetics = reaction.kinetics\n reactants = len(reaction.reactants)\n products = len(reaction.products)\n k_forward = intrinsicKinetics.getRateCoefficient(T,P=100e5)\n Keq = reaction.getEquilibriumConstant(T) # Kc\n k_reverse = k_forward / Keq\n k_eff = k_forward\n \n if reactants == 1:\n if products == 1:\n k_eff = k_forward\n else: # two products; reverse rate is limited\n k_diff = self.getDiffusionLimit(T, reaction, forward=False)\n k_eff_reverse = k_reverse*k_diff/(k_reverse+k_diff)\n k_eff = k_eff_reverse * Keq\n else: # 2 reactants\n if products == 1 or products == 3:\n k_diff = self.getDiffusionLimit(T, reaction, forward=True)\n k_eff = k_forward*k_diff/(k_forward+k_diff)\n else: # 2 products\n if Keq > 1.0: # forward rate is faster and thus limited\n k_diff = self.getDiffusionLimit(T, reaction, forward=True)\n k_eff = k_forward*k_diff/(k_forward+k_diff)\n else: # reverse rate is faster and thus limited\n k_diff = self.getDiffusionLimit(T, reaction, forward=False)\n k_eff_reverse = k_reverse*k_diff/(k_reverse+k_diff)\n k_eff = k_eff_reverse * Keq\n return k_eff \n \n def getDiffusionLimit(self, T, reaction, forward=True):\n \"\"\"\n Return the diffusive limit on the rate coefficient, k_diff.\n \n This is the upper limit on the rate, in the specified direction.\n (ie. 
forward direction if forward=True [default] or reverse if forward=False)\n \"\"\"\n if forward:\n reacting = reaction.reactants\n else:\n reacting = reaction.products\n assert len(reacting)==2, \"Can only calculate diffusion limit in a bimolecular direction\"\n radii = 0.0\n diffusivities = 0.0\n for spec in reacting:\n soluteData = self.database.getSoluteData(spec)\n # calculate radius with the McGowan volume and assuming sphere\n radius = ((75*soluteData.V/3.14159)**(1/3))/100\n diff = soluteData.getStokesDiffusivity(T, self.getSolventViscosity(T))\n radii += radius\n diffusivities += diff\n N_a = 6.022e23 # Avogadro's Number\n k_diff = 4*3.14159*radii*diffusivities*N_a\n return k_diff\n\n\n# module level variable. There should only ever be one. It starts off disabled\ndiffusionLimiter = DiffusionLimited()"}}},{"rowIdx":542655,"cells":{"filename":{"kind":"string","value":"the-stack_106_30971"},"text":{"kind":"string","value":"# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"schema_guided_dialogue dataset.\"\"\"\n\nimport tensorflow_datasets.public_api as tfds\nfrom tensorflow_datasets.text.schema_guided_dialogue import schema_guided_dialogue\n\n\nclass SchemaGuidedDialogueTest(tfds.testing.DatasetBuilderTestCase):\n \"\"\"Tests for schema_guided_dialogue dataset.\"\"\"\n DATASET_CLASS = schema_guided_dialogue.SchemaGuidedDialogue\n SPLITS = {\n 'train': 3, # Number of fake train example\n 'dev': 2, # Number of fake train example\n 'test': 1, # Number of fake test example\n }\n\n\nif __name__ == '__main__':\n tfds.testing.test_main()\n"}}},{"rowIdx":542656,"cells":{"filename":{"kind":"string","value":"the-stack_106_30972"},"text":{"kind":"string","value":"import torch\r\n\r\nfrom ..utils import box_utils\r\nfrom .data_preprocessing import PredictionTransform\r\nfrom ..utils.misc import Timer\r\n\r\n\r\nclass Predictor:\r\n def __init__(self, net, size, mean=0.0, std=1.0, nms_method=None,\r\n iou_threshold=0.45, filter_threshold=0.01, candidate_size=200, sigma=0.5, device=None):\r\n self.net = net\r\n self.transform = PredictionTransform(size, mean, std)\r\n self.iou_threshold = iou_threshold\r\n self.filter_threshold = filter_threshold\r\n self.candidate_size = candidate_size\r\n self.nms_method = nms_method\r\n\r\n self.sigma = sigma\r\n if device:\r\n self.device = device\r\n else:\r\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n self.net.to(self.device)\r\n self.net.eval()\r\n\r\n self.timer = Timer()\r\n\r\n def predict(self, image, top_k=-1, prob_threshold=None):\r\n cpu_device = torch.device(\"cpu\")\r\n height, width, _ = image.shape\r\n image = self.transform(image)\r\n images = image.unsqueeze(0)\r\n images = images.to(self.device)\r\n with torch.no_grad():\r\n self.timer.start()\r\n scores, boxes = self.net.forward(images)\r\n print(\"Inference time: \", self.timer.end())\r\n boxes = boxes[0]\r\n scores = scores[0]\r\n if not prob_threshold:\r\n prob_threshold = 
self.filter_threshold\r\n # this version of nms is slower on GPU, so we move data to CPU.\r\n boxes = boxes.to(cpu_device)\r\n scores = scores.to(cpu_device)\r\n picked_box_probs = []\r\n picked_labels = []\r\n for class_index in range(1, scores.size(1)):\r\n probs = scores[:, class_index]\r\n mask = probs > prob_threshold\r\n probs = probs[mask]\r\n if probs.size(0) == 0:\r\n continue\r\n subset_boxes = boxes[mask, :]\r\n box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1)\r\n box_probs = box_utils.nms(box_probs, self.nms_method,\r\n score_threshold=prob_threshold,\r\n iou_threshold=self.iou_threshold,\r\n sigma=self.sigma,\r\n top_k=top_k,\r\n candidate_size=self.candidate_size)\r\n picked_box_probs.append(box_probs)\r\n picked_labels.extend([class_index] * box_probs.size(0))\r\n if not picked_box_probs:\r\n return torch.tensor([]), torch.tensor([]), torch.tensor([])\r\n picked_box_probs = torch.cat(picked_box_probs)\r\n picked_box_probs[:, 0] *= width\r\n picked_box_probs[:, 1] *= height\r\n picked_box_probs[:, 2] *= width\r\n picked_box_probs[:, 3] *= height\r\n return picked_box_probs[:, :4], torch.tensor(picked_labels), picked_box_probs[:, 4]"}}},{"rowIdx":542657,"cells":{"filename":{"kind":"string","value":"the-stack_106_30973"},"text":{"kind":"string","value":"from pub_data import publish\nimport psutil\n\ncpu = psutil.cpu_percent()\nprint(cpu)\n\nram = psutil.virtual_memory().percent\nprint(ram)\n\ndisk = psutil.disk_usage('/').percent\nprint(disk)\n\ndata = {\n \"cpu\" : cpu,\n \"ram\" : ram,\n \"disk\": disk\n }\n\npublish(\"piSystemUsage\", data)\n"}}},{"rowIdx":542658,"cells":{"filename":{"kind":"string","value":"the-stack_106_30974"},"text":{"kind":"string","value":"from requests import post\nimport os\n \n\"\"\"\nTG 消息推送模块\n\"\"\"\n\nTG_TOKEN = os.getenv(\"TG_TOKEN\")\t#TG机器人的TOKEN\nCHAT_ID = os.getenv(\"CHAT_ID\")\t #推送消息的CHAT_ID\n\n\n\ndef post_tg(message):\n telegram_message = f\"{message}\"\n \n params = (\n ('chat_id', CHAT_ID),\n ('text', telegram_message),\n ('parse_mode', \"Markdown\"), #可选Html或Markdown\n ('disable_web_page_preview', \"yes\")\n ) \n telegram_url = \"https://api.telegram.org/bot\" + TG_TOKEN + \"/sendMessage\"\n telegram_req = post(telegram_url, params=params)\n telegram_status = telegram_req.status_code\n if telegram_status == 200:\n print(f\"INFO: Telegram Message sent\")\n else:\n print(\"Telegram Error\")\n \nif __name__ == \"__main__\":\n post_tg('浙江大学每日健康打卡 V1.0 '+ \" \\n\\n 签到结果: \" + \"请自行查看\") \n \n"}}},{"rowIdx":542659,"cells":{"filename":{"kind":"string","value":"the-stack_106_30976"},"text":{"kind":"string","value":"import click\nfrom count_all_mutations_helpers import count_mutations, file_merge_algorithm\nfrom count_all_mutations_helpers import post_analyse\nimport os\nimport pandas as pd\nimport threading\nfrom queue import Queue\n\n\nprint_lock = threading.Lock()\nurl_queue = Queue()\n\n\ndef process_queue():\n while True:\n input_tuple = url_queue.get()\n file_merge_algorithm(input_tuple)\n url_queue.task_done()\n\n\n@click.command()\n@click.argument('input_folder')\n@click.argument('output_folder')\n@click.option('--from_step', '-s')\n@click.option('--rerun', '-r')\ndef main(input_folder, output_folder,\n from_step='', rerun=''):\n if not from_step:\n from_step = 0\n if not rerun:\n rerun = 0\n rerun = bool(rerun)\n from_step = int(from_step)\n print(from_step, rerun)\n click.echo(\"Analysis starting for:\")\n click.echo(input_folder)\n if from_step <= 1:\n print(\"Initializing step 1\")\n 
count_mutations(input_folder=input_folder, output_folder=output_folder,\n rerun=rerun)\n print(\"Finished step 1\")\n else:\n click.echo(\"Skipping step 1\")\n\n if from_step <= 2:\n print(\"Initializing step 2\")\n files_temp = [[x[0] + '/' + y for y in x[2] if ('_eval' not in y and 'results_count_all' in y)] for x\n in os.walk(output_folder + '/patients')\n ]\n files_temp = [file for sublist in files_temp for file in sublist]\n # print(files_temp)\n for file in files_temp:\n if not os.path.isfile(file.split('.')[0] + '_eval.csv'):\n\n post_analyse(file,\n file.split('.')[0] + '_eval.csv')\n if not os.path.isfile(file.split('.')[0] + '_evaluated.csv'):\n file_df = pd.read_csv(file.split('.')[0] + '_eval.csv')\n\n file_df = file_df[file_df['eval']]\n file_df.to_csv(file.split('.')[0] + '_evaluated.csv')\n print(\"Finished step 2\")\n else:\n click.echo(\"Skipping step 2\")\n\n if from_step <= 3:\n print(\"Initializing step 3\")\n files_temp = [[x[0] + '/' + y for y in x[2] if ('_evaluated.csv' in y and 'results_count_all' in y\n )] for x\n in os.walk(output_folder + '/patients')\n ]\n files_temp = [file for sublist in files_temp for file in sublist]\n\n results = pd.DataFrame()\n for i in range(10):\n t = threading.Thread(target=process_queue)\n t.daemon = True\n t.start()\n\n for file in files_temp:\n # print(file)\n url_queue.put(file)\n\n url_queue.join()\n\n files_temp = [[x[0] + '/' + y for y in x[2] if ('_algorithms_merged.csv' in y and 'results_count_all' in y\n )] for x\n in os.walk(output_folder + '/patients')\n ]\n files_temp = [file for sublist in files_temp for file in sublist]\n\n for file in files_temp:\n temp_df = pd.read_csv(file)\n user = temp_df['indiv_name'].unique()[0]\n count = temp_df.shape[0]\n new_record = pd.DataFrame([[user, count]], columns=['patient_id', 'mutation_count'])\n results = pd.concat([results, new_record])\n results.to_csv(output_folder + '/patient_mutation_count.csv', index=False)\n print(\"Finished step 3\")\n else:\n click.echo(\"Skipping step 3\")\n\n click.echo(\"Analysis finished\")\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":542660,"cells":{"filename":{"kind":"string","value":"the-stack_106_30977"},"text":{"kind":"string","value":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:hydrogen\n# text_representation:\n# extension: .py\n# format_name: hydrogen\n# format_version: '1.2'\n# jupytext_version: 1.1.7\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\nimport datetime\n# %%\nimport math\nimport os\nimport time\n# %%\nfrom collections import deque\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n# %%\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils import data\n\nimport yolact\n# %%\nfrom siim import bootstrap, io\nfrom siim.config import cfg\nfrom siim.resource import WEIGHTS_DIR_PATH, ScatterWrapper, TrainDataset\nfrom yolact.data import detection_collate\nfrom yolact.utils.augmentations import BaseTransform, SSDAugmentation\nfrom yolact.utils.functions import MovingAverage, SavePath\n\n\n# %%\ndef prepare_data(datum, device=torch.device('cpu')):\n images, (targets, masks, num_crowds) = datum\n images = Variable(images.to(device), requires_grad=False)\n targets = [Variable(ann.to(device), requires_grad=False)\n for ann in targets]\n masks = [Variable(mask.to(device), requires_grad=False) for mask in masks]\n\n return images, targets, masks, num_crowds\n\n# %%\ndef 
set_lr(optimizer, new_lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n\n\n# %%\nbootstrap()\n\n# %%\ndataset = TrainDataset(transform=SSDAugmentation(), empty_mask_is_negative=True)\nn_samples = len(dataset) # n_samples is 60000\ntrain_size = int(n_samples * 0.8) # train_size is 48000\nval_size = n_samples - train_size # val_size is 48000\ntrain_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])\n# %%\nimg, (target, mask, no_crowd) = train_dataset[2900]\nplt.figure(figsize=(10, 10))\nplt.subplot(1, 2, 1)\nplt.imshow(img[0], cmap=\"gray\")\nplt.axis(\"off\")\n#\nplt.subplot(1, 2, 2)\nplt.imshow(img[0] * mask, cmap=\"gray\")\nplt.axis(\"off\")\n\n# %%\nnet = yolact.yolact.Yolact()\nnet.train()\nnet.init_weights(backbone_path=str(WEIGHTS_DIR_PATH / cfg.backbone.path))\n\n# %%\ndata_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=cfg.batch_size,\n num_workers=cfg.num_workers,\n shuffle=True,\n collate_fn=detection_collate,\n pin_memory=True,\n)\n\noptimizer = optim.SGD(\n net.parameters(), lr=cfg.lr, momentum=cfg.momentum, weight_decay=cfg.decay\n)\ncriterion = yolact.layers.MultiBoxLoss(\n num_classes=cfg.num_classes,\n pos_threshold=cfg.positive_iou_threshold,\n neg_threshold=cfg.negative_iou_threshold,\n negpos_ratio=3,\n)\n\n# %%\nsave_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=cfg.save_folder)\ntime_avg = MovingAverage()\nloss_types = [\"B\", \"C\", \"M\", \"P\", \"D\", \"E\", \"S\"]\nloss_avgs = {k: MovingAverage(100) for k in loss_types}\nprint(\"Begin training!\")\n\n# %%\n# try-except so you can use ctrl+c to save early and stop training\nstep_index = 0\niteration = 0\nlast_time = time.time()\nepoch_size = len(train_dataset) // cfg.batch_size\nnum_epochs = math.ceil(cfg.max_iter / epoch_size)\n\nfor epoch in range(num_epochs):\n for datum in data_loader:\n # Stop if we've reached an epoch if we're resuming from start_iter\n if iteration == (epoch + 1) * epoch_size:\n break\n\n # Stop at the configured number of iterations even if mid-epoch\n if iteration == cfg.max_iter:\n break\n\n # Change a config setting if we've reached the specified iteration\n changed = False\n for change in cfg.delayed_settings:\n if iteration >= change[0]:\n changed = True\n cfg.replace(change[1])\n\n # Reset the loss averages because things might have changed\n for avg in loss_avgs:\n avg.reset()\n\n # If a config setting was changed, remove it from the list so we don't keep checking\n if changed:\n cfg.delayed_settings = [\n x for x in cfg.delayed_settings if x[0] > iteration]\n\n # Warm up by linearly interpolating the learning rate from some smaller value\n if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:\n set_lr(optimizer, (cfg.lr - cfg.lr_warmup_init) *\n (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)\n\n # Adjust the learning rate at the given iterations, but also if we resume from past that iteration\n while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:\n step_index += 1\n set_lr(optimizer, args.lr * (args.gamma ** step_index))\n\n # Load training data\n # Note, for training on multiple gpus this will use the custom replicate and gather I wrote up there\n images, targets, masks, num_crowds = prepare_data(datum)\n\n # Forward Pass\n out = net(images)\n\n # Compute Loss\n optimizer.zero_grad()\n\n wrapper = ScatterWrapper(targets, masks, num_crowds)\n losses = criterion(out, wrapper, wrapper.make_mask())\n\n # Mean here 
because Dataparallel\n losses = {k: v.mean() for k, v in losses.items()}\n loss = sum([losses[k] for k in losses])\n\n # Backprop\n loss.backward() # Do this to free up vram even if loss is not finite\n if torch.isfinite(loss).item():\n optimizer.step()\n\n # Add the loss to the moving average for bookkeeping\n for k in losses:\n loss_avgs[k].add(losses[k].item())\n\n cur_time = time.time()\n elapsed = cur_time - last_time\n last_time = cur_time\n\n # Exclude graph setup from the timing information\n if 0 < iteration:\n time_avg.add(elapsed)\n\n if iteration % 10 == 0:\n eta_str = str(datetime.timedelta(\n seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]\n\n total = sum([loss_avgs[k].get_avg() for k in losses])\n loss_labels = sum([[k, loss_avgs[k].get_avg()]\n for k in loss_types if k in losses], [])\n\n print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')\n % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)\n\n iteration += 1\n\n if iteration % cfg.save_interval == 0 and 0 < iteration:\n print('Saving state, iter:', iteration)\n net.save_weights(save_path(epoch, iteration))\n\n #if cfg.keep_latest:\n # for p in SavePath.get_olds(cfg.save_folder, cfg.name):\n # print('Deleting old save...')\n # os.remove(str(p))\n \n break\n break\n\nnet.save_weights(save_path(epoch, iteration))\n"}}},{"rowIdx":542661,"cells":{"filename":{"kind":"string","value":"the-stack_106_30979"},"text":{"kind":"string","value":"import re\nimport sys\nfrom pathlib import Path\n\nimport setuptools\n\n\nLABEXTENSIONS_DIR = Path('py_src') / 'jupyter_lsp' / 'labextensions'\nLABEXTENSIONS_INSTALL_DIR = Path('share') / 'jupyter' / 'labextensions'\n\n\ndef get_data_files():\n extension_files = [\n (str(LABEXTENSIONS_INSTALL_DIR / file.relative_to(LABEXTENSIONS_DIR).parent), [str(file)])\n for file in LABEXTENSIONS_DIR.rglob(\"*.*\")\n ]\n\n extension_files.append((\"etc/jupyter/jupyter_notebook_config.d\", [\"py_src/jupyter_lsp/etc/jupyter-lsp-serverextension.json\"]))\n\n return extension_files\n\n\nsetuptools.setup(\n version=re.findall(\n r\"\"\"__version__ = \"([^\"]+)\"$\"\"\",\n (Path(__file__).parent / \"py_src\" / \"jupyter_lsp\" / \"_version.py\").read_text(),\n )[0],\n setup_requires=[\"pytest-runner\"] if \"test\" in sys.argv else [],\n data_files=get_data_files(),\n)\n"}}},{"rowIdx":542662,"cells":{"filename":{"kind":"string","value":"the-stack_106_30980"},"text":{"kind":"string","value":"# -*- coding:utf8 -*-\n\"\"\"\n传入的key和类型,写在db_api外,当作一个小conf一起传入,db_api根据传入自行判断接受与否\ndb会自动创建两个时间键值:create_time,last_write_time\n\"\"\"\n\n\nfrom db.local_db import LocalDb as BaseDb\n\n\nclass PipeTaskInfo(BaseDb):\n\n def __init__(self):\n super(PipeTaskInfo, self).__init__()\n self.table_type = \"pipetask_info\"\n self.design_table_type.update({ # db会自动添加create_time:str和last_write_time:str两项\n \"pipetask_id\": str,\n\n \"pipeline_id\": str,\n \"finish_node_list\": list,\n \"first_input_args\": None,\n \"first_input_kwargs\": None,\n\n \"pipetask_status\": str,\n \"flags\": None\n })\n self.map_id = \"pipetask_id\"\n self._init_db_folder()\n\n\nclass PipeLineInfo(BaseDb):\n\n def __init__(self):\n super(PipeLineInfo, self).__init__()\n self.table_type = \"pipeline_info\"\n self.design_table_type.update({ # db会自动添加create_time:str和last_write_time:str两项\n \"pipeline_id\": str,\n \"pipeline_name\": str,\n\n \"dag_dict\": dict,\n \"topo_order_list\": list,\n \"config\": None,\n \"node_id_dict\": dict,\n \"flags\": None\n })\n self.map_id = 
\"pipeline_id\"\n self._init_db_folder()\n\n\nclass PipeNodeInfo(BaseDb):\n\n def __init__(self):\n super(PipeNodeInfo, self).__init__()\n self.table_type = \"pipenode_info\"\n self.design_table_type.update({ # db会自动添加create_time:str和last_write_time:str两项\n \"pipenode_id\": str,\n \"pipenode_name\": str,\n\n \"func_des\": list,\n \"func_str\": str,\n \"type\": str,\n \"inputs\": list,\n \"outputs\": list,\n \"next_nodes\": list,\n \"prep_nodes\": list,\n \"outputs_r\": dict,\n \"flags\": None\n })\n self.map_id = \"pipenode_id\"\n self._init_db_folder()\n"}}},{"rowIdx":542663,"cells":{"filename":{"kind":"string","value":"the-stack_106_30984"},"text":{"kind":"string","value":"\"\"\"\nLift Curve Widget\n-----------------\n\n\"\"\"\nfrom collections import namedtuple\n\nimport numpy as np\nimport sklearn.metrics as skl_metrics\n\nfrom AnyQt import QtWidgets\nfrom AnyQt.QtGui import QColor, QPen, QPalette, QFont\nfrom AnyQt.QtCore import Qt\n\nimport pyqtgraph as pg\n\nimport Orange\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.widgets.evaluate.utils import check_results_adequacy\nfrom Orange.widgets.utils import colorpalette, colorbrewer\nfrom Orange.widgets.evaluate.owrocanalysis import convex_hull\nfrom Orange.widgets.widget import Input\nfrom Orange.widgets import report\n\n\nCurvePoints = namedtuple(\"CurvePoints\", [\"cases\", \"tpr\", \"thresholds\"])\nCurvePoints.is_valid = property(lambda self: self.cases.size > 0)\n\nLiftCurve = namedtuple(\"LiftCurve\", [\"points\", \"hull\"])\nLiftCurve.is_valid = property(lambda self: self.points.is_valid)\n\n\ndef liftCurve_from_results(results, clf_index, target):\n x, y, thresholds = lift_curve_from_results(results, target, clf_index)\n\n points = CurvePoints(x, y, thresholds)\n hull = CurvePoints(*convex_hull([(x, y, thresholds)]))\n return LiftCurve(points, hull)\n\n\nPlotCurve = namedtuple(\"PlotCurve\", [\"curve\", \"curve_item\", \"hull_item\"])\n\n\nclass OWLiftCurve(widget.OWWidget):\n name = \"Lift Curve\"\n description = (\n \"Construct and display a lift curve \" \"from the evaluation of classifiers.\"\n )\n icon = \"icons/LiftCurve.svg\"\n priority = 1020\n\n class Inputs:\n evaluation_results = Input(\"Evaluation Results\", Orange.evaluation.Results)\n\n target_index = settings.Setting(0)\n selected_classifiers = settings.Setting([])\n\n display_convex_hull = settings.Setting(False)\n display_cost_func = settings.Setting(True)\n\n fp_cost = settings.Setting(500)\n fn_cost = settings.Setting(500)\n target_prior = settings.Setting(50.0)\n\n graph_name = \"plot\"\n\n def __init__(self):\n super().__init__()\n\n self.results = None\n self.classifier_names = []\n self.colors = []\n self._curve_data = {}\n\n box = gui.vBox(self.controlArea, \"Plot\")\n tbox = gui.vBox(box, \"Target Class\")\n tbox.setFlat(True)\n\n self.target_cb = gui.comboBox(\n tbox,\n self,\n \"target_index\",\n callback=self._on_target_changed,\n contentsLength=8,\n )\n\n cbox = gui.vBox(box, \"Classifiers\")\n cbox.setFlat(True)\n self.classifiers_list_box = gui.listBox(\n cbox,\n self,\n \"selected_classifiers\",\n \"classifier_names\",\n selectionMode=QtWidgets.QListView.MultiSelection,\n callback=self._on_classifiers_changed,\n )\n\n gui.checkBox(\n box,\n self,\n \"display_convex_hull\",\n \"Show lift convex hull\",\n callback=self._replot,\n )\n\n self.plotview = pg.GraphicsView(background=\"w\")\n self.plotview.setFrameStyle(QtWidgets.QFrame.StyledPanel)\n\n self.plot = pg.PlotItem(enableMenu=False)\n self.plot.setMouseEnabled(False, False)\n 
self.plot.hideButtons()\n\n pen = QPen(self.palette().color(QPalette.Text))\n\n tickfont = QFont(self.font())\n tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11))\n\n axis = self.plot.getAxis(\"bottom\")\n axis.setTickFont(tickfont)\n axis.setPen(pen)\n axis.setLabel(\"P Rate\")\n\n axis = self.plot.getAxis(\"left\")\n axis.setTickFont(tickfont)\n axis.setPen(pen)\n axis.setLabel(\"TP Rate\")\n\n self.plot.showGrid(True, True, alpha=0.1)\n self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0), padding=0.05)\n\n self.plotview.setCentralItem(self.plot)\n self.mainArea.layout().addWidget(self.plotview)\n\n @Inputs.evaluation_results\n def set_results(self, results):\n \"\"\"Set the input evaluation results.\"\"\"\n self.clear()\n self.results = check_results_adequacy(results, self.Error)\n if self.results is not None:\n self._initialize(results)\n self._setup_plot()\n\n def clear(self):\n \"\"\"Clear the widget state.\"\"\"\n self.plot.clear()\n self.results = None\n self.target_cb.clear()\n self.target_index = 0\n self.classifier_names = []\n self.colors = []\n self._curve_data = {}\n\n def _initialize(self, results):\n N = len(results.predicted)\n\n names = getattr(results, \"learner_names\", None)\n if names is None:\n names = [\"#{}\".format(i + 1) for i in range(N)]\n\n scheme = colorbrewer.colorSchemes[\"qualitative\"][\"Dark2\"]\n if N > len(scheme):\n scheme = colorpalette.DefaultRGBColors\n self.colors = colorpalette.ColorPaletteGenerator(N, scheme)\n\n self.classifier_names = names\n self.selected_classifiers = list(range(N))\n for i in range(N):\n item = self.classifiers_list_box.item(i)\n item.setIcon(colorpalette.ColorPixmap(self.colors[i]))\n\n self.target_cb.addItems(results.data.domain.class_var.values)\n\n def plot_curves(self, target, clf_idx):\n if (target, clf_idx) not in self._curve_data:\n curve = liftCurve_from_results(self.results, clf_idx, target)\n color = self.colors[clf_idx]\n pen = QPen(color, 1)\n pen.setCosmetic(True)\n shadow_pen = QPen(pen.color().lighter(160), 2.5)\n shadow_pen.setCosmetic(True)\n item = pg.PlotDataItem(\n curve.points[0],\n curve.points[1],\n pen=pen,\n shadowPen=shadow_pen,\n symbol=\"+\",\n symbolSize=3,\n symbolPen=shadow_pen,\n antialias=True,\n )\n hull_item = pg.PlotDataItem(\n curve.hull[0], curve.hull[1], pen=pen, antialias=True\n )\n self._curve_data[target, clf_idx] = PlotCurve(curve, item, hull_item)\n\n return self._curve_data[target, clf_idx]\n\n def _setup_plot(self):\n target = self.target_index\n selected = self.selected_classifiers\n curves = [self.plot_curves(target, clf_idx) for clf_idx in selected]\n\n for curve in curves:\n self.plot.addItem(curve.curve_item)\n\n if self.display_convex_hull:\n hull = convex_hull([c.curve.hull for c in curves])\n self.plot.plot(hull[0], hull[1], pen=\"y\", antialias=True)\n\n pen = QPen(QColor(100, 100, 100, 100), 1, Qt.DashLine)\n pen.setCosmetic(True)\n self.plot.plot([0, 1], [0, 1], pen=pen, antialias=True)\n\n warning = \"\"\n if not all(c.curve.is_valid for c in curves):\n if any(c.curve.is_valid for c in curves):\n warning = \"Some lift curves are undefined\"\n else:\n warning = \"All lift curves are undefined\"\n\n self.warning(warning)\n\n def _replot(self):\n self.plot.clear()\n if self.results is not None:\n self._setup_plot()\n\n def _on_target_changed(self):\n self._replot()\n\n def _on_classifiers_changed(self):\n self._replot()\n\n def send_report(self):\n if self.results is None:\n return\n caption = report.list_legend(\n self.classifiers_list_box, 
self.selected_classifiers\n )\n self.report_items(((\"Target class\", self.target_cb.currentText()),))\n self.report_plot()\n self.report_caption(caption)\n\n\ndef lift_curve_from_results(results, target, clf_idx, subset=slice(0, -1)):\n actual = results.actual[subset]\n scores = results.probabilities[clf_idx][subset][:, target]\n yrate, tpr, thresholds = lift_curve(actual, scores, target)\n return yrate, tpr, thresholds\n\n\ndef lift_curve(ytrue, ypred, target=1):\n P = np.sum(ytrue == target)\n N = ytrue.size - P\n\n if P == 0 or N == 0:\n # Undefined TP and FP rate\n return np.array([]), np.array([]), np.array([])\n\n fpr, tpr, thresholds = skl_metrics.roc_curve(ytrue, ypred, target)\n rpp = fpr * (N / (P + N)) + tpr * (P / (P + N))\n return rpp, tpr, thresholds\n\n\ndef main():\n import sip\n from AnyQt.QtWidgets import QApplication\n from Orange.classification import (\n LogisticRegressionLearner,\n SVMLearner,\n NuSVMLearner,\n )\n\n app = QApplication([])\n w = OWLiftCurve()\n w.show()\n w.raise_()\n\n data = Orange.data.Table(\"ionosphere\")\n results = Orange.evaluation.CrossValidation(\n data,\n [\n LogisticRegressionLearner(penalty=\"l2\"),\n LogisticRegressionLearner(penalty=\"l1\"),\n SVMLearner(probability=True),\n NuSVMLearner(probability=True),\n ],\n store_data=True,\n )\n results.learner_names = [\"LR l2\", \"LR l1\", \"SVM\", \"Nu SVM\"]\n w.set_results(results)\n rval = app.exec_()\n\n sip.delete(w)\n del w\n app.processEvents()\n del app\n return rval\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":542664,"cells":{"filename":{"kind":"string","value":"the-stack_106_30986"},"text":{"kind":"string","value":"# --------------------------------------------------------------------------- #\n# Diagnostics # \n# --------------------------------------------------------------------------- #\n\"\"\"Diagnostic Plots for single gradient descent optimizations. \"\"\"\nimport datetime\nfrom IPython.display import HTML\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\nfrom .basic_plots import BasicPlots\nfrom ..utils.filemanager import save_fig, save_csv, save_gif\n\nclass Diagnostics():\n\n def learning_rates(self, models, directory=None, filename=None, \n xlim=None, ylim=None,show=True):\n \"\"\"Prints learning rates by epoch for one or more models\"\"\"\n\n results = []\n for _, v in models.items(): \n result = pd.DataFrame({\"Name\": v.name, \n \"Iteration\": np.arange(1,len(v.blackbox.learning_rates)+1),\n \"Learning Rate\": v.blackbox.learning_rates})\n results.append(result)\n results = pd.concat(results, axis=0)\n\n # Render Plot\n fig, ax = plt.subplots(figsize=(12,4)) \n plot = BasicPlots()\n title = \"Learning Rate(s)\"\n ax = plot.lineplot(x='Iteration', y='Learning Rate', z='Name', data=results, title=title, ax=ax)\n # Set x and y limits\n if xlim is not None:\n ax.set_xlim(left = xlim[0], right=xlim[1])\n if ylim is not None:\n ax.set_xlim(bottom=ylim[0], top=ylim[1])\n # Finalize, show and save\n fig.tight_layout()\n if show:\n plt.show()\n if directory is not None:\n if filename is None:\n filename = title + '.png'\n save_fig(fig, directory, filename)\n plt.close(fig) \n\n \n def validation_curve(self, model, directory=None, filename=None, \n xlim=None, ylim=None,show=True):\n \"\"\"Renders validation curve e.g. 
training and validation error\"\"\"\n\n # Extract parameters and data\n params = model.get_params()\n\n d = {'Iteration': np.arange(1,model.epochs+1), \n 'Learning Rates': model.learning_rates, \n 'Training Set': model.train_scores,\n 'Validation Set': model.val_scores}\n df = pd.DataFrame(data=d)\n df = pd.melt(df, id_vars=['Iteration', 'Learning Rates'], var_name='Dataset', value_name='Scores')\n # Format title\n title = model.algorithm + \"\\n\" + \"Validation Curve\" \n\n # Initialize plot and set aesthetics \n fig, ax = plt.subplots(figsize=(12,4)) \n sns.set(style=\"whitegrid\", font_scale=1)\n ax.set_facecolor('w')\n ax.tick_params(colors='k')\n ax.xaxis.label.set_color('k')\n ax.yaxis.label.set_color('k')\n ax.set_xlabel('Iteration')\n ax.set_ylabel('Learning Rates')\n ax.set_title(title, color='k') \n\n # Plot Learning Rates\n ax = sns.lineplot(x='Iteration', y='Learning Rates', color='g', data=df, ax=ax) \n\n # Plot scores\n ax2 = ax.twinx()\n ax2 = sns.lineplot(x='Iteration', y='Scores', hue='Dataset', data=df, ax=ax2)\n ax2.set_ylabel('Scores')\n # Set x and y limits\n if xlim is not None:\n ax.set_xlim(left = xlim[0], right=xlim[1])\n if ylim is not None:\n ax.set_xlim(bottom=ylim[0], top=ylim[1])\n\n # Show plot\n fig.tight_layout()\n if show:\n plt.show()\n # Save plot if instructed to do so\n if directory is not None:\n if filename is None:\n filename = model.algorithm + ' Validation Curve.png '\n save_fig(fig, directory, filename) \n return fig "}}},{"rowIdx":542665,"cells":{"filename":{"kind":"string","value":"the-stack_106_30987"},"text":{"kind":"string","value":"##################################################################################################\n# Copyright (c) 2012 Brett Dixon\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in \n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the \n# Software, and to permit persons to whom the Software is furnished to do so, \n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all \n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS \n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR \n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER \n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION \n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n##################################################################################################\n\nfrom django.conf.urls import url\n\n# import views\nfrom frog import views\n\n\nurlpatterns = [\n # -- Gallery\n url(r'^gallery$', views.gallery.index),\n url(r'^gallery/(?P\\d+)$', views.gallery.index),\n url(r'^gallery/(?P\\d+)/filter$', views.gallery.filterObjects),\n url(r'^gallery/(?P\\d+)/subscribe$', views.gallery.subscribe),\n # -- Piece\n url(r'^like/(?P\\w+)$', views.piece.like),\n url(r'^piece/group/$', views.piece.group),\n url(r'^piece/group/(?P\\d+)/$', views.piece.group),\n url(r'^piece/(?P\\w+)/$', views.piece.data),\n url(r'^p$', views.piece.getGuids),\n # -- Tag\n url(r'^tag/$', views.tag.index),\n url(r'^tag/(?P\\d+)/$', views.tag.index),\n url(r'^tag/resolve/(?P[\\w\\s\\.\\-\\`\\']+)$', views.tag.resolve),\n url(r'^tag/search$', views.tag.search),\n url(r'^tag/manage$', views.tag.manage),\n url(r'^tag/merge/(?P\\d+)/$', views.tag.merge),\n # -- User prefs\n url(r'^pref/$', views.userpref.index),\n # -- Comments\n url(r'^comment/$', views.comment.commentList),\n url(r'^comment/(?P\\d+)/$', views.comment.index),\n # -- Misc functions\n url(r'^download$', views.download),\n url(r'^switchartist$', views.switchArtist),\n url(r'^artistlookup$', views.artistLookup),\n url(r'^isunique/$', views.isUnique),\n url(r'^getuser$', views.getUser),\n url(r'^userlist', views.userList),\n url(r'^csrf$', views.csrf),\n url(r'^siteconfig/$', views.siteconfig.index),\n url(r'^clienterror/$', views.clientError),\n url(r'^releasenotes$', views.releaseNotes),\n url(r'^view/$', views.piece.recordView),\n # -- Authentication\n url(r'^login$', views.login_),\n url(r'^logout$', views.logout_),\n url(r'^access_denied', views.accessDenied),\n\n url(r'^$', views.index),\n]\n"}}},{"rowIdx":542666,"cells":{"filename":{"kind":"string","value":"the-stack_106_30988"},"text":{"kind":"string","value":"def to_type(o, new_type):\n '''\n Helper funciton that receives an object or a dict and convert it to a new given type.\n\n :param object|dict o: The object to convert\n :param Type new_type: The type to convert to.\n '''\n if new_type == type(o):\n return o\n else:\n return new_type(**o)\n\n\nclass Position(object):\n def __init__(self, line, character):\n \"\"\"\n Constructs a new Position instance.\n\n :param int line: Line position in a document (zero-based).\n :param int character: Character offset on a line in a document (zero-based).\n \"\"\"\n self.line = line\n self.character = character\n\n\nclass Range(object):\n def __init__(self, start, end):\n \"\"\"\n Constructs a new Range instance.\n\n :param Position start: The range's start position.\n :param Position end: The range's end position.\n \"\"\"\n self.start = to_type(start, Position)\n self.end = to_type(end, Position)\n\n\nclass Location(object):\n \"\"\"\n Represents a location inside a resource, such as a line inside a text file.\n \"\"\"\n def __init__(self, uri, range):\n \"\"\"\n Constructs a new Range instance.\n\n :param str uri: Resource file.\n :param Range range: The range inside the file\n \"\"\"\n self.uri = uri\n self.range = to_type(range, Range)\n\n \nclass Diagnostic(object):\n def __init__(self, range, severity, code, source, message, relatedInformation):\n 
\"\"\"\n        Constructs a new Diagnostic instance.\n        :param Range range: The range at which the message applies.\n        :param int severity: The diagnostic's severity. Can be omitted. If omitted it is up to the\n            client to interpret diagnostics as error, warning, info or hint.\n        :param str code: The diagnostic's code, which might appear in the user interface.\n        :param str source: A human-readable string describing the source of this\n            diagnostic, e.g. 'typescript' or 'super lint'.\n        :param str message: The diagnostic's message.\n        :param list relatedInformation: An array of related diagnostic information, e.g. when symbol-names within\n            a scope collide all definitions can be marked via this property.\n        \"\"\"\n        self.range = range\n        self.severity = severity\n        self.code = code\n        self.source = source\n        self.message = message\n        self.relatedInformation = relatedInformation\n\n\nclass DiagnosticSeverity(object):\n    Error = 1\n    Warning = 2  # note: `Warning` shadows the Python built-in of the same name\n    Information = 3\n    Hint = 4\n\n\nclass DiagnosticRelatedInformation(object):\n    def __init__(self, location, message):\n        \"\"\"\n        Constructs a new DiagnosticRelatedInformation instance.\n        :param Location location: The location of this related diagnostic information.\n        :param str message: The message of this related diagnostic information.\n        \"\"\"\n        self.location = location\n        self.message = message\n\n\nclass Command(object):\n    def __init__(self, title, command, arguments):\n        \"\"\"\n        Constructs a new Command instance.\n        :param str title: Title of the command, like `save`.\n        :param str command: The identifier of the actual command handler.\n        :param list arguments: Arguments that the command handler should be invoked with.\n        \"\"\"\n        self.title = title\n        self.command = command\n        self.arguments = arguments\n\n\nclass TextDocumentItem(object):\n    \"\"\"\n    An item to transfer a text document from the client to the server.\n    \"\"\"\n    def __init__(self, uri, languageId, version, text):\n        \"\"\"\n        Constructs a new TextDocumentItem instance.\n\n        :param DocumentUri uri: The text document's URI.\n        :param str languageId: The text document's language identifier.\n        :param int version: The version number of this document.\n        :param str text: The content of the opened text document.\n        \"\"\"\n        self.uri = uri\n        self.languageId = languageId\n        self.version = version\n        self.text = text\n\n\nclass TextDocumentIdentifier(object):\n    \"\"\"\n    Text documents are identified using a URI. On the protocol level, URIs are passed as strings.\n    \"\"\"\n    def __init__(self, uri):\n        \"\"\"\n        Constructs a new TextDocumentIdentifier instance.\n\n        :param DocumentUri uri: The text document's URI. 
\n \"\"\"\n self.uri = uri\n\nclass TextDocumentPositionParams(object):\n \"\"\"\n A parameter literal used in requests to pass a text document and a position inside that document.\n \"\"\"\n def __init__(self, textDocument, position):\n \"\"\"\n Constructs a new TextDocumentPositionParams instance.\n \n :param TextDocumentIdentifier textDocument: The text document.\n :param Position position: The position inside the text document.\n \"\"\"\n self.textDocument = textDocument\n self.position = position\n\n\nclass LANGUAGE_IDENTIFIER:\n BAT=\"bat\"\n BIBTEX=\"bibtex\"\n CLOJURE=\"clojure\"\n COFFESCRIPT=\"coffeescript\"\n C=\"c\"\n CPP=\"cpp\"\n CSHARP=\"csharp\"\n CSS=\"css\"\n DIFF=\"diff\"\n DOCKERFILE=\"dockerfile\"\n FSHARP=\"fsharp\"\n GIT_COMMIT=\"git-commit\"\n GIT_REBASE=\"git-rebase\"\n GO=\"go\"\n GROOVY=\"groovy\"\n HANDLEBARS=\"handlebars\"\n HTML=\"html\"\n INI=\"ini\"\n JAVA=\"java\"\n JAVASCRIPT=\"javascript\"\n JSON=\"json\"\n LATEX=\"latex\"\n LESS=\"less\"\n LUA=\"lua\"\n MAKEFILE=\"makefile\"\n MARKDOWN=\"markdown\"\n OBJECTIVE_C=\"objective-c\"\n OBJECTIVE_CPP=\"objective-cpp\"\n Perl=\"perl\"\n PHP=\"php\"\n POWERSHELL=\"powershell\"\n PUG=\"jade\"\n PYTHON=\"python\"\n R=\"r\"\n RAZOR=\"razor\"\n RUBY=\"ruby\"\n RUST=\"rust\"\n SASS=\"sass\"\n SCSS=\"scss\"\n ShaderLab=\"shaderlab\"\n SHELL_SCRIPT=\"shellscript\"\n SQL=\"sql\"\n SWIFT=\"swift\"\n TYPE_SCRIPT=\"typescript\"\n TEX=\"tex\"\n VB=\"vb\"\n XML=\"xml\"\n XSL=\"xsl\"\n YAML=\"yaml\"\n\n\nclass SymbolKind(object):\n File = 1\n Module = 2\n Namespace = 3\n Package = 4\n Class = 5\n Method = 6\n Property = 7\n Field = 8\n Constructor = 9\n Enum = 10\n Interface = 11\n Function = 12\n Variable = 13\n Constant = 14\n String = 15\n Number = 16\n Boolean = 17\n Array = 18\n Object = 19\n Key = 20\n Null = 21\n EnumMember = 22\n Struct = 23\n Event = 24\n Operator = 25\n TypeParameter = 26\n\n\nclass SymbolInformation(object):\n \"\"\"\n Represents information about programming constructs like variables, classes, interfaces etc.\n \"\"\"\n def __init__(self, name, kind, location, containerName, deprecated=False):\n \"\"\"\n Constructs a new SymbolInformation instance.\n\n :param str name: The name of this symbol.\n :param int kind: The kind of this symbol.\n :param bool Location: The location of this symbol. The location's range is used by a tool\n to reveal the location in the editor. If the symbol is selected in the\n tool the range's start information is used to position the cursor. So\n the range usually spans more then the actual symbol's name and does\n normally include things like visibility modifiers.\n\n The range doesn't have to denote a node range in the sense of a abstract\n syntax tree. It can therefore not be used to re-construct a hierarchy of\n the symbols.\n :param str containerName: The name of the symbol containing this symbol. This information is for\n user interface purposes (e.g. to render a qualifier in the user interface\n if necessary). It can't be used to re-infer a hierarchy for the document\n symbols.\n :param bool deprecated: Indicates if this symbol is deprecated.\n \"\"\"\n self.name = name\n self.kind = kind\n self.deprecated = deprecated\n self.location = to_type(location, Location)\n self.containerName = containerName\n\n\nclass ParameterInformation(object):\n \"\"\"\n Represents a parameter of a callable-signature. 
A parameter can\n have a label and a doc-comment.\n \"\"\"\n def __init__(self, label, documentation=\"\"):\n \"\"\"\n Constructs a new ParameterInformation instance.\n\n :param str label: The label of this parameter. Will be shown in the UI.\n :param str documentation: The human-readable doc-comment of this parameter. Will be shown in the UI but can be omitted.\n \"\"\"\n self.label = label\n self.documentation = documentation\n\n\nclass SignatureInformation(object):\n \"\"\"\n Represents the signature of something callable. A signature\n can have a label, like a function-name, a doc-comment, and\n a set of parameters.\n \"\"\"\n def __init__(self, label, documentation=\"\", parameters=[]):\n \"\"\"\n Constructs a new SignatureInformation instance.\n\n :param str label: The label of this signature. Will be shown in the UI.\n :param str documentation: The human-readable doc-comment of this signature. Will be shown in the UI but can be omitted.\n :param ParameterInformation[] parameters: The parameters of this signature.\n \"\"\"\n self.label = label\n self.documentation = documentation\n self.parameters = [to_type(parameter, ParameterInformation) for parameter in parameters]\n\n\nclass SignatureHelp(object):\n \"\"\"\n Signature help represents the signature of something\n callable. There can be multiple signature but only one\n active and only one active parameter.\n \"\"\"\n def __init__(self, signatures, activeSignature=0, activeParameter=0):\n \"\"\"\n Constructs a new SignatureHelp instance.\n\n :param SignatureInformation[] signatures: One or more signatures.\n :param int activeSignature:\n :param int activeParameter:\n \"\"\"\n self.signatures = [to_type(signature, SignatureInformation) for signature in signatures]\n self.activeSignature = activeSignature\n self.activeParameter = activeParameter\n\n\nclass CompletionTriggerKind(object):\n Invoked = 1\n TriggerCharacter = 2\n TriggerForIncompleteCompletions = 3\n\n\nclass CompletionContext(object):\n \"\"\"\n Contains additional information about the context in which a completion request is triggered.\n \"\"\"\n def __init__(self, triggerKind, triggerCharacter=None):\n \"\"\"\n Constructs a new CompletionContext instance.\n\n :param CompletionTriggerKind triggerKind: How the completion was triggered.\n :param str triggerCharacter: The trigger character (a single character) that has trigger code complete.\n Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter`\n \"\"\"\n self.triggerKind = triggerKind\n if triggerCharacter:\n self.triggerCharacter = triggerCharacter\n\n\nclass TextEdit(object):\n \"\"\"\n A textual edit applicable to a text document.\n \"\"\"\n def __init__(self, range, newText):\n \"\"\"\n :param Range range: The range of the text document to be manipulated. To insert \n text into a document create a range where start === end.\n :param str newText: The string to be inserted. For delete operations use an empty string.\n \"\"\"\n self.range = range\n self.newText = newText\n\n\nclass InsertTextFormat(object):\n PlainText = 1\n Snippet = 2\n\n\nclass CompletionItem(object):\n \"\"\"\n \"\"\"\n def __init__(self, label, kind=None, detail=None, documentation=None, deprecated=None, presented=None, sortText=None, filterText=None, insertText=None, insertTextFormat=None, textEdit=None, additionalTextEdits=None, commitCharacters=None, command=None, data=None): \n \"\"\" \n :param str label: The label of this completion item. 
By default also the text that is inserted when selecting\n            this completion.\n        :param int kind: The kind of this completion item. Based on the kind an icon is chosen by the editor.\n        :param str detail: A human-readable string with additional information about this item, like type or symbol information.\n        :param str documentation: A human-readable string that represents a doc-comment.\n        :param bool deprecated: Indicates if this item is deprecated.\n        :param bool presented: Select this item when showing. Note that only one completion item can be selected and that the\n            tool / client decides which item that is. The rule is that the first item of those that match best is selected.\n        :param str sortText: A string that should be used when comparing this item with other items. When `falsy` the label is used.\n        :param str filterText: A string that should be used when filtering a set of completion items. When `falsy` the label is used.\n        :param str insertText: A string that should be inserted into a document when selecting this completion. When `falsy` the label is used.\n            The `insertText` is subject to interpretation by the client side. Some tools might not take the string literally. For example,\n            in VS Code, when code complete is requested for `con` and a completion item with an `insertText` of `console` is provided, it\n            will only insert `sole`. Therefore it is recommended to use `textEdit` instead since it avoids additional client side interpretation.\n            @deprecated Use textEdit instead.\n        :param InsertTextFormat insertTextFormat: The format of the insert text. The format applies to both the `insertText` property\n            and the `newText` property of a provided `textEdit`.\n        :param TextEdit textEdit: An edit which is applied to a document when selecting this completion. When an edit is provided the value of `insertText` is ignored.\n            *Note:* The range of the edit must be a single line range and it must contain the position at which completion\n            has been requested.\n        :param TextEdit additionalTextEdits: An optional array of additional text edits that are applied when selecting this completion.\n            Edits must not overlap (including the same insert position) with the main edit nor with themselves.\n            Additional text edits should be used to change text unrelated to the current cursor position\n            (for example adding an import statement at the top of the file if the completion item will\n            insert an unqualified type).\n        :param str commitCharacters: An optional set of characters that when pressed while this completion is active will accept it first and\n            then type that character. *Note* that all commit characters should have `length=1` and that superfluous\n            characters will be ignored.\n        :param Command command: An optional command that is executed *after* inserting this completion. 
Note: that\n additional modifications to the current document should be described with the additionalTextEdits-property.\n :param data: An data entry field that is preserved on a completion item between a completion and a completion resolve request.\n \"\"\"\n self.label = label\n self.kind = kind\n self.detail = detail\n self.documentation = documentation\n self.deprecated = deprecated\n self.presented = presented\n self.sortText = sortText\n self.filterText = filterText\n self.insertText = insertText\n self.insertTextFormat = insertTextFormat\n self.textEdit = textEdit\n self.additionalTextEdits = additionalTextEdits\n self.commitCharacters = commitCharacters\n self.command = command\n self.data = data\n\n\nclass CompletionList(object):\n \"\"\"\n Represents a collection of [completion items](#CompletionItem) to be presented in the editor.\n \"\"\"\n def __init__(self, isIncomplete, items):\n \"\"\"\n Constructs a new CompletionContext instance.\n \n :param bool isIncomplete: This list it not complete. Further typing should result in recomputing this list.\n :param CompletionItem items: The completion items.\n \"\"\"\n self.isIncomplete = isIncomplete\n self.items = [to_type(i, CompletionItem) for i in items]\n\nclass ErrorCodes(object):\n\t# Defined by JSON RPC\n\tParseError = -32700\n\tInvalidRequest = -32600\n\tMethodNotFound = -32601\n\tInvalidParams = -32602\n\tInternalError = -32603\n\tserverErrorStart = -32099\n\tserverErrorEnd = -32000\n\tServerNotInitialized = -32002\n\tUnknownErrorCode = -32001\n\n\t# Defined by the protocol.\n\tRequestCancelled = -32800\n\tContentModified = -32801\n\nclass ResponseError(Exception):\n def __init__(self, code, message, data = None):\n self.code = code\n self.message = message\n if data:\n self.data = data"}}},{"rowIdx":542667,"cells":{"filename":{"kind":"string","value":"the-stack_106_30991"},"text":{"kind":"string","value":"\"\"\"ResNets for Steering Prediction.\n\nAuthor: Yuhuang Hu\nEmail : duguyue100@gmail.com\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport cPickle as pickle\n\nfrom sacred import Experiment\n\nimport numpy as np\nimport h5py\nfrom keras.models import load_model\n\nimport spiker\nfrom spiker.data import ddd17\n\nexp = Experiment(\"ResNet - Steering - Experiment\")\n\nexp.add_config({\n \"model_name\": \"\", # the model name\n \"data_name\": \"\", # the data name\n \"channel_id\": 0, # which channel to chose, 0: dvs, 1: aps, 2: both\n \"stages\": 0, # number of stages\n \"blocks\": 0, # number of blocks of each stage\n \"filter_list\": [], # number of filters per stage\n \"nb_epoch\": 0, # number of training epochs\n \"batch_size\": 0, # batch size\n })\n\n\n@exp.automain\ndef resnet_exp(model_name, data_name, channel_id, stages, blocks, filter_list,\n nb_epoch, batch_size):\n \"\"\"Perform ResNet experiment.\"\"\"\n model_path = os.path.join(spiker.HOME, \"data\", \"exps\", \"ral-exps\",\n model_name)\n model_file_base = os.path.join(model_path, model_name)\n\n # print model info\n print(\"[MESSAGE] Model Name: %s\" % (model_name))\n print(\"[MESSAGE] Number of epochs: %d\" % (nb_epoch))\n print(\"[MESSAGE] Batch Size: %d\" % (batch_size))\n print(\"[MESSAGE] Number of stages: %d\" % (stages))\n print(\"[MESSAGE] Number of blocks: %d\" % (blocks))\n\n # load data\n data_path = os.path.join(spiker.HOME, \"data\", \"exps\", \"data\", \"ddd17\",\n data_name)\n if not os.path.isfile(data_path):\n raise ValueError(\"This dataset does not exist at %s\" % (data_path))\n print(\"[MESSAGE] Dataset %s\" % 
(data_path))\n\n dataset = h5py.File(data_path, \"r\")\n\n if channel_id != 2:\n X_test = dataset[\"test_data\"][\n :, :, :, channel_id][()][..., np.newaxis].astype(\"float32\")/255.\n else:\n X_test = dataset[\"test_data\"][()].astype(\"float32\")/255.\n\n Y_test = dataset[\"test_target\"][()]\n\n dataset.close()\n\n print(\"[MESSAGE] Number of test samples %d\" % (X_test.shape[0]))\n\n # Build model\n print (\"[MESSAGE] Model is compiled.\")\n model_file = model_file_base + \"-best.hdf5\"\n model = load_model(model_file)\n\n Y_predict = model.predict(X_test)\n\n with open(model_file_base+\"-prediction.pkl\", \"wb\") as f:\n pickle.dump([Y_test, Y_predict], f)\n"}}},{"rowIdx":542668,"cells":{"filename":{"kind":"string","value":"the-stack_106_30992"},"text":{"kind":"string","value":"from tqdm import tqdm\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\n\n\ndef parallel_process(array,\n function,\n n_jobs=16,\n use_kwargs=False,\n front_num=3,\n tqdm=tqdm):\n \"\"\"\n This function was copied from here:\n http://danshiebler.com/2016-09-14-parallel-progress-bar/\n\n A parallel version of the map function with a progress bar.\n\n Args:\n array (array-like): An array to iterate over.\n function (function):\n A python function to apply to the elements of array\n n_jobs (int, default=16): The number of cores to use\n use_kwargs (boolean, default=False):\n Whether to consider the elements of array as dictionaries of\n keyword arguments to function\n front_num (int, default=3): The number of iterations to run serially\n before kicking off the parallel job.\n Useful for catching bugs\n Returns:\n [function(array[0]), function(array[1]), ...]\n \"\"\"\n # We run the first few iterations serially to catch bugs\n if front_num > 0:\n front = [function(**a) if use_kwargs else function(a)\n for a in array[:front_num]]\n # If we set n_jobs to 1, just run a list comprehension. 
This is useful for\n # benchmarking and debugging.\n if n_jobs == 1:\n return front + [function(**a) if use_kwargs else function(a)\n for a in tqdm(array[front_num:])]\n # Assemble the workers\n with ProcessPoolExecutor(max_workers=n_jobs) as pool:\n # Pass the elements of array into function\n if use_kwargs:\n futures = [pool.submit(function, **a) for a in array[front_num:]]\n else:\n futures = [pool.submit(function, a) for a in array[front_num:]]\n kwargs = {\n 'total': len(futures),\n 'unit': 'it',\n 'unit_scale': True,\n 'leave': True\n }\n # Print out the progress as tasks complete\n for _ in tqdm(as_completed(futures), **kwargs):\n pass\n out = []\n # Get the results from the futures.\n for i, future in tqdm(enumerate(futures)):\n try:\n out.append(future.result())\n except Exception as e:\n out.append(e)\n return front + out\n"}}},{"rowIdx":542669,"cells":{"filename":{"kind":"string","value":"the-stack_106_30993"},"text":{"kind":"string","value":"cube = lambda x: pow(x,3) # complete the lambda function \n\ndef fibonacci(n):\n l = [0,1]\n for i in range(2, n):\n temp = l[-1] + l[-2]\n l.append(temp)\n return l[0:n]\n \nif __name__ == '__main__':\n n = int(input())\n print(list(map(cube, fibonacci(n))))"}}},{"rowIdx":542670,"cells":{"filename":{"kind":"string","value":"the-stack_106_30994"},"text":{"kind":"string","value":"from collections import OrderedDict\nfrom rest_framework import serializers\nfrom profiles.models import Project, Tag, Basemap, Spatialitedbs, Otherfiles, Profile, ProfileSet\nfrom profiles.models import UserProject\nfrom django.contrib.auth import get_user_model\nfrom rest_framework.fields import SkipField\n\nclass ProjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = Project\n fields = ('path', 'modifieddate', 'url', 'uploadurl', 'size' )\n\n # this bit is for omitting empty fields (size)\n def to_representation(self, instance):\n result = super(ProjectSerializer, self).to_representation(instance)\n return OrderedDict([(key, result[key]) for key in result if result[key] is not None])\n\nclass UserProjectSerializer(serializers.HyperlinkedModelSerializer):\n owner = serializers.SlugRelatedField(read_only=True, slug_field='id')\n \n class Meta:\n model = UserProject\n fields = ('modifieddate', 'owner', 'document', 'description')\n \nclass TagSerializer(serializers.ModelSerializer):\n owner = serializers.SlugRelatedField(read_only=True, slug_field='id')\n\n class Meta:\n model = Tag\n fields = ('path', 'modifieddate', 'url', 'size', 'owner')\n\n # this bit is for omitting empty fields (size)\n def to_representation(self, instance):\n result = super(TagSerializer, self).to_representation(instance)\n return OrderedDict([(key, result[key]) for key in result if result[key] is not None])\n\nclass BasemapSerializer(serializers.ModelSerializer):\n class Meta:\n model = Basemap\n fields = ('path', 'modifieddate', 'url', 'size' )\n\n # this bit is for omitting empty fields (size)\n def to_representation(self, instance):\n result = super(BasemapSerializer, self).to_representation(instance)\n return OrderedDict([(key, result[key]) for key in result if result[key] is not None])\n\nclass SpatialitedbsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Spatialitedbs\n fields = ('path', 'modifieddate', 'url', 'size', 'uploadurl', 'visible' )\n\n # this bit is for omitting empty fields (size)\n def to_representation(self, instance):\n result = super(SpatialitedbsSerializer, self).to_representation(instance)\n return OrderedDict([(key, result[key]) for 
key in result if result[key] is not None])\n\nclass OtherfilesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Otherfiles\n fields = ('path', 'modifieddate', 'url', 'size' )\n\n # this bit is for omitting empty fields (size)\n def to_representation(self, instance):\n result = super(OtherfilesSerializer, self).to_representation(instance)\n return OrderedDict([(key, result[key]) for key in result if result[key] is not None])\n\nclass ProfileSerializer(serializers.ModelSerializer):\n project = ProjectSerializer(read_only=True)\n tags = TagSerializer(read_only=True)\n basemaps = BasemapSerializer(many=True, read_only=True)\n spatialitedbs = SpatialitedbsSerializer(many=True, read_only=True)\n otherfiles = OtherfilesSerializer(many=True, read_only=True)\n\n # this bit is for omitting empty fields (size)\n def to_representation(self, instance):\n result = super(ProfileSerializer, self).to_representation(instance)\n return OrderedDict([(key, result[key]) for key in result if result[key] is not None])\n\n class Meta:\n model = Profile\n fields = ('name', 'description', 'creationdate', 'modifieddate', 'color', 'active',\n 'sdcardPath', 'mapView', 'project', 'tags', 'basemaps', 'spatialitedbs', 'otherfiles' )\n\nclass ProfileSetSerializer(serializers.ModelSerializer):\n profiles = ProfileSerializer(read_only=True, many=True)\n class Meta:\n model = ProfileSet\n fields = ('formatVersion', 'profiles')\n"}}},{"rowIdx":542671,"cells":{"filename":{"kind":"string","value":"the-stack_106_30996"},"text":{"kind":"string","value":"\"\"\"Code for handling downloading of HPO files used by scout from CLI\"\"\"\nimport logging\nimport pathlib\n\nimport click\n\nfrom scout.utils.scout_requests import fetch_mim_files\n\nLOG = logging.getLogger(__name__)\n\n\ndef print_omim(out_dir, api_key):\n \"\"\"Print HPO files to a directory\n\n Args:\n out_dir(Path)\n \"\"\"\n mim_files = fetch_mim_files(api_key, mim2genes=True, genemap2=True)\n file_name = \"genemap2.txt\"\n file_path = out_dir / file_name\n LOG.info(\"Print genemap genes to %s\", file_path)\n with file_path.open(\"w\", encoding=\"utf-8\") as outfile:\n for line in mim_files[\"genemap2\"]:\n outfile.write(line + \"\\n\")\n\n file_name = \"mim2genes.txt\"\n file_path = out_dir / file_name\n LOG.info(\"Print mim2gene info to %s\", file_path)\n with file_path.open(\"w\", encoding=\"utf-8\") as outfile:\n for line in mim_files[\"mim2genes\"]:\n outfile.write(line + \"\\n\")\n\n\n@click.command(\"omim\", help=\"Download a files with OMIM info\")\n@click.option(\"--api-key\", help=\"Specify the api key\", required=True)\n@click.option(\"-o\", \"--out-dir\", default=\"./\", show_default=True)\ndef omim(out_dir, api_key):\n \"\"\"Download the OMIM genes\"\"\"\n out_dir = pathlib.Path(out_dir)\n out_dir.mkdir(parents=True, exist_ok=True)\n LOG.info(\"Download OMIM resources to %s\", out_dir)\n\n print_omim(out_dir, api_key)\n"}}},{"rowIdx":542672,"cells":{"filename":{"kind":"string","value":"the-stack_106_30997"},"text":{"kind":"string","value":"from slyd.orm.exceptions import ImproperlyConfigured\n\n\n__all__ = [\n 'get_serializer',\n]\n\nserializers = {}\n\n\ndef get_serializer(schema_type):\n try:\n return serializers[schema_type]\n except KeyError:\n raise ImproperlyConfigured(\n u\"No schema for type '{}' exists\".format(schema_type))\n"}}},{"rowIdx":542673,"cells":{"filename":{"kind":"string","value":"the-stack_106_31000"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source 
community by making 蓝鲸智云-用户管理(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport pytest\nfrom bkuser_core.categories.constants import CategoryStatus\nfrom bkuser_core.categories.views import CategoryViewSet\nfrom bkuser_core.profiles.constants import ProfileStatus\nfrom bkuser_core.tests.utils import get_one_object, make_simple_category, make_simple_department, make_simple_profile\nfrom bkuser_core.user_settings.models import Setting\n\npytestmark = pytest.mark.django_db\n\n\nclass TestActionApis:\n @pytest.fixture(scope=\"class\")\n def view(self):\n return CategoryViewSet.as_view(\n {\"get\": \"retrieve\", \"put\": \"update\", \"patch\": \"partial_update\", \"delete\": \"destroy\", \"post\": \"restoration\"}\n )\n\n def test_update_category(self, view):\n pass\n\n @pytest.mark.parametrize(\n \"enabled,status\", [(False, CategoryStatus.NORMAL.value), (False, CategoryStatus.INACTIVE.value)]\n )\n def test_category_restoration(self, factory, view, enabled, status):\n cc = make_simple_category(\"xoodomain\", \"Domain\", force_create_params={\"enabled\": enabled, \"status\": status})\n setting_id = []\n for setting in cc.make_default_settings():\n setting.enabled = 0\n setting.save(update_fields=[\"enabled\"])\n setting_id.append(setting.id)\n d = make_simple_department(\"dep\", parent_id=1, force_create_params={\"category_id\": cc.id, \"enabled\": enabled})\n p = make_simple_profile(\"profile\", force_create_params={\"category_id\": cc.id, \"enabled\": enabled})\n request = factory.post(f\"/api/v2/categories/{cc.id}/restoration/?include_disabled=1\")\n setattr(request, \"operator\", \"faker\")\n response = view(request=request, lookup_value=f\"{cc.id}\")\n assert response.status_code == 200\n cc = get_one_object(\"profilecategory\", id=cc.id, domain=cc.domain)\n assert cc.enabled and cc.status == CategoryStatus.NORMAL.value\n assert get_one_object(\"department\", id=d.id, name=d.name).enabled\n p = get_one_object(\"profile\", id=p.id, username=p.username)\n assert p.enabled and p.status == ProfileStatus.NORMAL.value\n assert {x.id for x in Setting.objects.filter(id__in=setting_id, enabled=True)} == set(setting_id)\n\n\nclass TestListCreateApis:\n @pytest.fixture(scope=\"class\")\n def view(self):\n return CategoryViewSet.as_view({\"get\": \"list\", \"post\": \"create\"})\n\n @pytest.mark.parametrize(\n \"all_count,fields,result_count,include_disabled,expected_fields\",\n [\n (10, \"id,display_name,domain,enabled\", 5, \"false\", \"id,display_name,domain,enabled\"),\n (10, \"id,display_name,domain\", 10, \"true\", \"id,display_name,domain,enabled\"),\n (10, \"id,display_name,domain,enabled\", 10, \"true\", \"id,display_name,domain,enabled\"),\n ],\n )\n def test_category_include_enabled_fields(\n self, factory, view, all_count, fields, result_count, include_disabled, expected_fields\n ):\n \"\"\"测试目录软删除显式拉取和字段选择\"\"\"\n for i in range(1, all_count):\n make_simple_category(f\"domain{i}\", f\"Display{i}\", 
force_create_params={\"enabled\": i % 2 == 0})\n response = view(\n request=factory.get(f\"/api/v2/categories/?fields={fields}&include_disabled={include_disabled}\")\n )\n assert response.data[\"count\"] == result_count\n assert set(response.data[\"results\"][0].keys()) == set(expected_fields.split(\",\"))\n"}}},{"rowIdx":542674,"cells":{"filename":{"kind":"string","value":"the-stack_106_31003"},"text":{"kind":"string","value":"import logging\nimport os\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"[%(asctime)s] %(levelname)-12s|process:%(process)-5s|thread:%\"\n \"(thread)-5s|funcName:%(funcName)s|message:%(message)s\",\n handlers=[\n # logging.FileHandler('fileName.log'),\n logging.StreamHandler()\n ])\n\nuser_sessions = {}\ndays = [\"понедельник\", \"вторник\", \"среда\", \"четверг\", \"пятница\",\n \"суббота\", \"воскресенье\"]\n\nbot_token = None\ndebug_mode = None\n\ntry:\n bot_token = os.environ['TELEGRAM_TOKEN']\n debug_mode = 'true' == os.environ['DEBUG_MODE']\nexcept KeyError as e:\n logging.error(e)\n"}}},{"rowIdx":542675,"cells":{"filename":{"kind":"string","value":"the-stack_106_31009"},"text":{"kind":"string","value":"# Copyright 2016 VMware Inc\n# All Rights Reserved\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom tempest.api.network import base\nfrom tempest import config\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib import decorators\nfrom tempest import test\n\nfrom vmware_nsx_tempest.services import nsxv3_client\n\nCONF = config.CONF\n\n\nclass NSXv3RoutersTest(base.BaseAdminNetworkTest):\n \"\"\"Test L3 Router and realization on NSX backend\n\n When test L3 Router feature, we need to test both REST API\n call from neutron and realization state on backend. 
Two tests\n have been added in this class:\n - Test create and update router\n - Test delete router\n\n \"\"\"\n\n @classmethod\n def skip_checks(cls):\n super(NSXv3RoutersTest, cls).skip_checks()\n if not test.is_extension_enabled('router', 'network'):\n msg = \"router extension not enabled.\"\n raise cls.skipException(msg)\n\n @classmethod\n def resource_setup(cls):\n super(NSXv3RoutersTest, cls).resource_setup()\n cls.nsx = nsxv3_client.NSXV3Client(CONF.nsxv3.nsx_manager,\n CONF.nsxv3.nsx_user,\n CONF.nsxv3.nsx_password)\n\n @test.attr(type='nsxv3')\n @decorators.idempotent_id('0e9938bc-d2a3-4a9a-a4f9-7a93ee8bb344')\n def test_create_update_nsx_router(self):\n # Create a router\n router_name = data_utils.rand_name('router-')\n router = self.create_router(router_name, admin_state_up=True)\n self.addCleanup(self._delete_router, router['id'])\n nsx_router = self.nsx.get_logical_router(router['name'],\n router['id'])\n self.assertEqual(router['name'], router_name)\n self.assertEqual(router['admin_state_up'], True)\n self.assertIsNotNone(nsx_router)\n # Update the name of router and verify if it is updated on both\n # neutron and nsx backend\n updated_name = 'updated ' + router_name\n update_body = self.routers_client.update_router(router['id'],\n name=updated_name)\n updated_router = update_body['router']\n nsx_router = self.nsx.get_logical_router(updated_router['name'],\n updated_router['id'])\n self.assertEqual(updated_router['name'], updated_name)\n self.assertIsNotNone(nsx_router)\n\n @test.attr(type='nsxv3')\n @decorators.idempotent_id('6f49b69c-0800-4c83-b1f8-595ae5bfeea7')\n def test_delete_nsx_router(self):\n # Create a router\n router_name = data_utils.rand_name('router-')\n router = self.create_router(router_name, admin_state_up=True)\n nsx_router = self.nsx.get_logical_router(router['name'],\n router['id'])\n self.assertEqual(router['name'], router_name)\n self.assertIsNotNone(nsx_router)\n # Delete the router and verify it is deleted on nsx backend\n self.routers_client.delete_router(router['id'])\n nsx_router = self.nsx.get_logical_router(router['name'],\n router['id'])\n self.assertIsNone(nsx_router)\n\n def _delete_router(self, router_id):\n # Delete the router in case the test exits with any exception\n list_body = self.routers_client.list_routers()\n for router in list_body.get('router', []):\n if router['id'] == router_id:\n self.routers_client.delete_router(router_id)\n"}}},{"rowIdx":542676,"cells":{"filename":{"kind":"string","value":"the-stack_106_31010"},"text":{"kind":"string","value":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nfrom google.cloud import storage\nfrom linebot import LineBotApi\nfrom linebot.models import TextSendMessage, ImageSendMessage, QuickReply, QuickReplyButton, MessageAction\nfrom linebot.exceptions import LineBotApiError\n\nimport time\nimport os\nfrom models.task import Task\nimport style_transfer\nimport matplotlib.pyplot as plt\n\n\n'''\nConfigs\n'''\n# Use a service account\ncred_path = os.environ[\"CRED_PATH\"]\nbucket_name = os.environ[\"USER_INFO_GS_BUCKET_NAME\"]\nstyles = {\n 1: \"01_01_starry_night.jpeg\",\n 2: \"01_02_sunflowers.jpeg\",\n 3: \"01_03_the_yellow_house.jpeg\",\n 4: \"02_04_water_fall.jpeg\",\n 5: \"02_05_ascending_and_descending.jpeg\",\n 6: \"02_06_ralativity.jpg\",\n 7: \"03_07_unknown.jpeg\",\n 8: \"03_08_flower_and_bird.jpeg\",\n 9: \"03_09_flowers_and_birds_seasons.jpeg\",\n 10: \"04_10_first_impression.jpeg\",\n 11: \"04_11_unknown.jpeg\",\n 12: 
\"04_12_unknown.jpeg\",\n 13: \"05_13_frida_kahlo.jpeg\",\n 14: \"05_14_frida_kahlo.jpeg\",\n 15: \"05_15_frida_kahlo.jpeg\"\n}\niteration_times = int(os.environ[\"ITERATION_TIMES\"])\nline_channel_access_token = os.environ[\"LINE_CHANNEL_ACCESS_TOKEN\"]\n\n'''\n'''\n# Initialize CloudStorage\nstorage_client = storage.Client.from_service_account_json(cred_path)\nbucket = storage_client.bucket(bucket_name)\n\n# Initialize Line Bot Api\nline_bot_api = LineBotApi(line_channel_access_token)\n\ncred = credentials.Certificate(cred_path)\nfirebase_admin.initialize_app(cred)\ndb = firestore.client()\n\n# Create a callback on_snapshot function to capture changes\n\ntasks_ref = db.collection(u'tasks')\n\n\ndef on_snapshot(doc_snapshot, changes, read_time):\n for change in changes:\n print(change.document.id, \"added\")\n os.sleep(os.environ[\"DELTA\"])\n task_ref = tasks_ref.document(change.document.id)\n task_doc = task_ref.get()\n if task_doc.exists:\n task = Task.from_dict(task_doc)\n if task.status == 0:\n task.status = 1\n task_ref.set(document_data=task.to_dict(), merge=True)\n print(change.document.id, \"started\")\n bucket.blob(task.content_pic_url).download_to_filename(\n os.path.basename(task.content_pic_url))\n best_img, best_loss = style_transfer.run_style_transfer(\n os.path.basename(task.content_pic_url), 'styles/'+styles[task.style_id], num_iterations=iteration_times)\n plt.imsave('done.jpg', best_img)\n\n # remove original image\n os.remove(os.path.basename(task.content_pic_url))\n\n # upload to CloudStorage\n destination_blob_name = f'{change.document.id}/image/done_{change.document.id}_{str(time.time())}.jpg'\n bucket.blob(destination_blob_name).upload_from_filename(\n 'done.jpg')\n\n # make it publicly readable\n bucket.blob(destination_blob_name).make_public()\n\n # delete task in FireStore\n task_ref.delete()\n\n # push message to user\n try:\n line_bot_api.push_message(change.document.id, [\n TextSendMessage(text=\"啾啾幫你畫好啦~\"),\n ImageSendMessage(\n original_content_url=f'https://storage.googleapis.com/{bucket_name}/{destination_blob_name}',\n preview_image_url=f'https://storage.googleapis.com/{bucket_name}/{destination_blob_name}'\n ),\n TextSendMessage(text=\"好不好看呢?\\n歡迎多多抖內作者ㄛ~\"),\n TextSendMessage(text=\"如果想再請啾啾畫一張畫,請輸入「再來一張」\",\n quick_reply=QuickReply(items=[\n QuickReplyButton(action=MessageAction(\n label=\"再來一張\", text=\"再來一張\"))\n ]))\n ])\n except linebot.exceptions.LineBotApiError as e:\n print(e.status_code)\n print(e.request_id)\n print(e.error.message)\n print(e.error.details)\n\n\n# Watch the document\ndoc_watch = tasks_ref.on_snapshot(on_snapshot)\n\n\nwhile True:\n time.sleep(1)\n"}}},{"rowIdx":542677,"cells":{"filename":{"kind":"string","value":"the-stack_106_31013"},"text":{"kind":"string","value":"import json\nimport threading\nimport time\nimport os\nimport stat\nfrom decimal import Decimal\nfrom typing import Union\n\nfrom copy import deepcopy\n\nfrom . 
import util\nfrom .util import (user_dir, print_error, PrintError, make_dir,\n NoDynamicFeeEstimates, format_fee_satoshis, quantize_feerate)\nfrom .i18n import _\n\nFEE_ETA_TARGETS = [25, 10, 5, 2]\nFEE_DEPTH_TARGETS = [10000000, 5000000, 2000000, 1000000, 500000, 200000, 100000]\n\n# satoshi per kbyte\nFEERATE_MAX_DYNAMIC = 1500000\nFEERATE_WARNING_HIGH_FEE = 600000\nFEERATE_FALLBACK_STATIC_FEE = 150000\nFEERATE_DEFAULT_RELAY = 1000\nFEERATE_STATIC_VALUES = [5000, 10000, 20000, 30000, 50000, 70000, 100000, 150000, 200000, 300000]\n\n\nconfig = None\n\n\ndef get_config():\n global config\n return config\n\n\ndef set_config(c):\n global config\n config = c\n\n\nFINAL_CONFIG_VERSION = 3\n\n\nclass SimpleConfig(PrintError):\n \"\"\"\n The SimpleConfig class is responsible for handling operations involving\n configuration files.\n\n There are two different sources of possible configuration values:\n 1. Command line options.\n 2. User configuration (in the user's config directory)\n They are taken in order (1. overrides config options set in 2.)\n \"\"\"\n\n def __init__(self, options=None, read_user_config_function=None,\n read_user_dir_function=None):\n\n if options is None:\n options = {}\n\n # This lock needs to be acquired for updating and reading the config in\n # a thread-safe way.\n self.lock = threading.RLock()\n\n self.mempool_fees = {}\n self.fee_estimates = {}\n self.fee_estimates_last_updated = {}\n self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees\n\n # The following two functions are there for dependency injection when\n # testing.\n if read_user_config_function is None:\n read_user_config_function = read_user_config\n if read_user_dir_function is None:\n self.user_dir = user_dir\n else:\n self.user_dir = read_user_dir_function\n\n # The command line options\n self.cmdline_options = deepcopy(options)\n # don't allow to be set on CLI:\n self.cmdline_options.pop('config_version', None)\n\n # Set self.path and read the user config\n self.user_config = {} # for self.get in electrum_path()\n self.path = self.electrum_path()\n self.user_config = read_user_config_function(self.path)\n if not self.user_config:\n # avoid new config getting upgraded\n self.user_config = {'config_version': FINAL_CONFIG_VERSION}\n\n # config \"upgrade\" - CLI options\n self.rename_config_keys(\n self.cmdline_options, {'auto_cycle': 'auto_connect'}, True)\n\n # config upgrade - user config\n if self.requires_upgrade():\n self.upgrade()\n\n # Make a singleton instance of 'self'\n set_config(self)\n\n def electrum_path(self):\n # Read electrum_path from command line\n # Otherwise use the user's default data directory.\n path = self.get('electrum_path')\n if path is None:\n path = self.user_dir()\n\n make_dir(path, allow_symlink=False)\n if self.get('testnet'):\n path = os.path.join(path, 'testnet')\n make_dir(path, allow_symlink=False)\n elif self.get('regtest'):\n path = os.path.join(path, 'regtest')\n make_dir(path, allow_symlink=False)\n elif self.get('simnet'):\n path = os.path.join(path, 'simnet')\n make_dir(path, allow_symlink=False)\n\n self.print_error(\"electrum directory\", path)\n return path\n\n def rename_config_keys(self, config, keypairs, deprecation_warning=False):\n \"\"\"Migrate old key names to new ones\"\"\"\n updated = False\n for old_key, new_key in keypairs.items():\n if old_key in config:\n if new_key not in config:\n config[new_key] = config[old_key]\n if deprecation_warning:\n self.print_stderr('Note that the {} variable has been deprecated. 
'\n 'You should use {} instead.'.format(old_key, new_key))\n del config[old_key]\n updated = True\n return updated\n\n def set_key(self, key, value, save=True):\n if not self.is_modifiable(key):\n self.print_stderr(\"Warning: not changing config key '%s' set on the command line\" % key)\n return\n self._set_key_in_user_config(key, value, save)\n\n def _set_key_in_user_config(self, key, value, save=True):\n with self.lock:\n if value is not None:\n self.user_config[key] = value\n else:\n self.user_config.pop(key, None)\n if save:\n self.save_user_config()\n\n def get(self, key, default=None):\n with self.lock:\n out = self.cmdline_options.get(key)\n if out is None:\n out = self.user_config.get(key, default)\n return out\n\n def requires_upgrade(self):\n return self.get_config_version() < FINAL_CONFIG_VERSION\n\n def upgrade(self):\n with self.lock:\n self.print_error('upgrading config')\n\n self.convert_version_2()\n self.convert_version_3()\n\n self.set_key('config_version', FINAL_CONFIG_VERSION, save=True)\n\n def convert_version_2(self):\n if not self._is_upgrade_method_needed(1, 1):\n return\n\n self.rename_config_keys(self.user_config, {'auto_cycle': 'auto_connect'})\n\n try:\n # change server string FROM host:port:proto TO host:port:s\n server_str = self.user_config.get('server')\n host, port, protocol = str(server_str).rsplit(':', 2)\n assert protocol in ('s', 't')\n int(port) # Throw if cannot be converted to int\n server_str = '{}:{}:s'.format(host, port)\n self._set_key_in_user_config('server', server_str)\n except BaseException:\n self._set_key_in_user_config('server', None)\n\n self.set_key('config_version', 2)\n\n def convert_version_3(self):\n if not self._is_upgrade_method_needed(2, 2):\n return\n\n base_unit = self.user_config.get('base_unit')\n if isinstance(base_unit, str):\n self._set_key_in_user_config('base_unit', None)\n map_ = {'vtc':8, 'mvtc':5, 'uvtc':2, 'bits':2, 'sat':0}\n decimal_point = map_.get(base_unit.lower())\n self._set_key_in_user_config('decimal_point', decimal_point)\n\n self.set_key('config_version', 3)\n\n def _is_upgrade_method_needed(self, min_version, max_version):\n cur_version = self.get_config_version()\n if cur_version > max_version:\n return False\n elif cur_version < min_version:\n raise Exception(\n ('config upgrade: unexpected version %d (should be %d-%d)'\n % (cur_version, min_version, max_version)))\n else:\n return True\n\n def get_config_version(self):\n config_version = self.get('config_version', 1)\n if config_version > FINAL_CONFIG_VERSION:\n self.print_stderr('WARNING: config version ({}) is higher than ours ({})'\n .format(config_version, FINAL_CONFIG_VERSION))\n return config_version\n\n def is_modifiable(self, key):\n return key not in self.cmdline_options\n\n def save_user_config(self):\n if not self.path:\n return\n path = os.path.join(self.path, \"config\")\n s = json.dumps(self.user_config, indent=4, sort_keys=True)\n try:\n with open(path, \"w\", encoding='utf-8') as f:\n f.write(s)\n os.chmod(path, stat.S_IREAD | stat.S_IWRITE)\n except FileNotFoundError:\n # datadir probably deleted while running...\n if os.path.exists(self.path): # or maybe not?\n raise\n\n def get_wallet_path(self):\n \"\"\"Set the path of the wallet.\"\"\"\n\n # command line -w option\n if self.get('wallet_path'):\n return os.path.join(self.get('cwd'), self.get('wallet_path'))\n\n # path in config file\n path = self.get('default_wallet_path')\n if path and os.path.exists(path):\n return path\n\n # default path\n 
util.assert_datadir_available(self.path)\n dirpath = os.path.join(self.path, \"wallets\")\n make_dir(dirpath, allow_symlink=False)\n\n new_path = os.path.join(self.path, \"wallets\", \"default_wallet\")\n\n # default path in pre 1.9 versions\n old_path = os.path.join(self.path, \"electrum.dat\")\n if os.path.exists(old_path) and not os.path.exists(new_path):\n os.rename(old_path, new_path)\n\n return new_path\n\n def remove_from_recently_open(self, filename):\n recent = self.get('recently_open', [])\n if filename in recent:\n recent.remove(filename)\n self.set_key('recently_open', recent)\n\n def set_session_timeout(self, seconds):\n self.print_error(\"session timeout -> %d seconds\" % seconds)\n self.set_key('session_timeout', seconds)\n\n def get_session_timeout(self):\n return self.get('session_timeout', 300)\n\n def open_last_wallet(self):\n if self.get('wallet_path') is None:\n last_wallet = self.get('gui_last_wallet')\n if last_wallet is not None and os.path.exists(last_wallet):\n self.cmdline_options['default_wallet_path'] = last_wallet\n\n def save_last_wallet(self, wallet):\n if self.get('wallet_path') is None:\n path = wallet.storage.path\n self.set_key('gui_last_wallet', path)\n\n def impose_hard_limits_on_fee(func):\n def get_fee_within_limits(self, *args, **kwargs):\n fee = func(self, *args, **kwargs)\n if fee is None:\n return fee\n fee = min(FEERATE_MAX_DYNAMIC, fee)\n fee = max(FEERATE_DEFAULT_RELAY, fee)\n return fee\n return get_fee_within_limits\n\n @impose_hard_limits_on_fee\n def eta_to_fee(self, slider_pos) -> Union[int, None]:\n \"\"\"Returns fee in sat/kbyte.\"\"\"\n slider_pos = max(slider_pos, 0)\n slider_pos = min(slider_pos, len(FEE_ETA_TARGETS))\n if slider_pos < len(FEE_ETA_TARGETS):\n target_blocks = FEE_ETA_TARGETS[slider_pos]\n fee = self.fee_estimates.get(target_blocks)\n else:\n fee = self.fee_estimates.get(2)\n if fee is not None:\n fee += fee/2\n fee = int(fee)\n return fee\n\n def fee_to_depth(self, target_fee):\n depth = 0\n for fee, s in self.mempool_fees:\n depth += s\n if fee <= target_fee:\n break\n else:\n return 0\n return depth\n\n @impose_hard_limits_on_fee\n def depth_to_fee(self, slider_pos) -> int:\n \"\"\"Returns fee in sat/kbyte.\"\"\"\n target = self.depth_target(slider_pos)\n depth = 0\n for fee, s in self.mempool_fees:\n depth += s\n if depth > target:\n break\n else:\n return 0\n return fee * 1000\n\n def depth_target(self, slider_pos):\n slider_pos = max(slider_pos, 0)\n slider_pos = min(slider_pos, len(FEE_DEPTH_TARGETS)-1)\n return FEE_DEPTH_TARGETS[slider_pos]\n\n def eta_target(self, i):\n if i == len(FEE_ETA_TARGETS):\n return 1\n return FEE_ETA_TARGETS[i]\n\n def fee_to_eta(self, fee_per_kb):\n import operator\n l = list(self.fee_estimates.items()) + [(1, self.eta_to_fee(4))]\n dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)\n min_target, min_value = min(dist, key=operator.itemgetter(1))\n if fee_per_kb < self.fee_estimates.get(25)/2:\n min_target = -1\n return min_target\n\n def depth_tooltip(self, depth):\n return \"%.1f MB from tip\"%(depth/1000000)\n\n def eta_tooltip(self, x):\n if x < 0:\n return _('Low fee')\n elif x == 1:\n return _('In the next block')\n else:\n return _('Within {} blocks').format(x)\n\n def get_fee_status(self):\n dyn = self.is_dynfee()\n mempool = self.use_mempool_fees()\n pos = self.get_depth_level() if mempool else self.get_fee_level()\n fee_rate = self.fee_per_kb()\n target, tooltip = self.get_fee_text(pos, dyn, mempool, fee_rate)\n return tooltip + ' [%s]'%target if dyn else target + ' 
[Static]'\n\n def get_fee_text(self, pos, dyn, mempool, fee_rate):\n \"\"\"Returns (text, tooltip) where\n text is what we target: static fee / num blocks to confirm in / mempool depth\n tooltip is the corresponding estimate (e.g. num blocks for a static fee)\n \"\"\"\n if fee_rate is None:\n rate_str = 'unknown'\n else:\n rate_str = format_fee_satoshis(fee_rate/1000) + ' sat/byte'\n\n if dyn:\n if mempool:\n depth = self.depth_target(pos)\n text = self.depth_tooltip(depth)\n else:\n eta = self.eta_target(pos)\n text = self.eta_tooltip(eta)\n tooltip = rate_str\n else:\n text = rate_str\n if mempool and self.has_fee_mempool():\n depth = self.fee_to_depth(fee_rate)\n tooltip = self.depth_tooltip(depth)\n elif not mempool and self.has_fee_etas():\n eta = self.fee_to_eta(fee_rate)\n tooltip = self.eta_tooltip(eta)\n else:\n tooltip = ''\n return text, tooltip\n\n def get_depth_level(self):\n maxp = len(FEE_DEPTH_TARGETS) - 1\n return min(maxp, self.get('depth_level', 2))\n\n def get_fee_level(self):\n maxp = len(FEE_ETA_TARGETS) # not (-1) to have \"next block\"\n return min(maxp, self.get('fee_level', 2))\n\n def get_fee_slider(self, dyn, mempool):\n if dyn:\n if mempool:\n pos = self.get_depth_level()\n maxp = len(FEE_DEPTH_TARGETS) - 1\n fee_rate = self.depth_to_fee(pos)\n else:\n pos = self.get_fee_level()\n maxp = len(FEE_ETA_TARGETS) # not (-1) to have \"next block\"\n fee_rate = self.eta_to_fee(pos)\n else:\n fee_rate = self.fee_per_kb(dyn=False)\n pos = self.static_fee_index(fee_rate)\n maxp = 9\n return maxp, pos, fee_rate\n\n def static_fee(self, i):\n return FEERATE_STATIC_VALUES[i]\n\n def static_fee_index(self, value):\n if value is None:\n raise TypeError('static fee cannot be None')\n dist = list(map(lambda x: abs(x - value), FEERATE_STATIC_VALUES))\n return min(range(len(dist)), key=dist.__getitem__)\n\n def has_fee_etas(self):\n return len(self.fee_estimates) == 4\n\n def has_fee_mempool(self):\n return bool(self.mempool_fees)\n\n def has_dynamic_fees_ready(self):\n if self.use_mempool_fees():\n return self.has_fee_mempool()\n else:\n return self.has_fee_etas()\n\n def is_dynfee(self):\n return bool(self.get('dynamic_fees', True))\n\n def use_mempool_fees(self):\n return bool(self.get('mempool_fees', False))\n\n def _feerate_from_fractional_slider_position(self, fee_level: float, dyn: bool,\n mempool: bool) -> Union[int, None]:\n fee_level = max(fee_level, 0)\n fee_level = min(fee_level, 1)\n if dyn:\n max_pos = (len(FEE_DEPTH_TARGETS) - 1) if mempool else len(FEE_ETA_TARGETS)\n slider_pos = round(fee_level * max_pos)\n fee_rate = self.depth_to_fee(slider_pos) if mempool else self.eta_to_fee(slider_pos)\n else:\n max_pos = len(FEERATE_STATIC_VALUES) - 1\n slider_pos = round(fee_level * max_pos)\n fee_rate = FEERATE_STATIC_VALUES[slider_pos]\n return fee_rate\n\n def fee_per_kb(self, dyn: bool=None, mempool: bool=None, fee_level: float=None) -> Union[int, None]:\n \"\"\"Returns sat/kvB fee to pay for a txn.\n Note: might return None.\n\n fee_level: float between 0.0 and 1.0, representing fee slider position\n \"\"\"\n if dyn is None:\n dyn = self.is_dynfee()\n if mempool is None:\n mempool = self.use_mempool_fees()\n if fee_level is not None:\n return self._feerate_from_fractional_slider_position(fee_level, dyn, mempool)\n # there is no fee_level specified; will use config.\n # note: 'depth_level' and 'fee_level' in config are integer slider positions,\n # unlike fee_level here, which (when given) is a float in [0.0, 1.0]\n if dyn:\n if mempool:\n fee_rate = 
self.depth_to_fee(self.get_depth_level())\n else:\n fee_rate = self.eta_to_fee(self.get_fee_level())\n else:\n fee_rate = self.get('fee_per_kb', FEERATE_FALLBACK_STATIC_FEE)\n return fee_rate\n\n def fee_per_byte(self):\n \"\"\"Returns sat/vB fee to pay for a txn.\n Note: might return None.\n \"\"\"\n fee_per_kb = self.fee_per_kb()\n return fee_per_kb / 1000 if fee_per_kb is not None else None\n\n def estimate_fee(self, size):\n fee_per_kb = self.fee_per_kb()\n if fee_per_kb is None:\n raise NoDynamicFeeEstimates()\n return self.estimate_fee_for_feerate(fee_per_kb, size)\n\n @classmethod\n def estimate_fee_for_feerate(cls, fee_per_kb, size):\n fee_per_kb = Decimal(fee_per_kb)\n fee_per_byte = fee_per_kb / 1000\n # to be consistent with what is displayed in the GUI,\n # the calculation needs to use the same precision:\n fee_per_byte = quantize_feerate(fee_per_byte)\n return round(fee_per_byte * size)\n\n def update_fee_estimates(self, key, value):\n self.fee_estimates[key] = value\n self.fee_estimates_last_updated[key] = time.time()\n\n def is_fee_estimates_update_required(self):\n \"\"\"Checks time since last requested and updated fee estimates.\n Returns True if an update should be requested.\n \"\"\"\n now = time.time()\n return now - self.last_time_fee_estimates_requested > 60\n\n def requested_fee_estimates(self):\n self.last_time_fee_estimates_requested = time.time()\n\n def get_video_device(self):\n device = self.get(\"video_device\", \"default\")\n if device == 'default':\n device = ''\n return device\n\n\ndef read_user_config(path):\n \"\"\"Parse and store the user config settings in electrum.conf into user_config[].\"\"\"\n if not path:\n return {}\n config_path = os.path.join(path, \"config\")\n if not os.path.exists(config_path):\n return {}\n try:\n with open(config_path, \"r\", encoding='utf-8') as f:\n data = f.read()\n result = json.loads(data)\n except:\n print_error(\"Warning: Cannot read config file.\", config_path)\n return {}\n if not type(result) is dict:\n return {}\n return result\n"}}},{"rowIdx":542678,"cells":{"filename":{"kind":"string","value":"the-stack_106_31014"},"text":{"kind":"string","value":"from pandac.PandaModules import *\nfrom direct.showbase.DirectObject import *\nfrom direct.interval.IntervalGlobal import *\nfrom pirates.piratesbase import PiratesGlobals\nfrom direct.distributed import DistributedObject\nfrom pirates.effects.DustCloud import DustCloud\nfrom pirates.effects.SmallSplash import SmallSplash\nimport random\nfrom PooledEffect import PooledEffect\nDebrisDict = {'0': 'models/props/rock_1_floor','1': 'models/props/rock_2_floor','2': 'models/props/rock_3_floor','3': 'models/props/rock_4_floor'}\n\nclass RockDebris(PooledEffect):\n BaseEndPlaneZ = -10\n\n def __init__(self):\n PooledEffect.__init__(self)\n self.collSphereRadius = 2.0\n self.startPos = Vec3(0, 0, 0)\n self.endPlaneZ = self.BaseEndPlaneZ\n self.transNode = self.attachNewNode('trans')\n filePrefix = DebrisDict.get(str(random.randint(0, 3)))\n self.debris = loader.loadModel(filePrefix)\n self.debris.reparentTo(self.transNode)\n self.debris.setScale(0.5)\n self.debris.setColorScale(0.8, 0.8, 0.8, 1.0)\n self.weaponHitEvent = 'weaponHit' + str(id(self))\n self.accept(self.weaponHitEvent, self.weaponHitObject)\n self.collSphere = CollisionSphere(0, 0, 0, self.collSphereRadius)\n self.cnode = CollisionNode('collSphere')\n self.cnode.addSolid(self.collSphere)\n self.collision = self.transNode.attachNewNode(self.cnode)\n self.cnode.setFromCollideMask(PiratesGlobals.TargetBitmask)\n 
self.cnode.setIntoCollideMask(BitMask32.allOff())\n self.collHandler = CollisionHandlerEvent()\n self.collHandler.addInPattern(self.weaponHitEvent)\n self.radiusDist = 25\n self.minHeight = 30\n self.maxHeight = 100\n self.track = None\n return\n\n def createTrack(self, rate=1):\n self.startVel = Vec3(random.uniform(-self.radiusDist, self.radiusDist), random.uniform(-self.radiusDist, self.radiusDist), random.uniform(self.minHeight, self.maxHeight))\n try:\n playProjectile = ProjectileInterval(self.transNode, startPos=self.startPos, startVel=self.startVel, endZ=self.endPlaneZ, gravityMult=4.0)\n self.playProjectile = playProjectile\n except StandardError:\n playProjectile = Wait(0.2)\n self.playProjectile = None\n\n randomNumX = random.uniform(360, 2880)\n randomNumY = random.uniform(360, 2880)\n randomNumZ = random.uniform(360, 2880)\n self.playRotate = self.debris.hprInterval(6, Point3(randomNumX, randomNumY, randomNumZ))\n enableColl = Sequence(Wait(0.2), Func(self.cnode.setFromCollideMask, PiratesGlobals.TargetBitmask))\n playDebris = Parallel(playProjectile, enableColl)\n self.track = Sequence(Func(self.transNode.reparentTo, self), playDebris, Func(self.cleanUpEffect))\n return\n\n def play(self, rate=1):\n self.createTrack()\n if self.startPos[2] > self.endPlaneZ:\n base.cTrav.addCollider(self.collision, self.collHandler)\n self.track.start()\n self.playRotate.loop()\n else:\n self.finish()\n\n def stop(self):\n if self.track:\n self.track.finish()\n if self.playRotate:\n self.playRotate.finish()\n\n def finish(self):\n self.stop()\n self.cleanUpEffect()\n\n def cleanUpEffect(self):\n self.detachNode()\n self.checkInEffect(self)\n\n def destroy(self):\n self.stop()\n del self.track\n del self.playProjectile\n self.removeNode()\n self.ignore(self.weaponHitEvent)\n PooledEffect.destroy(self)\n\n def weaponHitObject(self, entry):\n if not entry.hasSurfacePoint() or not entry.hasInto():\n return\n if not entry.getInto().isTangible():\n return\n hitObject = entry.getIntoNodePath()\n objType = hitObject.getNetTag('objType')\n if not objType:\n return\n objType = int(objType)\n if objType == PiratesGlobals.COLL_SEA and base.cr.wantSpecialEffects:\n pos = entry.getSurfacePoint(render)\n if base.cr.activeWorld.getWater():\n entryWaterHeight = base.cr.activeWorld.getWater().calcHeight(pos[0], pos[1]) + 7.0\n else:\n entryWaterHeight = pos[2]\n splashEffect = SmallSplash.getEffect()\n if splashEffect:\n splashEffect.reparentTo(render)\n splashEffect.setPos(pos[0], pos[1], entryWaterHeight)\n splashEffect.play()\n self.cnode.setFromCollideMask(PiratesGlobals.TargetBitmask.allOff())\n elif objType == PiratesGlobals.COLL_LAND and base.cr.wantSpecialEffects:\n pos = entry.getSurfacePoint(render)\n dustCloudEffect = DustCloud.getEffect()\n if dustCloudEffect:\n dustCloudEffect.wrtReparentTo(render)\n dustCloudEffect.setPos(pos)\n dustCloudEffect.play()\n self.cnode.setFromCollideMask(PiratesGlobals.TargetBitmask.allOff())\n\n def offsetEndPlaneZFrom(self, zHeight):\n self.endPlaneZ = self.BaseEndPlaneZ + zHeight\n\n def testTrajectory(self):\n self.createTrack()\n return bool(self.playProjectile and self.playProjectile.testTrajectory())"}}},{"rowIdx":542679,"cells":{"filename":{"kind":"string","value":"the-stack_106_31015"},"text":{"kind":"string","value":"'''\nData loader for annotated text datasets.\n'''\nimport os\nimport re\nimport enum\nimport glob\nimport array\nimport random\nimport shutil\nimport struct\nimport tempfile\nfrom collections import Counter\nfrom contextlib import 
ExitStack\n\nimport torch\nfrom torch import nn\n\nimport metrics\nfrom data import preprocess\nfrom data.text import TextDataset\nfrom data.utils import maybe_download\nfrom utils.file import Open, extract_all\nfrom utils.tree import ParseTree\n\n\nMASKED = ''\n\n\nclass TextAnnotation(enum.Enum):\n ''' An enumeration of text annotation types '''\n NONE = ('', 'bpe.32000.bin', 'bpe.32000')\n CONSTITUENCY_PARSE = ('parsed', '{lang}.parse', 'parse.fully.upto.span{span}')\n PARSE_SPANS = ('spans', '{lang}.parse', 'bpe.32000')\n\n def __init__(self, identifier, ext, vocab_ext):\n ''' Initialize the text annotation '''\n self.ext = ext\n self.vocab_ext = vocab_ext\n self.identifier = identifier\n\n def data_path(self, split, directory, **kwargs):\n ''' Return the data path '''\n data_ext = self.ext.format(**kwargs)\n return os.path.join(directory, f'{split}.{data_ext}')\n\n def vocab_path(self, directory, **kwargs):\n ''' Return the vocab path '''\n vocab_ext = self.vocab_ext.format(**kwargs)\n return os.path.join(directory, f'vocab.{vocab_ext}')\n\n\nclass AnnotatedTextDataset(TextDataset):\n ''' Class that encapsulates an annotated text dataset '''\n NAME = ''\n LANGUAGE_PAIR = ('en', 'en')\n\n URLS = []\n RAW_SPLITS = {}\n SPLITS = {\n 'train': 'train.tok',\n 'valid': 'valid.tok',\n 'dev': 'valid.tok',\n 'test': 'test.tok'\n }\n\n IGNORE_REGEX_LIST = []\n SEGMENT_REGEX = re.compile(r'<\\s*seg\\s+id\\s*=\\s*\"\\d+\"\\s*>\\s*(.+)\\s*<\\s*/\\s*seg\\s*>')\n\n def __init__(self, config, split='train', swap=False, annotation=TextAnnotation.NONE):\n ''' Initialize the annotated text dataset '''\n super(AnnotatedTextDataset, self).__init__(config, split=split)\n\n self.swap = swap\n self.segmenters = []\n self.annotation = annotation\n\n @classmethod\n def name(cls, swap=False, annotation=TextAnnotation.NONE):\n ''' Return a name for the dataset given the passed in configuration '''\n config = [cls.NAME] + list(reversed(cls.LANGUAGE_PAIR) if swap else cls.LANGUAGE_PAIR)\n if annotation.identifier:\n config += [annotation.identifier]\n\n return '_'.join(config)\n\n @property\n def source_language(self):\n ''' Return the source language '''\n return type(self).LANGUAGE_PAIR[1 if self.swap else 0]\n\n @property\n def target_language(self):\n ''' Return the target language '''\n return type(self).LANGUAGE_PAIR[0 if self.swap else 1]\n\n @property\n def mask_idx(self):\n ''' Return the start of summary value '''\n return self.token2id[MASKED]\n\n def span_idx(self, span):\n ''' Return the span index value '''\n return self.token2id[f'']\n\n @property\n def base_data_path(self):\n ''' Get the path of the processed data file '''\n return TextAnnotation.NONE.data_path(\n type(self).SPLITS[self.split],\n self.preprocess_directory\n )\n\n @property\n def source_annotation_data_path(self):\n ''' Get the path of the processed data file '''\n return self.annotation.data_path(\n type(self).SPLITS[self.split],\n self.preprocess_directory,\n lang=self.source_language\n )\n\n @property\n def target_annotation_data_path(self):\n ''' Get the path of the processed data file '''\n return self.annotation.data_path(\n type(self).SPLITS[self.split],\n self.preprocess_directory,\n lang=self.target_language\n )\n\n @property\n def data_paths(self):\n ''' Get the list of data files '''\n return set([\n self.base_data_path,\n self.source_annotation_data_path,\n self.target_annotation_data_path\n ])\n\n @property\n def base_vocab_path(self):\n ''' Get the path of the vocab file '''\n return TextAnnotation.NONE.vocab_path(\n 
self.preprocess_directory,\n span=self.config.span\n )\n\n @property\n def annotation_vocab_path(self):\n ''' Get the path of the annotation specific vocab file '''\n return self.annotation.vocab_path(\n self.preprocess_directory,\n span=self.config.span\n )\n\n @property\n def constituent_vocab_path(self):\n ''' Get the path of the constituent vocab file '''\n return TextAnnotation.CONSTITUENCY_PARSE.vocab_path(\n self.preprocess_directory,\n span=self.config.span\n )\n\n @property\n def vocab_paths(self):\n ''' Get the list of vocab files '''\n return set([self.base_vocab_path, self.annotation_vocab_path])\n\n @property\n def preprocess_directory(self):\n ''' Get the preprocess directory '''\n return self.config.preprocess_directory\n\n @property\n def preprocess_buffer_size(self):\n ''' Get the preprocess buffer size '''\n return self.config.preprocess_buffer_size\n\n @property\n def stats(self):\n ''' Return the dataset stats '''\n metric_store = super(AnnotatedTextDataset, self).stats\n\n if self.annotation is TextAnnotation.NONE or self.split == 'train':\n return metric_store\n\n spans = metrics.Metric('Constituent Spans', metrics.format_float, 'l(max)')\n for datum in self.data:\n _, target_spans = self.segmenters[-1](datum['target_annotation'])\n if target_spans:\n spans.updates(target_spans)\n metric_store.add(spans)\n\n return metric_store\n\n def collate_field(self, batch, field_name, values):\n ''' Collate a specific field '''\n if 'annotation' in field_name:\n batch[field_name + 's'] = nn.utils.rnn.pad_sequence(\n values, batch_first=True, padding_value=self.padding_idx - self.reserved_range)\n batch[field_name + '_lens'] = torch.LongTensor([len(sequence) for sequence in values])\n else:\n super(AnnotatedTextDataset, self).collate_field(batch, field_name, values)\n\n def annotation_spans(self, annotation):\n ''' Calculate the spans from the annotation '''\n spans = []\n for constituent_id in annotation:\n constituent = self.id2token[int(constituent_id)]\n match = ParseTree.CONSTITUENT_REGEX.match(constituent)\n spans.append(int(match[2]) if match else 1)\n\n return spans\n\n def annotated_sequence(self, target, annotation, spans):\n ''' Create the masked target from the annotation and spans '''\n annotation_target = []\n original_target = list(target)\n for span_idx, span in enumerate(spans):\n annotation_target.append(annotation[span_idx])\n annotation_target.extend(original_target[:span])\n original_target = original_target[span:]\n\n assert not original_target\n return annotation_target\n\n def masked_target(self, annotation, spans):\n ''' Create the masked target from the annotation and spans '''\n return self.annotated_sequence([self.mask_idx] * int(sum(spans)), annotation, spans)\n\n def tensorize(self, index):\n ''' Tensorize the specified example index '''\n if self.annotation is TextAnnotation.NONE:\n return super(AnnotatedTextDataset, self).tensorize(index)\n\n datum = self.data[index]\n segmenter = (\n self.segmenters[random.randrange(self.config.span)]\n if self.config.randomize_chunks else\n self.segmenters[-1]\n )\n target_annotation, target_spans = segmenter(datum['target_annotation'])\n target_annotation = (\n [self.token2id[annotation] for annotation in target_annotation]\n if self.annotation is TextAnnotation.CONSTITUENCY_PARSE\n else [self.span_idx(span) for span in target_spans]\n )\n masked_target = self.masked_target(target_annotation, target_spans)\n annotated_target = self.annotated_sequence(\n datum['target'], target_annotation, target_spans\n )\n\n 
example = {}\n example['input'] = torch.LongTensor(datum['input'])\n example['target'] = torch.LongTensor(annotated_target)\n example['masked_target'] = torch.LongTensor(masked_target)\n example['target_annotation'] = torch.LongTensor(\n [self.sos_idx] + list(target_annotation) + [self.eos_idx]\n ) - self.reserved_range\n\n return example\n\n def preprocess_raw_line(self, line, xml=False):\n ''' Preprocess the raw text '''\n line = line.strip()\n if self.config.max_line_length and len(line) > self.config.max_line_length:\n return\n\n if any(ignore.match(line) for ignore in type(self).IGNORE_REGEX_LIST):\n return\n\n if xml:\n match = type(self).SEGMENT_REGEX.match(line)\n if not match:\n return\n return match[1]\n\n return line\n\n def download_and_extract(self):\n ''' Download and extract the dataset '''\n for filename, url in type(self).URLS:\n filepath = os.path.join(self.config.data_directory, filename)\n maybe_download(filepath, url)\n extract_all(filepath, self.preprocess_directory)\n\n def preprocess_raw(self):\n ''' Tokenize/bpe encode the raw text '''\n def is_xml(filename):\n ''' Determine if a file is XML formatted '''\n return filename.endswith('.sgm') or filename.endswith('.xml')\n\n def filter_lines(in_file, basename):\n ''' Scan the file for any filtered lines '''\n filtered = set()\n xml = is_xml(basename)\n for i, line in enumerate(in_file):\n if not self.preprocess_raw_line(line, xml=xml):\n filtered.add(i)\n\n return filtered\n\n def merge(basename, in_file, out_file, filtered=None):\n ''' Tokenize the passed in file and write it to the designated file '''\n filtered = filtered or set()\n xml = is_xml(basename)\n for i, line in enumerate(in_file):\n if i in filtered:\n continue\n\n processed_line = self.preprocess_raw_line(line, xml=xml)\n out_file.write(processed_line + '\\n')\n\n # First, clean-up any incomplete preprocessing files\n for path in glob.glob(os.path.join(self.preprocess_directory, '*.incomplete')):\n os.remove(os.path.join(self.preprocess_directory, path))\n\n bpe_code_path = os.path.join(self.preprocess_directory, 'bpe.32000')\n if not os.path.exists(bpe_code_path):\n for split, file_pairs in type(self).RAW_SPLITS.items():\n for pair in file_pairs:\n # First determine which lines must be skipped in both files, since the files are\n # a parallel corpora.\n filtered = set()\n for filename, lang in zip(pair, type(self).LANGUAGE_PAIR):\n in_path = os.path.join(self.preprocess_directory, filename)\n with ExitStack() as stack:\n in_file = stack.enter_context(Open(in_path, 'rt'))\n filtered.update(filter_lines(in_file, os.path.basename(filename)))\n\n for filename, lang in zip(pair, type(self).LANGUAGE_PAIR):\n basename = os.path.basename(filename)\n in_path = os.path.join(self.preprocess_directory, filename)\n split_path = os.path.join(self.preprocess_directory, f'{split}.{lang}')\n\n if os.path.exists(split_path):\n continue\n\n with ExitStack() as stack:\n out_path = f'{split_path}.incomplete'\n in_file = stack.enter_context(Open(in_path, 'rt'))\n out_file = stack.enter_context(Open(out_path, 'at'))\n\n merge(basename, in_file, out_file, filtered)\n\n word_counts = Counter()\n for split in type(self).RAW_SPLITS:\n for lang in type(self).LANGUAGE_PAIR:\n try:\n split_path = os.path.join(self.preprocess_directory, f'{split}.{lang}')\n os.rename(f'{split_path}.incomplete', split_path)\n except FileNotFoundError:\n # This can happen if the preprocessing is interrupted\n pass\n\n tokenized_path = os.path.join(self.preprocess_directory, f'{split}.tok.{lang}')\n 
word_counts.update(preprocess.tokenize(\n split_path, tokenized_path, self.preprocess_buffer_size\n ))\n\n print('Learning BPE')\n preprocess.learn_bpe(bpe_code_path, word_counts.items())\n\n vocab_path = os.path.join(self.preprocess_directory, 'vocab.bpe.32000')\n if not os.path.exists(vocab_path):\n vocab = set()\n for split in type(self).RAW_SPLITS:\n for lang in type(self).LANGUAGE_PAIR:\n in_path = os.path.join(\n self.preprocess_directory,\n f'{split}.tok.{lang}'\n )\n bpe_path = os.path.join(\n self.preprocess_directory,\n f'{split}.tok.bpe.32000.{lang}'\n )\n\n vocab.update(preprocess.apply_bpe(\n bpe_code_path, in_path, bpe_path, self.preprocess_buffer_size\n ))\n\n vocab_path = os.path.join(self.preprocess_directory, 'vocab.bpe.32000')\n incomplete_vocab_path = f'{vocab_path}.incomplete'\n with Open(incomplete_vocab_path, 'wt') as vocab_file:\n vocab_file.writelines('\\n'.join([word for word in sorted(vocab)]))\n os.rename(incomplete_vocab_path, vocab_path)\n\n def preprocess(self):\n ''' Do any data preprocessing if needed '''\n if (\n all(os.path.exists(p) for p in self.data_paths) and\n all(os.path.exists(p) for p in self.vocab_paths)\n ):\n return\n\n if not os.path.exists(self.preprocess_directory):\n os.makedirs(self.preprocess_directory)\n\n self.download_and_extract()\n self.preprocess_raw()\n\n # Make sure we have loaded the vocab\n self.load_vocab(preprocessing=True)\n\n split_filename = type(self).SPLITS[self.split]\n self.preprocess_bpe(split_filename)\n\n if self.annotation in (\n TextAnnotation.PARSE_SPANS,\n TextAnnotation.CONSTITUENCY_PARSE\n ):\n base_annotation_id = len(self.id2token)\n for filename in type(self).SPLITS.values():\n self.preprocess_parse(filename)\n\n if not os.path.exists(self.constituent_vocab_path):\n with Open(self.constituent_vocab_path, 'wt') as file:\n file.write('\\n'.join([\n self.id2token[annotation_id]\n for annotation_id in range(base_annotation_id, len(self.id2token))\n ]))\n\n def preprocess_parse(self, filename):\n ''' Preprocess the parse data '''\n base_path = os.path.join(self.preprocess_directory, f'{filename}')\n tokenized_bpe_path = f'{base_path}.bpe.32000'\n\n source_path = f'{base_path}.{self.source_language}.parse'\n if not os.path.exists(source_path):\n preprocess.parse(\n f'{tokenized_bpe_path}.{self.source_language}',\n source_path,\n self.preprocess_buffer_size\n )\n\n target_path = f'{base_path}.{self.target_language}.parse'\n if not os.path.exists(target_path):\n preprocess.parse(\n f'{tokenized_bpe_path}.{self.target_language}',\n target_path,\n self.preprocess_buffer_size\n )\n\n if os.path.exists(self.constituent_vocab_path):\n return\n\n bpe_path = os.path.join(self.preprocess_directory, 'bpe.32000')\n self.segmenters = [\n preprocess.ParseSegmenter(\n bpe_path, span, self.config.max_span, self.config.randomize_chunks\n )\n for span in range(1, self.config.span + 1)\n ]\n\n vocab = preprocess.get_parse_vocab(\n f'{base_path}.{self.source_language}.parse',\n self.segmenters, self.preprocess_buffer_size\n )\n vocab.update(preprocess.get_parse_vocab(\n f'{base_path}.{self.target_language}.parse',\n self.segmenters, self.preprocess_buffer_size\n ))\n\n for token in vocab:\n if token not in self.token2id:\n self.token2id[token] = len(self.id2token)\n self.id2token.append(token)\n\n def preprocess_bpe(self, filename):\n ''' Preprocess the BPE data '''\n tokenized_bpe_path = os.path.join(self.preprocess_directory, f'{filename}.bpe.32000')\n\n target_path = f'{tokenized_bpe_path}.{self.target_language}'\n source_path = 
f'{tokenized_bpe_path}.{self.source_language}'\n processed_path = f'{tokenized_bpe_path}.bin'\n\n if os.path.exists(processed_path):\n return\n\n with ExitStack() as stack:\n source_file = stack.enter_context(Open(source_path, 'rt'))\n target_file = stack.enter_context(Open(target_path, 'rt'))\n\n def encode_sentence(line):\n ''' Helper function that encodes a sentence '''\n sentence = array.array('H')\n sentence.extend((\n self.token2id[token]\n for token in line.split()\n ))\n\n byte_rep = sentence.tostring()\n byte_len = len(byte_rep)\n return struct.pack('Q{}s'.format(byte_len), byte_len, byte_rep)\n\n out_file = stack.enter_context(tempfile.NamedTemporaryFile())\n for source_line, target_line in zip(source_file, target_file):\n source_sentence = encode_sentence(source_line)\n target_sentence = encode_sentence(target_line)\n\n out_file.write(source_sentence)\n out_file.write(target_sentence)\n\n out_file.flush()\n shutil.copy(out_file.name, f'{processed_path}.incomplete')\n os.rename(f'{processed_path}.incomplete', processed_path)\n\n def load_vocab(self, preprocessing=False):\n ''' Return the data loader for the dataset '''\n if not os.path.exists(self.base_vocab_path):\n print('Cannot find the vocab file!')\n exit(1)\n\n with Open(self.base_vocab_path, 'rt') as vocab_file:\n self.token2id = {}\n self.id2token = []\n for token in vocab_file.read().split('\\n'):\n self.token2id[token] = len(self.id2token)\n self.id2token.append(token)\n\n super(AnnotatedTextDataset, self).load_vocab(preprocessing)\n if preprocessing or self.annotation is TextAnnotation.NONE:\n return\n\n if self.annotation is TextAnnotation.CONSTITUENCY_PARSE:\n if not os.path.exists(self.annotation_vocab_path):\n print('Cannot find the annotation vocab file!')\n exit(1)\n\n with Open(self.annotation_vocab_path, 'rt') as vocab_file:\n for token in vocab_file.read().split('\\n'):\n self.token2id[token] = len(self.id2token)\n self.id2token.append(token)\n elif self.annotation is TextAnnotation.PARSE_SPANS:\n for i in range(self.config.span):\n token = f''\n self.token2id[token] = len(self.id2token)\n self.id2token.append(token)\n\n\n self.token2id[MASKED] = len(self.id2token)\n self.id2token.append(MASKED)\n\n # Need to cache off the segmenters as the BPE loading is slow. 
We do\n # not want that overhead for each subprocess we create in the dataloaders.\n bpe_path = os.path.join(self.preprocess_directory, 'bpe.32000')\n self.segmenters = [\n preprocess.ParseSegmenter(\n bpe_path, span, self.config.max_span, self.config.randomize_chunks\n )\n for span in range(1, self.config.span + 1)\n ]\n\n def load_text(self):\n ''' Load the translations '''\n if not all(os.path.exists(p) for p in self.data_paths):\n print('Cannot find the processed translations!')\n exit(1)\n\n with ExitStack() as stack:\n base_data_file = stack.enter_context(Open(self.base_data_path, 'rb'))\n if self.annotation is not TextAnnotation.NONE:\n source_annotation_data_file = stack.enter_context(\n Open(self.source_annotation_data_path, 'rt')\n )\n target_annotation_data_file = stack.enter_context(\n Open(self.target_annotation_data_path, 'rt')\n )\n\n while True:\n if self.swap:\n source_key = 'target'\n target_key = 'input'\n else:\n source_key = 'input'\n target_key = 'target'\n\n example = {}\n example['input'] = array.array('H')\n example['target'] = array.array('H')\n\n # prepend the start of sentence token to the target\n if self.annotation is TextAnnotation.NONE:\n example['target'].append(self.sos_idx)\n\n source_sentence_len = base_data_file.read(8)\n if not source_sentence_len:\n break\n\n source_sentence_len, = struct.unpack('Q', source_sentence_len)\n example[source_key].fromstring(base_data_file.read(source_sentence_len))\n\n target_sentence_len = base_data_file.read(8)\n if not target_sentence_len:\n print('Unexpected end of file while trying to read a de sentence!')\n exit(1)\n\n target_sentence_len, = struct.unpack('Q', target_sentence_len)\n example[target_key].frombytes(base_data_file.read(target_sentence_len))\n\n # append the end of sentence token to the target\n if self.annotation is TextAnnotation.NONE:\n example['target'].append(self.eos_idx)\n\n if self.annotation in (\n TextAnnotation.PARSE_SPANS,\n TextAnnotation.CONSTITUENCY_PARSE\n ):\n example['source_annotation'] = source_annotation_data_file.readline()\n example['target_annotation'] = target_annotation_data_file.readline()\n\n self.add_datum(example)\n"}}},{"rowIdx":542680,"cells":{"filename":{"kind":"string","value":"the-stack_106_31016"},"text":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@Author: chenzhen\n@Date: 2020-04-10 17:04:46\n@LastEditTime: 2020-04-24 15:45:41\n@LastEditors: chenzhen\n@Description:\n'''\n\nimport sys\nsys.path.append('../../')\n\nimport numpy as np\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import OneHotEncoder\nimport matrixslow as ms\nfrom matrixslow.trainer import SimpleTrainer\n\n\n# 输入图像尺寸\nimg_shape = (28, 28)\n\n# 加载MNIST数据集,取一部分样本并归一化\ntest_data, test_label = fetch_openml(\n 'mnist_784', version=1, return_X_y=True, cache=True)\ntest_data, test_label = test_data[1000:2000] / \\\n 255, test_label.astype(np.int)[1000:2000]\ntest_data = np.reshape(np.array(test_data), (1000, *img_shape))\n\n\nsaver = ms.trainer.Saver('./epoches10')\n\nsaver.load(model_file_name='my_model.json', weights_file_name='my_weights.npz')\n\n# 根据训练时定义的节点名称,从计算图中把输入输出节点查询出来\n# 如果训练时未定义,节点名称自动生成,需要从模型文件中人为识别出来\nx = ms.get_node_from_graph('img_input')\npred = ms.get_node_from_graph('softmax_output')\n\nfor index in range(len(test_data)):\n # 把预测数据赋值给输入节点\n x.set_value(np.mat(test_data[index]).T)\n # 执行前向传播,计算输出节点的值,即模型预测概率\n pred.forward()\n gt = test_label.values[index]\n print('model predict {} and ground truth: {}'.format(\n 
np.argmax(pred.value), gt))\n"}}},{"rowIdx":542681,"cells":{"filename":{"kind":"string","value":"the-stack_106_31020"},"text":{"kind":"string","value":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n complete_readme = fh.read()\n long_description = complete_readme.split(\"**System image\")[0]\n long_description += \"\\n\\n**Made by Help-a-Sloth org. Check us on GitHub.**\"\n\n\nsetuptools.setup(\n name=\"mischief-managed\", \n packages=setuptools.find_packages(),\n version=\"1.0.0\",\n author=\"Hemant Singh\",\n keywords=[\"Quick Work\" , \"Productivity\" , \"Automation\", \"Cleanup\"\n \"Files\" , \"Management\" , \"Tidy\" , \"Folder Manage\"],\n description=(\"Files outside any folder are \"+\n \"made tidy/managed by putting inside folder based on \"+\n \"their extension or date\"),\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Help-a-Sloth/mischief-managed\",\n maintainer=\"amifunny\",\n entry_points={\n 'console_scripts':[\n 'mischief-managed=mischief_managed.__main__:main'\n ]\n },\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n ]\n\n)"}}},{"rowIdx":542682,"cells":{"filename":{"kind":"string","value":"the-stack_106_31021"},"text":{"kind":"string","value":"\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom __future__ import division\nget_ipython().magic(u'matplotlib inline')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport multivarlinreg\nimport rmse\n\n\n# In[ ]:\n\n\n#Linear regression\nred_train = np.loadtxt('redwine_training.txt')\nred_test = np.loadtxt('redwine_testing.txt')\nred_train_data = red_train[:, :11]\nred_train_score = red_train[:, 11]\nred_test_data = red_test[:, :11]\nred_test_score = red_test[:, 11]\n#red_train.shape\n\n\n# In[ ]:\n\n\n\"\"\"\ndef multivarlinreg(data, ground_truth):\n #data = full_data[:, :-1]\n X = np.hstack((data, np.repeat(1, data.shape[0]).reshape(-1, 1)))\n X_T_X = np.dot(X.T, X)\n # if full-rank matrix or positive definite matrix:\n #check if it invertible\n if np.linalg.det(X_T_X) != 0:\n inverse = np.linalg.inv(X_T_X)\n w = np.dot(np.dot(inverse, X.T), ground_truth) #w0 at the last column\n #print w\n return w\n else:\n print \"use other method\"\n \"\"\"\n\n\n# In[ ]:\n\n\n#only contains the first feature (fixed acidity)\ntrain_fixed_acidity = red_train_data[:, 0].reshape(-1, 1)\ntrain_w_acidity = multivarlinreg.multivarlinreg(train_fixed_acidity, red_train_score)\ntrain_w_acidity\n#the propotion of acidity is not very high; bias is very large for it???\n#actually we can not use it to predivt the wine's quality very well\n#array([0.05035934, 5.2057261 ])\n\n\n# In[ ]:\n\n\n#physiochemical\nw_all = multivarlinreg.multivarlinreg(red_train_data, red_train_score)\nw_all.shape\nnp.set_printoptions(suppress=True)\nw_all\n#positive relate negative relation\n#the first weight for acidity is changed \n#Some features play important roles in wine's quality. 
Some features are negatively related.\n\n\n# In[ ]:\n\n\n\"\"\"#Exercise 3 (Evaluating Linear Regression).\ndef rmse(predicted_value, ground_truth):\n diff = ground_truth - predicted_value\n diff_square = np.dot(diff, diff)\n #rmse = np.sqrt(np.divide(diff_square, ground_truth.shape[0]))\n rmse = np.sqrt(diff_square/ground_truth.shape[0])\n return rmse\n \"\"\"\n\n\n# In[ ]:\n\n\n#1-dimensional input variables using the training set\n#first feature for the test set\ntest_fixed_acidity = red_test_data[:, 0].reshape(-1, 1)\ntest_X_acidity = np.hstack((test_fixed_acidity, np.repeat(1, test_fixed_acidity.shape[0]).reshape(-1, 1)))\npredicted_score_acidity = np.dot(test_X_acidity, train_w_acidity.T)\n#predicted_score_acidity = predicted_value(train_fixed_acidity, test_fixed_acidity, red_test_score)\nrmse.rmse(predicted_score_acidity, red_test_score)\n#0.7860892754162216\n\n\n# In[ ]:\n\n\n#full 11-dimensional input variables\ntest_X = np.hstack((red_test_data, np.repeat(1, red_test_data.shape[0]).reshape(-1, 1)))\npredicted_score = np.dot(test_X, w_all.T)\nrmse.rmse(predicted_score, red_test_score)\n#0.644717277241364\n\n"}}},{"rowIdx":542683,"cells":{"filename":{"kind":"string","value":"the-stack_106_31022"},"text":{"kind":"string","value":"#!/usr/bin/python\nfrom __future__ import division\nimport numpy as np\nimport scipy as sp\nfrom scipy.stats import gaussian_kde\nfrom scipy.interpolate import interp1d\nfrom scipy.integrate import quad\nfrom scipy.special import gamma, gammaln, polygamma\nfrom scipy.optimize import minimize_scalar\nfrom math import pi\n\nTINY_FLOAT64 = sp.finfo(sp.float64).tiny\n\n\n\"\"\"\nGaussian Kernel Density Estimation\n\"\"\"\n\n\n# Gaussian kernel density estimation with cross validation and bootstrap sampling\ndef gkde(data0, xs, num_samples=0, num_h=100, massage_J=True, tolerance=1E-3, ERROR_switch=False):\n\n data = data0.copy()\n N = len(data)\n G = len(xs)\n dx = xs[1] - xs[0]\n\n # Sort data\n data.sort()\n\n # Set h_min to minimal data spacing. Shift data if there are ties\n h_min = np.diff(data).min()\n if h_min == 0.:\n # This ensures the shifted data will round to the correct value (to 1st decimal for WHO data)\n data_shifted = np.zeros(N) # Do not change data directly. 
Use data_shifted!\n for i in range(N):\n if data[i] == xs.min():\n data_shifted[i] = data[i] + 0.05 * np.random.rand()\n if xs.min() < data[i] < xs.max():\n data_shifted[i] = data[i] + 0.10 * (np.random.rand() - 0.5)\n if data[i] == xs.max():\n data_shifted[i] = data[i] - 0.05 * np.random.rand()\n data = data_shifted\n data.sort()\n h_min = np.diff(data).min()\n # If there are still ties, give up\n if h_min == 0.:\n Q_star, Q_samples, ERROR_switch = None, None, True\n return Q_star, Q_samples, ERROR_switch\n\n # Set h_max to maximal data spacing x 10\n h_max = (data.max()-data.min()) * 10\n\n # Form hs\n hs = np.geomspace(h_min, h_max, num_h)\n\n # For each h, compute the risk function\n Js = np.zeros(num_h)\n for k in range(num_h):\n h = hs[k]\n sum_over_i = 0.\n for i in range(N):\n data_i = list(data.copy())\n data_i.pop(i)\n Q_i = gaussian_kde(data_i, bw_method=h)(xs)\n Q_i /= (sp.sum(Q_i*dx) + TINY_FLOAT64)\n # Set negative interpolated values (occurring when h is very small) to 0\n value = max(float(interp1d(xs, Q_i, kind='cubic', fill_value=\"extrapolate\")(data[i])), 0.)\n sum_over_i += np.log(value + TINY_FLOAT64)\n J = - sum_over_i\n # Terminate if got an nan from gaussian_kde\n if np.isnan(J):\n Q_star, Q_samples, ERROR_switch = None, None, True\n return Q_star, Q_samples, ERROR_switch\n Js[k] = J\n\n # Massage Js so that the risk function is better-behaved\n if massage_J:\n Js = Js - Js.min() + 1.0\n Js = np.log(Js)\n\n # Interpolate the risk function\n J_func = interp1d(hs, Js, kind='cubic')\n\n # Compute 1st derivative of the risk function\n dJdhs = np.gradient(Js)\n\n # Solve for all hs that correspond to local extrema of the risk function\n hs_solved, Js_solved = [], []\n for k in range(num_h-1):\n if dJdhs[k] * dJdhs[k+1] < 0:\n h_k = h_solver(hs[k], hs[k+1], hs, dJdhs, tolerance)\n J_k = float(J_func(h_k))\n hs_solved.append(h_k)\n Js_solved.append(J_k)\n\n # Pick up h_star that corresponds to the global minimum of the risk function\n if len(hs_solved) > 0:\n h_star = hs_solved[sp.array(Js_solved).argmin()]\n # If this minimum is actually local, set h_star to either h_max or h_min\n if (min(Js_solved) > Js[0]) or (min(Js_solved) > Js[-1]):\n if Js[0] > Js[-1]:\n h_star = h_max\n elif Js[0] < Js[-1]:\n h_star = h_min\n # If no h were solved, set h_star to either h_max or h_min\n else:\n if Js[0] > Js[-1]:\n h_star = h_max\n elif Js[0] < Js[-1]:\n h_star = h_min\n\n # Estimate the optimal density with h_star\n Q_star = gaussian_kde(data, bw_method=h_star)(xs)\n Q_star /= sp.sum(Q_star*dx)\n\n # Use bootstrap to estimate uncertainty (h is fixed at h_star)\n Q_samples = np.zeros([G,num_samples])\n for k in range(num_samples):\n bootstrapped_data = np.random.choice(data, size=N, replace=True)\n Q_k = gaussian_kde(bootstrapped_data, bw_method=h_star)(xs)\n Q_k /= sp.sum(Q_k*dx)\n Q_samples[:,k] = Q_k\n\n # Return\n return Q_star, Q_samples, ERROR_switch\n\n\n# Solve h at which dJdh = 0 using bisection\ndef h_solver(h_lb, h_ub, hs, dJdhs, tolerance):\n h1, h2 = h_lb, h_ub\n hm_old = np.inf\n while True:\n hm = (h1+h2)/2\n if abs(hm-hm_old) < tolerance:\n break\n hm_old = hm\n f1 = dJdh_func(h1, hs, dJdhs)\n f2 = dJdh_func(h2, hs, dJdhs)\n fm = dJdh_func(hm, hs, dJdhs)\n if f1*fm < 0:\n h1, h2 = h1, hm\n elif fm*f2 < 0:\n h1, h2 = hm, h2\n return hm\n\n\n# 1st derivative of the risk function\ndef dJdh_func(h, hs, dJdhs):\n return interp1d(hs, dJdhs, kind='cubic')(h)\n\n\n\"\"\"\nDirichlet Process Mixture Modeling\n\"\"\"\n\n\n# Dirichlet process mixture modeling with Gibbs 
sampling\ndef dpmm(data, xs, num_samples=100, num_thermalization=100, H=10, M=1, ERROR_switch=False):\n\n N = len(data)\n G = len(xs)\n\n # Initialize\n kappa = 1\n mu0 = sp.mean(data)\n alpha0 = 1\n beta0 = sp.std(data)**2\n\n m_array = np.zeros([H,2])\n m_array[:,1] = invgamma_sampler(alpha=alpha0, beta=beta0, size=H)\n for h in range(H):\n m_array[h,0] = np.random.normal(loc=mu0, scale=sp.sqrt(kappa*m_array[h,1]), size=1)\n\n w_array = np.ones(H) / H\n\n # Gibbs sampling\n Q_samples = np.zeros([G,num_samples])\n for k in range(num_thermalization+num_samples):\n\n # Update clustering\n r_array = np.zeros(N)\n for i in range(N):\n wf = np.zeros(H)\n for h in range(H):\n wf[h] = w_array[h] * normal(x=data[i], mu=m_array[h,0], sigma=sp.sqrt(m_array[h,1]))\n wf /= sp.sum(wf)\n r_array[i] = np.random.choice(range(H), size=1, p=wf)\n\n r_list = [int(r_array[i]) for i in range(N)]\n\n # Update locations\n m_array = np.zeros([H,2])\n for h in range(H):\n i_list = []\n for i in range(N):\n if r_list[i] == h:\n i_list.append(i)\n n_h = len(i_list)\n if n_h > 0:\n data_h = data[i_list]\n data_mean_h = sp.mean(data_h)\n kappa_h = 1 / (1/kappa + n_h)\n mu_h = kappa_h * (mu0/kappa + n_h*data_mean_h)\n alpha_h = alpha0 + n_h / 2\n beta_h = beta0 + (sp.sum((data_h-data_mean_h)**2) + n_h/(1+kappa*n_h)*(data_mean_h-mu0)**2) / 2\n m_array[h,1] = invgamma_sampler(alpha=alpha_h, beta=beta_h, size=1)\n m_array[h,0] = np.random.normal(loc=mu_h, scale=sp.sqrt(kappa_h*m_array[h,1]), size=1)\n else:\n m_array[h,1] = invgamma_sampler(alpha=alpha0, beta=beta0, size=1)\n m_array[h,0] = np.random.normal(loc=mu0, scale=sp.sqrt(kappa*m_array[h,1]), size=1)\n\n # Update weights (stick-breaking algorithm)\n A_array = np.zeros(H)\n for h in range(H):\n A_array[h] = r_list.count(h)\n B_array = np.zeros(H)\n for h in range(H):\n B_array[h] = sp.sum(A_array[h+1:])\n\n v_array = np.zeros(H)\n for h in range(H):\n v_array[h] = np.random.beta(a=A_array[h]+1, b=B_array[h]+M, size=1)\n\n u_array = np.ones(H) - v_array\n\n w_array = np.zeros(H)\n w_array[0] = v_array[0]\n for h in range(1, H-1):\n w_array[h] = v_array[h] * np.cumprod(u_array[:h])[-1]\n w_array[-1] = abs(1-sp.sum(w_array))\n\n # Save samples after thermalization\n if k > num_thermalization-1:\n Q_samples[:,k-num_thermalization] = combine_normals(xs, w_array, m_array)\n\n # Compute mean of the samples as the optimal density\n Q_star = Q_samples.mean(axis=1)\n\n # Return\n return Q_star, Q_samples, ERROR_switch\n\n\n# Inverse-gamma distribution\ndef invgamma(x, alpha, beta):\n return beta**alpha * sp.exp(-beta/x) / gamma(alpha) / x**(alpha+1)\n\n\n# Draw random numbers from inverse-gamma distribution\ndef invgamma_sampler(alpha, beta, size, invgamma_min=1E-3):\n x_start = beta/(alpha+1) # mode (most likely value) of invgamma\n x_lb = x_start\n while invgamma(x_lb, alpha, beta) > invgamma_min:\n x_lb /= 10.0\n x_ub = x_start\n while invgamma(x_ub, alpha, beta) > invgamma_min:\n x_ub *= 10.0\n xs = np.linspace(x_lb, x_ub, 10001)\n dx = xs[1] - xs[0]\n xs = np.linspace(x_lb+dx/2, x_ub-dx/2, 10000)\n prob = invgamma(xs, alpha, beta) / sp.sum(invgamma(xs, alpha, beta))\n samples = np.random.choice(xs, size=size, replace=True, p=prob)\n jitter = dx * (np.random.rand(size)-0.5)\n samples += jitter\n return samples\n\n\n# Normal distribution\ndef normal(x, mu, sigma):\n return sp.exp(-(x-mu)**2/(2*sigma**2)) / sp.sqrt(2*pi*sigma**2)\n\n\n# Combine normal distributions\ndef combine_normals(xs, w_array, m_array):\n H = len(w_array)\n G = len(xs)\n dx = xs[1] - xs[0]\n wf = 
np.zeros([H,G])\n for h in range(H):\n wf[h,:] = w_array[h] * normal(xs, mu=m_array[h,0], sigma=sp.sqrt(m_array[h,1]))\n Q = wf.sum(axis=0)\n Q /= sp.sum(Q*dx)\n return Q\n\n\n\"\"\"\nSome utility functions\n\"\"\"\n\n\n# Compute log-likelihood per datum\ndef likelihood(xs, Q, data):\n Q_func = interp1d(xs, Q, kind='cubic', fill_value=\"extrapolate\")\n L_data = 1/len(data) * sp.sum(sp.log(Q_func(data) + TINY_FLOAT64))\n return L_data\n\n\n# Compute Kullback-Leibler divergence, D_KL(P||Q)\ndef KL_divergence(P, Q, dx):\n D_KL = sp.sum(dx * P * sp.log((P+TINY_FLOAT64)/(Q+TINY_FLOAT64)))\n return D_KL\n\n\n# Given a set of data, compute p-value of an arbitrary data point\ndef p_value_cal(data, point):\n count = 0\n for i in range(len(data)):\n if data[i] <= point:\n count += 1\n p_value = count/len(data)\n return p_value\n\n\n\"\"\"\nEntropy Estimators\n\"\"\"\n\n\n# Naive estimator. Ref: Justin's dissertation\ndef naive_estimator(data, N, G, bbox):\n\n # Make a histogram of the data and get the count in each bin\n bin_edges = np.linspace(bbox[0], bbox[1], G+1)\n counts, bin_edges = np.histogram(a=data, bins=bin_edges)\n\n # Turn counts into frequencies\n freqs = counts/N\n\n # Compute entropy, Eqn.(3.15)\n H = -sp.sum(freqs * sp.log(freqs+TINY_FLOAT64))\n\n # Correct entropy by adding log(L/G)\n L = bbox[1] - bbox[0]\n H += sp.log(L/G)\n\n # Convert from nats to bits\n H *= sp.log2(sp.exp(1))\n\n # Return\n return H\n\n\n# kNN estimator. Ref: A. Kraskov et al, Phys. Rev. E 69, 066138 (2004)\ndef kNN_estimator(data, N, k):\n\n # Compute pair-distances between the data points\n pair_dists = abs(sp.array(sp.mat(data).T * sp.mat(np.ones(N)) - sp.mat(np.ones(N)).T * sp.mat(data)))\n\n # Sort pair-distances, from small to large, for each row\n pair_dists.sort(axis=1)\n\n # Choose the kNN pair-distances\n kNN_pair_dist = pair_dists[:,k]\n\n # Compute entropy, Eqn.(20)\n H = polygamma(0,N) - polygamma(0,k) + 1/N * sp.sum(sp.log(2*kNN_pair_dist+TINY_FLOAT64))\n\n # Convert from nats to bits\n H *= sp.log2(sp.exp(1))\n\n # Return\n return H\n\n\n# NSB estimator. 
Ref: Justin's dissertation\ndef NSB_estimator(data, N, G, bbox):\n\n # Make a histogram of the data and get the count in each bin\n bin_edges = np.linspace(bbox[0], bbox[1], G+1)\n counts, bin_edges = np.histogram(a=data, bins=bin_edges)\n\n # Determine the maximum of the log probability\n beta_star = minimize_scalar(neg_log_prob, method='golden', bounds=(0, np.inf), args=(G, N, counts)).x\n log_prob_beta_star = log_prob(beta_star, G, N, counts)\n\n # Compute entropy and its variance, Eqn.(3.29) and Eqn.(3.33)\n denom = quad(integrand_p, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0]\n numer_H = quad(integrand_pH, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0]\n numer_Hsq = quad(integrand_pHsq, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0]\n numer_varH = quad(integrand_pvarH, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0]\n\n H_mean = numer_H/denom\n H_sq_mean = numer_Hsq/denom\n H_var = numer_varH/denom + H_sq_mean - H_mean**2\n\n # Correct H mean by adding log(L/G)\n L = bbox[1] - bbox[0]\n H_mean += sp.log(L/G)\n\n # Convert from nats to bits\n H_mean *= sp.log2(sp.exp(1))\n H_error = np.sqrt(H_var) * sp.log2(sp.exp(1))\n\n # Return\n return H_mean, H_error\n\n\n# log of Eqn.(3.32)\ndef log_prob(beta, G, N, counts):\n if beta <= 0:\n return -np.inf\n else:\n return gammaln(beta*G) - G*gammaln(beta) + sp.sum(gammaln(counts+beta)) - gammaln(N+beta*G) + sp.log(G*polygamma(1,beta*G+1) - polygamma(1,beta+1))\n\n\n# Negative of log_prob\ndef neg_log_prob(beta, G, N, counts):\n return -log_prob(beta, G, N, counts)\n\n\n# Eqn.(3.22)\ndef H(beta, G, N, counts):\n A = counts + beta + 1\n B = N + beta*G + 1\n return polygamma(0,B) - sp.sum((A-1)/(B-1)*polygamma(0,A))\n\n\n# Eqn.(3.24)\ndef var_H(beta, G, N, counts):\n A = counts + beta + 1\n B = N + beta*G + 1\n return sp.sum(A/B*(A-1)/(B-1)*polygamma(1,A)) - polygamma(1,B) + sp.sum(1/B*(A-1)/(B-1)*polygamma(0,A)**2) - 1/B*sp.sum((A-1)/(B-1)*polygamma(0,A))**2\n\n\ndef integrand_p(beta, G, N, counts, log_prob_beta_star):\n return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star)\n\n\ndef integrand_pH(beta, G, N, counts, log_prob_beta_star):\n return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star) * H(beta, G, N, counts)\n\n\ndef integrand_pHsq(beta, G, N, counts, log_prob_beta_star):\n return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star) * H(beta, G, N, counts)**2\n\n\ndef integrand_pvarH(beta, G, N, counts, log_prob_beta_star):\n return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star) * var_H(beta, G, N, counts)\n"}}},{"rowIdx":542684,"cells":{"filename":{"kind":"string","value":"the-stack_106_31023"},"text":{"kind":"string","value":"from time import *\nfrom picamera import *\nimport numpy as np\nfrom drawTheTableauLib import *\n\n\"\"\"\nTakes a picture from the camera and saves it in the current directory in a jpg format\nprereq :\tresX > 0, resY > 0\n\t\t \tresX <= 2592, resY <= 1944\nparam :\t\tString\tfilename\tThe name of the file\n\t\t\tInt\t\tresX \t\tThe X resolution of the picture\n\t\t\tInt \tresY\t\tThe Y resolution of the picture\n\"\"\"\ndef takePic (filename ='image', resX = 1024, resY = 768):\n\tcamera = PiCamera()\n\tcamera.resolution = (resX, resY)\n\tcamera.start_preview()\n\tcamera.capture(filename + '.jpg')\n\tcamera.close()\n\n\"\"\"\nTakes a videp from the camera and saves it in the current directory in a h264 format\nprereq :\tresX > 0, resY > 0\n\t\t \tresX <= 1360, resY <= 720\nparam :\t\tString\tfilename\tThe name of the file\n\t\t\tInt\t\tresX \t\tThe X 
resolution of the picture\n\t\t\tInt \tresY\t\tThe Y resolution of the picture\n\"\"\"\ndef takeVid(filename ='video', time = 60, resX = 1024, resY = 768):\n\tcamera = picamera.PiCamera()\n\tcamera.resolution = (resX, resY)\n\tcamera.start_recording(filename + '.h264')\n\tcamera.wait_recording(time)\n\tcamera.stop_recording()\n\n\n\"\"\"\nTakes a picture from the camera and returns it in a numpy array\nprereq :\tresX > 0, resY > 0\n\t\t \tresX <= 2592, resY <= 1944\nparam :\t\tInt\t\tresX \t\tThe X resolution of the picture\n\t\t\tInt \tresY\t\tThe Y resolution of the picture\n\"\"\"\ndef takePicToNumpy(resX = 1024, resY = 768):\n\tcamera = PiCamera()\n\tcamera.resolution = (resX, resY)\n\tcamera.framerate = 24\n\txRounded = roundToNearestMultiple (resX, 16)\n\tyRounded = roundToNearestMultiple (resY, 32)\n\toutput = np.empty((xRounded * yRounded * 3,), dtype=np.uint8)\n\tcamera.capture(output, 'rgb')\n\toutput = output.reshape((xRounded, yRounded, 3))\n\toutput = output[:resX, :resY, :]\n\treturn output\n"}}},{"rowIdx":542685,"cells":{"filename":{"kind":"string","value":"the-stack_106_31024"},"text":{"kind":"string","value":"import re\nimport nidigital\nimport nitsm.codemoduleapi\nfrom nitsm.codemoduleapi import SemiconductorModuleContext\n\nOPTIONS = {\"Simulate\": True, \"driver_setup\": {\"Model\": \"6570\"}}\n\n\n@nitsm.codemoduleapi.code_module\ndef open_sessions(tsm_context: SemiconductorModuleContext):\n instrument_names = tsm_context.get_all_nidigital_instrument_names()\n for instrument_name in instrument_names:\n session = nidigital.Session(instrument_name, options=OPTIONS)\n session.load_pin_map(tsm_context.pin_map_file_path)\n session.load_specifications_levels_and_timing(\n tsm_context.nidigital_project_specifications_file_paths,\n tsm_context.nidigital_project_levels_file_paths,\n tsm_context.nidigital_project_timing_file_paths,\n )\n session.apply_levels_and_timing(\"nidigital\", \"nidigital\")\n for pattern_file_path in tsm_context.nidigital_project_pattern_file_paths:\n session.load_pattern(pattern_file_path)\n tsm_context.set_nidigital_session(instrument_name, session)\n\n\n@nitsm.codemoduleapi.code_module\ndef measure_ppmu(\n tsm_context: SemiconductorModuleContext,\n pins,\n expected_instrument_names,\n expected_pin_set_strings,\n):\n pin_query, sessions, pin_set_strings = tsm_context.pins_to_nidigital_sessions_for_ppmu(pins)\n expected_instrument_pin_sets = set(zip(expected_instrument_names, expected_pin_set_strings))\n valid_pin_sets = []\n\n for session, pin_set_string in zip(sessions, pin_set_strings):\n # call some methods on the session to ensure no errors\n session.pins[pin_set_string].ppmu_aperture_time = 4e-6\n session.pins[\n pin_set_string\n ].ppmu_aperture_time_units = nidigital.PPMUApertureTimeUnits.SECONDS\n session.pins[pin_set_string].ppmu_output_function = nidigital.PPMUOutputFunction.CURRENT\n session.pins[pin_set_string].ppmu_current_level_range = 2e-6\n session.pins[pin_set_string].ppmu_current_level = 2e-6\n session.pins[pin_set_string].ppmu_voltage_limit_high = 3.3\n session.pins[pin_set_string].ppmu_voltage_limit_low = 0\n session.pins[pin_set_string].ppmu_source()\n session.pins[pin_set_string].ppmu_measure(nidigital.PPMUMeasurementType.CURRENT)\n session.abort()\n\n # check instrument pin set we received is in the set of instrument pin sets we expected\n actual_instrument_pin_set = (session.io_resource_descriptor, pin_set_string)\n num_pins_for_session = len(pin_set_string.split(\",\"))\n valid_pin_sets.extend(\n [actual_instrument_pin_set in 
expected_instrument_pin_sets] * num_pins_for_session\n )\n expected_instrument_pin_sets -= {actual_instrument_pin_set}\n\n pin_query.publish(valid_pin_sets, \"ValidPinSetStrings\")\n num_missing_pin_sets = [len(expected_instrument_pin_sets)] * len(valid_pin_sets)\n pin_query.publish(num_missing_pin_sets, \"NumMissingPinSetStrings\")\n\n\n@nitsm.codemoduleapi.code_module\ndef measure_pattern(\n tsm_context: SemiconductorModuleContext, pins, expected_instrument_names, expected_site_lists\n):\n pin_query, sessions, site_lists = tsm_context.pins_to_nidigital_sessions_for_pattern(pins)\n expected_instrument_site_lists = set(zip(expected_instrument_names, expected_site_lists))\n valid_site_lists = []\n re_pattern = re.compile(r\"\\s*site(\\d+)\")\n\n for session, site_list in zip(sessions, site_lists):\n # call some methods on the session to ensure no errors\n session.sites[site_list].burst_pattern(\"start_label\")\n\n # check instrument site we received is in the set of instrument sites we expected\n actual_instrument_site_list = (session.io_resource_descriptor, site_list)\n actual_in_expected = actual_instrument_site_list in expected_instrument_site_lists\n site_numbers = (int(re_pattern.match(site)[1]) for site in site_list.split(\",\"))\n valid_site_lists.append({site: actual_in_expected for site in site_numbers})\n expected_instrument_site_lists -= {actual_instrument_site_list}\n\n pin_query.publish_pattern_results(valid_site_lists, \"ValidSiteLists\")\n num_missing_site_lists = [len(expected_instrument_site_lists)] * len(tsm_context.site_numbers)\n tsm_context.publish_per_site(num_missing_site_lists, \"NumMissingSiteLists\")\n\n\n@nitsm.codemoduleapi.code_module\ndef check_project_paths(\n tsm_context: SemiconductorModuleContext,\n specifications_paths,\n levels_paths,\n timing_paths,\n pattern_paths,\n source_waveform_paths,\n capture_waveform_paths,\n):\n site_count = len(tsm_context.site_numbers)\n valid_project_paths = [\n tsm_context.nidigital_project_specifications_file_paths == tuple(specifications_paths)\n ] * site_count\n valid_levels_paths = [\n tsm_context.nidigital_project_levels_file_paths == tuple(levels_paths)\n ] * site_count\n valid_timing_paths = [\n tsm_context.nidigital_project_timing_file_paths == tuple(timing_paths)\n ] * site_count\n valid_pattern_paths = [\n tsm_context.nidigital_project_pattern_file_paths == tuple(pattern_paths)\n ] * site_count\n valid_source_waveform_paths = [\n tsm_context.nidigital_project_source_waveform_file_paths == tuple(source_waveform_paths)\n ] * site_count\n valid_capture_waveform_paths = [\n tsm_context.nidigital_project_capture_waveform_file_paths == tuple(capture_waveform_paths)\n ] * site_count\n\n tsm_context.publish_per_site(valid_project_paths, \"ValidSpecificationsPaths\")\n tsm_context.publish_per_site(valid_levels_paths, \"ValidLevelsPaths\")\n tsm_context.publish_per_site(valid_timing_paths, \"ValidTimingPaths\")\n tsm_context.publish_per_site(valid_pattern_paths, \"ValidPatternPaths\")\n tsm_context.publish_per_site(valid_source_waveform_paths, \"ValidSourceWaveformPaths\")\n tsm_context.publish_per_site(valid_capture_waveform_paths, \"ValidCaptureWaveformPaths\")\n\n\n@nitsm.codemoduleapi.code_module\ndef close_sessions(tsm_context: SemiconductorModuleContext):\n sessions = tsm_context.get_all_nidigital_sessions()\n for session in sessions:\n session.close()\n"}}},{"rowIdx":542686,"cells":{"filename":{"kind":"string","value":"the-stack_106_31026"},"text":{"kind":"string","value":"from __future__ import absolute_import\nfrom 
__future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\n\nimport copy\nimport io\nimport logging\nimport os\nimport pickle\nfrom six import string_types\nimport shutil\nimport tempfile\nimport time\nimport uuid\n\nimport ray\nfrom ray.tune.logger import UnifiedLogger\nfrom ray.tune.result import (DEFAULT_RESULTS_DIR, TIME_THIS_ITER_S,\n TIMESTEPS_THIS_ITER, DONE, TIMESTEPS_TOTAL,\n EPISODES_THIS_ITER, EPISODES_TOTAL,\n TRAINING_ITERATION, RESULT_DUPLICATE)\n\nfrom ray.tune.util import UtilMonitor\n\nlogger = logging.getLogger(__name__)\n\nSETUP_TIME_THRESHOLD = 10\n\n\nclass Trainable(object):\n \"\"\"Abstract class for trainable models, functions, etc.\n\n A call to ``train()`` on a trainable will execute one logical iteration of\n training. As a rule of thumb, the execution time of one train call should\n be large enough to avoid overheads (i.e. more than a few seconds), but\n short enough to report progress periodically (i.e. at most a few minutes).\n\n Calling ``save()`` should save the training state of a trainable to disk,\n and ``restore(path)`` should restore a trainable to the given state.\n\n Generally you only need to implement ``_train``, ``_save``, and\n ``_restore`` here when subclassing Trainable.\n\n Note that, if you don't require checkpoint/restore functionality, then\n instead of implementing this class you can also get away with supplying\n just a ``my_train(config, reporter)`` function to the config.\n The function will be automatically converted to this interface\n (sans checkpoint functionality).\n\n When using Tune, Tune will convert this class into a Ray actor, which\n runs on a separate process. Tune will also change the current working\n directory of this process to `self.logdir`.\n\n \"\"\"\n\n def __init__(self, config=None, logger_creator=None):\n \"\"\"Initialize an Trainable.\n\n Sets up logging and points ``self.logdir`` to a directory in which\n training outputs should be placed.\n\n Subclasses should prefer defining ``_setup()`` instead of overriding\n ``__init__()`` directly.\n\n Args:\n config (dict): Trainable-specific configuration data. By default\n will be saved as ``self.config``.\n logger_creator (func): Function that creates a ray.tune.Logger\n object. If unspecified, a default logger is created.\n \"\"\"\n\n self._experiment_id = uuid.uuid4().hex\n self.config = config or {}\n log_sys_usage = self.config.get(\"log_sys_usage\", False)\n\n if logger_creator:\n self._result_logger = logger_creator(self.config)\n self._logdir = self._result_logger.logdir\n else:\n logdir_prefix = datetime.today().strftime(\"%Y-%m-%d_%H-%M-%S\")\n if not os.path.exists(DEFAULT_RESULTS_DIR):\n os.makedirs(DEFAULT_RESULTS_DIR)\n self._logdir = tempfile.mkdtemp(\n prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)\n self._result_logger = UnifiedLogger(self.config, self._logdir,\n None)\n\n self._iteration = 0\n self._time_total = 0.0\n self._timesteps_total = None\n self._episodes_total = None\n self._time_since_restore = 0.0\n self._timesteps_since_restore = 0\n self._iterations_since_restore = 0\n self._restored = False\n start_time = time.time()\n self._setup(copy.deepcopy(self.config))\n setup_time = time.time() - start_time\n if setup_time > SETUP_TIME_THRESHOLD:\n logger.info(\"_setup took {:.3f} seconds. 
If your trainable is \"\n \"slow to initialize, consider setting \"\n \"reuse_actors=True to reduce actor creation \"\n \"overheads.\".format(setup_time))\n self._local_ip = ray.services.get_node_ip_address()\n self._monitor = UtilMonitor(start=log_sys_usage)\n\n @classmethod\n def default_resource_request(cls, config):\n \"\"\"Returns the resource requirement for the given configuration.\n\n This can be overriden by sub-classes to set the correct trial resource\n allocation, so the user does not need to.\n \"\"\"\n\n return None\n\n @classmethod\n def resource_help(cls, config):\n \"\"\"Returns a help string for configuring this trainable's resources.\"\"\"\n\n return \"\"\n\n def current_ip(self):\n logger.warning(\"Getting current IP.\")\n self._local_ip = ray.services.get_node_ip_address()\n return self._local_ip\n\n def train(self):\n \"\"\"Runs one logical iteration of training.\n\n Subclasses should override ``_train()`` instead to return results.\n This class automatically fills the following fields in the result:\n\n `done` (bool): training is terminated. Filled only if not provided.\n\n `time_this_iter_s` (float): Time in seconds this iteration\n took to run. This may be overriden in order to override the\n system-computed time difference.\n\n `time_total_s` (float): Accumulated time in seconds for this\n entire experiment.\n\n `experiment_id` (str): Unique string identifier\n for this experiment. This id is preserved\n across checkpoint / restore calls.\n\n `training_iteration` (int): The index of this\n training iteration, e.g. call to train(). This is incremented\n after `_train()` is called.\n\n `pid` (str): The pid of the training process.\n\n `date` (str): A formatted date of when the result was processed.\n\n `timestamp` (str): A UNIX timestamp of when the result\n was processed.\n\n `hostname` (str): Hostname of the machine hosting the training\n process.\n\n `node_ip` (str): Node ip of the machine hosting the training\n process.\n\n Returns:\n A dict that describes training progress.\n \"\"\"\n\n start = time.time()\n result = self._train()\n assert isinstance(result, dict), \"_train() needs to return a dict.\"\n\n # We do not modify internal state nor update this result if duplicate.\n if RESULT_DUPLICATE in result:\n return result\n\n result = result.copy()\n\n self._iteration += 1\n self._iterations_since_restore += 1\n\n if result.get(TIME_THIS_ITER_S) is not None:\n time_this_iter = result[TIME_THIS_ITER_S]\n else:\n time_this_iter = time.time() - start\n self._time_total += time_this_iter\n self._time_since_restore += time_this_iter\n\n result.setdefault(DONE, False)\n\n # self._timesteps_total should only be tracked if increments provided\n if result.get(TIMESTEPS_THIS_ITER) is not None:\n if self._timesteps_total is None:\n self._timesteps_total = 0\n self._timesteps_total += result[TIMESTEPS_THIS_ITER]\n self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]\n\n # self._episodes_total should only be tracked if increments provided\n if result.get(EPISODES_THIS_ITER) is not None:\n if self._episodes_total is None:\n self._episodes_total = 0\n self._episodes_total += result[EPISODES_THIS_ITER]\n\n # self._timesteps_total should not override user-provided total\n result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)\n result.setdefault(EPISODES_TOTAL, self._episodes_total)\n result.setdefault(TRAINING_ITERATION, self._iteration)\n\n # Provides auto-filled neg_mean_loss for avoiding regressions\n if result.get(\"mean_loss\"):\n 
result.setdefault(\"neg_mean_loss\", -result[\"mean_loss\"])\n\n now = datetime.today()\n result.update(\n experiment_id=self._experiment_id,\n date=now.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n timestamp=int(time.mktime(now.timetuple())),\n time_this_iter_s=time_this_iter,\n time_total_s=self._time_total,\n pid=os.getpid(),\n hostname=os.uname()[1],\n node_ip=self._local_ip,\n config=self.config,\n time_since_restore=self._time_since_restore,\n timesteps_since_restore=self._timesteps_since_restore,\n iterations_since_restore=self._iterations_since_restore)\n\n monitor_data = self._monitor.get_data()\n if monitor_data:\n result.update(monitor_data)\n\n self._log_result(result)\n\n return result\n\n def delete_checkpoint(self, checkpoint_dir):\n \"\"\"Removes subdirectory within checkpoint_folder\n\n Args:\n checkpoint_dir : path to checkpoint\n \"\"\"\n if os.path.isfile(checkpoint_dir):\n shutil.rmtree(os.path.dirname(checkpoint_dir))\n else:\n shutil.rmtree(checkpoint_dir)\n\n def save(self, checkpoint_dir=None):\n \"\"\"Saves the current model state to a checkpoint.\n\n Subclasses should override ``_save()`` instead to save state.\n This method dumps additional metadata alongside the saved path.\n\n Args:\n checkpoint_dir (str): Optional dir to place the checkpoint.\n\n Returns:\n Checkpoint path or prefix that may be passed to restore().\n \"\"\"\n\n checkpoint_dir = os.path.join(checkpoint_dir or self.logdir,\n \"checkpoint_{}\".format(self._iteration))\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n checkpoint = self._save(checkpoint_dir)\n saved_as_dict = False\n if isinstance(checkpoint, string_types):\n if not checkpoint.startswith(checkpoint_dir):\n raise ValueError(\n \"The returned checkpoint path must be within the \"\n \"given checkpoint dir {}: {}\".format(\n checkpoint_dir, checkpoint))\n checkpoint_path = checkpoint\n elif isinstance(checkpoint, dict):\n saved_as_dict = True\n checkpoint_path = os.path.join(checkpoint_dir, \"checkpoint\")\n with open(checkpoint_path, \"wb\") as f:\n pickle.dump(checkpoint, f)\n else:\n raise ValueError(\"Returned unexpected type {}. 
\"\n \"Expected str or dict.\".format(type(checkpoint)))\n\n with open(checkpoint_path + \".tune_metadata\", \"wb\") as f:\n pickle.dump({\n \"experiment_id\": self._experiment_id,\n \"iteration\": self._iteration,\n \"timesteps_total\": self._timesteps_total,\n \"time_total\": self._time_total,\n \"episodes_total\": self._episodes_total,\n \"saved_as_dict\": saved_as_dict\n }, f)\n return checkpoint_path\n\n def save_to_object(self):\n \"\"\"Saves the current model state to a Python object.\n\n It also saves to disk but does not return the checkpoint path.\n\n Returns:\n Object holding checkpoint data.\n \"\"\"\n\n tmpdir = tempfile.mkdtemp(\"save_to_object\", dir=self.logdir)\n checkpoint_path = self.save(tmpdir)\n\n # Save all files in subtree.\n data = {}\n for basedir, _, file_names in os.walk(tmpdir):\n for file_name in file_names:\n path = os.path.join(basedir, file_name)\n\n with open(path, \"rb\") as f:\n data[os.path.relpath(path, tmpdir)] = f.read()\n\n out = io.BytesIO()\n data_dict = pickle.dumps({\n \"checkpoint_name\": os.path.relpath(checkpoint_path, tmpdir),\n \"data\": data,\n })\n if len(data_dict) > 10e6: # getting pretty large\n logger.info(\"Checkpoint size is {} bytes\".format(len(data_dict)))\n out.write(data_dict)\n shutil.rmtree(tmpdir)\n return out.getvalue()\n\n def restore(self, checkpoint_path):\n \"\"\"Restores training state from a given model checkpoint.\n\n These checkpoints are returned from calls to save().\n\n Subclasses should override ``_restore()`` instead to restore state.\n This method restores additional metadata saved with the checkpoint.\n \"\"\"\n with open(checkpoint_path + \".tune_metadata\", \"rb\") as f:\n metadata = pickle.load(f)\n self._experiment_id = metadata[\"experiment_id\"]\n self._iteration = metadata[\"iteration\"]\n self._timesteps_total = metadata[\"timesteps_total\"]\n self._time_total = metadata[\"time_total\"]\n self._episodes_total = metadata[\"episodes_total\"]\n saved_as_dict = metadata[\"saved_as_dict\"]\n if saved_as_dict:\n with open(checkpoint_path, \"rb\") as loaded_state:\n checkpoint_dict = pickle.load(loaded_state)\n checkpoint_dict.update(tune_checkpoint_path=checkpoint_path)\n self._restore(checkpoint_dict)\n else:\n self._restore(checkpoint_path)\n self._time_since_restore = 0.0\n self._timesteps_since_restore = 0\n self._iterations_since_restore = 0\n self._restored = True\n\n def restore_from_object(self, obj):\n \"\"\"Restores training state from a checkpoint object.\n\n These checkpoints are returned from calls to save_to_object().\n \"\"\"\n info = pickle.loads(obj)\n data = info[\"data\"]\n tmpdir = tempfile.mkdtemp(\"restore_from_object\", dir=self.logdir)\n checkpoint_path = os.path.join(tmpdir, info[\"checkpoint_name\"])\n\n for relpath_name, file_contents in data.items():\n path = os.path.join(tmpdir, relpath_name)\n\n # This may be a subdirectory, hence not just using tmpdir\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n with open(path, \"wb\") as f:\n f.write(file_contents)\n\n self.restore(checkpoint_path)\n shutil.rmtree(tmpdir)\n\n def export_model(self, export_formats, export_dir=None):\n \"\"\"Exports model based on export_formats.\n\n Subclasses should override _export_model() to actually\n export model to local directory.\n\n Args:\n export_formats (list): List of formats that should be exported.\n export_dir (str): Optional dir to place the exported model.\n Defaults to self.logdir.\n\n Return:\n A dict that maps ExportFormats to successfully exported 
models.\n \"\"\"\n export_dir = export_dir or self.logdir\n return self._export_model(export_formats, export_dir)\n\n def reset_config(self, new_config):\n \"\"\"Resets configuration without restarting the trial.\n\n This method is optional, but can be implemented to speed up algorithms\n such as PBT, and to allow performance optimizations such as running\n experiments with reuse_actors=True.\n\n Args:\n new_config (dir): Updated hyperparameter configuration\n for the trainable.\n\n Returns:\n True if reset was successful else False.\n \"\"\"\n return False\n\n def stop(self):\n \"\"\"Releases all resources used by this trainable.\"\"\"\n\n self._result_logger.close()\n self._stop()\n\n @property\n def logdir(self):\n \"\"\"Directory of the results and checkpoints for this Trainable.\n\n Tune will automatically sync this folder with the driver if execution\n is distributed.\n\n Note that the current working directory will also be changed to this.\n\n \"\"\"\n return self._logdir\n\n @property\n def iteration(self):\n \"\"\"Current training iteration.\n\n This value is automatically incremented every time `train()` is called\n and is automatically inserted into the training result dict.\n\n \"\"\"\n return self._iteration\n\n def get_config(self):\n \"\"\"Returns configuration passed in by Tune.\"\"\"\n return self.config\n\n def _train(self):\n \"\"\"Subclasses should override this to implement train().\n\n The return value will be automatically passed to the loggers. Users\n can also return `tune.result.DONE` or `tune.result.SHOULD_CHECKPOINT`\n to manually trigger termination of this trial or checkpointing of this\n trial. Note that manual checkpointing only works when subclassing\n Trainables.\n\n Returns:\n A dict that describes training progress.\n\n \"\"\"\n\n raise NotImplementedError\n\n def _save(self, checkpoint_dir):\n \"\"\"Subclasses should override this to implement save().\n\n Args:\n checkpoint_dir (str): The directory where the checkpoint\n file must be stored.\n\n Returns:\n checkpoint (str | dict): If string, the return value is\n expected to be the checkpoint path or prefix to be passed to\n `_restore()`. 
If dict, the return value will be automatically\n serialized by Tune and passed to `_restore()`.\n\n Examples:\n >>> print(trainable1._save(\"/tmp/checkpoint_1\"))\n \"/tmp/checkpoint_1/my_checkpoint_file\"\n >>> print(trainable2._save(\"/tmp/checkpoint_2\"))\n {\"some\": \"data\"}\n \"\"\"\n\n raise NotImplementedError\n\n def _restore(self, checkpoint):\n \"\"\"Subclasses should override this to implement restore().\n\n Args:\n checkpoint (str | dict): Value as returned by `_save`.\n If a string, then it is the checkpoint path.\n \"\"\"\n\n raise NotImplementedError\n\n def _setup(self, config):\n \"\"\"Subclasses should override this for custom initialization.\n\n Args:\n config (dict): Hyperparameters and other configs given.\n Copy of `self.config`.\n \"\"\"\n pass\n\n def _log_result(self, result):\n \"\"\"Subclasses can optionally override this to customize logging.\n\n Args:\n result (dict): Training result returned by _train().\n \"\"\"\n self._result_logger.on_result(result)\n\n def _stop(self):\n \"\"\"Subclasses should override this for any cleanup on stop.\"\"\"\n pass\n\n def _export_model(self, export_formats, export_dir):\n \"\"\"Subclasses should override this to export model.\n\n Args:\n export_formats (list): List of formats that should be exported.\n export_dir (str): Directory to place exported models.\n\n Return:\n A dict that maps ExportFormats to successfully exported models.\n \"\"\"\n return {}\n"}}},{"rowIdx":542687,"cells":{"filename":{"kind":"string","value":"the-stack_106_31027"},"text":{"kind":"string","value":"\nfrom thingset.cansocket import CANsocket\nsock = CANsocket('can0') # or other interface\n\nwhile(True):\n\tframe = sock.receive()\n\tif isinstance(frame.cbor, float):\n\t\tprint(\"device: 0x%x data id: 0x%x value: %.2f\" % (frame.source, frame.dataobjectID, frame.cbor))\n\telse:\n\t\tprint(\"device:\", hex(frame.source), \" data id:\", hex(frame.dataobjectID), \" value:\", frame.cbor)\n"}}},{"rowIdx":542688,"cells":{"filename":{"kind":"string","value":"the-stack_106_31030"},"text":{"kind":"string","value":"from __future__ import division # Use floating point for math calculations\n\nimport math\n\nfrom flask import Blueprint\n\nfrom CTFd.models import (\n ChallengeFiles,\n Challenges,\n Fails,\n Flags,\n Hints,\n Solves,\n Tags,\n db,\n)\nfrom CTFd.plugins import register_plugin_assets_directory\nfrom CTFd.plugins.migrations import upgrade\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge\nfrom CTFd.plugins.flags import get_flag_class\nfrom CTFd.utils.modes import get_model\nfrom CTFd.utils.uploads import delete_file\nfrom CTFd.utils.user import get_ip\n\n\nclass DynamicValueChallenge(BaseChallenge):\n id = \"dynamic\" # Unique identifier used to register challenges\n name = \"dynamic\" # Name of a challenge type\n templates = { # Handlebars templates used for each aspect of challenge editing & viewing\n \"create\": \"/plugins/dynamic_challenges/assets/create.html\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.html\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.html\",\n }\n scripts = { # Scripts that are loaded when a template is loaded\n \"create\": \"/plugins/dynamic_challenges/assets/create.js\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.js\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.js\",\n }\n # Route at which files are accessible. 
This must be registered using register_plugin_assets_directory()\n route = \"/plugins/dynamic_challenges/assets/\"\n # Blueprint used to access the static_folder directory.\n blueprint = Blueprint(\n \"dynamic_challenges\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"assets\",\n )\n\n @classmethod\n def calculate_value(cls, challenge):\n Model = get_model()\n\n solve_count = (\n Solves.query.join(Model, Solves.account_id == Model.id)\n .filter(\n Solves.challenge_id == challenge.id,\n Model.hidden == False,\n Model.banned == False,\n )\n .count()\n )\n\n # If the solve count is 0 we shouldn't manipulate the solve count to\n # let the math update back to normal\n if solve_count != 0:\n # We subtract -1 to allow the first solver to get max point value\n solve_count -= 1\n\n # It is important that this calculation takes into account floats.\n # Hence this file uses from __future__ import division\n value = (\n ((challenge.minimum - challenge.initial) / (challenge.decay ** 2))\n * (solve_count ** 2)\n ) + challenge.initial\n\n value = math.ceil(value)\n\n if value < challenge.minimum:\n value = challenge.minimum\n\n challenge.value = value\n db.session.commit()\n return challenge\n\n @staticmethod\n def create(request):\n \"\"\"\n This method is used to process the challenge creation request.\n\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n challenge = DynamicChallenge(**data)\n\n db.session.add(challenge)\n db.session.commit()\n\n return challenge\n\n @staticmethod\n def read(challenge):\n \"\"\"\n This method is in used to access the data of a challenge in a format processable by the front end.\n\n :param challenge:\n :return: Challenge object, data dictionary to be returned to the user\n \"\"\"\n challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = {\n \"id\": challenge.id,\n \"name\": challenge.name,\n \"value\": challenge.value,\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n \"description\": challenge.description,\n \"category\": challenge.category,\n \"state\": challenge.state,\n \"max_attempts\": challenge.max_attempts,\n \"type\": challenge.type,\n \"type_data\": {\n \"id\": DynamicValueChallenge.id,\n \"name\": DynamicValueChallenge.name,\n \"templates\": DynamicValueChallenge.templates,\n \"scripts\": DynamicValueChallenge.scripts,\n },\n }\n return data\n\n @staticmethod\n def update(challenge, request):\n \"\"\"\n This method is used to update the information associated with a challenge. 
This should be kept strictly to the\n Challenges table and any child tables.\n\n :param challenge:\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n\n for attr, value in data.items():\n # We need to set these to floats so that the next operations don't operate on strings\n if attr in (\"initial\", \"minimum\", \"decay\"):\n value = float(value)\n setattr(challenge, attr, value)\n\n return DynamicValueChallenge.calculate_value(challenge)\n\n @staticmethod\n def delete(challenge):\n \"\"\"\n This method is used to delete the resources used by a challenge.\n\n :param challenge:\n :return:\n \"\"\"\n Fails.query.filter_by(challenge_id=challenge.id).delete()\n Solves.query.filter_by(challenge_id=challenge.id).delete()\n Flags.query.filter_by(challenge_id=challenge.id).delete()\n files = ChallengeFiles.query.filter_by(challenge_id=challenge.id).all()\n for f in files:\n delete_file(f.id)\n ChallengeFiles.query.filter_by(challenge_id=challenge.id).delete()\n Tags.query.filter_by(challenge_id=challenge.id).delete()\n Hints.query.filter_by(challenge_id=challenge.id).delete()\n DynamicChallenge.query.filter_by(id=challenge.id).delete()\n Challenges.query.filter_by(id=challenge.id).delete()\n db.session.commit()\n\n @staticmethod\n def attempt(challenge, request):\n \"\"\"\n This method is used to check whether a given input is right or wrong. It does not make any changes and should\n return a boolean for correctness and a string to be shown to the user. It is also in charge of parsing the\n user's input from the request itself.\n\n :param challenge: The Challenge object from the database\n :param request: The request the user submitted\n :return: (boolean, string)\n \"\"\"\n data = request.form or request.get_json()\n submission = data[\"submission\"].strip()\n flags = Flags.query.filter_by(challenge_id=challenge.id).all()\n for flag in flags:\n if get_flag_class(flag.type).compare(flag, submission):\n return True, \"Correct\"\n return False, \"Incorrect\"\n\n @staticmethod\n def solve(user, team, challenge, request):\n \"\"\"\n This method is used to insert Solves into the database in order to mark a challenge as solved.\n\n :param team: The Team object from the database\n :param chal: The Challenge object from the database\n :param request: The request the user submitted\n :return:\n \"\"\"\n challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = request.form or request.get_json()\n submission = data[\"submission\"].strip()\n\n solve = Solves(\n user_id=user.id,\n team_id=team.id if team else None,\n challenge_id=challenge.id,\n ip=get_ip(req=request),\n provided=submission,\n )\n db.session.add(solve)\n db.session.commit()\n\n DynamicValueChallenge.calculate_value(challenge)\n\n @staticmethod\n def fail(user, team, challenge, request):\n \"\"\"\n This method is used to insert Fails into the database in order to mark an answer incorrect.\n\n :param team: The Team object from the database\n :param challenge: The Challenge object from the database\n :param request: The request the user submitted\n :return:\n \"\"\"\n data = request.form or request.get_json()\n submission = data[\"submission\"].strip()\n wrong = Fails(\n user_id=user.id,\n team_id=team.id if team else None,\n challenge_id=challenge.id,\n ip=get_ip(request),\n provided=submission,\n )\n db.session.add(wrong)\n db.session.commit()\n db.session.close()\n\n\nclass DynamicChallenge(Challenges):\n __mapper_args__ = {\"polymorphic_identity\": \"dynamic\"}\n id = db.Column(\n 
db.Integer, db.ForeignKey(\"challenges.id\", ondelete=\"CASCADE\"), primary_key=True\n )\n initial = db.Column(db.Integer, default=0)\n minimum = db.Column(db.Integer, default=0)\n decay = db.Column(db.Integer, default=0)\n\n def __init__(self, *args, **kwargs):\n super(DynamicChallenge, self).__init__(**kwargs)\n self.initial = kwargs[\"value\"]\n\n\ndef load(app):\n upgrade()\n CHALLENGE_CLASSES[\"dynamic\"] = DynamicValueChallenge\n register_plugin_assets_directory(\n app, base_path=\"/plugins/dynamic_challenges/assets/\"\n )\n"}}},{"rowIdx":542689,"cells":{"filename":{"kind":"string","value":"the-stack_106_31034"},"text":{"kind":"string","value":"import RPi.GPIO as GPIO\nfrom lib_nrf24 import NRF24\nimport time\nimport spidev\nfrom threading import Timer\n\n# Define Board GPIOs\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n# needed GPIO PINS\nPINS = [2,3,4,5,6,7]\n\n# set all pins off\ndef allPinsOff():\n for i in PINS:\n GPIO.setup(i, GPIO.OUT, initial=GPIO.HIGH)\n# initial off\nallPinsOff()\n\npipes = [[0xe7, 0xe7, 0xe7, 0xe7, 0xe7], [0xc2, 0xc2, 0xc2, 0xc2, 0xc2]]\n\nradio = NRF24(GPIO, spidev.SpiDev())\nradio.begin(0, 17)\nradio.setRetries(15,15)\n\nradio.setChannel(100)\n\nradio.setDataRate(NRF24.BR_250KBPS)\nradio.setPALevel(NRF24.PA_MAX)\n\nradio.setPayloadSize(7)\nradio.setAutoAck(True)\nradio.enableAckPayload()\nradio.enableDynamicPayloads()\n\nradio.openWritingPipe(pipes[1])\nradio.openReadingPipe(1, pipes[0])\n\nradio.stopListening()\nradio.startListening()\n\nmillis = lambda: int(round(time.time() * 1000))\n\nlast_call = millis()\n\n# timer status\nisTimerActive = False\n\ndef pinOff(pin):\n global isTimerActive\n GPIO.output(pin, GPIO.HIGH)\n isTimerActive = False\n \nwhile True:\n while not radio.available():\n time.sleep(1/100)\n recv = []\n radio.read(recv, radio.getDynamicPayloadSize())\n radio.stopListening()\n radio.write(recv)\n radio.startListening()\n\n try:\n # translate message\n arr = []\n for n in recv:\n # Decode into standard unicode set\n if (n >= 32 and n <= 126):\n arr.append(chr(n))\n\n # validation\n if len(arr) > 0 and int(arr[0]) in PINS:\n # validation complete --> check last call to prevent bubbles\n delta = millis() - last_call\n if delta > 30:\n # on\n if arr[1] == '1':\n # reset all active pins\n for pin in PINS:\n if not GPIO.input(pin):\n GPIO.output(pin, GPIO.HIGH)\n\n # check if time is relevant\n secs = int(\"\".join(arr)[2:])\n if secs > 0:\n # activate\n GPIO.output(int(arr[0]), GPIO.LOW)\n\n # timer\n if isTimerActive:\n t.cancel()\n isTimerActive = False\n t = Timer(secs, pinOff, [int(arr[0])])\n t.start()\n isTimerActive = True\n else:\n # off\n GPIO.output(int(arr[0]), GPIO.HIGH)\n # update last reply\n last_call = millis()\n except Exception as e:\n # secure off all\n for pin in PINS:\n if not GPIO.input(pin):\n GPIO.output(pin, GPIO.HIGH)\n pass\n"}}},{"rowIdx":542690,"cells":{"filename":{"kind":"string","value":"the-stack_106_31035"},"text":{"kind":"string","value":"#!/bin/env python\n# -*- coding: utf8 -*-\n\ndef shellSort(A):\n def getCols(n):\n cols = [1]\n val = 1\n while val < n:\n val = int(val * 2.2)\n cols.insert(0, val)\n return cols\n for h in getCols(len(A)):\n for i in range(h, len(A)):\n cur = A[i]\n j = i\n while j >= h and A[j - h] > cur:\n A[j] = A[j - h]\n j -= h\n A[j] = cur\n return A\n"}}},{"rowIdx":542691,"cells":{"filename":{"kind":"string","value":"the-stack_106_31036"},"text":{"kind":"string","value":"# vim: set fileencoding=utf-8 :\n# Copyright (C) 2010 Google Inc. 
All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# NOTE: The fileencoding comment on the first line of the file is\n# important; without it, Python will choke while trying to parse the file,\n# since it includes non-ASCII characters.\n\nimport os\nimport stat\nimport sys\nimport tempfile\nimport unittest\n\nfrom webkitpy.common.system.filesystem import FileSystem\n\n\nclass GenericFileSystemTests(object):\n \"\"\"Tests that should pass on either a real or mock filesystem.\"\"\"\n # pylint gets confused about this being a mixin: pylint: disable=E1101\n\n def setup_generic_test_dir(self):\n fs = self.fs\n self.generic_test_dir = str(self.fs.mkdtemp())\n self.orig_cwd = fs.getcwd()\n fs.chdir(self.generic_test_dir)\n fs.write_text_file('foo.txt', 'foo')\n fs.write_text_file('foobar', 'foobar')\n fs.maybe_make_directory('foodir')\n fs.write_text_file(fs.join('foodir', 'baz'), 'baz')\n fs.chdir(self.orig_cwd)\n\n def teardown_generic_test_dir(self):\n self.fs.rmtree(self.generic_test_dir)\n self.fs.chdir(self.orig_cwd)\n self.generic_test_dir = None\n\n def test_glob__trailing_asterisk(self):\n self.fs.chdir(self.generic_test_dir)\n self.assertEqual(set(self.fs.glob('fo*')), set(['foo.txt', 'foobar', 'foodir']))\n\n def test_glob__leading_asterisk(self):\n self.fs.chdir(self.generic_test_dir)\n self.assertEqual(set(self.fs.glob('*xt')), set(['foo.txt']))\n\n def test_glob__middle_asterisk(self):\n self.fs.chdir(self.generic_test_dir)\n self.assertEqual(set(self.fs.glob('f*r')), set(['foobar', 'foodir']))\n\n def test_glob__period_is_escaped(self):\n self.fs.chdir(self.generic_test_dir)\n self.assertEqual(set(self.fs.glob('foo.*')), set(['foo.txt']))\n\n def test_relpath_unix(self):\n if sys.platform == 'win32':\n return\n self.assertEqual(self.fs.relpath('aaa/bbb'), 'aaa/bbb')\n self.assertEqual(self.fs.relpath('aaa/bbb/'), 'aaa/bbb')\n self.assertEqual(self.fs.relpath('aaa/bbb/.'), 'aaa/bbb')\n self.assertEqual(self.fs.relpath('aaa/./bbb'), 'aaa/bbb')\n self.assertEqual(self.fs.relpath('aaa/../bbb/'), 'bbb')\n self.assertEqual(self.fs.relpath('aaa/bbb', 
'aaa/bbb'), '.')\n self.assertEqual(self.fs.relpath('aaa/bbb/ccc', 'aaa/bbb'), 'ccc')\n self.assertEqual(self.fs.relpath('aaa/./ccc', 'aaa/bbb'), '../ccc')\n self.assertEqual(self.fs.relpath('aaa/../ccc', 'aaa/bbb'), '../../ccc')\n self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/ccc'), '../bbb')\n self.assertEqual(self.fs.relpath('aaa/bbb', 'ccc/ddd'), '../../aaa/bbb')\n self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/b'), '../bbb')\n self.assertEqual(self.fs.relpath('aaa/bbb', 'a/bbb'), '../../aaa/bbb')\n\n def test_relpath_win32(self):\n if sys.platform != 'win32':\n return\n self.assertEqual(self.fs.relpath('aaa\\\\bbb'), 'aaa\\\\bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb\\\\'), 'aaa\\\\bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb\\\\.'), 'aaa\\\\bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\.\\\\bbb'), 'aaa\\\\bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\..\\\\bbb\\\\'), 'bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb', 'aaa\\\\bbb'), '.')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb\\\\ccc', 'aaa\\\\bbb'), 'ccc')\n self.assertEqual(self.fs.relpath('aaa\\\\.\\\\ccc', 'aaa\\\\bbb'), '..\\\\ccc')\n self.assertEqual(self.fs.relpath('aaa\\\\..\\\\ccc', 'aaa\\\\bbb'), '..\\\\..\\\\ccc')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb', 'aaa\\\\ccc'), '..\\\\bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb', 'ccc\\\\ddd'), '..\\\\..\\\\aaa\\\\bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb', 'aaa\\\\b'), '..\\\\bbb')\n self.assertEqual(self.fs.relpath('aaa\\\\bbb', 'a\\\\bbb'), '..\\\\..\\\\aaa\\\\bbb')\n\n def test_rmtree(self):\n self.fs.chdir(self.generic_test_dir)\n self.fs.rmtree('foo')\n self.assertTrue(self.fs.exists('foodir'))\n self.assertTrue(self.fs.exists(self.fs.join('foodir', 'baz')))\n self.fs.rmtree('foodir')\n self.assertFalse(self.fs.exists('foodir'))\n self.assertFalse(self.fs.exists(self.fs.join('foodir', 'baz')))\n\n def test_copytree(self):\n self.fs.chdir(self.generic_test_dir)\n self.fs.copytree('foodir/', 'bardir/')\n self.assertTrue(self.fs.exists('bardir'))\n self.assertTrue(self.fs.exists(self.fs.join('bardir', 'baz')))\n\n def test_move(self):\n self.fs.chdir(self.generic_test_dir)\n self.fs.move('foo.txt', 'bar.txt')\n self.assertFalse(self.fs.exists('foo.txt'))\n self.assertTrue(self.fs.exists('bar.txt'))\n self.fs.move('foodir', 'bardir')\n self.assertFalse(self.fs.exists('foodir'))\n self.assertFalse(self.fs.exists(self.fs.join('foodir', 'baz')))\n self.assertTrue(self.fs.exists('bardir'))\n self.assertTrue(self.fs.exists(self.fs.join('bardir', 'baz')))\n\n\nclass RealFileSystemTest(unittest.TestCase, GenericFileSystemTests):\n\n def setUp(self):\n self.fs = FileSystem()\n self.setup_generic_test_dir()\n\n self._this_dir = os.path.dirname(os.path.abspath(__file__))\n self._missing_file = os.path.join(self._this_dir, 'missing_file.py')\n self._this_file = os.path.join(self._this_dir, 'filesystem_unittest.py')\n\n def tearDown(self):\n self.teardown_generic_test_dir()\n self.fs = None\n\n def test_chdir(self):\n fs = FileSystem()\n cwd = fs.getcwd()\n newdir = '/'\n if sys.platform == 'win32':\n newdir = 'c:\\\\'\n fs.chdir(newdir)\n self.assertEqual(fs.getcwd(), newdir)\n fs.chdir(cwd)\n\n def test_chdir__notexists(self):\n fs = FileSystem()\n newdir = '/dirdoesnotexist'\n if sys.platform == 'win32':\n newdir = 'c:\\\\dirdoesnotexist'\n self.assertRaises(OSError, fs.chdir, newdir)\n\n def test_exists__true(self):\n fs = FileSystem()\n self.assertTrue(fs.exists(self._this_file))\n\n def test_exists__false(self):\n fs = 
FileSystem()\n self.assertFalse(fs.exists(self._missing_file))\n\n def test_getcwd(self):\n fs = FileSystem()\n self.assertTrue(fs.exists(fs.getcwd()))\n\n def test_isdir__true(self):\n fs = FileSystem()\n self.assertTrue(fs.isdir(self._this_dir))\n\n def test_isdir__false(self):\n fs = FileSystem()\n self.assertFalse(fs.isdir(self._this_file))\n\n def test_join(self):\n fs = FileSystem()\n self.assertEqual(fs.join('foo', 'bar'),\n os.path.join('foo', 'bar'))\n\n def test_listdir(self):\n fs = FileSystem()\n with fs.mkdtemp(prefix='filesystem_unittest_') as d:\n self.assertEqual(fs.listdir(d), [])\n new_file = os.path.join(d, 'foo')\n fs.write_text_file(new_file, u'foo')\n self.assertEqual(fs.listdir(d), ['foo'])\n os.remove(new_file)\n\n def test_walk(self):\n fs = FileSystem()\n with fs.mkdtemp(prefix='filesystem_unittest_') as d:\n self.assertEqual(list(fs.walk(d)), [(d, [], [])])\n new_file = os.path.join(d, 'foo')\n fs.write_text_file(new_file, u'foo')\n self.assertEqual(list(fs.walk(d)), [(d, [], ['foo'])])\n os.remove(new_file)\n\n def test_maybe_make_directory__success(self):\n fs = FileSystem()\n\n with fs.mkdtemp(prefix='filesystem_unittest_') as base_path:\n sub_path = os.path.join(base_path, \"newdir\")\n self.assertFalse(os.path.exists(sub_path))\n self.assertFalse(fs.isdir(sub_path))\n\n fs.maybe_make_directory(sub_path)\n self.assertTrue(os.path.exists(sub_path))\n self.assertTrue(fs.isdir(sub_path))\n\n # Make sure we can re-create it.\n fs.maybe_make_directory(sub_path)\n self.assertTrue(os.path.exists(sub_path))\n self.assertTrue(fs.isdir(sub_path))\n\n # Clean up.\n os.rmdir(sub_path)\n\n self.assertFalse(os.path.exists(base_path))\n self.assertFalse(fs.isdir(base_path))\n\n def test_maybe_make_directory__failure(self):\n # FIXME: os.chmod() doesn't work on Windows to set directories\n # as readonly, so we skip this test for now.\n if sys.platform in ('win32', 'cygwin'):\n return\n\n fs = FileSystem()\n with fs.mkdtemp(prefix='filesystem_unittest_') as d:\n # Remove write permissions on the parent directory.\n os.chmod(d, stat.S_IRUSR)\n\n # Now try to create a sub directory - should fail.\n sub_dir = fs.join(d, 'subdir')\n self.assertRaises(OSError, fs.maybe_make_directory, sub_dir)\n\n # Clean up in case the test failed and we did create the\n # directory.\n if os.path.exists(sub_dir):\n os.rmdir(sub_dir)\n\n def test_read_and_write_text_file(self):\n fs = FileSystem()\n text_path = None\n\n unicode_text_string = u'\\u016An\\u012Dc\\u014Dde\\u033D'\n hex_equivalent = '\\xC5\\xAA\\x6E\\xC4\\xAD\\x63\\xC5\\x8D\\x64\\x65\\xCC\\xBD'\n try:\n text_path = tempfile.mktemp(prefix='tree_unittest_')\n file = fs.open_text_file_for_writing(text_path)\n file.write(unicode_text_string)\n file.close()\n\n file = fs.open_text_file_for_reading(text_path)\n read_text = file.read()\n file.close()\n\n self.assertEqual(read_text, unicode_text_string)\n finally:\n if text_path and fs.isfile(text_path):\n os.remove(text_path)\n\n def test_read_and_write_file(self):\n fs = FileSystem()\n text_path = None\n binary_path = None\n\n unicode_text_string = u'\\u016An\\u012Dc\\u014Dde\\u033D'\n hex_equivalent = '\\xC5\\xAA\\x6E\\xC4\\xAD\\x63\\xC5\\x8D\\x64\\x65\\xCC\\xBD'\n try:\n text_path = tempfile.mktemp(prefix='tree_unittest_')\n binary_path = tempfile.mktemp(prefix='tree_unittest_')\n fs.write_text_file(text_path, unicode_text_string)\n contents = fs.read_binary_file(text_path)\n self.assertEqual(contents, hex_equivalent)\n\n fs.write_binary_file(binary_path, hex_equivalent)\n text_contents = 
fs.read_text_file(binary_path)\n self.assertEqual(text_contents, unicode_text_string)\n finally:\n if text_path and fs.isfile(text_path):\n os.remove(text_path)\n if binary_path and fs.isfile(binary_path):\n os.remove(binary_path)\n\n def test_read_binary_file__missing(self):\n fs = FileSystem()\n self.assertRaises(IOError, fs.read_binary_file, self._missing_file)\n\n def test_read_text_file__missing(self):\n fs = FileSystem()\n self.assertRaises(IOError, fs.read_text_file, self._missing_file)\n\n def test_remove_file_with_retry(self):\n RealFileSystemTest._remove_failures = 2\n\n def remove_with_exception(filename):\n RealFileSystemTest._remove_failures -= 1\n if RealFileSystemTest._remove_failures >= 0:\n try:\n raise WindowsError\n except NameError:\n raise FileSystem._WindowsError\n\n fs = FileSystem()\n self.assertTrue(fs.remove('filename', remove_with_exception))\n self.assertEqual(-1, RealFileSystemTest._remove_failures)\n\n def test_sep(self):\n fs = FileSystem()\n\n self.assertEqual(fs.sep, os.sep)\n self.assertEqual(fs.join(\"foo\", \"bar\"),\n os.path.join(\"foo\", \"bar\"))\n"}}},{"rowIdx":542692,"cells":{"filename":{"kind":"string","value":"the-stack_106_31037"},"text":{"kind":"string","value":"try:\n from collections.abc import Sized\nexcept ImportError:\n from collections import Sized\nfrom collections import defaultdict\nfrom functools import partial\n\nimport numpy as np\nfrom scipy.stats import rankdata\n\nimport sklearn\nfrom sklearn.base import is_classifier, clone\nfrom joblib import Parallel, delayed\nfrom sklearn.model_selection._search import BaseSearchCV\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.fixes import MaskedArray\nfrom sklearn.utils.validation import indexable, check_is_fitted\ntry:\n from sklearn.metrics import check_scoring\nexcept ImportError:\n from sklearn.metrics.scorer import check_scoring\n\nfrom . import Optimizer\nfrom .utils import point_asdict, dimensions_aslist, eval_callbacks\nfrom .space import check_dimension\nfrom .callbacks import check_callback\n\n\nclass BayesSearchCV(BaseSearchCV):\n \"\"\"Bayesian optimization over hyper parameters.\n\n BayesSearchCV implements a \"fit\" and a \"score\" method.\n It also implements \"predict\", \"predict_proba\", \"decision_function\",\n \"transform\" and \"inverse_transform\" if they are implemented in the\n estimator used.\n\n The parameters of the estimator used to apply these methods are optimized\n by cross-validated search over parameter settings.\n\n In contrast to GridSearchCV, not all parameter values are tried out, but\n rather a fixed number of parameter settings is sampled from the specified\n distributions. The number of parameter settings that are tried is\n given by n_iter.\n\n Parameters are presented as a list of skopt.space.Dimension objects.\n\n Parameters\n ----------\n estimator : estimator object.\n A object of that type is instantiated for each search point.\n This object is assumed to implement the scikit-learn estimator api.\n Either estimator needs to provide a ``score`` function,\n or ``scoring`` must be passed.\n\n search_spaces : dict, list of dict or list of tuple containing\n (dict, int).\n One of these cases:\n 1. dictionary, where keys are parameter names (strings)\n and values are skopt.space.Dimension instances (Real, Integer\n or Categorical) or any other valid value that defines skopt\n dimension (see skopt.Optimizer docs). Represents search space\n over parameters of the provided estimator.\n 2. 
list of dictionaries: a list of dictionaries, where every\n dictionary fits the description given in case 1 above.\n If a list of dictionary objects is given, then the search is\n performed sequentially for every parameter space with maximum\n number of evaluations set to self.n_iter.\n 3. list of (dict, int > 0): an extension of case 2 above,\n where first element of every tuple is a dictionary representing\n some search subspace, similarly as in case 2, and second element\n is a number of iterations that will be spent optimizing over\n this subspace.\n\n n_iter : int, default=50\n Number of parameter settings that are sampled. n_iter trades\n off runtime vs quality of the solution. Consider increasing\n ``n_points`` if you want to try more parameter settings in\n parallel.\n\n optimizer_kwargs : dict, optional\n Dict of arguments passed to :class:`Optimizer`. For example,\n ``{'base_estimator': 'RF'}`` would use a Random Forest surrogate\n instead of the default Gaussian Process.\n\n scoring : string, callable or None, default=None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n If ``None``, the ``score`` method of the estimator is used.\n\n fit_params : dict, optional\n Parameters to pass to the fit method.\n\n n_jobs : int, default=1\n Number of jobs to run in parallel. At maximum there are\n ``n_points`` times ``cv`` jobs available during each iteration.\n\n n_points : int, default=1\n Number of parameter settings to sample in parallel. If this does\n not align with ``n_iter``, the last iteration will sample less\n points. See also :func:`~Optimizer.ask`\n\n pre_dispatch : int, or string, optional\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n\n - None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n - An int, giving the exact number of total jobs that are\n spawned\n - A string, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n\n iid : boolean, default=True\n If True, the data is assumed to be identically distributed across\n the folds, and the loss minimized is the total loss per sample,\n and not the mean loss across the folds.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - An object to be used as a cross-validation generator.\n - An iterable yielding train, test splits.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. 
In all\n other cases, :class:`KFold` is used.\n\n refit : boolean, default=True\n Refit the best estimator with the entire dataset.\n If \"False\", it is impossible to make predictions using\n this RandomizedSearchCV instance after fitting.\n\n verbose : integer\n Controls the verbosity: the higher, the more messages.\n\n random_state : int or RandomState\n Pseudo random number generator state used for random uniform sampling\n from lists of possible values instead of scipy.stats distributions.\n\n error_score : 'raise' (default) or numeric\n Value to assign to the score if an error occurs in estimator fitting.\n If set to 'raise', the error is raised. If a numeric value is given,\n FitFailedWarning is raised. This parameter does not affect the refit\n step, which will always raise the error.\n\n return_train_score : boolean, default=False\n If ``'True'``, the ``cv_results_`` attribute will include training\n scores.\n\n Examples\n --------\n\n >>> from skopt import BayesSearchCV\n >>> # parameter ranges are specified by one of below\n >>> from skopt.space import Real, Categorical, Integer\n >>>\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.svm import SVC\n >>> from sklearn.model_selection import train_test_split\n >>>\n >>> X, y = load_iris(True)\n >>> X_train, X_test, y_train, y_test = train_test_split(X, y,\n ... train_size=0.75,\n ... random_state=0)\n >>>\n >>> # log-uniform: understand as search over p = exp(x) by varying x\n >>> opt = BayesSearchCV(\n ... SVC(),\n ... {\n ... 'C': Real(1e-6, 1e+6, prior='log-uniform'),\n ... 'gamma': Real(1e-6, 1e+1, prior='log-uniform'),\n ... 'degree': Integer(1,8),\n ... 'kernel': Categorical(['linear', 'poly', 'rbf']),\n ... },\n ... n_iter=32,\n ... random_state=0\n ... )\n >>>\n >>> # executes bayesian optimization\n >>> _ = opt.fit(X_train, y_train)\n >>>\n >>> # model can be saved, used for predictions or scoring\n >>> print(opt.score(X_test, y_test))\n 0.973...\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n For instance the below given table\n\n +--------------+-------------+-------------------+---+---------------+\n | param_kernel | param_gamma | split0_test_score |...|rank_test_score|\n +==============+=============+===================+===+===============+\n | 'rbf' | 0.1 | 0.8 |...| 2 |\n +--------------+-------------+-------------------+---+---------------+\n | 'rbf' | 0.2 | 0.9 |...| 1 |\n +--------------+-------------+-------------------+---+---------------+\n | 'rbf' | 0.3 | 0.7 |...| 1 |\n +--------------+-------------+-------------------+---+---------------+\n\n will be represented by a ``cv_results_`` dict of::\n\n {\n 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],\n mask = False),\n 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),\n 'split0_test_score' : [0.8, 0.9, 0.7],\n 'split1_test_score' : [0.82, 0.5, 0.7],\n 'mean_test_score' : [0.81, 0.7, 0.7],\n 'std_test_score' : [0.02, 0.2, 0.],\n 'rank_test_score' : [3, 1, 1],\n 'split0_train_score' : [0.8, 0.9, 0.7],\n 'split1_train_score' : [0.82, 0.5, 0.7],\n 'mean_train_score' : [0.81, 0.7, 0.7],\n 'std_train_score' : [0.03, 0.03, 0.04],\n 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],\n 'std_fit_time' : [0.01, 0.02, 0.01, 0.01],\n 'mean_score_time' : [0.007, 0.06, 0.04, 0.04],\n 'std_score_time' : [0.001, 0.002, 0.003, 0.005],\n 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],\n }\n\n NOTE 
that the key ``'params'`` is used to store a list of parameter\n settings dict for all the parameter candidates.\n\n The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and\n ``std_score_time`` are all in seconds.\n\n best_estimator_ : estimator\n Estimator that was chosen by the search, i.e. estimator\n which gave highest score (or smallest loss if specified)\n on the left out data. Not available if refit=False.\n\n optimizer_results_ : list of `OptimizeResult`\n Contains a `OptimizeResult` for each search space. The search space\n parameter are sorted by its name.\n\n best_score_ : float\n Score of best_estimator on the left out data.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n\n best_index_ : int\n The index (of the ``cv_results_`` arrays) which corresponds to the best\n candidate parameter setting.\n\n The dict at ``search.cv_results_['params'][search.best_index_]`` gives\n the parameter setting for the best model, that gives the highest\n mean score (``search.best_score_``).\n\n scorer_ : function\n Scorer function used on the held out data to choose the best\n parameters for the model.\n\n n_splits_ : int\n The number of cross-validation splits (folds/iterations).\n\n Notes\n -----\n The parameters selected are those that maximize the score of the held-out\n data, according to the scoring parameter.\n\n If `n_jobs` was set to a value higher than one, the data is copied for each\n parameter setting(and not `n_jobs` times). This is done for efficiency\n reasons if individual jobs take very little time, but may raise errors if\n the dataset is large and not enough memory is available. A workaround in\n this case is to set `pre_dispatch`. Then, the memory is copied only\n `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *\n n_jobs`.\n\n See Also\n --------\n :class:`GridSearchCV`:\n Does exhaustive search over a grid of parameters.\n\n \"\"\"\n\n def __init__(self, estimator, search_spaces, optimizer_kwargs=None,\n n_iter=50, scoring=None, fit_params=None, n_jobs=1,\n n_points=1, iid=True, refit=True, cv=None, verbose=0,\n pre_dispatch='2*n_jobs', random_state=None,\n error_score='raise', return_train_score=False):\n\n self.search_spaces = search_spaces\n self.n_iter = n_iter\n self.n_points = n_points\n self.random_state = random_state\n self.optimizer_kwargs = optimizer_kwargs\n self._check_search_space(self.search_spaces)\n # Temporary fix for compatibility with sklearn 0.20 and 0.21\n # See scikit-optimize#762\n # To be consistent with sklearn 0.21+, fit_params should be deprecated\n # in the constructor and be passed in ``fit``.\n self.fit_params = fit_params\n\n super(BayesSearchCV, self).__init__(\n estimator=estimator, scoring=scoring,\n n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,\n pre_dispatch=pre_dispatch, error_score=error_score,\n return_train_score=return_train_score)\n\n def _check_search_space(self, search_space):\n \"\"\"Checks whether the search space argument is correct\"\"\"\n\n if len(search_space) == 0:\n raise ValueError(\n \"The search_spaces parameter should contain at least one\"\n \"non-empty search space, got %s\" % search_space\n )\n\n # check if space is a single dict, convert to list if so\n if isinstance(search_space, dict):\n search_space = [search_space]\n\n # check if the structure of the space is proper\n if isinstance(search_space, list):\n # convert to just a list of dicts\n dicts_only = []\n\n # 1. 
check the case when a tuple of space, n_iter is provided\n for elem in search_space:\n if isinstance(elem, tuple):\n if len(elem) != 2:\n raise ValueError(\n \"All tuples in list of search spaces should have\"\n \"length 2, and contain (dict, int), got %s\" % elem\n )\n subspace, n_iter = elem\n\n if (not isinstance(n_iter, int)) or n_iter < 0:\n raise ValueError(\n \"Number of iterations in search space should be\"\n \"positive integer, got %s in tuple %s \" %\n (n_iter, elem)\n )\n\n # save subspaces here for further checking\n dicts_only.append(subspace)\n elif isinstance(elem, dict):\n dicts_only.append(elem)\n else:\n raise TypeError(\n \"A search space should be provided as a dict or\"\n \"tuple (dict, int), got %s\" % elem)\n\n # 2. check all the dicts for correctness of contents\n for subspace in dicts_only:\n for k, v in subspace.items():\n check_dimension(v)\n else:\n raise TypeError(\n \"Search space should be provided as a dict or list of dict,\"\n \"got %s\" % search_space)\n\n # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV\n @property\n def best_score_(self):\n check_is_fitted(self, 'cv_results_')\n return self.cv_results_['mean_test_score'][self.best_index_]\n\n # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV\n @property\n def best_params_(self):\n check_is_fitted(self, 'cv_results_')\n return self.cv_results_['params'][self.best_index_]\n\n @property\n def optimizer_results_(self):\n check_is_fitted(self, '_optim_results')\n return self._optim_results\n\n # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV\n def _fit(self, X, y, groups, parameter_iterable):\n \"\"\"\n Actual fitting, performing the search over parameters.\n Taken from https://github.com/scikit-learn/scikit-learn/blob/0.18.X\n .../sklearn/model_selection/_search.py\n \"\"\"\n estimator = self.estimator\n cv = sklearn.model_selection._validation.check_cv(\n self.cv, y, classifier=is_classifier(estimator))\n self.scorer_ = check_scoring(\n self.estimator, scoring=self.scoring)\n\n X, y, groups = indexable(X, y, groups)\n n_splits = cv.get_n_splits(X, y, groups)\n if self.verbose > 0 and isinstance(parameter_iterable, Sized):\n n_candidates = len(parameter_iterable)\n print(\"Fitting {0} folds for each of {1} candidates, totalling\"\n \" {2} fits\".format(n_splits, n_candidates,\n n_candidates * n_splits))\n\n base_estimator = clone(self.estimator)\n pre_dispatch = self.pre_dispatch\n\n cv_iter = list(cv.split(X, y, groups))\n out = Parallel(\n n_jobs=self.n_jobs, verbose=self.verbose,\n pre_dispatch=pre_dispatch\n )(delayed(sklearn.model_selection._validation._fit_and_score)(\n clone(base_estimator),\n X, y, self.scorer_,\n train, test, self.verbose, parameters,\n fit_params=self.fit_params,\n return_train_score=self.return_train_score,\n return_n_test_samples=True,\n return_times=True, return_parameters=True,\n error_score=self.error_score\n )\n for parameters in parameter_iterable\n for train, test in cv_iter)\n\n # if one choose to see train score, \"out\" will contain train score info\n if self.return_train_score:\n (train_scores, test_scores, test_sample_counts,\n fit_time, score_time, parameters) = zip(*out)\n else:\n (test_scores, test_sample_counts,\n fit_time, score_time, parameters) = zip(*out)\n\n candidate_params = parameters[::n_splits]\n n_candidates = len(candidate_params)\n\n results = dict()\n\n def _store(key_name, array, weights=None, splits=False, rank=False):\n \"\"\"A small helper to store the scores/times to the cv_results_\"\"\"\n 
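# Descriptive note (added): reshape the flat per-fold values to\n            # (n_candidates, n_splits); optionally store one split<i>_<key_name>\n            # column per split, then record the (weighted) mean and std per\n            # candidate. Ranks come from rankdata on the negated means, so\n            # rank 1 marks the highest mean score.\n            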
array = np.array(array, dtype=np.float64).reshape(n_candidates,\n n_splits)\n if splits:\n for split_i in range(n_splits):\n results[\"split%d_%s\"\n % (split_i, key_name)] = array[:, split_i]\n\n array_means = np.average(array, axis=1, weights=weights)\n results['mean_%s' % key_name] = array_means\n # Weighted std is not directly available in numpy\n array_stds = np.sqrt(np.average((array -\n array_means[:, np.newaxis]) ** 2,\n axis=1, weights=weights))\n results['std_%s' % key_name] = array_stds\n\n if rank:\n results[\"rank_%s\" % key_name] = np.asarray(\n rankdata(-array_means, method='min'), dtype=np.int32)\n\n # Computed the (weighted) mean and std for test scores alone\n # NOTE test_sample counts (weights) remain the same for all candidates\n test_sample_counts = np.array(test_sample_counts[:n_splits],\n dtype=np.int)\n\n _store('test_score', test_scores, splits=True, rank=True,\n weights=test_sample_counts if self.iid else None)\n if self.return_train_score:\n _store('train_score', train_scores, splits=True)\n _store('fit_time', fit_time)\n _store('score_time', score_time)\n\n best_index = np.flatnonzero(results[\"rank_test_score\"] == 1)[0]\n best_parameters = candidate_params[best_index]\n\n # Use one MaskedArray and mask all the places where the param is not\n # applicable for that candidate. Use defaultdict as each candidate may\n # not contain all the params\n param_results = defaultdict(partial(\n MaskedArray,\n np.empty(n_candidates,),\n mask=True,\n dtype=object))\n for cand_i, params in enumerate(candidate_params):\n for name, value in params.items():\n # An all masked empty array gets created for the key\n # `\"param_%s\" % name` at the first occurence of `name`.\n # Setting the value at an index also unmasks that index\n param_results[\"param_%s\" % name][cand_i] = value\n\n results.update(param_results)\n\n # Store a list of param dicts at the key 'params'\n results['params'] = candidate_params\n\n self.cv_results_ = results\n self.best_index_ = best_index\n self.n_splits_ = n_splits\n\n if self.refit:\n # fit the best estimator using the entire dataset\n # clone first to work around broken estimators\n best_estimator = clone(base_estimator).set_params(\n **best_parameters)\n if y is not None:\n best_estimator.fit(X, y, **self.fit_params)\n else:\n best_estimator.fit(X, **self.fit_params)\n self.best_estimator_ = best_estimator\n return self\n\n def _fit_best_model(self, X, y):\n \"\"\"Fit the estimator copy with best parameters found to the\n provided data.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Input data, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape = [n_samples] or [n_samples, n_output],\n Target relative to X for classification or regression.\n\n Returns\n -------\n self\n \"\"\"\n self.best_estimator_ = clone(self.estimator)\n self.best_estimator_.set_params(**self.best_params_)\n self.best_estimator_.fit(X, y, **(self.fit_params or {}))\n return self\n\n def _make_optimizer(self, params_space):\n \"\"\"Instantiate skopt Optimizer class.\n\n Parameters\n ----------\n params_space : dict\n Represents parameter search space. 
The keys are parameter\n names (strings) and values are skopt.space.Dimension instances,\n one of Real, Integer or Categorical.\n\n Returns\n -------\n optimizer: Instance of the `Optimizer` class used for for search\n in some parameter space.\n\n \"\"\"\n\n kwargs = self.optimizer_kwargs_.copy()\n kwargs['dimensions'] = dimensions_aslist(params_space)\n optimizer = Optimizer(**kwargs)\n\n return optimizer\n\n def _step(self, X, y, search_space, optimizer, groups=None, n_points=1):\n \"\"\"Generate n_jobs parameters and evaluate them in parallel.\n \"\"\"\n\n # get parameter values to evaluate\n params = optimizer.ask(n_points=n_points)\n\n # convert parameters to python native types\n params = [[np.array(v).item() for v in p] for p in params]\n\n # make lists into dictionaries\n params_dict = [point_asdict(search_space, p) for p in params]\n\n # HACK: self.cv_results_ is reset at every call to _fit, keep current\n all_cv_results = self.cv_results_\n\n # HACK: this adds compatibility with different versions of sklearn\n refit = self.refit\n self.refit = False\n self._fit(X, y, groups, params_dict)\n self.refit = refit\n\n # merge existing and new cv_results_\n for k in self.cv_results_:\n all_cv_results[k].extend(self.cv_results_[k])\n\n all_cv_results[\"rank_test_score\"] = list(np.asarray(\n rankdata(-np.array(all_cv_results['mean_test_score']),\n method='min'), dtype=np.int32))\n if self.return_train_score:\n all_cv_results[\"rank_train_score\"] = list(np.asarray(\n rankdata(-np.array(all_cv_results['mean_train_score']),\n method='min'), dtype=np.int32))\n self.cv_results_ = all_cv_results\n self.best_index_ = np.argmax(self.cv_results_['mean_test_score'])\n\n # feed the point and objective back into optimizer\n local_results = self.cv_results_['mean_test_score'][-len(params):]\n\n # optimizer minimizes objective, hence provide negative score\n return optimizer.tell(params, [-score for score in local_results])\n\n @property\n def total_iterations(self):\n \"\"\"\n Count total iterations that will be taken to explore\n all subspaces with `fit` method.\n\n Returns\n -------\n max_iter: int, total number of iterations to explore\n \"\"\"\n total_iter = 0\n\n for elem in self.search_spaces:\n\n if isinstance(elem, tuple):\n space, n_iter = elem\n else:\n n_iter = self.n_iter\n\n total_iter += n_iter\n\n return total_iter\n\n def _run_search(self, x):\n pass\n\n def fit(self, X, y=None, groups=None, callback=None):\n \"\"\"Run fit on the estimator with randomly drawn parameters.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape = [n_samples, n_features]\n The training input samples.\n\n y : array-like, shape = [n_samples] or [n_samples, n_output]\n Target relative to X for classification or regression (class\n labels should be integers or strings).\n\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n callback: [callable, list of callables, optional]\n If callable then `callback(res)` is called after each parameter\n combination tested. 
If list of callables, then each callable in\n the list is called.\n \"\"\"\n\n # check if space is a single dict, convert to list if so\n search_spaces = self.search_spaces\n if isinstance(search_spaces, dict):\n search_spaces = [search_spaces]\n\n callbacks = check_callback(callback)\n\n if self.optimizer_kwargs is None:\n self.optimizer_kwargs_ = {}\n else:\n self.optimizer_kwargs_ = dict(self.optimizer_kwargs)\n random_state = check_random_state(self.random_state)\n self.optimizer_kwargs_['random_state'] = random_state\n\n # Instantiate optimizers for all the search spaces.\n optimizers = []\n for search_space in search_spaces:\n if isinstance(search_space, tuple):\n search_space = search_space[0]\n optimizers.append(self._make_optimizer(search_space))\n self.optimizers_ = optimizers # will save the states of the optimizers\n\n self.cv_results_ = defaultdict(list)\n self.best_index_ = None\n self.multimetric_ = False\n self._optim_results = []\n\n n_points = self.n_points\n\n for search_space, optimizer in zip(search_spaces, optimizers):\n # if not provided with search subspace, n_iter is taken as\n # self.n_iter\n if isinstance(search_space, tuple):\n search_space, n_iter = search_space\n else:\n n_iter = self.n_iter\n\n # do the optimization for particular search space\n while n_iter > 0:\n # when n_iter < n_points points left for evaluation\n n_points_adjusted = min(n_iter, n_points)\n\n optim_result = self._step(\n X, y, search_space, optimizer,\n groups=groups, n_points=n_points_adjusted\n )\n n_iter -= n_points\n\n if eval_callbacks(callbacks, optim_result):\n break\n self._optim_results.append(optim_result)\n\n # Refit the best model on the the whole dataset\n if self.refit:\n self._fit_best_model(X, y)\n\n return self\n"}}},{"rowIdx":542693,"cells":{"filename":{"kind":"string","value":"the-stack_106_31042"},"text":{"kind":"string","value":"# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)\n#\n# SPDX-License-Identifier: MIT\n\nimport argparse\n\nimport os\nimport struct\nimport sys\n\nsys.path.insert(0, \"bitmap_font\")\nsys.path.insert(0, \"../../tools/bitmap_font\")\n\nfrom adafruit_bitmap_font import bitmap_font\n\nparser = argparse.ArgumentParser(description=\"Generate USB descriptors.\")\nparser.add_argument(\"--font\", type=str, help=\"Font path\", required=True)\nparser.add_argument(\"--extra_characters\", type=str, help=\"Unicode string of extra characters\")\nparser.add_argument(\n \"--sample_file\",\n type=argparse.FileType(\"r\", encoding=\"utf-8\"),\n help=\"Text file that includes strings to support.\",\n)\nparser.add_argument(\"--output_c_file\", type=argparse.FileType(\"w\"), required=True)\n\nargs = parser.parse_args()\n\n\nclass BitmapStub:\n def __init__(self, width, height, color_depth):\n self.width = width\n self.rows = [b\"\"] * height\n\n def _load_row(self, y, row):\n self.rows[y] = bytes(row)\n\n\nf = bitmap_font.load_font(args.font, BitmapStub)\n\n# Load extra characters from the sample file.\nsample_characters = set()\nif args.sample_file:\n for line in args.sample_file:\n # Skip comments because we add additional characters in our huffman comments.\n if line.startswith(\"//\"):\n continue\n for c in line.strip():\n sample_characters.add(c)\n\n# Merge visible ascii, sample characters and extra characters.\nvisible_ascii = bytes(range(0x20, 0x7F)).decode(\"utf-8\")\nall_characters = visible_ascii\nfor c in sample_characters:\n if c not in all_characters:\n 
all_characters += c\nif args.extra_characters:\n all_characters.extend(args.extra_characters)\nall_characters = \"\".join(sorted(set(all_characters)))\nfiltered_characters = all_characters\n\n# Try to pre-load all of the glyphs. Misses will still be slow later.\nf.load_glyphs(set(ord(c) for c in all_characters))\n\nmissing = 0\n# Get each glyph.\nfor c in set(all_characters):\n if ord(c) not in f._glyphs:\n missing += 1\n filtered_characters = filtered_characters.replace(c, \"\")\n continue\n g = f.get_glyph(ord(c))\n if g[\"shift\"][1] != 0:\n raise RuntimeError(\"y shift\")\n\nif missing > 0:\n print(\"Font missing\", missing, \"characters\", file=sys.stderr)\n\nx, y, dx, dy = f.get_bounding_box()\ntile_x, tile_y = x - dx, y - dy\ntotal_bits = tile_x * len(all_characters)\ntotal_bits += 32 - total_bits % 32\nbytes_per_row = total_bits // 8\nb = bytearray(bytes_per_row * tile_y)\n\nfor x, c in enumerate(filtered_characters):\n g = f.get_glyph(ord(c))\n start_bit = x * tile_x + g[\"bounds\"][2]\n start_y = (tile_y - 2) - (g[\"bounds\"][1] + g[\"bounds\"][3])\n for y, row in enumerate(g[\"bitmap\"].rows):\n for i in range(g[\"bounds\"][0]):\n byte = i // 8\n bit = i % 8\n if row[byte] & (1 << (7 - bit)) != 0:\n overall_bit = start_bit + (start_y + y) * bytes_per_row * 8 + i\n b[overall_bit // 8] |= 1 << (7 - (overall_bit % 8))\n\n\nextra_characters = \"\"\nfor c in filtered_characters:\n if c not in visible_ascii:\n extra_characters += c\n\nc_file = args.output_c_file\n\nc_file.write(\n \"\"\"\\\n\n#include \"shared-bindings/displayio/Palette.h\"\n#include \"supervisor/shared/display.h\"\n\n\"\"\"\n)\n\nc_file.write(\n \"\"\"\\\n_displayio_color_t terminal_colors[2] = {\n {\n .rgb888 = 0x000000,\n .rgb565 = 0x0000,\n .luma = 0x00,\n .chroma = 0\n },\n {\n .rgb888 = 0xffffff,\n .rgb565 = 0xffff,\n .luma = 0xff,\n .chroma = 0\n },\n};\n\ndisplayio_palette_t supervisor_terminal_color = {\n .base = {.type = &displayio_palette_type },\n .colors = terminal_colors,\n .color_count = 2,\n .needs_refresh = false\n};\n\"\"\"\n)\n\nc_file.write(\n \"\"\"\\\ndisplayio_tilegrid_t supervisor_terminal_text_grid = {{\n .base = {{ .type = &displayio_tilegrid_type }},\n .bitmap = (displayio_bitmap_t*) &supervisor_terminal_font_bitmap,\n .pixel_shader = &supervisor_terminal_color,\n .x = 16,\n .y = 0,\n .pixel_width = {1},\n .pixel_height = {2},\n .bitmap_width_in_tiles = {0},\n .tiles_in_bitmap = {0},\n .width_in_tiles = 1,\n .height_in_tiles = 1,\n .tile_width = {1},\n .tile_height = {2},\n .tiles = NULL,\n .partial_change = false,\n .full_change = false,\n .hidden = false,\n .hidden_by_parent = false,\n .moved = false,\n .inline_tiles = false,\n .in_group = true\n}};\n\"\"\".format(\n len(all_characters), tile_x, tile_y\n )\n)\n\nc_file.write(\n \"\"\"\\\nconst uint32_t font_bitmap_data[{}] = {{\n\"\"\".format(\n bytes_per_row * tile_y // 4\n )\n)\n\nfor i, word in enumerate(struct.iter_unpack(\">I\", b)):\n c_file.write(\"0x{:08x}, \".format(word[0]))\n if (i + 1) % (bytes_per_row // 4) == 0:\n c_file.write(\"\\n\")\n\nc_file.write(\n \"\"\"\\\n};\n\"\"\"\n)\n\nc_file.write(\n \"\"\"\\\ndisplayio_bitmap_t supervisor_terminal_font_bitmap = {{\n .base = {{.type = &displayio_bitmap_type }},\n .width = {},\n .height = {},\n .data = (size_t*) font_bitmap_data,\n .stride = {},\n .bits_per_value = 1,\n .x_shift = 5,\n .x_mask = 0x1f,\n .bitmask = 0x1,\n .read_only = true\n}};\n\"\"\".format(\n len(all_characters) * tile_x, tile_y, bytes_per_row / 4\n )\n)\n\n\nc_file.write(\n \"\"\"\\\nconst fontio_builtinfont_t 
supervisor_terminal_font = {{\n .base = {{.type = &fontio_builtinfont_type }},\n .bitmap = &supervisor_terminal_font_bitmap,\n .width = {},\n .height = {},\n .unicode_characters = (const uint8_t*) \"{}\",\n .unicode_characters_len = {}\n}};\n\"\"\".format(\n tile_x, tile_y, extra_characters, len(extra_characters.encode(\"utf-8\"))\n )\n)\n\nc_file.write(\n \"\"\"\\\nterminalio_terminal_obj_t supervisor_terminal = {\n .base = { .type = &terminalio_terminal_type },\n .font = &supervisor_terminal_font,\n .cursor_x = 0,\n .cursor_y = 0,\n .tilegrid = &supervisor_terminal_text_grid\n};\n\"\"\"\n)\n"}}},{"rowIdx":542694,"cells":{"filename":{"kind":"string","value":"the-stack_106_31043"},"text":{"kind":"string","value":"\"\"\"\nAuthor: Matheus Felinto\nDescription: A simple electronic circuit simulator\n\"\"\"\n\nimport sys\nimport numpy as np\n\nfrom lib.netlist import NetList\nfrom lib import components\n\n\nif __name__ == \"__main__\":\n netlist = NetList(sys.argv[1])\n netlist.read_netlist()\n\n nodes_number, auxiliary_equations_number = netlist.define_matrix_range() \n\n N = nodes_number + auxiliary_equations_number + 1\n admittance_matrix = np.zeros((N, N), dtype=complex)\n current_vector = np.zeros(N, dtype=complex)\n\n frequency = 0\n if netlist.lines[-1].split()[0].upper() == \".SIN\":\n frequency = float(netlist.lines[-1].split()[1])\n auxiliary_elements = components.create_component_stamps(netlist.lines, admittance_matrix, current_vector, nodes_number, frequency)\n admittance_matrix = np.delete(np.delete(admittance_matrix, 0, 0), 0, 1)\n current_vector = np.delete(current_vector, 0, 0)\n\n nodes_voltage = np.linalg.solve(admittance_matrix, current_vector)\n\n print(\"\"\"\n _____ _ _ _ _ _ \n|_ _| |__ ___ ___(_)_ __ ___ _ _(_) |_ __ ____ _| |_ _ ___ ___ \n | | | '_ \\ / _ \\ / __| | '__/ __| | | | | __| \\ \\ / / _` | | | | |/ _ \\/ __|\n | | | | | | __/ | (__| | | | (__| |_| | | |_ \\ V / (_| | | |_| | __/\\__ \\\\\n |_| |_| |_|\\___| \\___|_|_| \\___|\\__,_|_|\\__| \\_/ \\__,_|_|\\__,_|\\___||___/\n \n \n __ _ _ __ ___ _ \n / _` | '__/ _ (_)\n| (_| | | | __/_ \n \\__,_|_| \\___(_)\n \"\"\")\n\n if netlist.lines[-1].split()[0].upper() == \".DC\":\n for index in range(1, len(nodes_voltage) + 1):\n print(f\"{f'node({index})' if index <= nodes_number else f'current({auxiliary_elements[index - nodes_number - 1]})'} = {nodes_voltage[index - 1].real:.3f}\")\n\n elif netlist.lines[-1].split()[0].upper() == \".SIN\":\n for index in range(1, len(nodes_voltage) + 1):\n print(f\"{f'node({index})' if index <= nodes_number else f'current({auxiliary_elements[index - nodes_number - 1]})'} = {nodes_voltage[index - 1].real:.3f} Cos({frequency}t) + {-nodes_voltage[index - 1].imag:.3f} Sin({frequency}t)\")\n"}}},{"rowIdx":542695,"cells":{"filename":{"kind":"string","value":"the-stack_106_31044"},"text":{"kind":"string","value":"import urllib2\nimport hashlib\nimport tarfile\nimport random\nimport string\nimport sys\nimport os\nimport logging\nimport json\nimport socket\nimport shutil\nimport errno\nimport datetime as dt\n\nimport retry\n\nINFRASTRUCTURE_ERROR = 12\n\n\ndef make_user_agent():\n return 'fetch_from: {host}'.format(host=socket.gethostname())\n\n\ndef add_common_arguments(parser):\n parser.add_argument('--copy-to') # used by jbuild in fetch_resource\n parser.add_argument('--rename-to') # used by test_node in inject_mds_resource_to_graph\n parser.add_argument('--copy-to-dir')\n parser.add_argument('--untar-to')\n parser.add_argument('--rename', action='append', default=[], metavar='FILE', 
help='rename FILE to the corresponding output')\n parser.add_argument('--executable', action='store_true', help='make outputs executable')\n parser.add_argument('--log-path')\n parser.add_argument('outputs', nargs='*')\n\n\ndef ensure_dir(path):\n if not (path == '' or os.path.isdir(path)):\n os.makedirs(path)\n\n\ndef hardlink_or_copy(src, dst):\n ensure_dir(os.path.dirname(dst))\n\n if os.name == 'nt':\n shutil.copy(src, dst)\n else:\n try:\n os.link(src, dst)\n except OSError as e:\n if e.errno == errno.EEXIST:\n return\n elif e.errno == errno.EXDEV:\n sys.stderr.write(\"Can't make cross-device hardlink - fallback to copy: {} -> {}\\n\".format(src, dst))\n shutil.copy(src, dst)\n else:\n raise\n\n\ndef rename_or_copy_and_remove(src, dst):\n ensure_dir(os.path.dirname(dst))\n\n try:\n os.rename(src, dst)\n except OSError:\n shutil.copy(src, dst)\n os.remove(src)\n\n\nclass BadChecksumFetchError(Exception):\n pass\n\n\nclass IncompleteFetchError(Exception):\n pass\n\n\nclass ResourceUnpackingError(Exception):\n pass\n\n\nclass ResourceIsDirectoryError(Exception):\n pass\n\n\nclass OutputIsDirectoryError(Exception):\n pass\n\n\nclass OutputNotExistError(Exception):\n pass\n\n\ndef setup_logging(args, base_name):\n def makedirs(path):\n try:\n os.makedirs(path)\n except OSError:\n pass\n\n if args.log_path:\n log_file_name = args.log_path\n else:\n log_file_name = base_name + \".log\"\n\n args.abs_log_path = os.path.abspath(log_file_name)\n makedirs(os.path.dirname(args.abs_log_path))\n logging.basicConfig(filename=args.abs_log_path, level=logging.DEBUG)\n\n\ndef is_temporary(e):\n return isinstance(e, (BadChecksumFetchError, IncompleteFetchError, urllib2.URLError, socket.timeout, socket.error))\n\n\ndef uniq_string_generator(size=6, chars=string.ascii_lowercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef report_to_snowden(value):\n def inner():\n body = {\n 'namespace': 'ygg',\n 'key': 'fetch-from-sandbox',\n 'value': json.dumps(value),\n }\n\n urllib2.urlopen(\n 'https://back-snowden.qloud.yandex-team.ru/report/add',\n json.dumps([body, ]),\n timeout=5,\n )\n\n try:\n inner()\n except Exception as e:\n logging.error(e)\n\n\ndef copy_stream(read, *writers, **kwargs):\n chunk_size = kwargs.get('size', 1024*1024)\n while True:\n data = read(chunk_size)\n if not data:\n break\n for write in writers:\n write(data)\n\n\ndef md5file(fname):\n res = hashlib.md5()\n with open(fname, 'rb') as f:\n copy_stream(f.read, res.update)\n return res.hexdigest()\n\n\ndef git_like_hash_with_size(filepath):\n \"\"\"\n Calculate git like hash for path\n \"\"\"\n sha = hashlib.sha1()\n\n file_size = 0\n\n with open(filepath, 'rb') as f:\n while True:\n block = f.read(2 ** 16)\n\n if not block:\n break\n\n file_size += len(block)\n sha.update(block)\n\n sha.update('\\0')\n sha.update(str(file_size))\n\n return sha.hexdigest(), file_size\n\n\ndef size_printer(display_name, size):\n sz = [0]\n last_stamp = [dt.datetime.now()]\n\n def printer(chunk):\n sz[0] += len(chunk)\n now = dt.datetime.now()\n if last_stamp[0] + dt.timedelta(seconds=10) < now:\n if size:\n print >>sys.stderr, \"##status##{} - [[imp]]{:.1f}%[[rst]]\".format(display_name, 100.0 * sz[0] / size)\n last_stamp[0] = now\n\n return printer\n\n\ndef fetch_url(url, unpack, resource_file_name, expected_md5=None, expected_sha1=None, tries=10):\n logging.info('Downloading from url %s name %s and expected md5 %s', url, resource_file_name, expected_md5)\n tmp_file_name = uniq_string_generator()\n\n request = 
urllib2.Request(url, headers={'User-Agent': make_user_agent()})\n req = retry.retry_func(lambda: urllib2.urlopen(request, timeout=30), tries=tries, delay=5, backoff=1.57079)\n logging.debug('Headers: %s', req.headers.headers)\n expected_file_size = int(req.headers['Content-Length'])\n real_md5 = hashlib.md5()\n real_sha1 = hashlib.sha1()\n\n with open(tmp_file_name, 'wb') as fp:\n copy_stream(req.read, fp.write, real_md5.update, real_sha1.update, size_printer(resource_file_name, expected_file_size))\n\n real_md5 = real_md5.hexdigest()\n real_file_size = os.path.getsize(tmp_file_name)\n real_sha1.update('\\0')\n real_sha1.update(str(real_file_size))\n real_sha1 = real_sha1.hexdigest()\n\n if unpack:\n tmp_dir = tmp_file_name + '.dir'\n os.makedirs(tmp_dir)\n with tarfile.open(tmp_file_name, mode=\"r|gz\") as tar:\n tar.extractall(tmp_dir)\n tmp_file_name = os.path.join(tmp_dir, resource_file_name)\n real_md5 = md5file(tmp_file_name)\n\n logging.info('File size %s (expected %s)', real_file_size, expected_file_size)\n logging.info('File md5 %s (expected %s)', real_md5, expected_md5)\n logging.info('File sha1 %s (expected %s)', real_sha1, expected_sha1)\n\n if expected_md5 and real_md5 != expected_md5:\n report_to_snowden(\n {\n 'headers': req.headers.headers,\n 'expected_md5': expected_md5,\n 'real_md5': real_md5\n }\n )\n\n raise BadChecksumFetchError(\n 'Downloaded {}, but expected {} for {}'.format(\n real_md5,\n expected_md5,\n url,\n )\n )\n\n if expected_sha1 and real_sha1 != expected_sha1:\n report_to_snowden(\n {\n 'headers': req.headers.headers,\n 'expected_sha1': expected_sha1,\n 'real_sha1': real_sha1\n }\n )\n\n raise BadChecksumFetchError(\n 'Downloaded {}, but expected {} for {}'.format(\n real_sha1,\n expected_sha1,\n url,\n )\n )\n\n if expected_file_size != real_file_size:\n report_to_snowden({'headers': req.headers.headers, 'file_size': real_file_size})\n\n raise IncompleteFetchError(\n 'Downloaded {}, but expected {} for {}'.format(\n real_file_size,\n expected_file_size,\n url,\n )\n )\n\n return tmp_file_name\n\n\ndef process(fetched_file, file_name, args, remove=True):\n assert len(args.rename) <= len(args.outputs), (\n 'too few outputs to rename', args.rename, 'into', args.outputs)\n\n if not os.path.isfile(fetched_file):\n raise ResourceIsDirectoryError('Resource must be a file, not a directory: %s' % fetched_file)\n\n if args.copy_to:\n hardlink_or_copy(fetched_file, args.copy_to)\n if not args.outputs:\n args.outputs = [args.copy_to]\n\n if args.rename_to:\n args.rename.append(fetched_file)\n if not args.outputs:\n args.outputs = [args.rename_to]\n\n if args.copy_to_dir:\n hardlink_or_copy(fetched_file, os.path.join(args.copy_to_dir, file_name))\n\n if args.untar_to:\n ensure_dir(args.untar_to)\n try:\n with tarfile.open(fetched_file, mode='r:*') as tar:\n tar.extractall(args.untar_to)\n except tarfile.ReadError as e:\n logging.exception(e)\n raise ResourceUnpackingError('File {} cannot be untared'.format(fetched_file))\n\n for src, dst in zip(args.rename, args.outputs):\n if src == 'RESOURCE':\n src = fetched_file\n if os.path.abspath(src) == os.path.abspath(fetched_file):\n logging.info('Copying %s to %s', src, dst)\n hardlink_or_copy(src, dst)\n else:\n logging.info('Renaming %s to %s', src, dst)\n if remove:\n rename_or_copy_and_remove(src, dst)\n else:\n shutil.copy(src, dst)\n\n for path in args.outputs:\n if not os.path.exists(path):\n raise OutputNotExistError('Output does not exist: %s' % os.path.abspath(path))\n if not os.path.isfile(path):\n raise 
OutputIsDirectoryError('Output must be a file, not a directory: %s' % os.path.abspath(path))\n if args.executable:\n os.chmod(path, os.stat(path).st_mode | 0o111)\n if os.path.abspath(path) == os.path.abspath(fetched_file):\n remove = False\n\n if remove:\n os.remove(fetched_file)\n"}}},{"rowIdx":542696,"cells":{"filename":{"kind":"string","value":"the-stack_106_31045"},"text":{"kind":"string","value":"import os\nimport json\n\nfrom flask import render_template, g, session, redirect, url_for, request\n# noinspection PyPackageRequirements\nfrom bson.objectid import ObjectId\n\nfrom app import app, app_mongo, cdn_theme_url, app_redis\n\nfrom views.navigation import Navigation\nfrom views.auth import auth\nfrom views.jump_freighter import jf\nfrom views.admin import admin\nfrom views.account import account\nfrom views.corp import corp\nfrom views.fittings import fittings\nfrom views.buyback import buyback\nfrom views.ordering import ordering\nfrom views.security import security\nfrom views.recruitment import recruitment\nfrom views.auth import requires_sso, auth_check\n# noinspection PyUnresolvedReferences\nfrom views import api # Attaches API module\n\n\napp.register_blueprint(auth, url_prefix=\"/auth\")\napp.register_blueprint(jf, url_prefix=\"/jf\")\napp.register_blueprint(admin, url_prefix=\"/admin\")\napp.register_blueprint(account, url_prefix=\"/account\")\napp.register_blueprint(corp, url_prefix=\"/corp\")\napp.register_blueprint(fittings, url_prefix=\"/fittings\")\napp.register_blueprint(buyback, url_prefix=\"/buyback\")\napp.register_blueprint(ordering, url_prefix=\"/ordering\")\napp.register_blueprint(security, url_prefix=\"/security\")\napp.register_blueprint(recruitment, url_prefix=\"/recruitment\")\nNavigation(app)\n\n\n@app.before_first_request\ndef app_init():\n # Check if stations are loaded\n db_check_stations = app_mongo.db.stations.find_one({\"_id\": 60003760}) # Use Jita as check\n if not db_check_stations:\n # Load statics into memory\n with open(\"resources/staStations.json\", \"r\") as staStations_file:\n stations_list = json.load(staStations_file)\n app_mongo.db.stations.insert([{\"_id\": int(key), \"name\": value} for key, value in stations_list.items()])\n\n # Refresh Items\n app_mongo.db.items.drop()\n with open(\"resources/invTypes.json\", \"r\") as invTypes_file:\n items_list = json.load(invTypes_file)\n # Volumes by market group\n with open(\"resources/invPackaged.json\", \"r\") as invPackaged_file:\n package_list = json.load(invPackaged_file)\n # Fallback packed volumes of ships\n with open(\"resources/invVolumes.json\", \"r\") as invVolumes_file:\n volumes_list = json.load(invVolumes_file)\n # Open refine amounts\n with open(\"resources/invTypeMaterials.json\", \"r\") as invTypesMaterials_file:\n materials_list = json.load(invTypesMaterials_file)\n\n adjusted_items_list = []\n for key, value in items_list.items():\n if package_list.get(str(value[\"ship_group_id\"])):\n # Adjust for strategic cruisers\n if value[\"name\"] in [\"Legion\", \"Tengu\", \"Proteus\", \"Loki\"]:\n corrected_volume = 5000\n else:\n corrected_volume = package_list.get(str(value[\"ship_group_id\"]))\n else:\n corrected_volume = volumes_list[key] if volumes_list.get(key) else value[\"volume\"]\n adjusted_items_list.append({\"_id\": int(key), \"name\": value[\"name\"], \"volume\": corrected_volume,\n \"meta\": value[\"meta\"], \"materials\": materials_list.get(key, []),\n \"market_group_id\": value[\"market_group_id\"], \"skill_id\": value[\"skill_id\"],\n \"batch\": value[\"batch\"], 
\"ship_group_id\": value[\"ship_group_id\"]})\n app_mongo.db.items.insert(adjusted_items_list)\n\n # Check if roles are loaded\n app_mongo.db.eve_auth.update({\"_id\": \"super_admin\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"jf_admin\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"jf_pilot\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"user_admin\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"fittings_admin\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"buyback_admin\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"ordering_admin\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"ordering_marketeer\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"security_officer\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n app_mongo.db.eve_auth.update({\"_id\": \"recruiter\"}, {\"$setOnInsert\": {\"users\": []}}, upsert=True)\n\n\n@app.before_request\ndef db_init():\n g.mongo = app_mongo\n g.redis = app_redis\n\n if request.path not in [\"/settings\"] and not any([\n request.path.endswith(\".js\"),\n request.path.endswith(\".css\"),\n request.path.endswith(\".ico\"),\n request.path.endswith(\".png\"),\n ]):\n session[\"prev_path\"] = request.path\n\n # Check css\n if session.get(\"default_css\", True):\n app.extensions['bootstrap']['cdns'][\"theme\"].baseurl = cdn_theme_url\n else:\n cdn_theme_alt_url = \"https://maxcdn.bootstrapcdn.com/bootswatch/3.3.5/sandstone/\"\n app.extensions['bootstrap']['cdns'][\"theme\"].baseurl = cdn_theme_alt_url\n\n if os.environ.get(\"maintenance\") == \"True\":\n return render_template(\"maintenance.html\")\n\n\n@app.teardown_request\ndef cleanup(exception=None):\n if exception:\n print(\"Error: \", exception)\n\n\n@app.route('/')\ndef home():\n with open(\"configs/base.json\", \"r\") as base_config_file:\n base_config = json.load(base_config_file)\n return render_template(\"index.html\", forum_url=base_config[\"forum_url\"])\n\n\n@app.route(\"/settings\")\ndef settings():\n session.setdefault(\"default_css\", True)\n session[\"default_css\"] = False if session.get(\"default_css\") else True\n if session.get(\"CharacterOwnerHash\"):\n return redirect(session.get(\"prev_path\", url_for(\"account.home\")))\n else:\n return redirect(session.get(\"prev_path\", url_for(\"home\")))\n\n\n@requires_sso(None)\n@app.route(\"/issues\", methods=[\"GET\", \"POST\"])\ndef issues():\n editor = auth_check(\"user_admin\")\n if request.form.get(\"action\") == \"submit\":\n g.mongo.db.issues.insert({\n \"submitter\": session[\"CharacterName\"],\n \"issue\": request.form.get(\"issue\").strip()\n })\n elif request.form.get(\"action\") == \"delete\":\n if editor:\n g.mongo.db.issues.remove({\"_id\": ObjectId(request.form.get(\"id\"))})\n else:\n g.mongo.db.issues.remove({\"_id\": ObjectId(request.form.get(\"id\")), \"submitter\": session[\"CharacterName\"]})\n\n issue_list = []\n for db_issue in g.mongo.db.issues.find():\n timestamp = ObjectId(db_issue[\"_id\"]).generation_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n can_delete = True if editor or session[\"CharacterName\"] == db_issue[\"submitter\"] else False\n issue_list.append([timestamp, db_issue[\"issue\"], db_issue[\"submitter\"], can_delete, 
db_issue[\"_id\"]])\n\n return render_template(\"issues.html\", issue_list=issue_list)\n\n\n# noinspection PyUnusedLocal\n@app.errorhandler(404)\ndef error_missing(exception):\n error_message = \"This page cannot be found.\"\n return render_template(\"error.html\", error_code=404, error_message=error_message), 404\n\n\n# noinspection PyUnusedLocal\n@app.errorhandler(403)\ndef error_unauthorized(exception):\n error_message = \"You are not authorized to view this page. Ensure you have the correct permissions.\"\n return render_template(\"error.html\", error_code=403, error_message=error_message), 403\n\n\n# noinspection PyUnusedLocal\n@app.errorhandler(500)\ndef error_crash(exception):\n error_message = \"This page has crashed due to an exception. Contact Kazuki Ishikawa and submit a bug report.\"\n return render_template(\"error.html\", error_code=500, error_message=error_message), 500\n\n\nif not os.environ.get(\"EXTERNAL\") and __name__ == \"__main__\":\n\n os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'\n os.environ[\"maintenance\"] = 'False'\n\n @app.route('/test')\n def test():\n g.redis.publish('titdev-test', 'Look at this. Very \" \\'cool # message;. ')\n g.redis.publish('titdev-marketeer', 'This is a test of the emergency annoyance system.')\n return render_template(\"base.html\")\n\n profile = False\n # Profiling\n if profile:\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.config[\"PROFILE\"] = True\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n app.debug = True\n app.run(host=\"0.0.0.0\")\n"}}},{"rowIdx":542697,"cells":{"filename":{"kind":"string","value":"the-stack_106_31046"},"text":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"This plugin renders the client search page.\"\"\"\nimport urllib\n\nfrom grr.gui import renderers\nfrom grr.gui.plugins import forms\nfrom grr.gui.plugins import semantic\n\nfrom grr.lib import aff4\nfrom grr.lib import flow\nfrom grr.lib import rdfvalue\nfrom grr.lib import utils\n\nfrom grr.lib.aff4_objects import users as aff4_users\n\n\nclass NotificationCount(renderers.TemplateRenderer):\n \"\"\"Display the number of outstanding notifications.\"\"\"\n\n def RenderAjax(self, request, response):\n \"\"\"Return the count on unseen notifications.\"\"\"\n response = super(NotificationCount, self).RenderAjax(request, response)\n number = 0\n\n try:\n user_fd = aff4.FACTORY.Open(aff4.ROOT_URN.Add(\"users\").Add(\n request.user), token=request.token)\n notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)\n if notifications:\n number = len(notifications)\n except IOError:\n pass\n\n return renderers.JsonResponse(dict(number=number))\n\n\nclass NotificationBar(renderers.TemplateRenderer):\n \"\"\"Render a notification bar for the user.\"\"\"\n\n layout_template = renderers.Template(\"\"\"\n
\n
\n
\n
\n \n

Notifications for {{this.user|escape}}

\n
\n
\n
\n
\n \n
\n
\n
\n
\n\n
\n
\n\n
    \n
  • User: {{this.user|escape}}

the-stack_106_30895
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import h5py
import sys
import numpy as np
from helen.modules.python.TextColor import TextColor
from helen.modules.python.FileManager import FileManager

"""
WARNING: THIS IS A DEBUGGING TOOL INTENDED TO BE USED BY THE DEVELOPERS ONLY.
"""


class SequenceDataset(Dataset):
    """
    This class implements the dataset class for the dataloader to use. This version is intended to be used with
    train.py and test.py as this method loads labels with the images.

    It initializes all the given images and returns each image through __getitem__.
    """
    def __init__(self, image_directory):
        """
        This method initializes the dataset by loading all the image information. It creates a sequential list
        called all_images from which we can grab images iteratively through __getitem__.
        :param image_directory: Path to a directory where all the images are saved.
        """
        # transformer to convert loaded objects to tensors
        self.transform = transforms.Compose([transforms.ToTensor()])

        # a list of file-image pairs, where we have (file_name, image_name) as values so we can fetch images
        # from the list of files.
        file_image_pair = []

        # get all the h5 files that we have in the directory
        hdf_files = FileManager.get_file_paths_from_directory(image_directory)
        for hdf5_file_path in hdf_files:
            # for each of the files get all the images
            with h5py.File(hdf5_file_path, 'r') as hdf5_file:
                # check if marginpolish somehow generated an empty file
                if 'images' in hdf5_file:
                    image_names = list(hdf5_file['images'].keys())
                    # save the file-image pair to the list
                    for image_name in image_names:
                        file_image_pair.append((hdf5_file_path, image_name))
                else:
                    sys.stderr.write(TextColor.YELLOW + "WARN: NO IMAGES FOUND IN FILE: "
                                     + hdf5_file_path + "\n" + TextColor.END)

        # save the list to all_images so we can access the list inside other methods
        self.all_images = file_image_pair

    def __getitem__(self, index):
        """
        This method returns a single object. Dataloader uses this method to load images and then minibatches the
        loaded images.
        :param index: Index indicating which image from all_images to be loaded
        :return: the image and its auxiliary information
        """
        hdf5_filepath, image_name = self.all_images[index]

        # load all the information we need to save in the prediction hdf5
        with h5py.File(hdf5_filepath, 'r') as hdf5_file:
            contig = np.array2string(hdf5_file['images'][image_name]['contig'][()][0].astype(np.str)).replace("'", '')
            contig_start = hdf5_file['images'][image_name]['contig_start'][()][0].astype(np.int)
            contig_end = hdf5_file['images'][image_name]['contig_end'][()][0].astype(np.int)
            chunk_id = hdf5_file['images'][image_name]['feature_chunk_idx'][()][0].astype(np.int)
            image = hdf5_file['images'][image_name]['image'][()].astype(np.uint8)
            position = hdf5_file['images'][image_name]['position'][()].astype(np.int)
            label_base = hdf5_file['images'][image_name]['label_base'][()]
            label_run_length = hdf5_file['images'][image_name]['label_run_length'][()]

        return image, label_base, label_run_length, position, contig, contig_start, contig_end, chunk_id, hdf5_filepath

    def __len__(self):
        """
        Returns the length of the dataset.
        :return:
        """
        return len(self.all_images)
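# --- Illustrative usage sketch (not part of the original file above) ---
# A minimal example of how a dataset class like SequenceDataset might be driven from
# a PyTorch DataLoader while debugging. It assumes SequenceDataset as defined above
# is in scope; the image directory path is a hypothetical placeholder, and
# batch_size=1 is used because image chunks are not guaranteed to share a shape.
from torch.utils.data import DataLoader

def inspect_images(image_directory):
    dataset = SequenceDataset(image_directory)
    loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    for image, label_base, label_rle, position, contig, start, end, chunk_id, path in loader:
        # print a one-line summary for the first image chunk, then stop
        print(contig[0], int(start), int(end), tuple(image.shape))
        break

# inspect_images("/path/to/marginpolish_images/")  # hypothetical path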
the-stack_106_30899
from dataclasses import dataclass from dataclasses import field import os import pickle from typing import ( Dict, Optional, Mapping, Callable, Any, List, Type, Union ) import time import dbt.exceptions import dbt.tracking import dbt.flags as flags from dbt.adapters.factory import ( get_adapter, get_relation_class_by_name, get_adapter_package_names, ) from dbt.helper_types import PathSet from dbt.logger import GLOBAL_LOGGER as logger, DbtProcessState from dbt.node_types import NodeType from dbt.clients.jinja import get_rendered, statically_extract_macro_calls from dbt.clients.system import make_directory from dbt.config import Project, RuntimeConfig from dbt.context.docs import generate_runtime_docs from dbt.context.macro_resolver import MacroResolver from dbt.context.base import generate_base_context from dbt.contracts.files import FileHash, ParseFileType from dbt.parser.read_files import read_files, load_source_file from dbt.contracts.graph.compiled import ManifestNode from dbt.contracts.graph.manifest import ( Manifest, Disabled, MacroManifest, ManifestStateCheck ) from dbt.contracts.graph.parsed import ( ParsedSourceDefinition, ParsedNode, ParsedMacro, ColumnInfo, ParsedExposure ) from dbt.contracts.util import Writable from dbt.exceptions import ( ref_target_not_found, get_target_not_found_or_disabled_msg, source_target_not_found, get_source_not_found_or_disabled_msg, warn_or_error, ) from dbt.parser.base import BaseParser, Parser from dbt.parser.analysis import AnalysisParser from dbt.parser.data_test import DataTestParser from dbt.parser.docs import DocumentationParser from dbt.parser.hooks import HookParser from dbt.parser.macros import MacroParser from dbt.parser.models import ModelParser from dbt.parser.schemas import SchemaParser from dbt.parser.search import FileBlock from dbt.parser.seeds import SeedParser from dbt.parser.snapshots import SnapshotParser from dbt.parser.sources import patch_sources from dbt.ui import warning_tag from dbt.version import __version__ from dbt.dataclass_schema import dbtClassMixin PARTIAL_PARSE_FILE_NAME = 'partial_parse.pickle' PARSING_STATE = DbtProcessState('parsing') DEFAULT_PARTIAL_PARSE = False # Part of saved performance info @dataclass class ParserInfo(dbtClassMixin): parser: str elapsed: float path_count: int = 0 # Part of saved performance info @dataclass class ProjectLoaderInfo(dbtClassMixin): project_name: str elapsed: float parsers: List[ParserInfo] = field(default_factory=list) path_count: int = 0 # Part of saved performance info @dataclass class ManifestLoaderInfo(dbtClassMixin, Writable): path_count: int = 0 is_partial_parse_enabled: Optional[bool] = None read_files_elapsed: Optional[float] = None load_macros_elapsed: Optional[float] = None parse_project_elapsed: Optional[float] = None patch_sources_elapsed: Optional[float] = None process_manifest_elapsed: Optional[float] = None load_all_elapsed: Optional[float] = None projects: List[ProjectLoaderInfo] = field(default_factory=list) _project_index: Dict[str, ProjectLoaderInfo] = field(default_factory=dict) def __post_serialize__(self, dct): del dct['_project_index'] return dct # The ManifestLoader loads the manifest. The standard way to use the # ManifestLoader is using the 'get_full_manifest' class method, but # many tests use abbreviated processes. 
class ManifestLoader: def __init__( self, root_project: RuntimeConfig, all_projects: Mapping[str, Project], macro_hook: Optional[Callable[[Manifest], Any]] = None, ) -> None: self.root_project: RuntimeConfig = root_project self.all_projects: Mapping[str, Project] = all_projects self.manifest: Manifest = Manifest({}, {}, {}, {}, {}, {}, [], {}) self.manifest.metadata = root_project.get_metadata() # This is a MacroQueryStringSetter callable, which is called # later after we set the MacroManifest in the adapter. It sets # up the query headers. self.macro_hook: Callable[[Manifest], Any] if macro_hook is None: self.macro_hook = lambda m: None else: self.macro_hook = macro_hook # State check determines whether the old_manifest and the current # manifest match well enough to do partial parsing self.manifest.state_check = self.build_manifest_state_check() self._perf_info = self.build_perf_info() # This is a saved manifest from a previous run that's used for partial parsing self.old_manifest: Optional[Manifest] = self.read_saved_manifest() # This is the method that builds a complete manifest. We sometimes # use an abbreviated process in tests. @classmethod def get_full_manifest( cls, config: RuntimeConfig, *, reset: bool = False, ) -> Manifest: adapter = get_adapter(config) # type: ignore # reset is set in a TaskManager load_manifest call, since # the config and adapter may be persistent. if reset: config.clear_dependencies() adapter.clear_macro_manifest() macro_hook = adapter.connections.set_query_header with PARSING_STATE: # set up logbook.Processor for parsing # Start performance counting start_load_all = time.perf_counter() projects = config.load_dependencies() loader = ManifestLoader(config, projects, macro_hook) loader.load() # The goal is to move partial parse writing to after update_manifest loader.write_manifest_for_partial_parse() manifest = loader.update_manifest() # Move write_manifest_for_partial_parse here _check_manifest(manifest, config) manifest.build_flat_graph() # This needs to happen after loading from a partial parse, # so that the adapter has the query headers from the macro_hook. loader.save_macros_to_adapter(adapter) # Save performance info loader._perf_info.load_all_elapsed = ( time.perf_counter() - start_load_all ) loader.track_project_load() return manifest # This is where the main action happens def load(self): if self.old_manifest is not None: logger.debug('Got an acceptable saved parse result') # Read files creates a dictionary of projects to a dictionary # of parsers to lists of file strings. The file strings are # used to get the SourceFiles from the manifest files. # In the future the loaded files will be used to control # partial parsing, but right now we're just moving the # file loading out of the individual parsers and doing it # all at once. 
start_read_files = time.perf_counter() project_parser_files = {} for project in self.all_projects.values(): read_files(project, self.manifest.files, project_parser_files) self._perf_info.read_files_elapsed = (time.perf_counter() - start_read_files) # We need to parse the macros first, so they're resolvable when # the other files are loaded start_load_macros = time.perf_counter() for project in self.all_projects.values(): parser = MacroParser(project, self.manifest) parser_files = project_parser_files[project.project_name] for search_key in parser_files['MacroParser']: block = FileBlock(self.manifest.files[search_key]) self.parse_with_cache(block, parser) self.reparse_macros() # This is where a loop over self.manifest.macros should be performed # to set the 'depends_on' information from static rendering. self._perf_info.load_macros_elapsed = (time.perf_counter() - start_load_macros) # Load the rest of the files except for schema yaml files parser_types: List[Type[Parser]] = [ ModelParser, SnapshotParser, AnalysisParser, DataTestParser, SeedParser, DocumentationParser, HookParser] for project in self.all_projects.values(): if project.project_name not in project_parser_files: continue self.parse_project(project, project_parser_files[project.project_name], parser_types) # Load yaml files parser_types = [SchemaParser] for project in self.all_projects.values(): if project.project_name not in project_parser_files: continue self.parse_project(project, project_parser_files[project.project_name], parser_types) # Parse every file in this project, except macros (already done) def parse_project( self, project: Project, parser_files, parser_types: List[Type[Parser]], ) -> None: project_loader_info = self._perf_info._project_index[project.project_name] start_timer = time.perf_counter() total_path_count = 0 # Loop through parsers with loaded files. 
for parser_cls in parser_types: parser_name = parser_cls.__name__ # No point in creating a parser if we don't have files for it if parser_name not in parser_files or not parser_files[parser_name]: continue # Initialize timing info parser_path_count = 0 parser_start_timer = time.perf_counter() # Parse the project files for this parser parser: Parser = parser_cls(project, self.manifest, self.root_project) for file_id in parser_files[parser_name]: block = FileBlock(self.manifest.files[file_id]) if isinstance(parser, SchemaParser): dct = block.file.dict_from_yaml parser.parse_file(block, dct=dct) else: parser.parse_file(block) parser_path_count = parser_path_count + 1 # Save timing info project_loader_info.parsers.append(ParserInfo( parser=parser.resource_type, path_count=parser_path_count, elapsed=time.perf_counter() - parser_start_timer )) total_path_count = total_path_count + parser_path_count # HookParser doesn't run from loaded files, just dbt_project.yml, # so do separately if HookParser in parser_types: hook_parser = HookParser(project, self.manifest, self.root_project) path = hook_parser.get_path() file_block = FileBlock( load_source_file(path, ParseFileType.Hook, project.project_name) ) hook_parser.parse_file(file_block) # Store the performance info elapsed = time.perf_counter() - start_timer project_loader_info.path_count = project_loader_info.path_count + total_path_count project_loader_info.elapsed = project_loader_info.elapsed + elapsed self._perf_info.path_count = ( self._perf_info.path_count + total_path_count ) # Loop through macros in the manifest and statically parse # the 'macro_sql' to find depends_on.macros def reparse_macros(self): internal_package_names = get_adapter_package_names( self.root_project.credentials.type ) macro_resolver = MacroResolver( self.manifest.macros, self.root_project.project_name, internal_package_names ) base_ctx = generate_base_context({}) for macro in self.manifest.macros.values(): possible_macro_calls = statically_extract_macro_calls(macro.macro_sql, base_ctx) for macro_name in possible_macro_calls: # adapter.dispatch calls can generate a call with the same name as the macro # it ought to be an adapter prefix (postgres_) or default_ if macro_name == macro.name: continue package_name = macro.package_name if '.' in macro_name: package_name, macro_name = macro_name.split('.') dep_macro_id = macro_resolver.get_macro_id(package_name, macro_name) if dep_macro_id: macro.depends_on.add_macro(dep_macro_id) # will check for dupes # This is where we use the partial-parse state from the # pickle file (if it exists) def parse_with_cache( self, block: FileBlock, parser: BaseParser, ) -> None: # _get_cached actually copies the nodes, etc, that were # generated from the file to the results, in 'sanitized_update' if not self._get_cached(block, parser): parser.parse_file(block) # check if we have a stored parse file, then check if # file checksums are the same or not and either return # the old ... stuff or return false (not cached) def _get_cached( self, block: FileBlock, parser: BaseParser, ) -> bool: # TODO: handle multiple parsers w/ same files, by # tracking parser type vs node type? Or tracking actual # parser type during parsing? if self.old_manifest is None: return False # The 'has_file' method is where we check to see if # the checksum of the old file is the same as the new # file. If the checksum is different, 'has_file' returns # false. If it's the same, the file and the things that # were generated from it are used. 
if self.old_manifest.has_file(block.file): return self.manifest.sanitized_update( block.file, self.old_manifest, parser.resource_type ) return False def write_manifest_for_partial_parse(self): path = os.path.join(self.root_project.target_path, PARTIAL_PARSE_FILE_NAME) make_directory(self.root_project.target_path) with open(path, 'wb') as fp: pickle.dump(self.manifest, fp) def matching_parse_results(self, manifest: Manifest) -> bool: """Compare the global hashes of the read-in parse results' values to the known ones, and return if it is ok to re-use the results. """ try: if manifest.metadata.dbt_version != __version__: logger.debug( 'dbt version mismatch: {} != {}, cache invalidated' .format(manifest.metadata.dbt_version, __version__) ) return False except AttributeError as exc: logger.debug(f"malformed result file, cache invalidated: {exc}") return False valid = True if not self.manifest.state_check or not manifest.state_check: return False if self.manifest.state_check.vars_hash != manifest.state_check.vars_hash: logger.debug('vars hash mismatch, cache invalidated') valid = False if self.manifest.state_check.profile_hash != manifest.state_check.profile_hash: logger.debug('profile hash mismatch, cache invalidated') valid = False missing_keys = { k for k in self.manifest.state_check.project_hashes if k not in manifest.state_check.project_hashes } if missing_keys: logger.debug( 'project hash mismatch: values missing, cache invalidated: {}' .format(missing_keys) ) valid = False for key, new_value in self.manifest.state_check.project_hashes.items(): if key in manifest.state_check.project_hashes: old_value = manifest.state_check.project_hashes[key] if new_value != old_value: logger.debug( 'For key {}, hash mismatch ({} -> {}), cache ' 'invalidated' .format(key, old_value, new_value) ) valid = False return valid def _partial_parse_enabled(self): # if the CLI is set, follow that if flags.PARTIAL_PARSE is not None: return flags.PARTIAL_PARSE # if the config is set, follow that elif self.root_project.config.partial_parse is not None: return self.root_project.config.partial_parse else: return DEFAULT_PARTIAL_PARSE def read_saved_manifest(self) -> Optional[Manifest]: if not self._partial_parse_enabled(): logger.debug('Partial parsing not enabled') return None path = os.path.join(self.root_project.target_path, PARTIAL_PARSE_FILE_NAME) if os.path.exists(path): try: with open(path, 'rb') as fp: manifest: Manifest = pickle.load(fp) # keep this check inside the try/except in case something about # the file has changed in weird ways, perhaps due to being a # different version of dbt if self.matching_parse_results(manifest): return manifest except Exception as exc: logger.debug( 'Failed to load parsed file from disk at {}: {}' .format(path, exc), exc_info=True ) return None # This find the sources, refs, and docs and resolves them # for nodes and exposures def process_manifest(self): project_name = self.root_project.project_name process_sources(self.manifest, project_name) process_refs(self.manifest, project_name) process_docs(self.manifest, self.root_project) def update_manifest(self) -> Manifest: start_patch = time.perf_counter() # patch_sources converts the UnparsedSourceDefinitions in the # Manifest.sources to ParsedSourceDefinition via 'patch_source' # in SourcePatcher sources = patch_sources(self.root_project, self.manifest) self.manifest.sources = sources # ParseResults had a 'disabled' attribute which was a dictionary # which is now named '_disabled'. 
This used to copy from # ParseResults to the Manifest. Can this be normalized so # there's only one disabled? disabled = [] for value in self.manifest._disabled.values(): disabled.extend(value) self.manifest.disabled = disabled self._perf_info.patch_sources_elapsed = ( time.perf_counter() - start_patch ) self.manifest.selectors = self.root_project.manifest_selectors # do the node and macro patches self.manifest.patch_nodes() self.manifest.patch_macros() # process_manifest updates the refs, sources, and docs start_process = time.perf_counter() self.process_manifest() self._perf_info.process_manifest_elapsed = ( time.perf_counter() - start_process ) return self.manifest def build_perf_info(self): mli = ManifestLoaderInfo( is_partial_parse_enabled=self._partial_parse_enabled() ) for project in self.all_projects.values(): project_info = ProjectLoaderInfo( project_name=project.project_name, path_count=0, elapsed=0, ) mli.projects.append(project_info) mli._project_index[project.project_name] = project_info return mli # TODO: this should be calculated per-file based on the vars() calls made in # parsing, so changing one var doesn't invalidate everything. also there should # be something like that for env_var - currently changing env_vars in way that # impact graph selection or configs will result in weird test failures. # finally, we should hash the actual profile used, not just root project + # profiles.yml + relevant args. While sufficient, it is definitely overkill. def build_manifest_state_check(self): config = self.root_project all_projects = self.all_projects # if any of these change, we need to reject the parser vars_hash = FileHash.from_contents( '\x00'.join([ getattr(config.args, 'vars', '{}') or '{}', getattr(config.args, 'profile', '') or '', getattr(config.args, 'target', '') or '', __version__ ]) ) profile_path = os.path.join(config.args.profiles_dir, 'profiles.yml') with open(profile_path) as fp: profile_hash = FileHash.from_contents(fp.read()) project_hashes = {} for name, project in all_projects.items(): path = os.path.join(project.project_root, 'dbt_project.yml') with open(path) as fp: project_hashes[name] = FileHash.from_contents(fp.read()) state_check = ManifestStateCheck( vars_hash=vars_hash, profile_hash=profile_hash, project_hashes=project_hashes, ) return state_check def save_macros_to_adapter(self, adapter): macro_manifest = MacroManifest(self.manifest.macros) adapter._macro_manifest_lazy = macro_manifest # This executes the callable macro_hook and sets the # query headers self.macro_hook(macro_manifest) # This creates a MacroManifest which contains the macros in # the adapter. Only called by the load_macros call from the # adapter. def create_macro_manifest(self): for project in self.all_projects.values(): # what is the manifest passed in actually used for? macro_parser = MacroParser(project, self.manifest) for path in macro_parser.get_paths(): source_file = load_source_file( path, ParseFileType.Macro, project.project_name) block = FileBlock(source_file) # This does not add the file to the manifest.files, # but that shouldn't be necessary here. self.parse_with_cache(block, macro_parser) macro_manifest = MacroManifest(self.manifest.macros) return macro_manifest # This is called by the adapter code only, to create the # MacroManifest that's stored in the adapter. # 'get_full_manifest' uses a persistent ManifestLoader while this # creates a temporary ManifestLoader and throws it away. # Not sure when this would actually get used except in tests. 
# The ManifestLoader loads macros with other files, then copies # into the adapter MacroManifest. @classmethod def load_macros( cls, root_config: RuntimeConfig, macro_hook: Callable[[Manifest], Any], ) -> Manifest: with PARSING_STATE: projects = root_config.load_dependencies() # This creates a loader object, including result, # and then throws it away, returning only the # manifest loader = cls(root_config, projects, macro_hook) macro_manifest = loader.create_macro_manifest() return macro_manifest # Create tracking event for saving performance info def track_project_load(self): invocation_id = dbt.tracking.active_user.invocation_id dbt.tracking.track_project_load({ "invocation_id": invocation_id, "project_id": self.root_project.hashed_name(), "path_count": self._perf_info.path_count, "read_files_elapsed": self._perf_info.read_files_elapsed, "load_macros_elapsed": self._perf_info.load_macros_elapsed, "parse_project_elapsed": self._perf_info.parse_project_elapsed, "patch_sources_elapsed": self._perf_info.patch_sources_elapsed, "process_manifest_elapsed": ( self._perf_info.process_manifest_elapsed ), "load_all_elapsed": self._perf_info.load_all_elapsed, "is_partial_parse_enabled": ( self._perf_info.is_partial_parse_enabled ), }) def invalid_ref_fail_unless_test(node, target_model_name, target_model_package, disabled): if node.resource_type == NodeType.Test: msg = get_target_not_found_or_disabled_msg( node, target_model_name, target_model_package, disabled ) if disabled: logger.debug(warning_tag(msg)) else: warn_or_error( msg, log_fmt=warning_tag('{}') ) else: ref_target_not_found( node, target_model_name, target_model_package, disabled=disabled, ) def invalid_source_fail_unless_test( node, target_name, target_table_name, disabled ): if node.resource_type == NodeType.Test: msg = get_source_not_found_or_disabled_msg( node, target_name, target_table_name, disabled ) if disabled: logger.debug(warning_tag(msg)) else: warn_or_error( msg, log_fmt=warning_tag('{}') ) else: source_target_not_found( node, target_name, target_table_name, disabled=disabled ) def _check_resource_uniqueness( manifest: Manifest, config: RuntimeConfig, ) -> None: names_resources: Dict[str, ManifestNode] = {} alias_resources: Dict[str, ManifestNode] = {} for resource, node in manifest.nodes.items(): if node.resource_type not in NodeType.refable(): continue # appease mypy - sources aren't refable! 
assert not isinstance(node, ParsedSourceDefinition) name = node.name # the full node name is really defined by the adapter's relation relation_cls = get_relation_class_by_name(config.credentials.type) relation = relation_cls.create_from(config=config, node=node) full_node_name = str(relation) existing_node = names_resources.get(name) if existing_node is not None: dbt.exceptions.raise_duplicate_resource_name( existing_node, node ) existing_alias = alias_resources.get(full_node_name) if existing_alias is not None: dbt.exceptions.raise_ambiguous_alias( existing_alias, node, full_node_name ) names_resources[name] = node alias_resources[full_node_name] = node def _warn_for_unused_resource_config_paths( manifest: Manifest, config: RuntimeConfig ) -> None: resource_fqns: Mapping[str, PathSet] = manifest.get_resource_fqns() disabled_fqns: PathSet = frozenset(tuple(n.fqn) for n in manifest.disabled) config.warn_for_unused_resource_config_paths(resource_fqns, disabled_fqns) def _check_manifest(manifest: Manifest, config: RuntimeConfig) -> None: _check_resource_uniqueness(manifest, config) _warn_for_unused_resource_config_paths(manifest, config) # This is just used in test cases def _load_projects(config, paths): for path in paths: try: project = config.new_project(path) except dbt.exceptions.DbtProjectError as e: raise dbt.exceptions.DbtProjectError( 'Failed to read package at {}: {}' .format(path, e) ) else: yield project.project_name, project def _get_node_column(node, column_name): """Given a ParsedNode, add some fields that might be missing. Return a reference to the dict that refers to the given column, creating it if it doesn't yet exist. """ if column_name in node.columns: column = node.columns[column_name] else: node.columns[column_name] = ColumnInfo(name=column_name) node.columns[column_name] = column return column DocsContextCallback = Callable[ [Union[ParsedNode, ParsedSourceDefinition]], Dict[str, Any] ] # node and column descriptions def _process_docs_for_node( context: Dict[str, Any], node: ManifestNode, ): node.description = get_rendered(node.description, context) for column_name, column in node.columns.items(): column.description = get_rendered(column.description, context) # source and table descriptions, column descriptions def _process_docs_for_source( context: Dict[str, Any], source: ParsedSourceDefinition, ): table_description = source.description source_description = source.source_description table_description = get_rendered(table_description, context) source_description = get_rendered(source_description, context) source.description = table_description source.source_description = source_description for column in source.columns.values(): column_desc = column.description column_desc = get_rendered(column_desc, context) column.description = column_desc # macro argument descriptions def _process_docs_for_macro( context: Dict[str, Any], macro: ParsedMacro ) -> None: macro.description = get_rendered(macro.description, context) for arg in macro.arguments: arg.description = get_rendered(arg.description, context) # exposure descriptions def _process_docs_for_exposure( context: Dict[str, Any], exposure: ParsedExposure ) -> None: exposure.description = get_rendered(exposure.description, context) # nodes: node and column descriptions # sources: source and table descriptions, column descriptions # macros: macro argument descriptions # exposures: exposure descriptions def process_docs(manifest: Manifest, config: RuntimeConfig): for node in manifest.nodes.values(): ctx = generate_runtime_docs( 
config, node, manifest, config.project_name, ) _process_docs_for_node(ctx, node) for source in manifest.sources.values(): ctx = generate_runtime_docs( config, source, manifest, config.project_name, ) _process_docs_for_source(ctx, source) for macro in manifest.macros.values(): ctx = generate_runtime_docs( config, macro, manifest, config.project_name, ) _process_docs_for_macro(ctx, macro) for exposure in manifest.exposures.values(): ctx = generate_runtime_docs( config, exposure, manifest, config.project_name, ) _process_docs_for_exposure(ctx, exposure) def _process_refs_for_exposure( manifest: Manifest, current_project: str, exposure: ParsedExposure ): """Given a manifest and a exposure in that manifest, process its refs""" for ref in exposure.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None target_model_name: str target_model_package: Optional[str] = None if len(ref) == 1: target_model_name = ref[0] elif len(ref) == 2: target_model_package, target_model_name = ref else: raise dbt.exceptions.InternalException( f'Refs should always be 1 or 2 arguments - got {len(ref)}' ) target_model = manifest.resolve_ref( target_model_name, target_model_package, current_project, exposure.package_name, ) if target_model is None or isinstance(target_model, Disabled): # This may raise. Even if it doesn't, we don't want to add # this exposure to the graph b/c there is no destination exposure invalid_ref_fail_unless_test( exposure, target_model_name, target_model_package, disabled=(isinstance(target_model, Disabled)) ) continue target_model_id = target_model.unique_id exposure.depends_on.nodes.append(target_model_id) manifest.update_exposure(exposure) def _process_refs_for_node( manifest: Manifest, current_project: str, node: ManifestNode ): """Given a manifest and a node in that manifest, process its refs""" for ref in node.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None target_model_name: str target_model_package: Optional[str] = None if len(ref) == 1: target_model_name = ref[0] elif len(ref) == 2: target_model_package, target_model_name = ref else: raise dbt.exceptions.InternalException( f'Refs should always be 1 or 2 arguments - got {len(ref)}' ) target_model = manifest.resolve_ref( target_model_name, target_model_package, current_project, node.package_name, ) if target_model is None or isinstance(target_model, Disabled): # This may raise. Even if it doesn't, we don't want to add # this node to the graph b/c there is no destination node node.config.enabled = False invalid_ref_fail_unless_test( node, target_model_name, target_model_package, disabled=(isinstance(target_model, Disabled)) ) continue target_model_id = target_model.unique_id node.depends_on.nodes.append(target_model_id) # TODO: I think this is extraneous, node should already be the same # as manifest.nodes[node.unique_id] (we're mutating node here, not # making a new one) # Q: could we stop doing this? 
manifest.update_node(node) # Takes references in 'refs' array of nodes and exposures, finds the target # node, and updates 'depends_on.nodes' with the unique id def process_refs(manifest: Manifest, current_project: str): for node in manifest.nodes.values(): _process_refs_for_node(manifest, current_project, node) for exposure in manifest.exposures.values(): _process_refs_for_exposure(manifest, current_project, exposure) return manifest def _process_sources_for_exposure( manifest: Manifest, current_project: str, exposure: ParsedExposure ): target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None for source_name, table_name in exposure.sources: target_source = manifest.resolve_source( source_name, table_name, current_project, exposure.package_name, ) if target_source is None or isinstance(target_source, Disabled): invalid_source_fail_unless_test( exposure, source_name, table_name, disabled=(isinstance(target_source, Disabled)) ) continue target_source_id = target_source.unique_id exposure.depends_on.nodes.append(target_source_id) manifest.update_exposure(exposure) def _process_sources_for_node( manifest: Manifest, current_project: str, node: ManifestNode ): target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None for source_name, table_name in node.sources: target_source = manifest.resolve_source( source_name, table_name, current_project, node.package_name, ) if target_source is None or isinstance(target_source, Disabled): # this folows the same pattern as refs node.config.enabled = False invalid_source_fail_unless_test( node, source_name, table_name, disabled=(isinstance(target_source, Disabled)) ) continue target_source_id = target_source.unique_id node.depends_on.nodes.append(target_source_id) manifest.update_node(node) # Loops through all nodes and exposures, for each element in # 'sources' array finds the source node and updates the # 'depends_on.nodes' array with the unique id def process_sources(manifest: Manifest, current_project: str): for node in manifest.nodes.values(): if node.resource_type == NodeType.Source: continue assert not isinstance(node, ParsedSourceDefinition) _process_sources_for_node(manifest, current_project, node) for exposure in manifest.exposures.values(): _process_sources_for_exposure(manifest, current_project, exposure) return manifest # This is called in task.rpc.sql_commands when a "dynamic" node is # created in the manifest, in 'add_refs' def process_macro( config: RuntimeConfig, manifest: Manifest, macro: ParsedMacro ) -> None: ctx = generate_runtime_docs( config, macro, manifest, config.project_name, ) _process_docs_for_macro(ctx, macro) # This is called in task.rpc.sql_commands when a "dynamic" node is # created in the manifest, in 'add_refs' def process_node( config: RuntimeConfig, manifest: Manifest, node: ManifestNode ): _process_sources_for_node( manifest, config.project_name, node ) _process_refs_for_node(manifest, config.project_name, node) ctx = generate_runtime_docs(config, node, manifest, config.project_name) _process_docs_for_node(ctx, node)
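# --- Illustrative usage sketch (not part of the original module above) ---
# The usual entry point into the loader defined above is the get_full_manifest
# classmethod, roughly as a dbt task would call it. Construction of the args object
# is assumed to happen in dbt's CLI layer and is not shown; RuntimeConfig is already
# imported by this module.
def load_manifest_for_task(args):
    config = RuntimeConfig.from_args(args)            # resolves profile + project
    manifest = ManifestLoader.get_full_manifest(config, reset=False)
    # The returned Manifest has refs/sources/docs resolved and the flat graph
    # built, so graph selection and compilation can run against it directly.
    return manifest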
the-stack_106_30900
""" @author: Michael Guarino """ import numpy as np np.set_printoptions(threshold=np.nan) import tensorflow as tf from tensorflow.contrib import rnn import tensorflow.contrib.layers as layers class HAN: def __init__(self, max_seq_len, max_sent_len, num_classes, vocab_size, embedding_size, max_grad_norm, dropout_keep_proba, learning_rate): # Parameters self.learning_rate = learning_rate self.vocab_size = vocab_size self.num_classes = num_classes self.max_seq_len = max_seq_len self.embedding_size = embedding_size self.word_encoder_num_hidden = max_seq_len self.word_output_size = max_seq_len self.sentence_encoder_num_hidden = max_sent_len self.sentence_output_size = max_sent_len self.max_grad_norm = max_grad_norm self.dropout_keep_proba = dropout_keep_proba # tf graph input self.input_x = tf.placeholder(shape=[None, None, None], dtype=tf.int32, name="input_x") self.input_y = tf.placeholder(shape=[None, self.num_classes], dtype=tf.int32, name="input_y") self.word_lengths = tf.placeholder(shape=[None, None], dtype=tf.int32, name="word_lengths") self.sentence_lengths = tf.placeholder(shape=[None, ], dtype=tf.int32, name="sentence_lengths") self.is_training = tf.placeholder(dtype=tf.bool, name="is_training") # input_x dims (self.document_size, self.sentence_size, self.word_size) = tf.unstack(tf.shape(self.input_x)) with tf.device("/gpu:0"), tf.name_scope("embedding_layer"): w = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), dtype=tf.float32, name="w") # TODO check if this needs to be marked as untrainable self.input_x_embedded = tf.nn.embedding_lookup(w, self.input_x) # reshape input_x after embedding self.input_x_embedded = tf.reshape(self.input_x_embedded, [self.document_size * self.sentence_size, self.word_size, self.embedding_size]) self.input_x_embedded_lengths = tf.reshape(self.word_lengths, [self.document_size * self.sentence_size]) with tf.variable_scope("word_level"): self.word_encoder_outputs = self.bidirectional_RNN(num_hidden=self.word_encoder_num_hidden, inputs=self.input_x_embedded) word_level_output = self.attention(inputs=self.word_encoder_outputs, output_size=self.word_output_size) with tf.variable_scope("dropout"): print('self.is_training: {}'.format(self.is_training)) word_level_output = layers.dropout(word_level_output, keep_prob=self.dropout_keep_proba, is_training=self.is_training) # reshape word_level output self.sentence_encoder_inputs = tf.reshape(word_level_output, [self.document_size, self.sentence_size, self.word_output_size]) with tf.variable_scope("sentence_level"): self.sentence_encoder_outputs = self.bidirectional_RNN(num_hidden=self.sentence_encoder_num_hidden, inputs=self.sentence_encoder_inputs) sentence_level_output = self.attention(inputs=self.sentence_encoder_outputs, output_size=self.sentence_output_size) with tf.variable_scope("dropout"): sentence_level_output = layers.dropout(sentence_level_output, keep_prob=self.dropout_keep_proba, is_training=self.is_training) # Final model prediction with tf.variable_scope("classifier_output"): self.logits = layers.fully_connected(sentence_level_output, self.num_classes, activation_fn=None) # trainable=self.is_training) self.predictions = tf.argmax(self.logits, axis=1, name="predictions") # Calculate mean cross-entropy loss with tf.variable_scope("loss"): losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits) self.loss = tf.reduce_mean(losses) tf.summary.scalar("Loss", self.loss) # Accuracy with tf.variable_scope("accuracy"): correct_predictions = 
tf.equal(self.predictions, tf.argmax(self.input_y, axis=1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") tf.summary.scalar("Accuracy", self.accuracy) def bidirectional_RNN(self, num_hidden, inputs): """ desc: create bidirectional rnn layer args: num_hidden: number of hidden units inputs: input word or sentence returns: concatenated encoder and decoder outputs """ with tf.name_scope("bidirectional_RNN"): encoder_fw_cell = rnn.GRUCell(num_hidden) encoder_bw_cell = rnn.GRUCell(num_hidden) ((encoder_fw_outputs, encoder_bw_outputs), (_, _)) = tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_fw_cell, cell_bw=encoder_bw_cell, inputs=inputs, dtype=tf.float32, time_major=True) encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2) return encoder_outputs # end def attention(self, inputs, output_size): """ desc: create attention mechanism args: inputs: input which is sentence or document level output from bidirectional rnn layer output_size: specify the dimensions of the output returns: output from attention distribution """ with tf.variable_scope("attention"): attention_context_vector_uw = tf.get_variable(name="attention_context_vector", shape=[output_size], # trainable=self.is_training, initializer=layers.xavier_initializer(), dtype=tf.float32) input_projection_u = layers.fully_connected(inputs, output_size, # trainable=self.is_training, activation_fn=tf.tanh) vector_attn = tf.reduce_sum(tf.multiply(input_projection_u, attention_context_vector_uw), axis=2, keep_dims=True) attention_weights = tf.nn.softmax(vector_attn, dim=1) weighted_projection = tf.multiply(input_projection_u, attention_weights) outputs = tf.reduce_sum(weighted_projection, axis=1) return outputs # end # end
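# --- Illustrative sketch (not part of the original file above) ---
# How the 3-D integer batch and the two length arrays expected by HAN's placeholders
# could be assembled from tokenized documents. The toy documents and vocabulary are
# placeholders; only the array shapes mirror the placeholders defined above
# (input_x: [docs, sentences, words], word_lengths: [docs, sentences],
# sentence_lengths: [docs]).
import numpy as np

docs = [[["the", "movie", "was", "great"], ["loved", "it"]],
        [["terrible", "plot"]]]
vocab = {w: i + 1 for i, w in enumerate(sorted({w for d in docs for s in d for w in s}))}

max_sent_len = max(len(d) for d in docs)              # sentences per document
max_seq_len = max(len(s) for d in docs for s in d)    # words per sentence

input_x = np.zeros((len(docs), max_sent_len, max_seq_len), dtype=np.int32)
word_lengths = np.zeros((len(docs), max_sent_len), dtype=np.int32)
sentence_lengths = np.array([len(d) for d in docs], dtype=np.int32)

for i, d in enumerate(docs):
    for j, s in enumerate(d):
        input_x[i, j, :len(s)] = [vocab[w] for w in s]
        word_lengths[i, j] = len(s)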
the-stack_106_30904
""" Anisotropic orthogonal interpolation """ import numpy as np import pandas as pd import matplotlib.pyplot as plt from .. import utils from . import linestring_utils, interp_4d, wkb2shp, field from ..grid import unstructured_grid from ..model import unstructured_diffuser import stompy.grid.quad_laplacian as quads from scipy import sparse from shapely import geometry def poly_to_grid(poly,nom_res): xy=np.array(poly.exterior) gen=unstructured_grid.UnstructuredGrid(max_sides=len(xy)) nodes,edges=gen.add_linestring(xy, closed=True) gen.add_cell(nodes=nodes[:-1]) # drop repeated node gen.orient_cells() # polygon may be reversed sqg=quads.SimpleQuadGen(gen,nom_res=nom_res,cells=[0],execute=False) sqg.execute() return sqg.qgs[0].g_final def simple_quad_gen_to_grid(sqg,aniso=None): """ Combine the patches in the given instance of SimpleQuadGen, assigning 'K' to the edges. sqg.gen.cells should have a field 'anisotropy' which specifies how much smaller the off-axis diffusion coefficient is than the on-axis diffusion coefficient. Returns a grid with 'K' defined on edges. """ joined=None for qg in sqg.qgs: grid=qg.g_final.copy() Klong=1.0 try: Kshort=qg.gen.cells['anisotropy'][0] except ValueError: Kshort=aniso if len(qg.right_i)>len(qg.right_j): j_long=grid.edges['orient']==0 else: j_long=grid.edges['orient']==90 K=np.where( j_long, Klong, Kshort) grid.add_edge_field('K',K,on_exists='overwrite') grid.add_edge_field('long',j_long,on_exists='overwrite') grid.orient_cells() if joined: node_map,edge_map,cell_map=joined.add_grid(grid,merge_nodes='auto', tol=0.01) joined.edges['K'][edge_map] = 0.5*(joined.edges['K'][edge_map] + grid.edges['K']) else: joined=grid return joined class OrthoInterpolator(object): """ Given either a curvilinear grid or a boundary for a curvilinear grid, interpolate point data anisotropically and generate a raster DEM. Solves a laplacian on the nodes of the curvilinear grids. Input points are assigned to the nearest node. """ nom_res=None # resolution for new grid if a polygon is specified anisotropy=0.05 # lower values means less lateral diffusion # alpha=1.0 # lower values mean smoother results background_field=None # If True, only samples contained in the grid outline are retained. clip_samples=True # No support yet for weights. # background_weight=0.02 # tuples of (polygon,[Klon,Klat]) # Samples within the polygon are eliminated, and the given diffusivities # installed. Useful if the local point distribution is too gnarly for # anisotropy to handle overrides=() def __init__(self,region,samples=None,**kw): """ region: curvilinear UnstructuredGrid instance or shapely.Polygon suitable for automatic quad generation (simple, 4 smallest internal angles are corners) or SimpleQuadGen instance that has been executed, and optionally has a cell-field called 'anisotropy' samples: if given, a pd.DataFrame with x,y, and value background field: if given, a field.Field that can be queried to get extra data along boundaries. """ utils.set_keywords(self,kw) self.region=region if isinstance(region,unstructured_grid.UnstructuredGrid): self.grid=self.region elif isinstance(region,quads.SimpleQuadGen): # aniso is only used if sqg.gen doesn't have anisotropy self.grid=simple_quad_gen_to_grid(region,aniso=self.anisotropy) else: assert self.nom_res is not None self.grid=poly_to_grid(self.region,self.nom_res) if samples is not None: # Clipping only applies to these samples, not background # samples. Otherwise we have to worry about boundary # points falling just outside the grid boundary. 
if self.clip_samples: boundary=self.grid.boundary_polygon() sel=[boundary.contains(geometry.Point(xy)) for xy in samples[['x','y']].values] samples=samples.iloc[sel,:] self.samples=samples else: self.samples=pd.DataFrame() if self.background_field is not None: self.add_background_samples() for geom,Kxy in self.overrides: # Which samples to drop: to_drop = np.array([ geom.contains(geometry.Point(xy)) for xy in self.samples[ ['x','y'] ].values ]) j_sel=self.grid.select_edges_intersecting(geom) # Not quite right -- doesn't respect changing orientations # this is just a global orient, right? j_long=self.grid.edges['long'] self.grid.edges['K'][j_sel & j_long ]=Kxy[0] self.grid.edges['K'][j_sel & (~j_long)]=Kxy[1] self.samples=self.samples[~to_drop] self.result = self.solve() def add_background_samples(self): bnodes=self.grid.boundary_cycle() bg_samples=pd.DataFrame() xy=self.grid.nodes['x'][bnodes] bg_samples['x']=xy[:,0] bg_samples['y']=xy[:,1] bg_samples['value']=self.background_field(xy) # bg_samples['weight']=self.background_weight self.samples=pd.concat( [self.samples,bg_samples] ) def solve(self): grid=self.grid samples=self.samples dirich_idxs=[grid.select_nodes_nearest(xy) for xy in samples.loc[:,['x','y']].values] dirich_vals=samples['value'].values dirich={idx:val for idx,val in zip(dirich_idxs,dirich_vals)} # Recover the row/col indexes of the quads: # Note that the order of ijs corresponds to node_idxs, # not natural node index. node_idxs,ijs=grid.select_quad_subset(grid.nodes['x'][0]) ij_span = ijs.max(axis=0) - ijs.min(axis=0) # Arbitrarily set i to be the larger dimension. Need # this to be consistent to apply anisotropy if ij_span[0]<ij_span[1]: ijs=ijs[:,::-1] # force start at 0 ijs-= ijs.min(axis=0) nrows,ncols=1+ijs.max(axis=0) # ij max is 1 less than count # 2D index array to simplify things below patch_nodes=np.zeros( (nrows,ncols),np.int32)-1 # Map row,col back to grid.node index patch_nodes[ijs[:,0],ijs[:,1]]=node_idxs if nrows*ncols!=len(node_idxs): print("Brave new territory. Nodes are not in a dense rectangle") if not np.all(patch_nodes>=0): print("Yep, brave new territory.") # Build the matrix: N=len(node_idxs) M=sparse.dok_matrix( (N,N), np.float64) b=np.zeros(N,np.float64) # With the SQG code, there are two differences: # grid may not be dense # K is already given on edges, rather than just # by grid direction try: Kedge=grid.edges['K'] Kij=None print("Will use K from edges") except: Kedge=None Kij=[1,self.anisotropy] print("Will use K by grid orientation") # For now we only handle cases where the quad subset # includes the whole grid. # That means that the matrix here is indexed by grid.nodes, # rather than going through node_idxs # could be faster but no biggie right now. # While we iterate over nrows and ncols of patch_nodes, # the matrix itself is constructed in terms of grid.node # indexes. 
for row in range(nrows): for col in range(ncols): n=patch_nodes[row,col] if n<0: continue if n in dirich: M[n,n]=1 b[n]=dirich[n] else: # For each cardinal direction either None, # or a (node,K) tuple if row==0: node_north=-1 else: node_north=patch_nodes[row-1,col] if row==nrows-1: node_south=-1 else: node_south=patch_nodes[row+1,col] if col==0: node_west=-1 else: node_west=patch_nodes[row,col-1] if col==ncols-1: node_east=-1 else: node_east=patch_nodes[row,col+1] # mirror missing nodes for a no-flux BC if node_north<0: node_north=node_south if node_south<0: node_south=node_north if node_west<0: node_west=node_east if node_east<0: node_east=node_west nbrs=[node_north,node_south,node_west,node_east] assert np.array(nbrs).min()>=0 if Kedge is not None: Ks=[Kedge[grid.nodes_to_edge(n,nbr)] for nbr in nbrs ] else: Ks=[Kij[0], Kij[0], Kij[1], Kij[1]] M[n,n]=-np.sum(Ks) for nbr,K in zip(nbrs,Ks): M[n,nbr] = M[n,nbr] + K # This soln is indexed by node_idxs soln=sparse.linalg.spsolve(M.tocsr(),b) self.b=b # 0 for computational, and value for dirichlet. return soln def plot_result(self,num=1,**kw): plt.figure(num).clf() fig,ax=plt.subplots(num=num) ccoll=self.grid.contourf_node_values(self.result,32,cmap='jet', **kw) scat=ax.scatter( self.samples['x'] ,self.samples['y'], 40, self.samples['value'], cmap='jet', norm=ccoll.norm) ax.axis('equal') plt.colorbar(ccoll) scat.set_lw(0.5) scat.set_edgecolor('k') return fig,ax,[ccoll,scat] def field(self): fld=field.XYZField(X=self.grid.nodes['x'],F=self.result) fld._tri=self.grid.mpl_triangulation() return fld def rasterize(self,dx=None,dy=None): if dx is None: dx=self.nom_res/2 # why not? if dy is None: dy=dx fld=self.field() return fld.to_grid(self.grid.bounds(),dx=dx,dy=dy)
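# --- Illustrative usage sketch (not part of the original module above) ---
# Driving OrthoInterpolator with a simple rectangular polygon and a handful of point
# samples. The polygon extent, resolution, anisotropy, and sample values are
# placeholders, and the class is assumed to be in scope as defined above; quad
# generation for the rectangle is handled internally via poly_to_grid().
import pandas as pd
from shapely import geometry

region = geometry.Polygon([(0, 0), (100, 0), (100, 20), (0, 20)])
samples = pd.DataFrame({"x": [5.0, 50.0, 95.0],
                        "y": [10.0, 10.0, 10.0],
                        "value": [-2.0, -5.0, -3.0]})

oi = OrthoInterpolator(region, samples=samples, nom_res=2.0, anisotropy=0.05)
node_values = oi.result                 # per-node solution on the generated quad grid
dem = oi.rasterize(dx=1.0)              # gridded version of the same field
fig, ax, artists = oi.plot_result()     # quick visual check of samples vs. solution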
# Autodetecting setup.py script for building the Python extensions

import argparse
import importlib._bootstrap
import importlib.machinery
import importlib.util
import os
import re
import sys
import sysconfig
from glob import glob, escape
import _osx_support


try:
    import subprocess
    del subprocess
    SUBPROCESS_BOOTSTRAP = False
except ImportError:
    # Bootstrap Python: distutils.spawn uses subprocess to build C extensions,
    # subprocess requires C extensions built by setup.py like _posixsubprocess.
    #
    # Use _bootsubprocess which only uses the os module.
    #
    # It is dropped from sys.modules as soon as all C extension modules
    # are built.
    import _bootsubprocess
    sys.modules['subprocess'] = _bootsubprocess
    del _bootsubprocess
    SUBPROCESS_BOOTSTRAP = True


from distutils import log
from distutils.command.build_ext import build_ext
from distutils.command.build_scripts import build_scripts
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.core import Extension, setup
from distutils.errors import CCompilerError, DistutilsError
from distutils.spawn import find_executable


# Compile extensions used to test Python?
TEST_EXTENSIONS = True

# This global variable is used to hold the list of modules to be disabled.
DISABLED_MODULE_LIST = []


def get_platform():
    # Cross compiling
    if "_PYTHON_HOST_PLATFORM" in os.environ:
        return os.environ["_PYTHON_HOST_PLATFORM"]

    # Get value of sys.platform
    if sys.platform.startswith('osf1'):
        return 'osf1'
    return sys.platform


CROSS_COMPILING = ("_PYTHON_HOST_PLATFORM" in os.environ)
HOST_PLATFORM = get_platform()
MS_WINDOWS = (HOST_PLATFORM == 'win32')
CYGWIN = (HOST_PLATFORM == 'cygwin')
MACOS = (HOST_PLATFORM == 'darwin')
AIX = (HOST_PLATFORM.startswith('aix'))
VXWORKS = ('vxworks' in HOST_PLATFORM)


SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.

Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.

The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""

CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""


def run_command(cmd):
    status = os.system(cmd)
    return os.waitstatus_to_exitcode(status)


# Set common compiler and linker flags derived from the Makefile,
# reserved for building the interpreter and the stdlib modules.
# See bpo-21121 and bpo-35257 def set_compiler_flags(compiler_flags, compiler_py_flags_nodist): flags = sysconfig.get_config_var(compiler_flags) py_flags_nodist = sysconfig.get_config_var(compiler_py_flags_nodist) sysconfig.get_config_vars()[compiler_flags] = flags + ' ' + py_flags_nodist def add_dir_to_list(dirlist, dir): """Add the directory 'dir' to the list 'dirlist' (after any relative directories) if: 1) 'dir' is not already in 'dirlist' 2) 'dir' actually exists, and is a directory. """ if dir is None or not os.path.isdir(dir) or dir in dirlist: return for i, path in enumerate(dirlist): if not os.path.isabs(path): dirlist.insert(i + 1, dir) return dirlist.insert(0, dir) def sysroot_paths(make_vars, subdirs): """Get the paths of sysroot sub-directories. * make_vars: a sequence of names of variables of the Makefile where sysroot may be set. * subdirs: a sequence of names of subdirectories used as the location for headers or libraries. """ dirs = [] for var_name in make_vars: var = sysconfig.get_config_var(var_name) if var is not None: m = re.search(r'--sysroot=([^"]\S*|"[^"]+")', var) if m is not None: sysroot = m.group(1).strip('"') for subdir in subdirs: if os.path.isabs(subdir): subdir = subdir[1:] path = os.path.join(sysroot, subdir) if os.path.isdir(path): dirs.append(path) break return dirs MACOS_SDK_ROOT = None MACOS_SDK_SPECIFIED = None def macosx_sdk_root(): """Return the directory of the current macOS SDK. If no SDK was explicitly configured, call the compiler to find which include files paths are being searched by default. Use '/' if the compiler is searching /usr/include (meaning system header files are installed) or use the root of an SDK if that is being searched. (The SDK may be supplied via Xcode or via the Command Line Tools). The SDK paths used by Apple-supplied tool chains depend on the setting of various variables; see the xcrun man page for more info. Also sets MACOS_SDK_SPECIFIED for use by macosx_sdk_specified(). """ global MACOS_SDK_ROOT, MACOS_SDK_SPECIFIED # If already called, return cached result. if MACOS_SDK_ROOT: return MACOS_SDK_ROOT cflags = sysconfig.get_config_var('CFLAGS') m = re.search(r'-isysroot\s*(\S+)', cflags) if m is not None: MACOS_SDK_ROOT = m.group(1) MACOS_SDK_SPECIFIED = MACOS_SDK_ROOT != '/' else: MACOS_SDK_ROOT = _osx_support._default_sysroot( sysconfig.get_config_var('CC')) MACOS_SDK_SPECIFIED = False return MACOS_SDK_ROOT def macosx_sdk_specified(): """Returns true if an SDK was explicitly configured. True if an SDK was selected at configure time, either by specifying --enable-universalsdk=(something other than no or /) or by adding a -isysroot option to CFLAGS. In some cases, like when making decisions about macOS Tk framework paths, we need to be able to know whether the user explicitly asked to build with an SDK versus the implicit use of an SDK when header files are no longer installed on a running system by the Command Line Tools. """ global MACOS_SDK_SPECIFIED # If already called, return cached result. 
if MACOS_SDK_SPECIFIED: return MACOS_SDK_SPECIFIED # Find the sdk root and set MACOS_SDK_SPECIFIED macosx_sdk_root() return MACOS_SDK_SPECIFIED def is_macosx_sdk_path(path): """ Returns True if 'path' can be located in a macOS SDK """ return ( (path.startswith('/usr/') and not path.startswith('/usr/local')) or path.startswith('/System/Library') or path.startswith('/System/iOSSupport') ) def grep_headers_for(function, headers): for header in headers: with open(header, 'r', errors='surrogateescape') as f: if function in f.read(): return True return False def find_file(filename, std_dirs, paths): """Searches for the directory where a given file is located, and returns a possibly-empty list of additional directories, or None if the file couldn't be found at all. 'filename' is the name of a file, such as readline.h or libcrypto.a. 'std_dirs' is the list of standard system directories; if the file is found in one of them, no additional directives are needed. 'paths' is a list of additional locations to check; if the file is found in one of them, the resulting list will contain the directory. """ if MACOS: # Honor the MacOSX SDK setting when one was specified. # An SDK is a directory with the same structure as a real # system, but with only header files and libraries. sysroot = macosx_sdk_root() # Check the standard locations for dir in std_dirs: f = os.path.join(dir, filename) if MACOS and is_macosx_sdk_path(dir): f = os.path.join(sysroot, dir[1:], filename) if os.path.exists(f): return [] # Check the additional directories for dir in paths: f = os.path.join(dir, filename) if MACOS and is_macosx_sdk_path(dir): f = os.path.join(sysroot, dir[1:], filename) if os.path.exists(f): return [dir] # Not found anywhere return None def find_library_file(compiler, libname, std_dirs, paths): result = compiler.find_library_file(std_dirs + paths, libname) if result is None: return None if MACOS: sysroot = macosx_sdk_root() # Check whether the found file is in one of the standard directories dirname = os.path.dirname(result) for p in std_dirs: # Ensure path doesn't end with path separator p = p.rstrip(os.sep) if MACOS and is_macosx_sdk_path(p): # Note that, as of Xcode 7, Apple SDKs may contain textual stub # libraries with .tbd extensions rather than the normal .dylib # shared libraries installed in /. The Apple compiler tool # chain handles this transparently but it can cause problems # for programs that are being built with an SDK and searching # for specific libraries. Distutils find_library_file() now # knows to also search for and return .tbd files. But callers # of find_library_file need to keep in mind that the base filename # of the returned SDK library file might have a different extension # from that of the library file installed on the running system, # for example: # /Applications/Xcode.app/Contents/Developer/Platforms/ # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/ # usr/lib/libedit.tbd # vs # /usr/lib/libedit.dylib if os.path.join(sysroot, p[1:]) == dirname: return [ ] if p == dirname: return [ ] # Otherwise, it must have been in one of the additional directories, # so we have to figure out which one. 
for p in paths: # Ensure path doesn't end with path separator p = p.rstrip(os.sep) if MACOS and is_macosx_sdk_path(p): if os.path.join(sysroot, p[1:]) == dirname: return [ p ] if p == dirname: return [p] else: assert False, "Internal error: Path not found in std_dirs or paths" def validate_tzpath(): base_tzpath = sysconfig.get_config_var('TZPATH') if not base_tzpath: return tzpaths = base_tzpath.split(os.pathsep) bad_paths = [tzpath for tzpath in tzpaths if not os.path.isabs(tzpath)] if bad_paths: raise ValueError('TZPATH must contain only absolute paths, ' + f'found:\n{tzpaths!r}\nwith invalid paths:\n' + f'{bad_paths!r}') def find_module_file(module, dirlist): """Find a module in a set of possible folders. If it is not found return the unadorned filename""" list = find_file(module, [], dirlist) if not list: return module if len(list) > 1: log.info("WARNING: multiple copies of %s found", module) return os.path.join(list[0], module) class PyBuildExt(build_ext): def __init__(self, dist): build_ext.__init__(self, dist) self.srcdir = None self.lib_dirs = None self.inc_dirs = None self.config_h_vars = None self.failed = [] self.failed_on_import = [] self.missing = [] self.disabled_configure = [] if '-j' in os.environ.get('MAKEFLAGS', ''): self.parallel = True def add(self, ext): self.extensions.append(ext) def set_srcdir(self): self.srcdir = sysconfig.get_config_var('srcdir') if not self.srcdir: # Maybe running on Windows but not using CYGWIN? raise ValueError("No source directory; cannot proceed.") self.srcdir = os.path.abspath(self.srcdir) def remove_disabled(self): # Remove modules that are present on the disabled list extensions = [ext for ext in self.extensions if ext.name not in DISABLED_MODULE_LIST] # move ctypes to the end, it depends on other modules ext_map = dict((ext.name, i) for i, ext in enumerate(extensions)) if "_ctypes" in ext_map: ctypes = extensions.pop(ext_map["_ctypes"]) extensions.append(ctypes) self.extensions = extensions def update_sources_depends(self): # Fix up the autodetected modules, prefixing all the source files # with Modules/. moddirlist = [os.path.join(self.srcdir, 'Modules')] # Fix up the paths for scripts, too self.distribution.scripts = [os.path.join(self.srcdir, filename) for filename in self.distribution.scripts] # Python header files headers = [sysconfig.get_config_h_filename()] headers += glob(os.path.join(escape(sysconfig.get_path('include')), "*.h")) for ext in self.extensions: ext.sources = [ find_module_file(filename, moddirlist) for filename in ext.sources ] if ext.depends is not None: ext.depends = [find_module_file(filename, moddirlist) for filename in ext.depends] else: ext.depends = [] # re-compile extensions if a header file has been changed ext.depends.extend(headers) def remove_configured_extensions(self): # The sysconfig variables built by makesetup that list the already # built modules and the disabled modules as configured by the Setup # files. sysconf_built = sysconfig.get_config_var('MODBUILT_NAMES').split() sysconf_dis = sysconfig.get_config_var('MODDISABLED_NAMES').split() mods_built = [] mods_disabled = [] for ext in self.extensions: # If a module has already been built or has been disabled in the # Setup files, don't build it here. if ext.name in sysconf_built: mods_built.append(ext) if ext.name in sysconf_dis: mods_disabled.append(ext) mods_configured = mods_built + mods_disabled if mods_configured: self.extensions = [x for x in self.extensions if x not in mods_configured] # Remove the shared libraries built by a previous build. 
for ext in mods_configured: fullpath = self.get_ext_fullpath(ext.name) if os.path.exists(fullpath): os.unlink(fullpath) return (mods_built, mods_disabled) def set_compiler_executables(self): # When you run "make CC=altcc" or something similar, you really want # those environment variables passed into the setup.py phase. Here's # a small set of useful ones. compiler = os.environ.get('CC') args = {} # unfortunately, distutils doesn't let us provide separate C and C++ # compilers if compiler is not None: (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS') args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags self.compiler.set_executables(**args) def build_extensions(self): self.set_srcdir() # Detect which modules should be compiled self.detect_modules() self.remove_disabled() self.update_sources_depends() mods_built, mods_disabled = self.remove_configured_extensions() self.set_compiler_executables() build_ext.build_extensions(self) if SUBPROCESS_BOOTSTRAP: # Drop our custom subprocess module: # use the newly built subprocess module del sys.modules['subprocess'] for ext in self.extensions: self.check_extension_import(ext) self.summary(mods_built, mods_disabled) def summary(self, mods_built, mods_disabled): longest = max([len(e.name) for e in self.extensions], default=0) if self.failed or self.failed_on_import: all_failed = self.failed + self.failed_on_import longest = max(longest, max([len(name) for name in all_failed])) def print_three_column(lst): lst.sort(key=str.lower) # guarantee zip() doesn't drop anything while len(lst) % 3: lst.append("") for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]): print("%-*s %-*s %-*s" % (longest, e, longest, f, longest, g)) if self.missing: print() print("Python build finished successfully!") print("The necessary bits to build these optional modules were not " "found:") print_three_column(self.missing) print("To find the necessary bits, look in setup.py in" " detect_modules() for the module's name.") print() if mods_built: print() print("The following modules found by detect_modules() in" " setup.py, have been") print("built by the Makefile instead, as configured by the" " Setup files:") print_three_column([ext.name for ext in mods_built]) print() if mods_disabled: print() print("The following modules found by detect_modules() in" " setup.py have not") print("been built, they are *disabled* in the Setup files:") print_three_column([ext.name for ext in mods_disabled]) print() if self.disabled_configure: print() print("The following modules found by detect_modules() in" " setup.py have not") print("been built, they are *disabled* by configure:") print_three_column(self.disabled_configure) print() if self.failed: failed = self.failed[:] print() print("Failed to build these modules:") print_three_column(failed) print() if self.failed_on_import: failed = self.failed_on_import[:] print() print("Following modules built successfully" " but were removed because they could not be imported:") print_three_column(failed) print() if any('_ssl' in l for l in (self.missing, self.failed, self.failed_on_import)): print() print("Could not build the ssl module!") print("Python requires an OpenSSL 1.0.2 or 1.1 compatible " "libssl with X509_VERIFY_PARAM_set1_host().") print("LibreSSL 2.6.4 and earlier do not provide the necessary " "APIs, https://github.com/libressl-portable/portable/issues/381") print() def build_extension(self, ext): if ext.name == '_ctypes': if not self.configure_ctypes(ext): self.failed.append(ext.name) return try: 
build_ext.build_extension(self, ext) except (CCompilerError, DistutilsError) as why: self.announce('WARNING: building of extension "%s" failed: %s' % (ext.name, why)) self.failed.append(ext.name) return def check_extension_import(self, ext): # Don't try to import an extension that has failed to compile if ext.name in self.failed: self.announce( 'WARNING: skipping import check for failed build "%s"' % ext.name, level=1) return # Workaround for Mac OS X: The Carbon-based modules cannot be # reliably imported into a command-line Python if 'Carbon' in ext.extra_link_args: self.announce( 'WARNING: skipping import check for Carbon-based "%s"' % ext.name) return if MACOS and ( sys.maxsize > 2**32 and '-arch' in ext.extra_link_args): # Don't bother doing an import check when an extension was # build with an explicit '-arch' flag on OSX. That's currently # only used to build 32-bit only extensions in a 4-way # universal build and loading 32-bit code into a 64-bit # process will fail. self.announce( 'WARNING: skipping import check for "%s"' % ext.name) return # Workaround for Cygwin: Cygwin currently has fork issues when many # modules have been imported if CYGWIN: self.announce('WARNING: skipping import check for Cygwin-based "%s"' % ext.name) return ext_filename = os.path.join( self.build_lib, self.get_ext_filename(self.get_ext_fullname(ext.name))) # If the build directory didn't exist when setup.py was # started, sys.path_importer_cache has a negative result # cached. Clear that cache before trying to import. sys.path_importer_cache.clear() # Don't try to load extensions for cross builds if CROSS_COMPILING: return loader = importlib.machinery.ExtensionFileLoader(ext.name, ext_filename) spec = importlib.util.spec_from_file_location(ext.name, ext_filename, loader=loader) try: importlib._bootstrap._load(spec) except ImportError as why: self.failed_on_import.append(ext.name) self.announce('*** WARNING: renaming "%s" since importing it' ' failed: %s' % (ext.name, why), level=3) assert not self.inplace basename, tail = os.path.splitext(ext_filename) newname = basename + "_failed" + tail if os.path.exists(newname): os.remove(newname) os.rename(ext_filename, newname) except: exc_type, why, tb = sys.exc_info() self.announce('*** WARNING: importing extension "%s" ' 'failed with %s: %s' % (ext.name, exc_type, why), level=3) self.failed.append(ext.name) def add_multiarch_paths(self): # Debian/Ubuntu multiarch support. 
# https://wiki.ubuntu.com/MultiarchSpec cc = sysconfig.get_config_var('CC') tmpfile = os.path.join(self.build_temp, 'multiarch') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) ret = run_command( '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile)) multiarch_path_component = '' try: if ret == 0: with open(tmpfile) as fp: multiarch_path_component = fp.readline().strip() finally: os.unlink(tmpfile) if multiarch_path_component != '': add_dir_to_list(self.compiler.library_dirs, '/usr/lib/' + multiarch_path_component) add_dir_to_list(self.compiler.include_dirs, '/usr/include/' + multiarch_path_component) return if not find_executable('dpkg-architecture'): return opt = '' if CROSS_COMPILING: opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE') tmpfile = os.path.join(self.build_temp, 'multiarch') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) ret = run_command( 'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' % (opt, tmpfile)) try: if ret == 0: with open(tmpfile) as fp: multiarch_path_component = fp.readline().strip() add_dir_to_list(self.compiler.library_dirs, '/usr/lib/' + multiarch_path_component) add_dir_to_list(self.compiler.include_dirs, '/usr/include/' + multiarch_path_component) finally: os.unlink(tmpfile) def add_cross_compiling_paths(self): cc = sysconfig.get_config_var('CC') tmpfile = os.path.join(self.build_temp, 'ccpaths') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) ret = run_command('%s -E -v - </dev/null 2>%s 1>/dev/null' % (cc, tmpfile)) is_gcc = False is_clang = False in_incdirs = False try: if ret == 0: with open(tmpfile) as fp: for line in fp.readlines(): if line.startswith("gcc version"): is_gcc = True elif line.startswith("clang version"): is_clang = True elif line.startswith("#include <...>"): in_incdirs = True elif line.startswith("End of search list"): in_incdirs = False elif (is_gcc or is_clang) and line.startswith("LIBRARY_PATH"): for d in line.strip().split("=")[1].split(":"): d = os.path.normpath(d) if '/gcc/' not in d: add_dir_to_list(self.compiler.library_dirs, d) elif (is_gcc or is_clang) and in_incdirs and '/gcc/' not in line and '/clang/' not in line: add_dir_to_list(self.compiler.include_dirs, line.strip()) finally: os.unlink(tmpfile) def add_ldflags_cppflags(self): # Add paths specified in the environment variables LDFLAGS and # CPPFLAGS for header and library files. # We must get the values from the Makefile and not the environment # directly since an inconsistently reproducible issue comes up where # the environment variable is not set even though the value were passed # into configure and stored in the Makefile (issue found on OS X 10.3). for env_var, arg_name, dir_list in ( ('LDFLAGS', '-R', self.compiler.runtime_library_dirs), ('LDFLAGS', '-L', self.compiler.library_dirs), ('CPPFLAGS', '-I', self.compiler.include_dirs)): env_val = sysconfig.get_config_var(env_var) if env_val: parser = argparse.ArgumentParser() parser.add_argument(arg_name, dest="dirs", action="append") options, _ = parser.parse_known_args(env_val.split()) if options.dirs: for directory in reversed(options.dirs): add_dir_to_list(dir_list, directory) def configure_compiler(self): # Ensure that /usr/local is always used, but the local build # directories (i.e. '.' and 'Include') must be first. See issue # 10520. 
if not CROSS_COMPILING: add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') # only change this for cross builds for 3.3, issues on Mageia if CROSS_COMPILING: self.add_cross_compiling_paths() self.add_multiarch_paths() self.add_ldflags_cppflags() def init_inc_lib_dirs(self): if (not CROSS_COMPILING and os.path.normpath(sys.base_prefix) != '/usr' and not sysconfig.get_config_var('PYTHONFRAMEWORK')): # OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework # (PYTHONFRAMEWORK is set) to avoid # linking problems when # building a framework with different architectures than # the one that is currently installed (issue #7473) add_dir_to_list(self.compiler.library_dirs, sysconfig.get_config_var("LIBDIR")) add_dir_to_list(self.compiler.include_dirs, sysconfig.get_config_var("INCLUDEDIR")) system_lib_dirs = ['/lib64', '/usr/lib64', '/lib', '/usr/lib'] system_include_dirs = ['/usr/include'] # lib_dirs and inc_dirs are used to search for files; # if a file is found in one of those directories, it can # be assumed that no additional -I,-L directives are needed. if not CROSS_COMPILING: self.lib_dirs = self.compiler.library_dirs + system_lib_dirs self.inc_dirs = self.compiler.include_dirs + system_include_dirs else: # Add the sysroot paths. 'sysroot' is a compiler option used to # set the logical path of the standard system headers and # libraries. self.lib_dirs = (self.compiler.library_dirs + sysroot_paths(('LDFLAGS', 'CC'), system_lib_dirs)) self.inc_dirs = (self.compiler.include_dirs + sysroot_paths(('CPPFLAGS', 'CFLAGS', 'CC'), system_include_dirs)) config_h = sysconfig.get_config_h_filename() with open(config_h) as file: self.config_h_vars = sysconfig.parse_config_h(file) # OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb) if HOST_PLATFORM in ['osf1', 'unixware7', 'openunix8']: self.lib_dirs += ['/usr/ccs/lib'] # HP-UX11iv3 keeps files in lib/hpux folders. if HOST_PLATFORM == 'hp-ux11': self.lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32'] if MACOS: # This should work on any unixy platform ;-) # If the user has bothered specifying additional -I and -L flags # in OPT and LDFLAGS we might as well use them here. # # NOTE: using shlex.split would technically be more correct, but # also gives a bootstrap problem. Let's hope nobody uses # directories with whitespace in the name to store libraries. cflags, ldflags = sysconfig.get_config_vars( 'CFLAGS', 'LDFLAGS') for item in cflags.split(): if item.startswith('-I'): self.inc_dirs.append(item[2:]) for item in ldflags.split(): if item.startswith('-L'): self.lib_dirs.append(item[2:]) def detect_simple_extensions(self): # # The following modules are all pretty straightforward, and compile # on pretty much any POSIXish platform. # # array objects self.add(Extension('array', ['arraymodule.c'])) # Context Variables self.add(Extension('_contextvars', ['_contextvarsmodule.c'])) shared_math = 'Modules/_math.o' # math library functions, e.g. 
sin() self.add(Extension('math', ['mathmodule.c'], extra_compile_args=['-DPy_BUILD_CORE_MODULE'], extra_objects=[shared_math], depends=['_math.h', shared_math], libraries=['m'])) # complex math library functions self.add(Extension('cmath', ['cmathmodule.c'], extra_compile_args=['-DPy_BUILD_CORE_MODULE'], extra_objects=[shared_math], depends=['_math.h', shared_math], libraries=['m'])) # time libraries: librt may be needed for clock_gettime() time_libs = [] lib = sysconfig.get_config_var('TIMEMODULE_LIB') if lib: time_libs.append(lib) # time operations and variables self.add(Extension('time', ['timemodule.c'], libraries=time_libs)) # libm is needed by delta_new() that uses round() and by accum() that # uses modf(). self.add(Extension('_datetime', ['_datetimemodule.c'], libraries=['m'])) # zoneinfo module self.add(Extension('_zoneinfo', ['_zoneinfo.c'])), # random number generator implemented in C self.add(Extension("_random", ["_randommodule.c"], extra_compile_args=['-DPy_BUILD_CORE_MODULE'])) # bisect self.add(Extension("_bisect", ["_bisectmodule.c"])) # heapq self.add(Extension("_heapq", ["_heapqmodule.c"])) # C-optimized pickle replacement self.add(Extension("_pickle", ["_pickle.c"], extra_compile_args=['-DPy_BUILD_CORE_MODULE'])) # atexit self.add(Extension("atexit", ["atexitmodule.c"])) # _json speedups self.add(Extension("_json", ["_json.c"], extra_compile_args=['-DPy_BUILD_CORE_MODULE'])) # profiler (_lsprof is for cProfile.py) self.add(Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c'])) # static Unicode character database self.add(Extension('unicodedata', ['unicodedata.c'], depends=['unicodedata_db.h', 'unicodename_db.h'])) # _opcode module self.add(Extension('_opcode', ['_opcode.c'])) # asyncio speedups self.add(Extension("_asyncio", ["_asynciomodule.c"], extra_compile_args=['-DPy_BUILD_CORE_MODULE'])) # _abc speedups self.add(Extension("_abc", ["_abc.c"])) # _queue module self.add(Extension("_queue", ["_queuemodule.c"])) # _statistics module self.add(Extension("_statistics", ["_statisticsmodule.c"])) # Modules with some UNIX dependencies -- on by default: # (If you have a really backward UNIX, select and socket may not be # supported...) # fcntl(2) and ioctl(2) libs = [] if (self.config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)): # May be necessary on AIX for flock function libs = ['bsd'] self.add(Extension('fcntl', ['fcntlmodule.c'], libraries=libs)) # pwd(3) self.add(Extension('pwd', ['pwdmodule.c'])) # grp(3) if not VXWORKS: self.add(Extension('grp', ['grpmodule.c'])) # spwd, shadow passwords if (self.config_h_vars.get('HAVE_GETSPNAM', False) or self.config_h_vars.get('HAVE_GETSPENT', False)): self.add(Extension('spwd', ['spwdmodule.c'])) # AIX has shadow passwords, but access is not via getspent(), etc. # module support is not expected so it not 'missing' elif not AIX: self.missing.append('spwd') # select(2); not on ancient System V self.add(Extension('select', ['selectmodule.c'])) # Fred Drake's interface to the Python parser self.add(Extension('parser', ['parsermodule.c'])) # Memory-mapped files (also works on Win32). self.add(Extension('mmap', ['mmapmodule.c'])) # Lance Ellinghaus's syslog module # syslog daemon interface self.add(Extension('syslog', ['syslogmodule.c'])) # Python interface to subinterpreter C-API. self.add(Extension('_xxsubinterpreters', ['_xxsubinterpretersmodule.c'])) # # Here ends the simple stuff. From here on, modules need certain # libraries, are platform-specific, or present other surprises. 
# # Multimedia modules # These don't work for 64-bit platforms!!! # These represent audio samples or images as strings: # # Operations on audio samples # According to #993173, this one should actually work fine on # 64-bit platforms. # # audioop needs libm for floor() in multiple functions. self.add(Extension('audioop', ['audioop.c'], libraries=['m'])) # CSV files self.add(Extension('_csv', ['_csv.c'])) # POSIX subprocess module helper. self.add(Extension('_posixsubprocess', ['_posixsubprocess.c'])) def detect_test_extensions(self): # Python C API test module self.add(Extension('_testcapi', ['_testcapimodule.c'], depends=['testcapi_long.h'])) # Python Internal C API test module self.add(Extension('_testinternalcapi', ['_testinternalcapi.c'], extra_compile_args=['-DPy_BUILD_CORE_MODULE'])) # Python PEP-3118 (buffer protocol) test module self.add(Extension('_testbuffer', ['_testbuffer.c'])) # Test loading multiple modules from one compiled file (http://bugs.python.org/issue16421) self.add(Extension('_testimportmultiple', ['_testimportmultiple.c'])) # Test multi-phase extension module init (PEP 489) self.add(Extension('_testmultiphase', ['_testmultiphase.c'])) # Fuzz tests. self.add(Extension('_xxtestfuzz', ['_xxtestfuzz/_xxtestfuzz.c', '_xxtestfuzz/fuzzer.c'])) def detect_readline_curses(self): # readline do_readline = self.compiler.find_library_file(self.lib_dirs, 'readline') readline_termcap_library = "" curses_library = "" # Cannot use os.popen here in py3k. tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) # Determine if readline is already linked against curses or tinfo. if do_readline: if CROSS_COMPILING: ret = run_command("%s -d %s | grep '(NEEDED)' > %s" % (sysconfig.get_config_var('READELF'), do_readline, tmpfile)) elif find_executable('ldd'): ret = run_command("ldd %s > %s" % (do_readline, tmpfile)) else: ret = 1 if ret == 0: with open(tmpfile) as fp: for ln in fp: if 'curses' in ln: readline_termcap_library = re.sub( r'.*lib(n?cursesw?)\.so.*', r'\1', ln ).rstrip() break # termcap interface split out from ncurses if 'tinfo' in ln: readline_termcap_library = 'tinfo' break if os.path.exists(tmpfile): os.unlink(tmpfile) # Issue 7384: If readline is already linked against curses, # use the same library for the readline and curses modules. if 'curses' in readline_termcap_library: curses_library = readline_termcap_library elif self.compiler.find_library_file(self.lib_dirs, 'ncursesw'): curses_library = 'ncursesw' # Issue 36210: OSS provided ncurses does not link on AIX # Use IBM supplied 'curses' for successful build of _curses elif AIX and self.compiler.find_library_file(self.lib_dirs, 'curses'): curses_library = 'curses' elif self.compiler.find_library_file(self.lib_dirs, 'ncurses'): curses_library = 'ncurses' elif self.compiler.find_library_file(self.lib_dirs, 'curses'): curses_library = 'curses' if MACOS: os_release = int(os.uname()[2].split('.')[0]) dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') if (dep_target and (tuple(int(n) for n in dep_target.split('.')[0:2]) < (10, 5) ) ): os_release = 8 if os_release < 9: # MacOSX 10.4 has a broken readline. 
Don't try to build # the readline module unless the user has installed a fixed # readline package if find_file('readline/rlconf.h', self.inc_dirs, []) is None: do_readline = False if do_readline: if MACOS and os_release < 9: # In every directory on the search path search for a dynamic # library and then a static library, instead of first looking # for dynamic libraries on the entire path. # This way a statically linked custom readline gets picked up # before the (possibly broken) dynamic library in /usr/lib. readline_extra_link_args = ('-Wl,-search_paths_first',) else: readline_extra_link_args = () readline_libs = ['readline'] if readline_termcap_library: pass # Issue 7384: Already linked against curses or tinfo. elif curses_library: readline_libs.append(curses_library) elif self.compiler.find_library_file(self.lib_dirs + ['/usr/lib/termcap'], 'termcap'): readline_libs.append('termcap') self.add(Extension('readline', ['readline.c'], library_dirs=['/usr/lib/termcap'], extra_link_args=readline_extra_link_args, libraries=readline_libs)) else: self.missing.append('readline') # Curses support, requiring the System V version of curses, often # provided by the ncurses library. curses_defines = [] curses_includes = [] panel_library = 'panel' if curses_library == 'ncursesw': curses_defines.append(('HAVE_NCURSESW', '1')) if not CROSS_COMPILING: curses_includes.append('/usr/include/ncursesw') # Bug 1464056: If _curses.so links with ncursesw, # _curses_panel.so must link with panelw. panel_library = 'panelw' if MACOS: # On OS X, there is no separate /usr/lib/libncursesw nor # libpanelw. If we are here, we found a locally-supplied # version of libncursesw. There should also be a # libpanelw. _XOPEN_SOURCE defines are usually excluded # for OS X but we need _XOPEN_SOURCE_EXTENDED here for # ncurses wide char support curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1')) elif MACOS and curses_library == 'ncurses': # Building with the system-suppied combined libncurses/libpanel curses_defines.append(('HAVE_NCURSESW', '1')) curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1')) curses_enabled = True if curses_library.startswith('ncurses'): curses_libs = [curses_library] self.add(Extension('_curses', ['_cursesmodule.c'], include_dirs=curses_includes, define_macros=curses_defines, libraries=curses_libs)) elif curses_library == 'curses' and not MACOS: # OSX has an old Berkeley curses, not good enough for # the _curses module. if (self.compiler.find_library_file(self.lib_dirs, 'terminfo')): curses_libs = ['curses', 'terminfo'] elif (self.compiler.find_library_file(self.lib_dirs, 'termcap')): curses_libs = ['curses', 'termcap'] else: curses_libs = ['curses'] self.add(Extension('_curses', ['_cursesmodule.c'], define_macros=curses_defines, libraries=curses_libs)) else: curses_enabled = False self.missing.append('_curses') # If the curses module is enabled, check for the panel module # _curses_panel needs some form of ncurses skip_curses_panel = True if AIX else False if (curses_enabled and not skip_curses_panel and self.compiler.find_library_file(self.lib_dirs, panel_library)): self.add(Extension('_curses_panel', ['_curses_panel.c'], include_dirs=curses_includes, define_macros=curses_defines, libraries=[panel_library, *curses_libs])) elif not skip_curses_panel: self.missing.append('_curses_panel') def detect_crypt(self): # crypt module. if VXWORKS: # bpo-31904: crypt() function is not provided by VxWorks. # DES_crypt() OpenSSL provides is too weak to implement # the encryption. 
return if self.compiler.find_library_file(self.lib_dirs, 'crypt'): libs = ['crypt'] else: libs = [] self.add(Extension('_crypt', ['_cryptmodule.c'], libraries=libs)) def detect_socket(self): # socket(2) if not VXWORKS: kwargs = {'depends': ['socketmodule.h']} if MACOS: # Issue #35569: Expose RFC 3542 socket options. kwargs['extra_compile_args'] = ['-D__APPLE_USE_RFC_3542'] self.add(Extension('_socket', ['socketmodule.c'], **kwargs)) elif self.compiler.find_library_file(self.lib_dirs, 'net'): libs = ['net'] self.add(Extension('_socket', ['socketmodule.c'], depends=['socketmodule.h'], libraries=libs)) def detect_dbm_gdbm(self): # Modules that provide persistent dictionary-like semantics. You will # probably want to arrange for at least one of them to be available on # your machine, though none are defined by default because of library # dependencies. The Python module dbm/__init__.py provides an # implementation independent wrapper for these; dbm/dumb.py provides # similar functionality (but slower of course) implemented in Python. # Sleepycat^WOracle Berkeley DB interface. # http://www.oracle.com/database/berkeley-db/db/index.html # # This requires the Sleepycat^WOracle DB code. The supported versions # are set below. Visit the URL above to download # a release. Most open source OSes come with one or more # versions of BerkeleyDB already installed. max_db_ver = (5, 3) min_db_ver = (3, 3) db_setup_debug = False # verbose debug prints from this script? def allow_db_ver(db_ver): """Returns a boolean if the given BerkeleyDB version is acceptable. Args: db_ver: A tuple of the version to verify. """ if not (min_db_ver <= db_ver <= max_db_ver): return False return True def gen_db_minor_ver_nums(major): if major == 4: for x in range(max_db_ver[1]+1): if allow_db_ver((4, x)): yield x elif major == 3: for x in (3,): if allow_db_ver((3, x)): yield x else: raise ValueError("unknown major BerkeleyDB version", major) # construct a list of paths to look for the header file in on # top of the normal inc_dirs. db_inc_paths = [ '/usr/include/db4', '/usr/local/include/db4', '/opt/sfw/include/db4', '/usr/include/db3', '/usr/local/include/db3', '/opt/sfw/include/db3', # Fink defaults (http://fink.sourceforge.net/) '/sw/include/db4', '/sw/include/db3', ] # 4.x minor number specific paths for x in gen_db_minor_ver_nums(4): db_inc_paths.append('/usr/include/db4%d' % x) db_inc_paths.append('/usr/include/db4.%d' % x) db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x) db_inc_paths.append('/usr/local/include/db4%d' % x) db_inc_paths.append('/pkg/db-4.%d/include' % x) db_inc_paths.append('/opt/db-4.%d/include' % x) # MacPorts default (http://www.macports.org/) db_inc_paths.append('/opt/local/include/db4%d' % x) # 3.x minor number specific paths for x in gen_db_minor_ver_nums(3): db_inc_paths.append('/usr/include/db3%d' % x) db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x) db_inc_paths.append('/usr/local/include/db3%d' % x) db_inc_paths.append('/pkg/db-3.%d/include' % x) db_inc_paths.append('/opt/db-3.%d/include' % x) if CROSS_COMPILING: db_inc_paths = [] # Add some common subdirectories for Sleepycat DB to the list, # based on the standard include directories. This way DB3/4 gets # picked up when it is installed in a non-standard prefix and # the user has added that prefix into inc_dirs. 
std_variants = [] for dn in self.inc_dirs: std_variants.append(os.path.join(dn, 'db3')) std_variants.append(os.path.join(dn, 'db4')) for x in gen_db_minor_ver_nums(4): std_variants.append(os.path.join(dn, "db4%d"%x)) std_variants.append(os.path.join(dn, "db4.%d"%x)) for x in gen_db_minor_ver_nums(3): std_variants.append(os.path.join(dn, "db3%d"%x)) std_variants.append(os.path.join(dn, "db3.%d"%x)) db_inc_paths = std_variants + db_inc_paths db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)] db_ver_inc_map = {} if MACOS: sysroot = macosx_sdk_root() class db_found(Exception): pass try: # See whether there is a Sleepycat header in the standard # search path. for d in self.inc_dirs + db_inc_paths: f = os.path.join(d, "db.h") if MACOS and is_macosx_sdk_path(d): f = os.path.join(sysroot, d[1:], "db.h") if db_setup_debug: print("db: looking for db.h in", f) if os.path.exists(f): with open(f, 'rb') as file: f = file.read() m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f) if m: db_major = int(m.group(1)) m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f) db_minor = int(m.group(1)) db_ver = (db_major, db_minor) # Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug if db_ver == (4, 6): m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f) db_patch = int(m.group(1)) if db_patch < 21: print("db.h:", db_ver, "patch", db_patch, "being ignored (4.6.x must be >= 4.6.21)") continue if ( (db_ver not in db_ver_inc_map) and allow_db_ver(db_ver) ): # save the include directory with the db.h version # (first occurrence only) db_ver_inc_map[db_ver] = d if db_setup_debug: print("db.h: found", db_ver, "in", d) else: # we already found a header for this library version if db_setup_debug: print("db.h: ignoring", d) else: # ignore this header, it didn't contain a version number if db_setup_debug: print("db.h: no version number version in", d) db_found_vers = list(db_ver_inc_map.keys()) db_found_vers.sort() while db_found_vers: db_ver = db_found_vers.pop() db_incdir = db_ver_inc_map[db_ver] # check lib directories parallel to the location of the header db_dirs_to_check = [ db_incdir.replace("include", 'lib64'), db_incdir.replace("include", 'lib'), ] if not MACOS: db_dirs_to_check = list(filter(os.path.isdir, db_dirs_to_check)) else: # Same as other branch, but takes OSX SDK into account tmp = [] for dn in db_dirs_to_check: if is_macosx_sdk_path(dn): if os.path.isdir(os.path.join(sysroot, dn[1:])): tmp.append(dn) else: if os.path.isdir(dn): tmp.append(dn) db_dirs_to_check = tmp db_dirs_to_check = tmp # Look for a version specific db-X.Y before an ambiguous dbX # XXX should we -ever- look for a dbX name? Do any # systems really not name their library by version and # symlink to more general names? for dblib in (('db-%d.%d' % db_ver), ('db%d%d' % db_ver), ('db%d' % db_ver[0])): dblib_file = self.compiler.find_library_file( db_dirs_to_check + self.lib_dirs, dblib ) if dblib_file: dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ] raise db_found else: if db_setup_debug: print("db lib: ", dblib, "not found") except db_found: if db_setup_debug: print("bsddb using BerkeleyDB lib:", db_ver, dblib) print("bsddb lib dir:", dblib_dir, " inc dir:", db_incdir) dblibs = [dblib] # Only add the found library and include directories if they aren't # already being searched. This avoids an explicit runtime library # dependency. 
if db_incdir in self.inc_dirs: db_incs = None else: db_incs = [db_incdir] if dblib_dir[0] in self.lib_dirs: dblib_dir = None else: if db_setup_debug: print("db: no appropriate library found") db_incs = None dblibs = [] dblib_dir = None dbm_setup_debug = False # verbose debug prints from this script? dbm_order = ['gdbm'] # The standard Unix dbm module: if not CYGWIN: config_args = [arg.strip("'") for arg in sysconfig.get_config_var("CONFIG_ARGS").split()] dbm_args = [arg for arg in config_args if arg.startswith('--with-dbmliborder=')] if dbm_args: dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":") else: dbm_order = "ndbm:gdbm:bdb".split(":") dbmext = None for cand in dbm_order: if cand == "ndbm": if find_file("ndbm.h", self.inc_dirs, []) is not None: # Some systems have -lndbm, others have -lgdbm_compat, # others don't have either if self.compiler.find_library_file(self.lib_dirs, 'ndbm'): ndbm_libs = ['ndbm'] elif self.compiler.find_library_file(self.lib_dirs, 'gdbm_compat'): ndbm_libs = ['gdbm_compat'] else: ndbm_libs = [] if dbm_setup_debug: print("building dbm using ndbm") dbmext = Extension('_dbm', ['_dbmmodule.c'], define_macros=[ ('HAVE_NDBM_H',None), ], libraries=ndbm_libs) break elif cand == "gdbm": if self.compiler.find_library_file(self.lib_dirs, 'gdbm'): gdbm_libs = ['gdbm'] if self.compiler.find_library_file(self.lib_dirs, 'gdbm_compat'): gdbm_libs.append('gdbm_compat') if find_file("gdbm/ndbm.h", self.inc_dirs, []) is not None: if dbm_setup_debug: print("building dbm using gdbm") dbmext = Extension( '_dbm', ['_dbmmodule.c'], define_macros=[ ('HAVE_GDBM_NDBM_H', None), ], libraries = gdbm_libs) break if find_file("gdbm-ndbm.h", self.inc_dirs, []) is not None: if dbm_setup_debug: print("building dbm using gdbm") dbmext = Extension( '_dbm', ['_dbmmodule.c'], define_macros=[ ('HAVE_GDBM_DASH_NDBM_H', None), ], libraries = gdbm_libs) break elif cand == "bdb": if dblibs: if dbm_setup_debug: print("building dbm using bdb") dbmext = Extension('_dbm', ['_dbmmodule.c'], library_dirs=dblib_dir, runtime_library_dirs=dblib_dir, include_dirs=db_incs, define_macros=[ ('HAVE_BERKDB_H', None), ('DB_DBM_HSEARCH', None), ], libraries=dblibs) break if dbmext is not None: self.add(dbmext) else: self.missing.append('_dbm') # Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm: if ('gdbm' in dbm_order and self.compiler.find_library_file(self.lib_dirs, 'gdbm')): self.add(Extension('_gdbm', ['_gdbmmodule.c'], libraries=['gdbm'])) else: self.missing.append('_gdbm') def detect_sqlite(self): # The sqlite interface sqlite_setup_debug = False # verbose debug prints from this script? # We hunt for #define SQLITE_VERSION "n.n.n" # We need to find >= sqlite version 3.3.9, for sqlite3_prepare_v2 sqlite_incdir = sqlite_libdir = None sqlite_inc_paths = [ '/usr/include', '/usr/include/sqlite', '/usr/include/sqlite3', '/usr/local/include', '/usr/local/include/sqlite', '/usr/local/include/sqlite3', ] if CROSS_COMPILING: sqlite_inc_paths = [] MIN_SQLITE_VERSION_NUMBER = (3, 7, 2) MIN_SQLITE_VERSION = ".".join([str(x) for x in MIN_SQLITE_VERSION_NUMBER]) # Scan the default include directories before the SQLite specific # ones. This allows one to override the copy of sqlite on OSX, # where /usr/include contains an old version of sqlite. 
if MACOS: sysroot = macosx_sdk_root() for d_ in self.inc_dirs + sqlite_inc_paths: d = d_ if MACOS and is_macosx_sdk_path(d): d = os.path.join(sysroot, d[1:]) f = os.path.join(d, "sqlite3.h") if os.path.exists(f): if sqlite_setup_debug: print("sqlite: found %s"%f) with open(f) as file: incf = file.read() m = re.search( r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf) if m: sqlite_version = m.group(1) sqlite_version_tuple = tuple([int(x) for x in sqlite_version.split(".")]) if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER: # we win! if sqlite_setup_debug: print("%s/sqlite3.h: version %s"%(d, sqlite_version)) sqlite_incdir = d break else: if sqlite_setup_debug: print("%s: version %s is too old, need >= %s"%(d, sqlite_version, MIN_SQLITE_VERSION)) elif sqlite_setup_debug: print("sqlite: %s had no SQLITE_VERSION"%(f,)) if sqlite_incdir: sqlite_dirs_to_check = [ os.path.join(sqlite_incdir, '..', 'lib64'), os.path.join(sqlite_incdir, '..', 'lib'), os.path.join(sqlite_incdir, '..', '..', 'lib64'), os.path.join(sqlite_incdir, '..', '..', 'lib'), ] sqlite_libfile = self.compiler.find_library_file( sqlite_dirs_to_check + self.lib_dirs, 'sqlite3') if sqlite_libfile: sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))] if sqlite_incdir and sqlite_libdir: sqlite_srcs = ['_sqlite/cache.c', '_sqlite/connection.c', '_sqlite/cursor.c', '_sqlite/microprotocols.c', '_sqlite/module.c', '_sqlite/prepare_protocol.c', '_sqlite/row.c', '_sqlite/statement.c', '_sqlite/util.c', ] sqlite_defines = [] if not MS_WINDOWS: sqlite_defines.append(('MODULE_NAME', '"sqlite3"')) else: sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"')) # Enable support for loadable extensions in the sqlite3 module # if --enable-loadable-sqlite-extensions configure option is used. if '--enable-loadable-sqlite-extensions' not in sysconfig.get_config_var("CONFIG_ARGS"): sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1")) if MACOS: # In every directory on the search path search for a dynamic # library and then a static library, instead of first looking # for dynamic libraries on the entire path. # This way a statically linked custom sqlite gets picked up # before the dynamic library in /usr/lib. sqlite_extra_link_args = ('-Wl,-search_paths_first',) else: sqlite_extra_link_args = () include_dirs = ["Modules/_sqlite"] # Only include the directory where sqlite was found if it does # not already exist in set include directories, otherwise you # can end up with a bad search path order. 
if sqlite_incdir not in self.compiler.include_dirs: include_dirs.append(sqlite_incdir) # avoid a runtime library path for a system library dir if sqlite_libdir and sqlite_libdir[0] in self.lib_dirs: sqlite_libdir = None self.add(Extension('_sqlite3', sqlite_srcs, define_macros=sqlite_defines, include_dirs=include_dirs, library_dirs=sqlite_libdir, extra_link_args=sqlite_extra_link_args, libraries=["sqlite3",])) else: self.missing.append('_sqlite3') def detect_platform_specific_exts(self): # Unix-only modules if not MS_WINDOWS: if not VXWORKS: # Steen Lumholt's termios module self.add(Extension('termios', ['termios.c'])) # Jeremy Hylton's rlimit interface self.add(Extension('resource', ['resource.c'])) else: self.missing.extend(['resource', 'termios']) # Platform-specific libraries if HOST_PLATFORM.startswith(('linux', 'freebsd', 'gnukfreebsd')): self.add(Extension('ossaudiodev', ['ossaudiodev.c'])) elif not AIX: self.missing.append('ossaudiodev') if MACOS: self.add(Extension('_scproxy', ['_scproxy.c'], extra_link_args=[ '-framework', 'SystemConfiguration', '-framework', 'CoreFoundation'])) def detect_compress_exts(self): # Andrew Kuchling's zlib module. Note that some versions of zlib # 1.1.3 have security problems. See CERT Advisory CA-2002-07: # http://www.cert.org/advisories/CA-2002-07.html # # zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to # patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For # now, we still accept 1.1.3, because we think it's difficult to # exploit this in Python, and we'd rather make it RedHat's problem # than our problem <wink>. # # You can upgrade zlib to version 1.1.4 yourself by going to # http://www.gzip.org/zlib/ zlib_inc = find_file('zlib.h', [], self.inc_dirs) have_zlib = False if zlib_inc is not None: zlib_h = zlib_inc[0] + '/zlib.h' version = '"0.0.0"' version_req = '"1.1.3"' if MACOS and is_macosx_sdk_path(zlib_h): zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:]) with open(zlib_h) as fp: while 1: line = fp.readline() if not line: break if line.startswith('#define ZLIB_VERSION'): version = line.split()[2] break if version >= version_req: if (self.compiler.find_library_file(self.lib_dirs, 'z')): if MACOS: zlib_extra_link_args = ('-Wl,-search_paths_first',) else: zlib_extra_link_args = () self.add(Extension('zlib', ['zlibmodule.c'], libraries=['z'], extra_link_args=zlib_extra_link_args)) have_zlib = True else: self.missing.append('zlib') else: self.missing.append('zlib') else: self.missing.append('zlib') # Helper module for various ascii-encoders. Uses zlib for an optimized # crc32 if we have it. Otherwise binascii uses its own. if have_zlib: extra_compile_args = ['-DUSE_ZLIB_CRC32'] libraries = ['z'] extra_link_args = zlib_extra_link_args else: extra_compile_args = [] libraries = [] extra_link_args = [] self.add(Extension('binascii', ['binascii.c'], extra_compile_args=extra_compile_args, libraries=libraries, extra_link_args=extra_link_args)) # Gustavo Niemeyer's bz2 module. if (self.compiler.find_library_file(self.lib_dirs, 'bz2')): if MACOS: bz2_extra_link_args = ('-Wl,-search_paths_first',) else: bz2_extra_link_args = () self.add(Extension('_bz2', ['_bz2module.c'], libraries=['bz2'], extra_link_args=bz2_extra_link_args)) else: self.missing.append('_bz2') # LZMA compression support. 
if self.compiler.find_library_file(self.lib_dirs, 'lzma'): self.add(Extension('_lzma', ['_lzmamodule.c'], libraries=['lzma'])) else: self.missing.append('_lzma') def detect_expat_elementtree(self): # Interface to the Expat XML parser # # Expat was written by James Clark and is now maintained by a group of # developers on SourceForge; see www.libexpat.org for more information. # The pyexpat module was written by Paul Prescod after a prototype by # Jack Jansen. The Expat source is included in Modules/expat/. Usage # of a system shared libexpat.so is possible with --with-system-expat # configure option. # # More information on Expat can be found at www.libexpat.org. # if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"): expat_inc = [] define_macros = [] extra_compile_args = [] expat_lib = ['expat'] expat_sources = [] expat_depends = [] else: expat_inc = [os.path.join(self.srcdir, 'Modules', 'expat')] define_macros = [ ('HAVE_EXPAT_CONFIG_H', '1'), # bpo-30947: Python uses best available entropy sources to # call XML_SetHashSalt(), expat entropy sources are not needed ('XML_POOR_ENTROPY', '1'), ] extra_compile_args = [] expat_lib = [] expat_sources = ['expat/xmlparse.c', 'expat/xmlrole.c', 'expat/xmltok.c'] expat_depends = ['expat/ascii.h', 'expat/asciitab.h', 'expat/expat.h', 'expat/expat_config.h', 'expat/expat_external.h', 'expat/internal.h', 'expat/latin1tab.h', 'expat/utf8tab.h', 'expat/xmlrole.h', 'expat/xmltok.h', 'expat/xmltok_impl.h' ] cc = sysconfig.get_config_var('CC').split()[0] ret = run_command( '"%s" -Werror -Wno-unreachable-code -E -xc /dev/null >/dev/null 2>&1' % cc) if ret == 0: extra_compile_args.append('-Wno-unreachable-code') self.add(Extension('pyexpat', define_macros=define_macros, extra_compile_args=extra_compile_args, include_dirs=expat_inc, libraries=expat_lib, sources=['pyexpat.c'] + expat_sources, depends=expat_depends)) # Fredrik Lundh's cElementTree module. Note that this also # uses expat (via the CAPI hook in pyexpat). if os.path.isfile(os.path.join(self.srcdir, 'Modules', '_elementtree.c')): define_macros.append(('USE_PYEXPAT_CAPI', None)) self.add(Extension('_elementtree', define_macros=define_macros, include_dirs=expat_inc, libraries=expat_lib, sources=['_elementtree.c'], depends=['pyexpat.c', *expat_sources, *expat_depends])) else: self.missing.append('_elementtree') def detect_multibytecodecs(self): # Hye-Shik Chang's CJKCodecs modules. 
self.add(Extension('_multibytecodec', ['cjkcodecs/multibytecodec.c'])) for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'): self.add(Extension('_codecs_%s' % loc, ['cjkcodecs/_codecs_%s.c' % loc])) def detect_multiprocessing(self): # Richard Oudkerk's multiprocessing module if MS_WINDOWS: multiprocessing_srcs = ['_multiprocessing/multiprocessing.c', '_multiprocessing/semaphore.c'] else: multiprocessing_srcs = ['_multiprocessing/multiprocessing.c'] if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')): multiprocessing_srcs.append('_multiprocessing/semaphore.c') if (sysconfig.get_config_var('HAVE_SHM_OPEN') and sysconfig.get_config_var('HAVE_SHM_UNLINK')): posixshmem_srcs = ['_multiprocessing/posixshmem.c'] libs = [] if sysconfig.get_config_var('SHM_NEEDS_LIBRT'): # need to link with librt to get shm_open() libs.append('rt') self.add(Extension('_posixshmem', posixshmem_srcs, define_macros={}, libraries=libs, include_dirs=["Modules/_multiprocessing"])) self.add(Extension('_multiprocessing', multiprocessing_srcs, include_dirs=["Modules/_multiprocessing"])) def detect_uuid(self): # Build the _uuid module if possible uuid_incs = find_file("uuid.h", self.inc_dirs, ["/usr/include/uuid"]) if uuid_incs is not None: if self.compiler.find_library_file(self.lib_dirs, 'uuid'): uuid_libs = ['uuid'] else: uuid_libs = [] self.add(Extension('_uuid', ['_uuidmodule.c'], libraries=uuid_libs, include_dirs=uuid_incs)) else: self.missing.append('_uuid') def detect_modules(self): self.configure_compiler() self.init_inc_lib_dirs() self.detect_simple_extensions() if TEST_EXTENSIONS: self.detect_test_extensions() self.detect_readline_curses() self.detect_crypt() self.detect_socket() self.detect_openssl_hashlib() self.detect_hash_builtins() self.detect_dbm_gdbm() self.detect_sqlite() self.detect_platform_specific_exts() self.detect_nis() self.detect_compress_exts() self.detect_expat_elementtree() self.detect_multibytecodecs() self.detect_decimal() self.detect_ctypes() self.detect_multiprocessing() if not self.detect_tkinter(): self.missing.append('_tkinter') self.detect_uuid() ## # Uncomment these lines if you want to play with xxmodule.c ## self.add(Extension('xx', ['xxmodule.c'])) if 'd' not in sysconfig.get_config_var('ABIFLAGS'): self.add(Extension('xxlimited', ['xxlimited.c'], define_macros=[('Py_LIMITED_API', '0x03050000')])) def detect_tkinter_explicitly(self): # Build _tkinter using explicit locations for Tcl/Tk. # # This is enabled when both arguments are given to ./configure: # # --with-tcltk-includes="-I/path/to/tclincludes \ # -I/path/to/tkincludes" # --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \ # -L/path/to/tklibs -ltkm.n" # # These values can also be specified or overridden via make: # make TCLTK_INCLUDES="..." TCLTK_LIBS="..." # # This can be useful for building and testing tkinter with multiple # versions of Tcl/Tk. Note that a build of Tk depends on a particular # build of Tcl so you need to specify both arguments and use care when # overriding. # The _TCLTK variables are created in the Makefile sharedmods target. tcltk_includes = os.environ.get('_TCLTK_INCLUDES') tcltk_libs = os.environ.get('_TCLTK_LIBS') if not (tcltk_includes and tcltk_libs): # Resume default configuration search. 
return False extra_compile_args = tcltk_includes.split() extra_link_args = tcltk_libs.split() self.add(Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'], define_macros=[('WITH_APPINIT', 1)], extra_compile_args = extra_compile_args, extra_link_args = extra_link_args)) return True def detect_tkinter_darwin(self): # Build default _tkinter on macOS using Tcl and Tk frameworks. # # The macOS native Tk (AKA Aqua Tk) and Tcl are most commonly # built and installed as macOS framework bundles. However, # for several reasons, we cannot take full advantage of the # Apple-supplied compiler chain's -framework options here. # Instead, we need to find and pass to the compiler the # absolute paths of the Tcl and Tk headers files we want to use # and the absolute path to the directory containing the Tcl # and Tk frameworks for linking. # # We want to handle here two common use cases on macOS: # 1. Build and link with system-wide third-party or user-built # Tcl and Tk frameworks installed in /Library/Frameworks. # 2. Build and link using a user-specified macOS SDK so that the # built Python can be exported to other systems. In this case, # search only the SDK's /Library/Frameworks (normally empty) # and /System/Library/Frameworks. # # Any other use case should be able to be handled explicitly by # using the options described above in detect_tkinter_explicitly(). # In particular it would be good to handle here the case where # you want to build and link with a framework build of Tcl and Tk # that is not in /Library/Frameworks, say, in your private # $HOME/Library/Frameworks directory or elsewhere. It turns # out to be difficult to make that work automatically here # without bringing into play more tools and magic. That case # can be handled using a recipe with the right arguments # to detect_tkinter_explicitly(). # # Note also that the fallback case here is to try to use the # Apple-supplied Tcl and Tk frameworks in /System/Library but # be forewarned that they are deprecated by Apple and typically # out-of-date and buggy; their use should be avoided if at # all possible by installing a newer version of Tcl and Tk in # /Library/Frameworks before building Python without # an explicit SDK or by configuring build arguments explicitly. from os.path import join, exists sysroot = macosx_sdk_root() # path to the SDK or '/' if macosx_sdk_specified(): # Use case #2: an SDK other than '/' was specified. # Only search there. framework_dirs = [ join(sysroot, 'Library', 'Frameworks'), join(sysroot, 'System', 'Library', 'Frameworks'), ] else: # Use case #1: no explicit SDK selected. # Search the local system-wide /Library/Frameworks, # not the one in the default SDK, otherwise fall back to # /System/Library/Frameworks whose header files may be in # the default SDK or, on older systems, actually installed. framework_dirs = [ join('/', 'Library', 'Frameworks'), join(sysroot, 'System', 'Library', 'Frameworks'), ] # Find the directory that contains the Tcl.framework and # Tk.framework bundles. for F in framework_dirs: # both Tcl.framework and Tk.framework should be present for fw in 'Tcl', 'Tk': if not exists(join(F, fw + '.framework')): break else: # ok, F is now directory with both frameworks. Continue # building break else: # Tk and Tcl frameworks not found. Normal "unix" tkinter search # will now resume. 
return False include_dirs = [ join(F, fw + '.framework', H) for fw in ('Tcl', 'Tk') for H in ('Headers',) ] # Add the base framework directory as well compile_args = ['-F', F] # Do not build tkinter for archs that this Tk was not built with. cflags = sysconfig.get_config_vars('CFLAGS')[0] archs = re.findall(r'-arch\s+(\w+)', cflags) tmpfile = os.path.join(self.build_temp, 'tk.arch') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) run_command( "file {}/Tk.framework/Tk | grep 'for architecture' > {}".format(F, tmpfile) ) with open(tmpfile) as fp: detected_archs = [] for ln in fp: a = ln.split()[-1] if a in archs: detected_archs.append(ln.split()[-1]) os.unlink(tmpfile) arch_args = [] for a in detected_archs: arch_args.append('-arch') arch_args.append(a) compile_args += arch_args link_args = [','.join(['-Wl', '-F', F, '-framework', 'Tcl', '-framework', 'Tk']), *arch_args] # The X11/xlib.h file bundled in the Tk sources can cause function # prototype warnings from the compiler. Since we cannot easily fix # that, suppress the warnings here instead. if '-Wstrict-prototypes' in cflags.split(): compile_args.append('-Wno-strict-prototypes') self.add(Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'], define_macros=[('WITH_APPINIT', 1)], include_dirs=include_dirs, libraries=[], extra_compile_args=compile_args, extra_link_args=link_args)) return True def detect_tkinter(self): # The _tkinter module. # Check whether --with-tcltk-includes and --with-tcltk-libs were # configured or passed into the make target. If so, use these values # to build tkinter and bypass the searches for Tcl and TK in standard # locations. if self.detect_tkinter_explicitly(): return True # Rather than complicate the code below, detecting and building # AquaTk is a separate method. Only one Tkinter will be built on # Darwin - either AquaTk, if it is found, or X11 based Tk. if (MACOS and self.detect_tkinter_darwin()): return True # Assume we haven't found any of the libraries or include files # The versions with dots are used on Unix, and the versions without # dots on Windows, for detection by cygwin. tcllib = tklib = tcl_includes = tk_includes = None for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83', '8.2', '82', '8.1', '81', '8.0', '80']: tklib = self.compiler.find_library_file(self.lib_dirs, 'tk' + version) tcllib = self.compiler.find_library_file(self.lib_dirs, 'tcl' + version) if tklib and tcllib: # Exit the loop when we've found the Tcl/Tk libraries break # Now check for the header files if tklib and tcllib: # Check for the include files on Debian and {Free,Open}BSD, where # they're put in /usr/include/{tcl,tk}X.Y dotversion = version if '.' not in dotversion and "bsd" in HOST_PLATFORM.lower(): # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a, # but the include subdirs are named like .../include/tcl8.3. dotversion = dotversion[:-1] + '.' + dotversion[-1] tcl_include_sub = [] tk_include_sub = [] for dir in self.inc_dirs: tcl_include_sub += [dir + os.sep + "tcl" + dotversion] tk_include_sub += [dir + os.sep + "tk" + dotversion] tk_include_sub += tcl_include_sub tcl_includes = find_file('tcl.h', self.inc_dirs, tcl_include_sub) tk_includes = find_file('tk.h', self.inc_dirs, tk_include_sub) if (tcllib is None or tklib is None or tcl_includes is None or tk_includes is None): self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2) return False # OK... everything seems to be present for Tcl/Tk. 
include_dirs = [] libs = [] defs = [] added_lib_dirs = [] for dir in tcl_includes + tk_includes: if dir not in include_dirs: include_dirs.append(dir) # Check for various platform-specific directories if HOST_PLATFORM == 'sunos5': include_dirs.append('/usr/openwin/include') added_lib_dirs.append('/usr/openwin/lib') elif os.path.exists('/usr/X11R6/include'): include_dirs.append('/usr/X11R6/include') added_lib_dirs.append('/usr/X11R6/lib64') added_lib_dirs.append('/usr/X11R6/lib') elif os.path.exists('/usr/X11R5/include'): include_dirs.append('/usr/X11R5/include') added_lib_dirs.append('/usr/X11R5/lib') else: # Assume default location for X11 include_dirs.append('/usr/X11/include') added_lib_dirs.append('/usr/X11/lib') # If Cygwin, then verify that X is installed before proceeding if CYGWIN: x11_inc = find_file('X11/Xlib.h', [], include_dirs) if x11_inc is None: return False # Check for BLT extension if self.compiler.find_library_file(self.lib_dirs + added_lib_dirs, 'BLT8.0'): defs.append( ('WITH_BLT', 1) ) libs.append('BLT8.0') elif self.compiler.find_library_file(self.lib_dirs + added_lib_dirs, 'BLT'): defs.append( ('WITH_BLT', 1) ) libs.append('BLT') # Add the Tcl/Tk libraries libs.append('tk'+ version) libs.append('tcl'+ version) # Finally, link with the X11 libraries (not appropriate on cygwin) if not CYGWIN: libs.append('X11') # XXX handle these, but how to detect? # *** Uncomment and edit for PIL (TkImaging) extension only: # -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \ # *** Uncomment and edit for TOGL extension only: # -DWITH_TOGL togl.c \ # *** Uncomment these for TOGL extension only: # -lGL -lGLU -lXext -lXmu \ self.add(Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'], define_macros=[('WITH_APPINIT', 1)] + defs, include_dirs=include_dirs, libraries=libs, library_dirs=added_lib_dirs)) return True def configure_ctypes(self, ext): return True def detect_ctypes(self): # Thomas Heller's _ctypes module if (not sysconfig.get_config_var("LIBFFI_INCLUDEDIR") and MACOS): self.use_system_libffi = True else: self.use_system_libffi = '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS") include_dirs = [] extra_compile_args = ['-DPy_BUILD_CORE_MODULE'] extra_link_args = [] sources = ['_ctypes/_ctypes.c', '_ctypes/callbacks.c', '_ctypes/callproc.c', '_ctypes/stgdict.c', '_ctypes/cfield.c'] depends = ['_ctypes/ctypes.h'] if MACOS: sources.append('_ctypes/malloc_closure.c') extra_compile_args.append('-DUSING_MALLOC_CLOSURE_DOT_C=1') extra_compile_args.append('-DMACOSX') include_dirs.append('_ctypes/darwin') elif HOST_PLATFORM == 'sunos5': # XXX This shouldn't be necessary; it appears that some # of the assembler code is non-PIC (i.e. it has relocations # when it shouldn't. The proper fix would be to rewrite # the assembler code to be PIC. # This only works with GCC; the Sun compiler likely refuses # this option. If you want to compile ctypes with the Sun # compiler, please research a proper solution, instead of # finding some -z option for the Sun compiler. 
extra_link_args.append('-mimpure-text') elif HOST_PLATFORM.startswith('hp-ux'): extra_link_args.append('-fPIC') ext = Extension('_ctypes', include_dirs=include_dirs, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, libraries=[], sources=sources, depends=depends) self.add(ext) if TEST_EXTENSIONS: # function my_sqrt() needs libm for sqrt() self.add(Extension('_ctypes_test', sources=['_ctypes/_ctypes_test.c'], libraries=['m'])) ffi_inc = sysconfig.get_config_var("LIBFFI_INCLUDEDIR") ffi_lib = None ffi_inc_dirs = self.inc_dirs.copy() if MACOS: ffi_in_sdk = os.path.join(macosx_sdk_root(), "usr/include/ffi") if not ffi_inc: if os.path.exists(ffi_in_sdk): ext.extra_compile_args.append("-DUSING_APPLE_OS_LIBFFI=1") ffi_inc = ffi_in_sdk ffi_lib = 'ffi' else: # OS X 10.5 comes with libffi.dylib; the include files are # in /usr/include/ffi ffi_inc_dirs.append('/usr/include/ffi') if not ffi_inc: found = find_file('ffi.h', [], ffi_inc_dirs) if found: ffi_inc = found[0] if ffi_inc: ffi_h = ffi_inc + '/ffi.h' if not os.path.exists(ffi_h): ffi_inc = None print('Header file {} does not exist'.format(ffi_h)) if ffi_lib is None and ffi_inc: for lib_name in ('ffi', 'ffi_pic'): if (self.compiler.find_library_file(self.lib_dirs, lib_name)): ffi_lib = lib_name break if ffi_inc and ffi_lib: ffi_headers = glob(os.path.join(ffi_inc, '*.h')) if grep_headers_for('ffi_prep_cif_var', ffi_headers): ext.extra_compile_args.append("-DHAVE_FFI_PREP_CIF_VAR=1") if grep_headers_for('ffi_prep_closure_loc', ffi_headers): ext.extra_compile_args.append("-DHAVE_FFI_PREP_CLOSURE_LOC=1") if grep_headers_for('ffi_closure_alloc', ffi_headers): ext.extra_compile_args.append("-DHAVE_FFI_CLOSURE_ALLOC=1") ext.include_dirs.append(ffi_inc) ext.libraries.append(ffi_lib) self.use_system_libffi = True if sysconfig.get_config_var('HAVE_LIBDL'): # for dlopen, see bpo-32647 ext.libraries.append('dl') def detect_decimal(self): # Stefan Krah's _decimal module extra_compile_args = [] undef_macros = [] if '--with-system-libmpdec' in sysconfig.get_config_var("CONFIG_ARGS"): include_dirs = [] libraries = [':libmpdec.so.2'] sources = ['_decimal/_decimal.c'] depends = ['_decimal/docstrings.h'] else: include_dirs = [os.path.abspath(os.path.join(self.srcdir, 'Modules', '_decimal', 'libmpdec'))] libraries = ['m'] sources = [ '_decimal/_decimal.c', '_decimal/libmpdec/basearith.c', '_decimal/libmpdec/constants.c', '_decimal/libmpdec/context.c', '_decimal/libmpdec/convolute.c', '_decimal/libmpdec/crt.c', '_decimal/libmpdec/difradix2.c', '_decimal/libmpdec/fnt.c', '_decimal/libmpdec/fourstep.c', '_decimal/libmpdec/io.c', '_decimal/libmpdec/mpalloc.c', '_decimal/libmpdec/mpdecimal.c', '_decimal/libmpdec/numbertheory.c', '_decimal/libmpdec/sixstep.c', '_decimal/libmpdec/transpose.c', ] depends = [ '_decimal/docstrings.h', '_decimal/libmpdec/basearith.h', '_decimal/libmpdec/bits.h', '_decimal/libmpdec/constants.h', '_decimal/libmpdec/convolute.h', '_decimal/libmpdec/crt.h', '_decimal/libmpdec/difradix2.h', '_decimal/libmpdec/fnt.h', '_decimal/libmpdec/fourstep.h', '_decimal/libmpdec/io.h', '_decimal/libmpdec/mpalloc.h', '_decimal/libmpdec/mpdecimal.h', '_decimal/libmpdec/numbertheory.h', '_decimal/libmpdec/sixstep.h', '_decimal/libmpdec/transpose.h', '_decimal/libmpdec/typearith.h', '_decimal/libmpdec/umodarith.h', ] config = { 'x64': [('CONFIG_64','1'), ('ASM','1')], 'uint128': [('CONFIG_64','1'), ('ANSI','1'), ('HAVE_UINT128_T','1')], 'ansi64': [('CONFIG_64','1'), ('ANSI','1')], 'ppro': [('CONFIG_32','1'), ('PPRO','1'), ('ASM','1')], 
'ansi32': [('CONFIG_32','1'), ('ANSI','1')], 'ansi-legacy': [('CONFIG_32','1'), ('ANSI','1'), ('LEGACY_COMPILER','1')], 'universal': [('UNIVERSAL','1')] } cc = sysconfig.get_config_var('CC') sizeof_size_t = sysconfig.get_config_var('SIZEOF_SIZE_T') machine = os.environ.get('PYTHON_DECIMAL_WITH_MACHINE') if machine: # Override automatic configuration to facilitate testing. define_macros = config[machine] elif MACOS: # Universal here means: build with the same options Python # was built with. define_macros = config['universal'] elif sizeof_size_t == 8: if sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X64'): define_macros = config['x64'] elif sysconfig.get_config_var('HAVE_GCC_UINT128_T'): define_macros = config['uint128'] else: define_macros = config['ansi64'] elif sizeof_size_t == 4: ppro = sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X87') if ppro and ('gcc' in cc or 'clang' in cc) and \ not 'sunos' in HOST_PLATFORM: # solaris: problems with register allocation. # icc >= 11.0 works as well. define_macros = config['ppro'] extra_compile_args.append('-Wno-unknown-pragmas') else: define_macros = config['ansi32'] else: raise DistutilsError("_decimal: unsupported architecture") # Workarounds for toolchain bugs: if sysconfig.get_config_var('HAVE_IPA_PURE_CONST_BUG'): # Some versions of gcc miscompile inline asm: # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46491 # http://gcc.gnu.org/ml/gcc/2010-11/msg00366.html extra_compile_args.append('-fno-ipa-pure-const') if sysconfig.get_config_var('HAVE_GLIBC_MEMMOVE_BUG'): # _FORTIFY_SOURCE wrappers for memmove and bcopy are incorrect: # http://sourceware.org/ml/libc-alpha/2010-12/msg00009.html undef_macros.append('_FORTIFY_SOURCE') # Uncomment for extra functionality: #define_macros.append(('EXTRA_FUNCTIONALITY', 1)) self.add(Extension('_decimal', include_dirs=include_dirs, libraries=libraries, define_macros=define_macros, undef_macros=undef_macros, extra_compile_args=extra_compile_args, sources=sources, depends=depends)) def detect_openssl_hashlib(self): # Detect SSL support for the socket module (via _ssl) config_vars = sysconfig.get_config_vars() def split_var(name, sep): # poor man's shlex, the re module is not available yet. value = config_vars.get(name) if not value: return () # This trick works because ax_check_openssl uses --libs-only-L, # --libs-only-l, and --cflags-only-I. 
value = ' ' + value sep = ' ' + sep return [v.strip() for v in value.split(sep) if v.strip()] openssl_includes = split_var('OPENSSL_INCLUDES', '-I') openssl_libdirs = split_var('OPENSSL_LDFLAGS', '-L') openssl_libs = split_var('OPENSSL_LIBS', '-l') if not openssl_libs: # libssl and libcrypto not found self.missing.extend(['_ssl', '_hashlib']) return None, None # Find OpenSSL includes ssl_incs = find_file( 'openssl/ssl.h', self.inc_dirs, openssl_includes ) if ssl_incs is None: self.missing.extend(['_ssl', '_hashlib']) return None, None # OpenSSL 1.0.2 uses Kerberos for KRB5 ciphers krb5_h = find_file( 'krb5.h', self.inc_dirs, ['/usr/kerberos/include'] ) if krb5_h: ssl_incs.extend(krb5_h) if config_vars.get("HAVE_X509_VERIFY_PARAM_SET1_HOST"): self.add(Extension( '_ssl', ['_ssl.c'], include_dirs=openssl_includes, library_dirs=openssl_libdirs, libraries=openssl_libs, depends=[ 'socketmodule.h', '_ssl/debughelpers.c', '_ssl_data.h', '_ssl_data_111.h', '_ssl_data_300.h', ])) else: self.missing.append('_ssl') self.add(Extension('_hashlib', ['_hashopenssl.c'], depends=['hashlib.h'], include_dirs=openssl_includes, library_dirs=openssl_libdirs, libraries=openssl_libs)) def detect_hash_builtins(self): # By default we always compile these even when OpenSSL is available # (issue #14693). It's harmless and the object code is tiny # (40-50 KiB per module, only loaded when actually used). Modules can # be disabled via the --with-builtin-hashlib-hashes configure flag. supported = {"md5", "sha1", "sha256", "sha512", "sha3", "blake2"} configured = sysconfig.get_config_var("PY_BUILTIN_HASHLIB_HASHES") configured = configured.strip('"').lower() configured = { m.strip() for m in configured.split(",") } self.disabled_configure.extend( sorted(supported.difference(configured)) ) if "sha256" in configured: self.add(Extension( '_sha256', ['sha256module.c'], extra_compile_args=['-DPy_BUILD_CORE_MODULE'], depends=['hashlib.h'] )) if "sha512" in configured: self.add(Extension( '_sha512', ['sha512module.c'], extra_compile_args=['-DPy_BUILD_CORE_MODULE'], depends=['hashlib.h'] )) if "md5" in configured: self.add(Extension( '_md5', ['md5module.c'], depends=['hashlib.h'] )) if "sha1" in configured: self.add(Extension( '_sha1', ['sha1module.c'], depends=['hashlib.h'] )) if "blake2" in configured: blake2_deps = glob( os.path.join(escape(self.srcdir), 'Modules/_blake2/impl/*') ) blake2_deps.append('hashlib.h') self.add(Extension( '_blake2', [ '_blake2/blake2module.c', '_blake2/blake2b_impl.c', '_blake2/blake2s_impl.c' ], depends=blake2_deps )) if "sha3" in configured: sha3_deps = glob( os.path.join(escape(self.srcdir), 'Modules/_sha3/kcp/*') ) sha3_deps.append('hashlib.h') self.add(Extension( '_sha3', ['_sha3/sha3module.c'], depends=sha3_deps )) def detect_nis(self): if MS_WINDOWS or CYGWIN or HOST_PLATFORM == 'qnx6': self.missing.append('nis') return libs = [] library_dirs = [] includes_dirs = [] # bpo-32521: glibc has deprecated Sun RPC for some time. Fedora 28 # moved headers and libraries to libtirpc and libnsl. The headers # are in tircp and nsl sub directories. 
rpcsvc_inc = find_file( 'rpcsvc/yp_prot.h', self.inc_dirs, [os.path.join(inc_dir, 'nsl') for inc_dir in self.inc_dirs] ) rpc_inc = find_file( 'rpc/rpc.h', self.inc_dirs, [os.path.join(inc_dir, 'tirpc') for inc_dir in self.inc_dirs] ) if rpcsvc_inc is None or rpc_inc is None: # not found self.missing.append('nis') return includes_dirs.extend(rpcsvc_inc) includes_dirs.extend(rpc_inc) if self.compiler.find_library_file(self.lib_dirs, 'nsl'): libs.append('nsl') else: # libnsl-devel: check for libnsl in nsl/ subdirectory nsl_dirs = [os.path.join(lib_dir, 'nsl') for lib_dir in self.lib_dirs] libnsl = self.compiler.find_library_file(nsl_dirs, 'nsl') if libnsl is not None: library_dirs.append(os.path.dirname(libnsl)) libs.append('nsl') if self.compiler.find_library_file(self.lib_dirs, 'tirpc'): libs.append('tirpc') self.add(Extension('nis', ['nismodule.c'], libraries=libs, library_dirs=library_dirs, include_dirs=includes_dirs)) class PyBuildInstall(install): # Suppress the warning about installation into the lib_dynload # directory, which is not in sys.path when running Python during # installation: def initialize_options (self): install.initialize_options(self) self.warn_dir=0 # Customize subcommands to not install an egg-info file for Python sub_commands = [('install_lib', install.has_lib), ('install_headers', install.has_headers), ('install_scripts', install.has_scripts), ('install_data', install.has_data)] class PyBuildInstallLib(install_lib): # Do exactly what install_lib does but make sure correct access modes get # set on installed directories and files. All installed files with get # mode 644 unless they are a shared library in which case they will get # mode 755. All installed directories will get mode 755. # this is works for EXT_SUFFIX too, which ends with SHLIB_SUFFIX shlib_suffix = sysconfig.get_config_var("SHLIB_SUFFIX") def install(self): outfiles = install_lib.install(self) self.set_file_modes(outfiles, 0o644, 0o755) self.set_dir_modes(self.install_dir, 0o755) return outfiles def set_file_modes(self, files, defaultMode, sharedLibMode): if not files: return for filename in files: if os.path.islink(filename): continue mode = defaultMode if filename.endswith(self.shlib_suffix): mode = sharedLibMode log.info("changing mode of %s to %o", filename, mode) if not self.dry_run: os.chmod(filename, mode) def set_dir_modes(self, dirname, mode): for dirpath, dirnames, fnames in os.walk(dirname): if os.path.islink(dirpath): continue log.info("changing mode of %s to %o", dirpath, mode) if not self.dry_run: os.chmod(dirpath, mode) class PyBuildScripts(build_scripts): def copy_scripts(self): outfiles, updated_files = build_scripts.copy_scripts(self) fullversion = '-{0[0]}.{0[1]}'.format(sys.version_info) minoronly = '.{0[1]}'.format(sys.version_info) newoutfiles = [] newupdated_files = [] for filename in outfiles: if filename.endswith('2to3'): newfilename = filename + fullversion else: newfilename = filename + minoronly log.info('renaming %s to %s', filename, newfilename) os.rename(filename, newfilename) newoutfiles.append(newfilename) if filename in updated_files: newupdated_files.append(newfilename) return newoutfiles, newupdated_files def main(): set_compiler_flags('CFLAGS', 'PY_CFLAGS_NODIST') set_compiler_flags('LDFLAGS', 'PY_LDFLAGS_NODIST') class DummyProcess: """Hack for parallel build""" ProcessPoolExecutor = None sys.modules['concurrent.futures.process'] = DummyProcess validate_tzpath() # turn off warnings when deprecated modules are imported import warnings 
warnings.filterwarnings("ignore",category=DeprecationWarning) setup(# PyPI Metadata (PEP 301) name = "Python", version = sys.version.split()[0], url = "http://www.python.org/%d.%d" % sys.version_info[:2], maintainer = "Guido van Rossum and the Python community", maintainer_email = "[email protected]", description = "A high-level object-oriented programming language", long_description = SUMMARY.strip(), license = "PSF license", classifiers = [x for x in CLASSIFIERS.split("\n") if x], platforms = ["Many"], # Build info cmdclass = {'build_ext': PyBuildExt, 'build_scripts': PyBuildScripts, 'install': PyBuildInstall, 'install_lib': PyBuildInstallLib}, # The struct module is defined here, because build_ext won't be # called unless there's at least one extension module defined. ext_modules=[Extension('_struct', ['_struct.c'])], # If you change the scripts installed here, you also need to # check the PyBuildScripts command above, and change the links # created by the bininstall target in Makefile.pre.in scripts = ["Tools/scripts/pydoc3", "Tools/scripts/idle3", "Tools/scripts/2to3"] ) # --install-platlib if __name__ == '__main__': main()
the-stack_106_30906
import inspect import logging from functools import wraps from json import loads from traceback import format_exc import paste.httpexceptions from six import string_types from galaxy.exceptions import error_codes, MessageException from galaxy.util import ( parse_non_hex_float, unicodify ) from galaxy.util.json import safe_dumps from galaxy.web.framework import url_for log = logging.getLogger(__name__) JSON_CONTENT_TYPE = "application/json" JSONP_CONTENT_TYPE = "application/javascript" JSONP_CALLBACK_KEY = 'callback' def error(message): raise MessageException(message, type='error') # ----------------------------------------------------------------------------- web controller decorators def _save_orig_fn(wrapped, orig): if not hasattr(orig, '_orig'): wrapped._orig = orig return wrapped def expose(func): """ Decorator: mark a function as 'exposed' and thus web accessible """ func.exposed = True return func def json(func, pretty=False): """ Format the response as JSON and set the response content type to JSON_CONTENT_TYPE. """ @wraps(func) def call_and_format(self, trans, *args, **kwargs): # pull out any callback argument to the api endpoint and set the content type to json or javascript jsonp_callback = kwargs.pop(JSONP_CALLBACK_KEY, None) if jsonp_callback: trans.response.set_content_type(JSONP_CONTENT_TYPE) else: trans.response.set_content_type(JSON_CONTENT_TYPE) rval = func(self, trans, *args, **kwargs) return _format_return_as_json(rval, jsonp_callback, pretty=(pretty or trans.debug)) if not hasattr(func, '_orig'): call_and_format._orig = func return expose(_save_orig_fn(call_and_format, func)) def json_pretty(func): """ Indent and sort returned JSON. """ return json(func, pretty=True) def require_login(verb="perform this action", use_panels=False, webapp='galaxy'): def argcatcher(func): @wraps(func) def decorator(self, trans, *args, **kwargs): if trans.get_user(): return func(self, trans, *args, **kwargs) else: return trans.show_error_message( 'You must be <a target="galaxy_main" href="%s">logged in</a> to %s.' % (url_for(controller='user', action='login', webapp=webapp), verb), use_panels=use_panels) return decorator return argcatcher def require_admin(func): @wraps(func) def decorator(self, trans, *args, **kwargs): if not trans.user_is_admin: msg = "You must be an administrator to access this feature." user = trans.get_user() if not trans.app.config.admin_users_list: msg = "You must be logged in as an administrator to access this feature, but no administrators are set in the Galaxy configuration." elif not user: msg = "You must be logged in as an administrator to access this feature." trans.response.status = 403 if trans.response.get_content_type() == 'application/json': return msg else: return trans.show_error_message(msg) return func(self, trans, *args, **kwargs) return decorator # ----------------------------------------------------------------------------- (original) api decorators def expose_api(func, to_json=True, user_required=True): """ Expose this function via the API. 
""" @wraps(func) def decorator(self, trans, *args, **kwargs): def error(environ, start_response): start_response(error_status, [('Content-type', 'text/plain')]) return error_message error_status = '403 Forbidden' if trans.error_message: return trans.error_message if user_required and trans.anonymous: error_message = "API Authentication Required for this request" return error if trans.request.body: try: kwargs['payload'] = __extract_payload_from_request(trans, func, kwargs) except ValueError: error_status = '400 Bad Request' error_message = 'Your request did not appear to be valid JSON, please consult the API documentation' return error # pull out any callback argument to the api endpoint and set the content type to json or javascript jsonp_callback = kwargs.pop(JSONP_CALLBACK_KEY, None) if jsonp_callback: trans.response.set_content_type(JSONP_CONTENT_TYPE) else: trans.response.set_content_type(JSON_CONTENT_TYPE) # send 'do not cache' headers to handle IE's caching of ajax get responses trans.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" # Perform api_run_as processing, possibly changing identity if 'payload' in kwargs and isinstance(kwargs['payload'], dict) and 'run_as' in kwargs['payload']: if not trans.user_can_do_run_as: error_message = 'User does not have permissions to run jobs as another user' return error try: decoded_user_id = trans.security.decode_id(kwargs['payload']['run_as']) except TypeError: trans.response.status = 400 return "Malformed user id ( %s ) specified, unable to decode." % str(kwargs['payload']['run_as']) try: user = trans.sa_session.query(trans.app.model.User).get(decoded_user_id) trans.api_inherit_admin = trans.user_is_admin trans.set_user(user) except Exception: trans.response.status = 400 return "That user does not exist." try: rval = func(self, trans, *args, **kwargs) if to_json: rval = _format_return_as_json(rval, jsonp_callback, pretty=trans.debug) return rval except paste.httpexceptions.HTTPException: raise # handled except Exception: log.exception('Uncaught exception in exposed API method:') raise paste.httpexceptions.HTTPServerError() return expose(_save_orig_fn(decorator, func)) def __extract_payload_from_request(trans, func, kwargs): content_type = trans.request.headers.get('content-type', '') if content_type.startswith('application/x-www-form-urlencoded') or content_type.startswith('multipart/form-data'): # If the content type is a standard type such as multipart/form-data, the wsgi framework parses the request body # and loads all field values into kwargs. However, kwargs also contains formal method parameters etc. which # are not a part of the request body. This is a problem because it's not possible to differentiate between values # which are a part of the request body, and therefore should be a part of the payload, and values which should not be # in the payload. Therefore, the decorated method's formal arguments are discovered through reflection and removed from # the payload dictionary. This helps to prevent duplicate argument conflicts in downstream methods. payload = kwargs.copy() named_args, _, _, _ = inspect.getargspec(func) for arg in named_args: payload.pop(arg, None) for k, v in payload.items(): if isinstance(v, string_types): try: # note: parse_non_hex_float only needed here for single string values where something like # 40000000000000e5 will be parsed as a scientific notation float. 
This is as opposed to hex strings # in larger JSON structures where quoting prevents this (further below) payload[k] = loads(v, parse_float=parse_non_hex_float) except Exception: # may not actually be json, just continue pass else: # Assume application/json content type and parse request body manually, since wsgi won't do it. However, the order of this check # should ideally be in reverse, with the if clause being a check for application/json and the else clause assuming a standard encoding # such as multipart/form-data. Leaving it as is for backward compatibility, just in case. payload = loads(unicodify(trans.request.body)) return payload def expose_api_raw(func): """ Expose this function via the API but don't dump the results to JSON. """ return expose_api(func, to_json=False) def expose_api_raw_anonymous(func): """ Expose this function via the API but don't dump the results to JSON. """ return expose_api(func, to_json=False, user_required=False) def expose_api_anonymous(func, to_json=True): """ Expose this function via the API but don't require a set user. """ return expose_api(func, to_json=to_json, user_required=False) # ----------------------------------------------------------------------------- (new) api decorators # TODO: rename as expose_api and make default. def _future_expose_api(func, to_json=True, user_required=True, user_or_session_required=True, handle_jsonp=True): """ Expose this function via the API. """ @wraps(func) def decorator(self, trans, *args, **kwargs): # errors passed in from trans._authenicate_api if trans.error_message: return __api_error_response(trans, status_code=403, err_code=error_codes.USER_NO_API_KEY, err_msg=trans.error_message) if trans.anonymous: # error if anon and user required if user_required: return __api_error_response(trans, status_code=403, err_code=error_codes.USER_NO_API_KEY, err_msg="API authentication required for this request") # error if anon and no session if not trans.galaxy_session and user_or_session_required: return __api_error_response(trans, status_code=403, err_code=error_codes.USER_NO_API_KEY, err_msg="API authentication required for this request") if trans.request.body: try: kwargs['payload'] = __extract_payload_from_request(trans, func, kwargs) except ValueError: error_code = error_codes.USER_INVALID_JSON return __api_error_response(trans, status_code=400, err_code=error_code) # pull out any callback argument to the api endpoint and set the content type to json or javascript # TODO: use handle_jsonp to NOT overwrite existing tool_shed JSONP jsonp_callback = kwargs.pop(JSONP_CALLBACK_KEY, None) if handle_jsonp else None if jsonp_callback: trans.response.set_content_type(JSONP_CONTENT_TYPE) else: trans.response.set_content_type(JSON_CONTENT_TYPE) # send 'do not cache' headers to handle IE's caching of ajax get responses trans.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" # TODO: Refactor next block out into a helper procedure. # Perform api_run_as processing, possibly changing identity if 'payload' in kwargs and 'run_as' in kwargs['payload']: if not trans.user_can_do_run_as: error_code = error_codes.USER_CANNOT_RUN_AS return __api_error_response(trans, err_code=error_code, status_code=403) try: decoded_user_id = trans.security.decode_id(kwargs['payload']['run_as']) except (TypeError, ValueError): error_message = "Malformed user id ( %s ) specified, unable to decode." 
% str(kwargs['payload']['run_as']) error_code = error_codes.USER_INVALID_RUN_AS return __api_error_response(trans, err_code=error_code, err_msg=error_message, status_code=400) try: user = trans.sa_session.query(trans.app.model.User).get(decoded_user_id) trans.api_inherit_admin = trans.user_is_admin trans.set_user(user) except Exception: error_code = error_codes.USER_INVALID_RUN_AS return __api_error_response(trans, err_code=error_code, status_code=400) try: rval = func(self, trans, *args, **kwargs) if to_json: rval = _format_return_as_json(rval, jsonp_callback, pretty=trans.debug) return rval except MessageException as e: traceback_string = format_exc() return __api_error_response(trans, exception=e, traceback=traceback_string) except paste.httpexceptions.HTTPException: # TODO: Allow to pass or format for the API??? raise # handled except Exception as e: traceback_string = format_exc() error_message = 'Uncaught exception in exposed API method:' log.exception(error_message) return __api_error_response( trans, status_code=500, exception=e, traceback=traceback_string, err_msg=error_message, err_code=error_codes.UNKNOWN ) if not hasattr(func, '_orig'): decorator._orig = func decorator.exposed = True return decorator def _format_return_as_json(rval, jsonp_callback=None, pretty=False): """ Formats a return value as JSON or JSONP if `jsonp_callback` is present. Use `pretty=True` to return pretty printed json. """ dumps_kwargs = dict(indent=4, sort_keys=True) if pretty else {} json = safe_dumps(rval, **dumps_kwargs) if jsonp_callback: json = "{}({});".format(jsonp_callback, json) return json def __api_error_message(trans, **kwds): exception = kwds.get("exception", None) if exception: # If we are passed a MessageException use err_msg. default_error_code = getattr(exception, "err_code", error_codes.UNKNOWN) default_error_message = getattr(exception, "err_msg", default_error_code.default_error_message) extra_error_info = getattr(exception, 'extra_error_info', {}) if not isinstance(extra_error_info, dict): extra_error_info = {} else: default_error_message = "Error processing API request." default_error_code = error_codes.UNKNOWN extra_error_info = {} traceback_string = kwds.get("traceback", "No traceback available.") err_msg = kwds.get("err_msg", default_error_message) error_code_object = kwds.get("err_code", default_error_code) try: error_code = error_code_object.code except AttributeError: # Some sort of bad error code sent in, logic failure on part of # Galaxy developer. error_code = error_codes.UNKNOWN.code # Would prefer the terminology of error_code and error_message, but # err_msg used a good number of places already. Might as well not change # it? error_response = dict(err_msg=err_msg, err_code=error_code, **extra_error_info) if trans.debug: # TODO: Should admins get to see traceback as well? error_response["traceback"] = traceback_string return error_response def __api_error_response(trans, **kwds): error_dict = __api_error_message(trans, **kwds) exception = kwds.get("exception", None) # If we are given an status code directly - use it - otherwise check # the exception for a status_code attribute. if "status_code" in kwds: status_code = int(kwds.get("status_code")) elif hasattr(exception, "status_code"): status_code = int(exception.status_code) else: status_code = 500 response = trans.response if not response.status or str(response.status).startswith("20"): # Unset status code appears to be string '200 OK', if anything # non-success (i.e. 
        # not 200 or 201) has been set, do not override
        # underlying controller.
        response.status = status_code
    return safe_dumps(error_dict)


def _future_expose_api_anonymous(func, to_json=True):
    """
    Expose this function via the API but don't require a set user.
    """
    return _future_expose_api(func, to_json=to_json, user_required=False)


def _future_expose_api_anonymous_and_sessionless(func, to_json=True):
    """
    Expose this function via the API but don't require a user or a galaxy_session.
    """
    return _future_expose_api(
        func,
        to_json=to_json,
        user_required=False,
        user_or_session_required=False,
    )


def _future_expose_api_raw(func):
    return _future_expose_api(func, to_json=False, user_required=True)


def _future_expose_api_raw_anonymous(func):
    return _future_expose_api(func, to_json=False, user_required=False)


def _future_expose_api_raw_anonymous_and_sessionless(func):
    # TODO: tool_shed api implemented JSONP first on a method-by-method basis, don't overwrite that for now
    return _future_expose_api(
        func,
        to_json=False,
        user_required=False,
        user_or_session_required=False,
        handle_jsonp=False
    )
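# --- Usage sketch added for illustration (not part of the original Galaxy
# module). "ExampleController" and its methods are hypothetical names; the
# sketch only shows how the decorators above are meant to be stacked on
# controller methods, which always receive the transaction object `trans`
# as their first argument.
class ExampleController:

    @expose_api_anonymous
    def index(self, trans, **kwargs):
        # expose_api sets the JSON content type and serializes the return
        # value through _format_return_as_json() before it reaches the client.
        return {"status": "ok"}

    @require_admin
    @expose_api
    def delete(self, trans, id, **kwargs):
        # require_admin rejects non-admin users with a 403 before the wrapped
        # body runs; expose_api parses any JSON request body into
        # kwargs["payload"].
        return {"deleted": id}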
the-stack_106_30907
import logging from enum import IntEnum from blatann.nrf import nrf_events, nrf_types from blatann import uuid, exceptions from blatann.waitables.connection_waitable import ClientConnectionWaitable from blatann.event_type import Event, EventSource logger = logging.getLogger(__name__) class AdvertisingFlags(object): LIMITED_DISCOVERY_MODE = 0x01 GENERAL_DISCOVERY_MODE = 0x02 BR_EDR_NOT_SUPPORTED = 0x04 BR_EDR_CONTROLLER = 0x08 BR_EDR_HOST = 0x10 AdvertisingMode = nrf_types.BLEGapAdvType class AdvertisingData(object): """ Class which represents data that can be advertised """ MAX_ENCODED_LENGTH = 31 # Bluetooth-defined max length that the encoded data can be Types = nrf_types.BLEAdvData.Types # Enum representing the different advertising data types def __init__(self, flags=None, local_name=None, local_name_complete=True, service_uuid16s=None, service_uuid128s=None, has_more_uuid16_services=False, has_more_uuid128_services=False, service_data=None, manufacturer_data=None, **other_flags): self.flags = flags self.local_name = local_name self.local_name_complete = local_name_complete self.service_uuid16s = service_uuid16s or [] self.service_uuid128s = service_uuid128s or [] self.has_more_uuid16_services = has_more_uuid16_services self.has_more_uuid128_services = has_more_uuid128_services self.service_data = service_data self.manufacturer_data = manufacturer_data self.other_flags = {self.Types[k]: v for k, v in other_flags.items()} if not isinstance(self.service_uuid16s, (list, tuple)): self.service_uuid16s = [self.service_uuid16s] if not isinstance(self.service_uuid128s, (list, tuple)): self.service_uuid128s = [self.service_uuid128s] @property def service_uuids(self): """ Gets all of the 16-bit and 128-bit service UUIDs specified in the advertising data :return: list of the service UUIDs present in the advertising data :rtype: list[uuid.Uuid] """ return self.service_uuid16s + self.service_uuid128s def check_encoded_length(self): """ Checks if the encoded length of this advertising data payload meets the maximum allowed length specified by the Bluetooth spec :return: a tuple of the encoded length and a bool result of whether or not it meets requirements :rtype: tuple[int, bool] """ ble_adv_data = self.to_ble_adv_data() encoded_data = ble_adv_data.to_list() return len(encoded_data), len(encoded_data) <= self.MAX_ENCODED_LENGTH def to_ble_adv_data(self): """ Converts the advertising data to a BLEAdvData object that can be used by the nRF Driver :return: the BLEAdvData object which represents this advertising data :rtype: nrf_types.BLEAdvData """ records = self.other_flags.copy() if self.flags: records[self.Types.flags] = [int(self.flags)] if self.local_name: name_type = self.Types.complete_local_name if self.local_name_complete else self.Types.short_local_name records[name_type] = self.local_name if self.service_uuid128s: uuid_type = self.Types.service_128bit_uuid_more_available if self.has_more_uuid128_services else self.Types.service_128bit_uuid_complete # UUID data is little-endian, reverse the lists and concatenate uuid_data = [] for u in self.service_uuid128s: uuid_data.extend(u.uuid[::-1]) records[uuid_type] = uuid_data if self.service_uuid16s: uuid_type = self.Types.service_16bit_uuid_more_available if self.has_more_uuid16_services else self.Types.service_16bit_uuid_complete uuid_data = [] for u in self.service_uuid16s: uuid_data.append(u.uuid & 0xFF) uuid_data.append((u.uuid >> 8) & 0xFF) records[uuid_type] = uuid_data if self.service_data: records[self.Types.service_data] = 
self.service_data if self.manufacturer_data: records[self.Types.manufacturer_specific_data] = self.manufacturer_data record_string_keys = {k.name: v for k, v in records.items()} return nrf_types.BLEAdvData(**record_string_keys) @classmethod def from_ble_adv_records(cls, advertise_records): """ Converts a dictionary of AdvertisingData.Type: value keypairs into an object of this class :param advertise_records: a dictionary mapping the advertise data types to their corresponding values :type advertise_records: dict :return: the AdvertisingData from the records given :rtype: AdvertisingData """ flags = advertise_records.pop(cls.Types.flags, None) if flags: flags = flags[0] local_name_complete = False local_name = advertise_records.pop(cls.Types.complete_local_name, None) if local_name: local_name_complete = True else: local_name = advertise_records.pop(cls.Types.short_local_name, None) if local_name: local_name = str("".join(chr(c) for c in local_name)) manufacturer_data = advertise_records.pop(cls.Types.manufacturer_specific_data, None) if manufacturer_data: manufacturer_data = bytearray(manufacturer_data) service_data = advertise_records.pop(cls.Types.service_data, None) if service_data: service_data = bytearray(service_data) more_16bit_services = False uuid16_data = advertise_records.pop(cls.Types.service_16bit_uuid_more_available, None) if uuid16_data: more_16bit_services = True else: uuid16_data = advertise_records.pop(cls.Types.service_16bit_uuid_complete, None) service_uuid16s = [] if uuid16_data: for i in range(0, len(uuid16_data), 2): uuid16 = (uuid16_data[i+1] << 8) | uuid16_data[i] service_uuid16s.append(uuid.Uuid16(uuid16)) more_128bit_services = False uuid128_data = advertise_records.pop(cls.Types.service_128bit_uuid_more_available, None) if uuid128_data: more_128bit_services = True else: uuid128_data = advertise_records.pop(cls.Types.service_128bit_uuid_complete, None) service_uuid128s = [] if uuid128_data: for i in range(0, len(uuid128_data), 16): uuid128 = uuid128_data[i:i+16][::-1] service_uuid128s.append(uuid.Uuid128(uuid128)) record_string_keys = {k.name: bytearray(v) for k, v in advertise_records.items()} return AdvertisingData(flags=flags, local_name=local_name, local_name_complete=local_name_complete, service_uuid16s=service_uuid16s, service_uuid128s=service_uuid128s, has_more_uuid16_services=more_16bit_services, has_more_uuid128_services=more_128bit_services, service_data=service_data, manufacturer_data=manufacturer_data, **record_string_keys) def __repr__(self): params = [] if self.local_name: name_str = "Name: {}".format(self.local_name) if not self.local_name_complete: name_str += "(short)" params.append(name_str) if self.flags: params.append("Flags: {}".format(self.flags)) if self.service_uuid16s: param_str = "16-bit Services: [{}]".format(", ".join(str(u) for u in self.service_uuid16s)) if self.has_more_uuid16_services: param_str += "+ more" params.append(param_str) if self.service_uuid128s: param_str = "128-bit Services: [{}]".format(", ".join(str(u) for u in self.service_uuid128s)) if self.has_more_uuid128_services: param_str += "+ more" params.append(param_str) if self.service_data: params.append("Service Data: {}".format(self.service_data)) return "{}({})".format(self.__class__.__name__, ", ".join(params)) class Advertiser(object): # Constant used to indicate that the BLE device should advertise indefinitely, until # connected to or stopped manually ADVERTISE_FOREVER = 0 def __init__(self, ble_device, client): """ :type ble_device: blatann.device.BleDevice :type 
client: blatann.peer.Client """ self.ble_device = ble_device self.advertising = False self._auto_restart = False self.client = client self.ble_device.ble_driver.event_subscribe(self._handle_adv_timeout, nrf_events.GapEvtTimeout) self.ble_device.ble_driver.event_subscribe(self._handle_disconnect, nrf_events.GapEvtDisconnected) self._on_advertising_timeout = EventSource("Advertising Timeout", logger) self._advertise_interval = 100 self._timeout = self.ADVERTISE_FOREVER self._advertise_mode = AdvertisingMode.connectable_undirected @property def on_advertising_timeout(self): """ Event generated whenever advertising times out and finishes with no connections made Event args: None :return: an Event which can have handlers registered to and deregistered from :rtype: Event """ return self._on_advertising_timeout def set_advertise_data(self, advertise_data=AdvertisingData(), scan_response=AdvertisingData()): """ Sets the advertising and scan response data which will be broadcasted to peers during advertising Note: BLE Restricts advertise and scan response data to an encoded length of 31 bytes each. Use AdvertisingData.check_encoded_length() to determine if the :param advertise_data: The advertise data to use :type advertise_data: AdvertisingData :param scan_response: The scan response data to use :type scan_response: AdvertisingData """ adv_len, adv_pass = advertise_data.check_encoded_length() scan_len, scan_pass = advertise_data.check_encoded_length() if not adv_pass: raise exceptions.InvalidOperationException("Encoded Advertising data length is too long ({} bytes). " "Max: {} bytes".format(adv_len, advertise_data.MAX_ENCODED_LENGTH)) if not scan_pass: raise exceptions.InvalidOperationException("Encoded Scan Response data length is too long ({} bytes). " "Max: {} bytes".format(scan_len, advertise_data.MAX_ENCODED_LENGTH)) self.ble_device.ble_driver.ble_gap_adv_data_set(advertise_data.to_ble_adv_data(), scan_response.to_ble_adv_data()) def set_default_advertise_params(self, advertise_interval_ms, timeout_seconds, advertise_mode=AdvertisingMode.connectable_undirected): """ Sets the default advertising parameters so they do not need to be specified on each start :param advertise_interval_ms: The advertising interval, in milliseconds :param timeout_seconds: How long to advertise for before timing out, in seconds :param advertise_mode: The mode the advertiser should use :type advertise_mode: AdvertisingMode """ self._advertise_interval = advertise_interval_ms self._timeout = timeout_seconds self._advertise_mode = advertise_mode def start(self, adv_interval_ms=None, timeout_sec=None, auto_restart=False, advertise_mode=None): """ Starts advertising with the given parameters. If none given, will use the default :param adv_interval_ms: The interval at which to send out advertise packets, in milliseconds :param timeout_sec: The duration which to advertise for :param auto_restart: Flag indicating that advertising should restart automatically when the timeout expires, or when the client disconnects :param advertise_mode: The mode the advertiser should use :return: A waitable that will expire either when the timeout occurs, or a client connects. 
                 Waitable Returns a peer.Client() object
        :rtype: ClientConnectionWaitable
        """
        if self.advertising:
            self._stop()
        if adv_interval_ms is None:
            adv_interval_ms = self._advertise_interval
        if timeout_sec is None:
            timeout_sec = self._timeout
        if advertise_mode is None:
            advertise_mode = self._advertise_mode
        self._timeout = timeout_sec
        self._advertise_interval = adv_interval_ms
        self._advertise_mode = advertise_mode
        params = nrf_types.BLEGapAdvParams(adv_interval_ms, timeout_sec, advertise_mode)
        self._auto_restart = auto_restart
        logger.info("Starting advertising, params: {}, auto-restart: {}".format(params, auto_restart))
        self.ble_device.ble_driver.ble_gap_adv_start(params)
        self.advertising = True
        return ClientConnectionWaitable(self.ble_device, self.client)

    def stop(self):
        """
        Stops advertising and disables the auto-restart functionality (if enabled)
        """
        self._auto_restart = False
        self._stop()

    def _stop(self):
        if not self.advertising:
            return
        self.advertising = False
        try:
            self.ble_device.ble_driver.ble_gap_adv_stop()
        except Exception:
            pass

    def _handle_adv_timeout(self, driver, event):
        """
        :type event: nrf_events.GapEvtTimeout
        """
        if event.src == nrf_events.BLEGapTimeoutSrc.advertising:
            self.advertising = False
            self._on_advertising_timeout.notify(self)
            if self._auto_restart:
                self.start()

    def _handle_disconnect(self, driver, event):
        """
        :type event: nrf_events.GapEvtDisconnected
        """
        if event.conn_handle == self.client.conn_handle and self._auto_restart:
            self.start()
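# --- Usage sketch added for illustration (not part of the original blatann
# module). The helper below is an assumption about typical use: it builds an
# advertising payload, verifies it fits the 31-byte advertising PDU, and is
# intended to be handed to Advertiser.set_advertise_data() before calling
# Advertiser.start() on an opened BleDevice.
def _example_advertising_payload():
    adv_data = AdvertisingData(
        flags=AdvertisingFlags.GENERAL_DISCOVERY_MODE | AdvertisingFlags.BR_EDR_NOT_SUPPORTED,
        local_name="Example",
        service_uuid16s=uuid.Uuid16(0x180F),  # 0x180F: Battery Service, used as an example
    )
    encoded_len, fits = adv_data.check_encoded_length()
    if not fits:
        raise ValueError("advertising payload too long: %d bytes" % encoded_len)
    return adv_data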
the-stack_106_30908
"""413. Arithmetic Slices https://leetcode.com/problems/arithmetic-slices/ """ from typing import List class Solution: def number_of_arithmetic_slices(self, nums: List[int]) -> int: i = 0 ans = 0 while i < len(nums) - 2: j = i + 1 diff = nums[j] - nums[i] while j < len(nums) and nums[j] - nums[j - 1] == diff: j += 1 n = j - i if n >= 3: ans += (n - 1) * (n - 2) // 2 i = j - 1 else: i += 1 return ans
the-stack_106_30909
while True:
    n, *k = map(int, input().split())
    if n == 0:
        break
    inp = input()
    if len(inp) % n != 0:
        inp += ' ' * (n - len(inp) % n)
    out = []
    for i in range(len(inp) // n):
        for j in k:
            out.append(inp[i * n:i * n + n][j - 1])
    print("'", *out, "'", sep='')
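# Note added for illustration (the input format is inferred from the code,
# not taken from the original problem statement): each test case is a header
# line "n k1 k2 ..." followed by a message line. The message is padded with
# spaces to a multiple of n, cut into blocks of n characters, and the 1-based
# positions k1, k2, ... are taken from each block. For example, the header
# "5 2 4" with the line "abcdefghij" prints 'bdgi' (including the quote
# characters).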
the-stack_106_30910
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
sys.path.append('..')

from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
from hypothesis import assume


def sample_program_configs(draw):
    in_shape = draw(
        st.lists(
            st.integers(
                min_value=1, max_value=10), min_size=4, max_size=4))
    mode_data = draw(st.sampled_from(["channel", "element"]))
    alpha_shape = [1]
    if mode_data == "channel":
        alpha_shape = [1, in_shape[1], 1, 1]
    elif mode_data == 'element':
        alpha_shape = [1] + list(in_shape)[1:]

    def generate_input(*args, **kwargs):
        return np.random.random(kwargs['tensor_shape']).astype(np.float32)

    def generate_alpha(*args, **kwargs):
        return np.random.random(alpha_shape).astype(np.float32)

    build_ops = OpConfig(
        type="prelu",
        inputs={
            "X": ["input_data"],
            "Alpha": ['alpha_data'],
        },
        outputs={"Out": ["output_data"], },
        attrs={"mode": mode_data, })

    program_config = ProgramConfig(
        ops=[build_ops],
        weights={},
        inputs={
            "input_data": TensorConfig(data_gen=partial(
                generate_input, tensor_shape=in_shape)),
            "alpha_data": TensorConfig(data_gen=partial(
                generate_input, tensor_shape=alpha_shape)),
        },
        outputs=["output_data"])

    return program_config
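# Note added for illustration (not part of the original PaddlePaddle Lite
# test): for a drawn input shape such as [2, 3, 4, 5], the alpha tensor shape
# follows the prelu mode selected above: [1, 3, 1, 1] in "channel" mode and
# [1, 3, 4, 5] in "element" mode, so each hypothesis example exercises one of
# the two broadcasting layouts supported by the operator.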
the-stack_106_30911
from typing import Callable, Iterable, Mapping, Optional, Any, List, Union from enum import Enum from pathlib import Path from wasabi import Printer import srsly import re import sys import itertools from ._util import app, Arg, Opt from ..training import docs_to_json from ..tokens import Doc, DocBin from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs from ..training.converters import conllu_to_docs # Converters are matched by file extension except for ner/iob, which are # matched by file extension and content. To add a converter, add a new # entry to this dict with the file extension mapped to the converter function # imported from /converters. CONVERTERS: Mapping[str, Callable[..., Iterable[Doc]]] = { "conllubio": conllu_to_docs, "conllu": conllu_to_docs, "conll": conll_ner_to_docs, "ner": conll_ner_to_docs, "iob": iob_to_docs, "json": json_to_docs, } # File types that can be written to stdout FILE_TYPES_STDOUT = ("json",) class FileTypes(str, Enum): json = "json" spacy = "spacy" @app.command("convert") def convert_cli( # fmt: off input_path: str = Arg(..., help="Input file or directory", exists=True), output_dir: Path = Arg("-", help="Output directory. '-' for stdout.", allow_dash=True, exists=True), file_type: FileTypes = Opt("spacy", "--file-type", "-t", help="Type of data to produce"), n_sents: int = Opt(1, "--n-sents", "-n", help="Number of sentences per doc (0 to disable)"), seg_sents: bool = Opt(False, "--seg-sents", "-s", help="Segment sentences (for -c ner)"), model: Optional[str] = Opt(None, "--model", "--base", "-b", help="Trained spaCy pipeline for sentence segmentation to use as base (for --seg-sents)"), morphology: bool = Opt(False, "--morphology", "-m", help="Enable appending morphology to tags"), merge_subtokens: bool = Opt(False, "--merge-subtokens", "-T", help="Merge CoNLL-U subtokens"), converter: str = Opt("auto", "--converter", "-c", help=f"Converter: {tuple(CONVERTERS.keys())}"), ner_map: Optional[Path] = Opt(None, "--ner-map", "-nm", help="NER tag mapping (as JSON-encoded dict of entity types)", exists=True), lang: Optional[str] = Opt(None, "--lang", "-l", help="Language (if tokenizer required)"), concatenate: bool = Opt(None, "--concatenate", "-C", help="Concatenate output to a single file"), # fmt: on ): """ Convert files into json or DocBin format for training. The resulting .spacy file can be used with the train command and other experiment management functions. 
If no output_dir is specified and the output format is JSON, the data is written to stdout, so you can pipe them forward to a JSON file: $ spacy convert some_file.conllu --file-type json > some_file.json DOCS: https://spacy.io/api/cli#convert """ input_path = Path(input_path) output_dir: Union[str, Path] = "-" if output_dir == Path("-") else output_dir silent = output_dir == "-" msg = Printer(no_print=silent) verify_cli_args(msg, input_path, output_dir, file_type.value, converter, ner_map) converter = _get_converter(msg, converter, input_path) convert( input_path, output_dir, file_type=file_type.value, n_sents=n_sents, seg_sents=seg_sents, model=model, morphology=morphology, merge_subtokens=merge_subtokens, converter=converter, ner_map=ner_map, lang=lang, concatenate=concatenate, silent=silent, msg=msg, ) def convert( input_path: Path, output_dir: Union[str, Path], *, file_type: str = "json", n_sents: int = 1, seg_sents: bool = False, model: Optional[str] = None, morphology: bool = False, merge_subtokens: bool = False, converter: str = "auto", ner_map: Optional[Path] = None, lang: Optional[str] = None, concatenate: bool = False, silent: bool = True, msg: Optional[Printer] = None, ) -> None: input_path = Path(input_path) if not msg: msg = Printer(no_print=silent) ner_map = srsly.read_json(ner_map) if ner_map is not None else None doc_files = [] for input_loc in walk_directory(input_path, converter): with input_loc.open("r", encoding="utf-8") as infile: input_data = infile.read() # Use converter function to convert data func = CONVERTERS[converter] docs = func( input_data, n_sents=n_sents, seg_sents=seg_sents, append_morphology=morphology, merge_subtokens=merge_subtokens, lang=lang, model=model, no_print=silent, ner_map=ner_map, ) doc_files.append((input_loc, docs)) if concatenate: all_docs = itertools.chain.from_iterable([docs for _, docs in doc_files]) doc_files = [(input_path, all_docs)] for input_loc, docs in doc_files: if file_type == "json": data = [docs_to_json(docs)] len_docs = len(data) else: db = DocBin(docs=docs, store_user_data=True) len_docs = len(db) data = db.to_bytes() # type: ignore[assignment] if output_dir == "-": _print_docs_to_stdout(data, file_type) else: if input_loc != input_path: subpath = input_loc.relative_to(input_path) output_file = Path(output_dir) / subpath.with_suffix(f".{file_type}") else: output_file = Path(output_dir) / input_loc.parts[-1] output_file = output_file.with_suffix(f".{file_type}") _write_docs_to_file(data, output_file, file_type) msg.good(f"Generated output file ({len_docs} documents): {output_file}") def _print_docs_to_stdout(data: Any, output_type: str) -> None: if output_type == "json": srsly.write_json("-", data) else: sys.stdout.buffer.write(data) def _write_docs_to_file(data: Any, output_file: Path, output_type: str) -> None: if not output_file.parent.exists(): output_file.parent.mkdir(parents=True) if output_type == "json": srsly.write_json(output_file, data) else: with output_file.open("wb") as file_: file_.write(data) def autodetect_ner_format(input_data: str) -> Optional[str]: # guess format from the first 20 lines lines = input_data.split("\n")[:20] format_guesses = {"ner": 0, "iob": 0} iob_re = re.compile(r"\S+\|(O|[IB]-\S+)") ner_re = re.compile(r"\S+\s+(O|[IB]-\S+)$") for line in lines: line = line.strip() if iob_re.search(line): format_guesses["iob"] += 1 if ner_re.search(line): format_guesses["ner"] += 1 if format_guesses["iob"] == 0 and format_guesses["ner"] > 0: return "ner" if format_guesses["ner"] == 0 and 
format_guesses["iob"] > 0: return "iob" return None def walk_directory(path: Path, converter: str) -> List[Path]: if not path.is_dir(): return [path] paths = [path] locs = [] seen = set() for path in paths: if str(path) in seen: continue seen.add(str(path)) if path.parts[-1].startswith("."): continue elif path.is_dir(): paths.extend(path.iterdir()) elif converter == "json" and not path.parts[-1].endswith("json"): continue elif converter == "conll" and not path.parts[-1].endswith("conll"): continue elif converter == "iob" and not path.parts[-1].endswith("iob"): continue else: locs.append(path) # It's good to sort these, in case the ordering messes up cache. locs.sort() return locs def verify_cli_args( msg: Printer, input_path: Path, output_dir: Union[str, Path], file_type: str, converter: str, ner_map: Optional[Path], ): if file_type not in FILE_TYPES_STDOUT and output_dir == "-": msg.fail( f"Can't write .{file_type} data to stdout. Please specify an output directory.", exits=1, ) if not input_path.exists(): msg.fail("Input file not found", input_path, exits=1) if output_dir != "-" and not Path(output_dir).exists(): msg.fail("Output directory not found", output_dir, exits=1) if ner_map is not None and not Path(ner_map).exists(): msg.fail("NER map not found", ner_map, exits=1) if input_path.is_dir(): input_locs = walk_directory(input_path, converter) if len(input_locs) == 0: msg.fail("No input files in directory", input_path, exits=1) file_types = list(set([loc.suffix[1:] for loc in input_locs])) if converter == "auto" and len(file_types) >= 2: file_types_str = ",".join(file_types) msg.fail("All input files must be same type", file_types_str, exits=1) if converter != "auto" and converter not in CONVERTERS: msg.fail(f"Can't find converter for {converter}", exits=1) def _get_converter(msg, converter, input_path: Path): if input_path.is_dir(): input_path = walk_directory(input_path, converter)[0] if converter == "auto": converter = input_path.suffix[1:] if converter == "ner" or converter == "iob": with input_path.open(encoding="utf8") as file_: input_data = file_.read() converter_autodetect = autodetect_ner_format(input_data) if converter_autodetect == "ner": msg.info("Auto-detected token-per-line NER format") converter = converter_autodetect elif converter_autodetect == "iob": msg.info("Auto-detected sentence-per-line NER format") converter = converter_autodetect else: msg.warn( "Can't automatically detect NER format. " "Conversion may not succeed. " "See https://spacy.io/api/cli#convert" ) return converter
the-stack_106_30912
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('cms_lab_carousel', '0003_auto_20150827_0111'),
    ]

    operations = [
        migrations.AlterField(
            model_name='slide',
            name='carousel',
            field=models.ForeignKey(to='cms_lab_carousel.Carousel', help_text='Choose a carousel for this slide.', on_delete=django.db.models.deletion.SET_NULL, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='publication',
            field=models.ForeignKey(to='cms_lab_publications.Publication', help_text='<strong>If this slide is for a publication, select/create a publication.</strong><br>The publication info will be used to auto-populate the title, subtitle, and description fields when slide is saved (if those fields are left blank).<br>To override this auto-fill behavior, manually enter the title, subtitle, and/or description below.', on_delete=django.db.models.deletion.PROTECT, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='publish_datetime',
            field=models.DateTimeField(default=django.utils.timezone.now, help_text='<strong>Choose date/time to publish slide.</strong><br>Slides are displayed in reverse-chronological order, so this can be used to control their order. A future date will hide a slide until that date.<br>If this is a slide for a publication and this field is not set to a future date/time or at least one day in the past, it will be auto-populated with the date of the publication.', verbose_name='date/time slide published'),
            preserve_default=True,
        ),
    ]
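# Illustrative sketch (not part of the migration file): applying this app's
# migrations programmatically through Django's management API instead of running
# "manage.py migrate". Assumes DJANGO_SETTINGS_MODULE points at the project settings.
import django
from django.core.management import call_command

django.setup()
call_command("migrate", "cms_lab_carousel", verbosity=1)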
the-stack_106_30914
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import functools import collections import traceback import numpy as np import logging from ppdet.core.workspace import register, serializable from .parallel_map import ParallelMap from .transform.batch_operators import Gt2YoloTarget __all__ = ['Reader', 'create_reader'] logger = logging.getLogger(__name__) class Compose(object): def __init__(self, transforms, ctx=None): self.transforms = transforms self.ctx = ctx def __call__(self, data): ctx = self.ctx if self.ctx else {} for f in self.transforms: try: data = f(data, ctx) except Exception as e: stack_info = traceback.format_exc() logger.info("fail to map op [{}] with error: {} and stack:\n{}". format(f, e, str(stack_info))) raise e return data def _calc_img_weights(roidbs): """ calculate the probabilities of each sample """ imgs_cls = [] num_per_cls = {} img_weights = [] for i, roidb in enumerate(roidbs): img_cls = set([k for cls in roidbs[i]['gt_class'] for k in cls]) imgs_cls.append(img_cls) for c in img_cls: if c not in num_per_cls: num_per_cls[c] = 1 else: num_per_cls[c] += 1 for i in range(len(roidbs)): weights = 0 for c in imgs_cls[i]: weights += 1 / num_per_cls[c] img_weights.append(weights) # probabilities sum to 1 img_weights = img_weights / np.sum(img_weights) return img_weights def _has_empty(item): def empty(x): if isinstance(x, np.ndarray) and x.size == 0: return True elif isinstance(x, collections.Sequence) and len(x) == 0: return True else: return False if isinstance(item, collections.Sequence) and len(item) == 0: return True if item is None: return True if empty(item): return True return False def _segm(samples): assert 'gt_poly' in samples segms = samples['gt_poly'] if 'is_crowd' in samples: is_crowd = samples['is_crowd'] if len(segms) != 0: assert len(segms) == is_crowd.shape[0] gt_masks = [] valid = True for i in range(len(segms)): segm = segms[i] gt_segm = [] if 'is_crowd' in samples and is_crowd[i]: gt_segm.append([[0, 0]]) else: for poly in segm: if len(poly) == 0: valid = False break gt_segm.append(np.array(poly).reshape(-1, 2)) if (not valid) or len(gt_segm) == 0: break gt_masks.append(gt_segm) return gt_masks def batch_arrange(batch_samples, fields): def im_shape(samples, dim=3): # hard code assert 'h' in samples assert 'w' in samples if dim == 3: # RCNN, .. return np.array((samples['h'], samples['w'], 1), dtype=np.float32) else: # YOLOv3, .. 
return np.array((samples['h'], samples['w']), dtype=np.int32) arrange_batch = [] for samples in batch_samples: one_ins = () for i, field in enumerate(fields): if field == 'gt_mask': one_ins += (_segm(samples), ) elif field == 'im_shape': one_ins += (im_shape(samples), ) elif field == 'im_size': one_ins += (im_shape(samples, 2), ) else: if field == 'is_difficult': field = 'difficult' assert field in samples, '{} not in samples'.format(field) one_ins += (samples[field], ) arrange_batch.append(one_ins) return arrange_batch @register @serializable class Reader(object): """ Args: dataset (DataSet): DataSet object sample_transforms (list of BaseOperator): a list of sample transforms operators. batch_transforms (list of BaseOperator): a list of batch transforms operators. batch_size (int): batch size. shuffle (bool): whether shuffle dataset or not. Default False. drop_last (bool): whether drop last batch or not. Default False. drop_empty (bool): whether drop sample when it's gt is empty or not. Default True. mixup_epoch (int): mixup epoc number. Default is -1, meaning not use mixup. class_aware_sampling (bool): whether use class-aware sampling or not. Default False. worker_num (int): number of working threads/processes. Default -1, meaning not use multi-threads/multi-processes. use_process (bool): whether use multi-processes or not. It only works when worker_num > 1. Default False. bufsize (int): buffer size for multi-threads/multi-processes, please note, one instance in buffer is one batch data. memsize (str): size of shared memory used in result queue when use_process is true. Default 3G. inputs_def (dict): network input definition use to get input fields, which is used to determine the order of returned data. """ def __init__(self, dataset=None, sample_transforms=None, batch_transforms=None, batch_size=None, shuffle=False, drop_last=False, drop_empty=True, mixup_epoch=-1, class_aware_sampling=False, worker_num=-1, use_process=False, use_fine_grained_loss=False, num_classes=80, bufsize=100, memsize='3G', inputs_def=None): self._dataset = dataset self._roidbs = self._dataset.get_roidb() self._fields = copy.deepcopy(inputs_def[ 'fields']) if inputs_def else None # transform self._sample_transforms = Compose(sample_transforms, {'fields': self._fields}) self._batch_transforms = None if use_fine_grained_loss: for bt in batch_transforms: if isinstance(bt, Gt2YoloTarget): bt.num_classes = num_classes elif batch_transforms: batch_transforms = [ bt for bt in batch_transforms if not isinstance(bt, Gt2YoloTarget) ] if batch_transforms: self._batch_transforms = Compose(batch_transforms, {'fields': self._fields}) # data if inputs_def and inputs_def.get('multi_scale', False): from ppdet.modeling.architectures.input_helper import multiscale_def im_shape = inputs_def[ 'image_shape'] if 'image_shape' in inputs_def else [ 3, None, None ] _, ms_fields = multiscale_def(im_shape, inputs_def['num_scales'], inputs_def['use_flip']) self._fields += ms_fields self._batch_size = batch_size self._shuffle = shuffle self._drop_last = drop_last self._drop_empty = drop_empty # sampling self._mixup_epoch = mixup_epoch self._class_aware_sampling = class_aware_sampling self._load_img = False self._sample_num = len(self._roidbs) if self._class_aware_sampling: self.img_weights = _calc_img_weights(self._roidbs) self._indexes = None self._pos = -1 self._epoch = -1 # multi-process self._worker_num = worker_num self._parallel = None if self._worker_num > -1: task = functools.partial(self.worker, self._drop_empty) self._parallel = 
ParallelMap(self, task, worker_num, bufsize, use_process, memsize) def __call__(self): if self._worker_num > -1: return self._parallel else: return self def __iter__(self): return self def reset(self): """implementation of Dataset.reset """ self.indexes = [i for i in range(self.size())] if self._class_aware_sampling: self.indexes = np.random.choice( self._sample_num, self._sample_num, replace=False, p=self.img_weights) if self._shuffle: np.random.shuffle(self.indexes) if self._mixup_epoch > 0 and len(self.indexes) < 2: logger.info("Disable mixup for dataset samples " "less than 2 samples") self._mixup_epoch = -1 if self._epoch < 0: self._epoch = 0 else: self._epoch += 1 self._pos = 0 def __next__(self): return self.next() def next(self): if self._epoch < 0: self.reset() if self.drained(): raise StopIteration batch = self._load_batch() if self._drop_last and len(batch) < self._batch_size: raise StopIteration if self._worker_num > -1: return batch else: return self.worker(self._drop_empty, batch) def _load_batch(self): batch = [] bs = 0 while bs != self._batch_size: if self._pos >= self.size(): break pos = self.indexes[self._pos] sample = copy.deepcopy(self._roidbs[pos]) self._pos += 1 if self._drop_empty and self._fields and 'gt_mask' in self._fields: if _has_empty(_segm(sample)): #logger.warn('gt_mask is empty or not valid in {}'.format( # sample['im_file'])) continue if self._drop_empty and self._fields and 'gt_bbox' in self._fields: if _has_empty(sample['gt_bbox']): #logger.warn('gt_bbox {} is empty or not valid in {}, ' # 'drop this sample'.format( # sample['im_file'], sample['gt_bbox'])) continue if self._load_img: sample['image'] = self._load_image(sample['im_file']) if self._epoch < self._mixup_epoch: num = len(self.indexes) mix_idx = np.random.randint(1, num) mix_idx = self.indexes[(mix_idx + self._pos - 1) % num] sample['mixup'] = copy.deepcopy(self._roidbs[mix_idx]) if self._load_img: sample['mixup']['image'] = self._load_image(sample['mixup'][ 'im_file']) batch.append(sample) bs += 1 return batch def worker(self, drop_empty=True, batch_samples=None): """ sample transform and batch transform. """ batch = [] for sample in batch_samples: sample = self._sample_transforms(sample) if drop_empty and 'gt_bbox' in sample: if _has_empty(sample['gt_bbox']): #logger.warn('gt_bbox {} is empty or not valid in {}, ' # 'drop this sample'.format( # sample['im_file'], sample['gt_bbox'])) continue batch.append(sample) if len(batch) > 0 and self._batch_transforms: batch = self._batch_transforms(batch) if len(batch) > 0 and self._fields: batch = batch_arrange(batch, self._fields) return batch def _load_image(self, filename): with open(filename, 'rb') as f: return f.read() def size(self): """ implementation of Dataset.size """ return self._sample_num def drained(self): """ implementation of Dataset.drained """ assert self._epoch >= 0, 'The first epoch has not begin!' return self._pos >= self.size() def stop(self): if self._parallel: self._parallel.stop() def create_reader(cfg, max_iter=0, global_cfg=None): """ Return iterable data reader. Args: max_iter (int): number of iterations. 
""" if not isinstance(cfg, dict): raise TypeError("The config should be a dict when creating reader.") # synchornize use_fine_grained_loss/num_classes from global_cfg to reader cfg if global_cfg: cfg['use_fine_grained_loss'] = getattr(global_cfg, 'use_fine_grained_loss', False) cfg['num_classes'] = getattr(global_cfg, 'num_classes', 80) reader = Reader(**cfg)() def _reader(): n = 0 while True: for _batch in reader: if len(_batch) > 0: yield _batch n += 1 if max_iter > 0 and n == max_iter: return reader.reset() if max_iter <= 0: return return _reader
the-stack_106_30915
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import gym   # classic Gym API is assumed (env.step() returning 4 values)

# Hyperparameters
BATCH_SIZE = 32
LR = 0.01                   # learning rate
EPSILON = 0.9               # greedy policy: probability of choosing the best action
GAMMA = 0.9                 # reward discount factor
TARGET_REPLACE_ITER = 100   # how often the target ("Q reality") network is updated
MEMORY_CAPACITY = 2000      # replay memory capacity

env = gym.make('CartPole-v0')   # the CartPole (pole-balancing) game
env = env.unwrapped
N_ACTIONS = env.action_space.n              # number of actions the cart can take
N_STATES = env.observation_space.shape[0]   # number of state observations the environment provides
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape  # to confirm the shape


class Net(nn.Module):
    def __init__(self, ):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(N_STATES, 50)
        self.fc1.weight.data.normal_(0, 0.1)   # initialization
        self.out = nn.Linear(50, N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)   # initialization

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        actions_value = self.out(x)
        return actions_value


class DQN(object):
    def __init__(self):
        self.eval_net, self.target_net = Net(), Net()

        self.learn_step_counter = 0                                  # for target updating
        self.memory_counter = 0                                      # for storing memory
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2))  # initialize memory
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()

    def choose_action(self, x):
        x = Variable(torch.unsqueeze(torch.FloatTensor(x), 0))   # input only one sample
        if np.random.uniform() < EPSILON:   # greedy
            actions_value = self.eval_net.forward(x)
            action = torch.max(actions_value, 1)[1].data.numpy()
            # argmax index; indexed with [0] (not [0, 0]) because torch.max(..., 1)
            # no longer keeps the reduced dimension in current PyTorch
            action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        else:   # random
            action = np.random.randint(0, N_ACTIONS)
            action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        return action

    def store_transition(self, s, a, r, s_):
        transition = np.hstack((s, [a, r], s_))
        # replace the old memory with new memory
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self):
        # target parameter update
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1

        # sample batch transitions
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.memory[sample_index, :]
        b_s = Variable(torch.FloatTensor(b_memory[:, :N_STATES]))
        b_a = Variable(torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int)))
        b_r = Variable(torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2]))
        b_s_ = Variable(torch.FloatTensor(b_memory[:, -N_STATES:]))

        # q_eval w.r.t the action in experience
        q_eval = self.eval_net(b_s).gather(1, b_a)  # shape (batch, 1)
        q_next = self.target_net(b_s_).detach()     # detach from graph, don't backpropagate
        q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1)   # shape (batch, 1)
        loss = self.loss_func(q_eval, q_target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()


dqn = DQN()

print('\nCollecting experience...')
for i_episode in range(400):
    s = env.reset()
    ep_r = 0
    while True:
        env.render()
        a = dqn.choose_action(s)

        # take action
        s_, r, done, info = env.step(a)

        # modify the reward
        x, x_dot, theta, theta_dot = s_
        r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
        r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
        r = r1 + r2

        dqn.store_transition(s, a, r, s_)

        ep_r += r
        if dqn.memory_counter > MEMORY_CAPACITY:
            dqn.learn()
            if done:
                print('Ep: ', i_episode, '| Ep_r: ', round(ep_r, 2))

        if done:
            break
        s = s_
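# Illustrative follow-up sketch (not part of the original script): persisting the
# trained evaluation network and reloading it for greedy inference. The file name
# is a placeholder.
torch.save(dqn.eval_net.state_dict(), 'dqn_cartpole.pt')

policy = Net()
policy.load_state_dict(torch.load('dqn_cartpole.pt'))
policy.eval()   # greedy action at inference: policy(state_tensor).argmax(dim=1)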
the-stack_106_30916
import threading import weakref from collections import defaultdict from dataclasses import dataclass, field from functools import wraps from types import FunctionType from typing import ( Any, Callable, Dict, Generic, List, Literal, MutableMapping, Optional, Sequence, Set, TypeVar, Union, cast, overload, ) import orjson from django.core.exceptions import ValidationError from django.http import HttpRequest, HttpResponse from django.utils.translation import gettext as _ import zerver.lib.rate_limiter as rate_limiter import zerver.tornado.handlers as handlers from zerver.lib.exceptions import ErrorCode, InvalidJSONError, JsonableError from zerver.lib.notes import BaseNotes from zerver.lib.types import Validator, ViewFuncT from zerver.lib.validator import check_anything from zerver.models import Client, Realm @dataclass class RequestNotes(BaseNotes[HttpRequest, "RequestNotes"]): """This class contains extra metadata that Zulip associated with a Django HttpRequest object. See the docstring for BaseNotes for details on how it works. Note that most Optional fields will be definitely not None once middleware has run. In the future, we may want to express that in the types by having different types EarlyRequestNotes and post-middleware RequestNotes types, but for now we have a lot of `assert request_notes.foo is not None` when accessing them. """ client: Optional[Client] = None client_name: Optional[str] = None client_version: Optional[str] = None log_data: Optional[MutableMapping[str, Any]] = None rate_limit: Optional[str] = None requestor_for_logs: Optional[str] = None # We use realm_cached to indicate whether the realm is cached or not. # Because the default value of realm is None, which can indicate "unset" # and "nonexistence" at the same time. realm: Optional[Realm] = None has_fetched_realm: bool = False set_language: Optional[str] = None ratelimits_applied: List["rate_limiter.RateLimitResult"] = field(default_factory=lambda: []) query: Optional[str] = None error_format: Optional[str] = None placeholder_open_graph_description: Optional[str] = None saved_response: Optional[HttpResponse] = None # tornado_handler is a weak reference to work around a memory leak # in WeakKeyDictionary (https://bugs.python.org/issue44680). 
tornado_handler: Optional["weakref.ReferenceType[handlers.AsyncDjangoHandler]"] = None processed_parameters: Set[str] = field(default_factory=set) ignored_parameters: Set[str] = field(default_factory=set) @classmethod def init_notes(cls) -> "RequestNotes": return RequestNotes() class RequestConfusingParmsError(JsonableError): code = ErrorCode.REQUEST_CONFUSING_VAR data_fields = ["var_name1", "var_name2"] def __init__(self, var_name1: str, var_name2: str) -> None: self.var_name1: str = var_name1 self.var_name2: str = var_name2 @staticmethod def msg_format() -> str: return _("Can't decide between '{var_name1}' and '{var_name2}' arguments") class RequestVariableMissingError(JsonableError): code = ErrorCode.REQUEST_VARIABLE_MISSING data_fields = ["var_name"] def __init__(self, var_name: str) -> None: self.var_name: str = var_name @staticmethod def msg_format() -> str: return _("Missing '{var_name}' argument") class RequestVariableConversionError(JsonableError): code = ErrorCode.REQUEST_VARIABLE_INVALID data_fields = ["var_name", "bad_value"] def __init__(self, var_name: str, bad_value: Any) -> None: self.var_name: str = var_name self.bad_value = bad_value @staticmethod def msg_format() -> str: return _("Bad value for '{var_name}': {bad_value}") # Used in conjunction with @has_request_variables, below ResultT = TypeVar("ResultT") class _REQ(Generic[ResultT]): # NotSpecified is a sentinel value for determining whether a # default value was specified for a request variable. We can't # use None because that could be a valid, user-specified default class _NotSpecified: pass NotSpecified = _NotSpecified() def __init__( self, whence: Optional[str] = None, *, converter: Optional[Callable[[str, str], ResultT]] = None, default: Union[_NotSpecified, ResultT, None] = NotSpecified, json_validator: Optional[Validator[ResultT]] = None, str_validator: Optional[Validator[ResultT]] = None, argument_type: Optional[str] = None, intentionally_undocumented: bool = False, documentation_pending: bool = False, aliases: Sequence[str] = [], path_only: bool = False, ) -> None: """whence: the name of the request variable that should be used for this parameter. Defaults to a request variable of the same name as the parameter. converter: a function that takes a string and returns a new value. If specified, this will be called on the request variable value before passing to the function default: a value to be used for the argument if the parameter is missing in the request json_validator: similar to converter, but takes an already parsed JSON data structure. If specified, we will parse the JSON request variable value before passing to the function str_validator: Like json_validator, but doesn't parse JSON first. argument_type: pass 'body' to extract the parsed JSON corresponding to the request body aliases: alternate names for the POST var path_only: Used for parameters included in the URL that we still want to validate via REQ's hooks. 
""" if argument_type == "body" and converter is None and json_validator is None: # legacy behavior json_validator = cast(Callable[[str, object], ResultT], check_anything) self.post_var_name = whence self.func_var_name: Optional[str] = None self.converter = converter self.json_validator = json_validator self.str_validator = str_validator self.default = default self.argument_type = argument_type self.aliases = aliases self.intentionally_undocumented = intentionally_undocumented self.documentation_pending = documentation_pending self.path_only = path_only assert converter is None or ( json_validator is None and str_validator is None ), "converter and json_validator are mutually exclusive" assert ( json_validator is None or str_validator is None ), "json_validator and str_validator are mutually exclusive" # This factory function ensures that mypy can correctly analyze REQ. # # Note that REQ claims to return a type matching that of the parameter # of which it is the default value, allowing type checking of view # functions using has_request_variables. In reality, REQ returns an # instance of class _REQ to enable the decorator to scan the parameter # list for _REQ objects and patch the parameters as the true types. # # See also this documentation to learn how @overload helps here. # https://zulip.readthedocs.io/en/latest/testing/mypy.html#using-overload-to-accurately-describe-variations # # Overload 1: converter @overload def REQ( whence: Optional[str] = ..., *, converter: Callable[[str, str], ResultT], default: ResultT = ..., argument_type: Optional[Literal["body"]] = ..., intentionally_undocumented: bool = ..., documentation_pending: bool = ..., aliases: Sequence[str] = ..., path_only: bool = ..., ) -> ResultT: ... # Overload 2: json_validator @overload def REQ( whence: Optional[str] = ..., *, default: ResultT = ..., json_validator: Validator[ResultT], argument_type: Optional[Literal["body"]] = ..., intentionally_undocumented: bool = ..., documentation_pending: bool = ..., aliases: Sequence[str] = ..., path_only: bool = ..., ) -> ResultT: ... # Overload 3: no converter/json_validator, default: str or unspecified, argument_type=None @overload def REQ( whence: Optional[str] = ..., *, default: str = ..., str_validator: Optional[Validator[str]] = ..., intentionally_undocumented: bool = ..., documentation_pending: bool = ..., aliases: Sequence[str] = ..., path_only: bool = ..., ) -> str: ... # Overload 4: no converter/validator, default=None, argument_type=None @overload def REQ( whence: Optional[str] = ..., *, default: None, str_validator: Optional[Validator[str]] = ..., intentionally_undocumented: bool = ..., documentation_pending: bool = ..., aliases: Sequence[str] = ..., path_only: bool = ..., ) -> Optional[str]: ... # Overload 5: argument_type="body" @overload def REQ( whence: Optional[str] = ..., *, default: ResultT = ..., argument_type: Literal["body"], intentionally_undocumented: bool = ..., documentation_pending: bool = ..., aliases: Sequence[str] = ..., path_only: bool = ..., ) -> ResultT: ... 
# Implementation def REQ( whence: Optional[str] = None, *, converter: Optional[Callable[[str, str], ResultT]] = None, default: Union[_REQ._NotSpecified, ResultT] = _REQ.NotSpecified, json_validator: Optional[Validator[ResultT]] = None, str_validator: Optional[Validator[ResultT]] = None, argument_type: Optional[str] = None, intentionally_undocumented: bool = False, documentation_pending: bool = False, aliases: Sequence[str] = [], path_only: bool = False, ) -> ResultT: return cast( ResultT, _REQ( whence, converter=converter, default=default, json_validator=json_validator, str_validator=str_validator, argument_type=argument_type, intentionally_undocumented=intentionally_undocumented, documentation_pending=documentation_pending, aliases=aliases, path_only=path_only, ), ) arguments_map: Dict[str, List[str]] = defaultdict(list) # Extracts variables from the request object and passes them as # named function arguments. The request object must be the first # argument to the function. # # To use, assign a function parameter a default value that is an # instance of the _REQ class. That parameter will then be automatically # populated from the HTTP request. The request object must be the # first argument to the decorated function. # # This should generally be the innermost (syntactically bottommost) # decorator applied to a view, since other decorators won't preserve # the default parameter values used by has_request_variables. # # Note that this can't be used in helper functions which are not # expected to call json_success or raise JsonableError, as it uses JsonableError # internally when it encounters an error def has_request_variables(view_func: ViewFuncT) -> ViewFuncT: num_params = view_func.__code__.co_argcount default_param_values = cast(FunctionType, view_func).__defaults__ if default_param_values is None: default_param_values = () num_default_params = len(default_param_values) default_param_names = view_func.__code__.co_varnames[num_params - num_default_params :] post_params = [] view_func_full_name = ".".join([view_func.__module__, view_func.__name__]) for (name, value) in zip(default_param_names, default_param_values): if isinstance(value, _REQ): value.func_var_name = name if value.post_var_name is None: value.post_var_name = name post_params.append(value) # Record arguments that should be documented so that our # automated OpenAPI docs tests can compare these against the code. if ( not value.intentionally_undocumented and not value.documentation_pending and not value.path_only ): arguments_map[view_func_full_name].append(value.post_var_name) @wraps(view_func) def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse: request_notes = RequestNotes.get_notes(request) for param in post_params: func_var_name = param.func_var_name if param.path_only: # For path_only parameters, they should already have # been passed via the URL, so there's no need for REQ # to do anything. # # TODO: Either run validators for path_only parameters # or don't declare them using REQ. assert func_var_name in kwargs if func_var_name in kwargs: continue assert func_var_name is not None post_var_name: Optional[str] if param.argument_type == "body": post_var_name = "request" try: val = request.body.decode(request.encoding or "utf-8") except UnicodeDecodeError: raise JsonableError(_("Malformed payload")) else: # This is a view bug, not a user error, and thus should throw a 500. 
assert param.argument_type is None, "Invalid argument type" post_var_names = [param.post_var_name] post_var_names += param.aliases post_var_name = None for req_var in post_var_names: assert req_var is not None if req_var in request.POST: val = request.POST[req_var] request_notes.processed_parameters.add(req_var) elif req_var in request.GET: val = request.GET[req_var] request_notes.processed_parameters.add(req_var) else: # This is covered by test_REQ_aliases, but coverage.py # fails to recognize this for some reason. continue # nocoverage if post_var_name is not None: raise RequestConfusingParmsError(post_var_name, req_var) post_var_name = req_var if post_var_name is None: post_var_name = param.post_var_name assert post_var_name is not None if param.default is _REQ.NotSpecified: raise RequestVariableMissingError(post_var_name) kwargs[func_var_name] = param.default continue if param.converter is not None: try: val = param.converter(post_var_name, val) except JsonableError: raise except Exception: raise RequestVariableConversionError(post_var_name, val) # json_validator is like converter, but doesn't handle JSON parsing; we do. if param.json_validator is not None: try: val = orjson.loads(val) except orjson.JSONDecodeError: if param.argument_type == "body": raise InvalidJSONError(_("Malformed JSON")) raise JsonableError(_('Argument "{}" is not valid JSON.').format(post_var_name)) try: val = param.json_validator(post_var_name, val) except ValidationError as error: raise JsonableError(error.message) # str_validators is like json_validator, but for direct strings (no JSON parsing). if param.str_validator is not None: try: val = param.str_validator(post_var_name, val) except ValidationError as error: raise JsonableError(error.message) kwargs[func_var_name] = val return view_func(request, *args, **kwargs) return cast(ViewFuncT, _wrapped_view_func) # https://github.com/python/mypy/issues/1927 local = threading.local() def get_current_request() -> Optional[HttpRequest]: """Returns the current HttpRequest object; this should only be used by logging frameworks, which have no other access to the current request. All other codepaths should pass through the current request object, rather than rely on this thread-local global. """ return getattr(local, "request", None) def set_request(req: HttpRequest) -> None: setattr(local, "request", req) def unset_request() -> None: if hasattr(local, "request"): delattr(local, "request")
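# Illustrative sketch (not part of the original module): a view function using the
# has_request_variables/REQ machinery defined above. The view itself is hypothetical;
# check_int is Zulip's integer validator and JsonResponse comes from Django.
from django.http import HttpRequest, JsonResponse

from zerver.lib.request import REQ, has_request_variables
from zerver.lib.validator import check_int


@has_request_variables
def get_widget(
    request: HttpRequest,
    widget_id: int = REQ(json_validator=check_int),
    label: str = REQ(default="unnamed"),
) -> JsonResponse:
    # widget_id is extracted from request.POST/GET and validated as an integer;
    # label falls back to "unnamed" when the client omits it.
    return JsonResponse({"widget_id": widget_id, "label": label})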
the-stack_106_30919
# Copyright 2018-2021 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from typing import Any from unittest import mock import pandas as pd import pytest import tornado.testing import tornado.web from streamlit import StreamlitAPIException from streamlit.components.v1.components import ComponentRegistry from streamlit.components.v1.components import ComponentRequestHandler from streamlit.components.v1.components import CustomComponent from streamlit.components.v1.components import declare_component import streamlit.components.v1 as components from streamlit.elements import arrow_table from streamlit.errors import DuplicateWidgetID from streamlit.proto.ComponentInstance_pb2 import SpecialArg from streamlit.type_util import to_bytes from tests import testutil from tests.testutil import DeltaGeneratorTestCase URL = "http://not.a.real.url:3001" PATH = "not/a/real/path" def _serialize_dataframe_arg(key: str, value: Any) -> SpecialArg: special_arg = SpecialArg() special_arg.key = key arrow_table.marshall(special_arg.arrow_dataframe.data, value) return special_arg def _serialize_bytes_arg(key: str, value: Any) -> SpecialArg: special_arg = SpecialArg() special_arg.key = key special_arg.bytes = to_bytes(value) return special_arg class DeclareComponentTest(unittest.TestCase): """Test component declaration.""" def tearDown(self) -> None: ComponentRegistry._instance = None def test_name(self): """Test component name generation""" # Test a component defined in a module with no package component = components.declare_component("foo", url=URL) self.assertEqual("components_test.foo", component.name) # Test a component defined in __init__.py from component_test_data import component as init_component self.assertEqual( "component_test_data.foo", init_component.name, ) # Test a component defined in a module within a package from component_test_data.outer_module import component as outer_module_component self.assertEqual( "component_test_data.outer_module.foo", outer_module_component.name, ) # Test a component defined in module within a nested package from component_test_data.nested.inner_module import ( component as inner_module_component, ) self.assertEqual( "component_test_data.nested.inner_module.foo", inner_module_component.name, ) def test_only_path(self): """Succeed when a path is provided.""" def isdir(path): return path == PATH or path == os.path.abspath(PATH) with mock.patch( "streamlit.components.v1.components.os.path.isdir", side_effect=isdir ): component = components.declare_component("test", path=PATH) self.assertEqual(PATH, component.path) self.assertIsNone(component.url) self.assertEqual( ComponentRegistry.instance().get_component_path(component.name), component.abspath, ) def test_only_url(self): """Succeed when a URL is provided.""" component = components.declare_component("test", url=URL) self.assertEqual(URL, component.url) self.assertIsNone(component.path) self.assertEqual( ComponentRegistry.instance().get_component_path("components_test"), 
component.abspath, ) def test_path_and_url(self): """Fail if path AND url are provided.""" with pytest.raises(StreamlitAPIException) as exception_message: components.declare_component("test", path=PATH, url=URL) self.assertEqual( "Either 'path' or 'url' must be set, but not both.", str(exception_message.value), ) def test_no_path_and_no_url(self): """Fail if neither path nor url is provided.""" with pytest.raises(StreamlitAPIException) as exception_message: components.declare_component("test", path=None, url=None) self.assertEqual( "Either 'path' or 'url' must be set, but not both.", str(exception_message.value), ) class ComponentRegistryTest(unittest.TestCase): """Test component registration.""" def tearDown(self) -> None: ComponentRegistry._instance = None def test_register_component_with_path(self): """Registering a component should associate it with its path.""" test_path = "/a/test/component/directory" def isdir(path): return path == test_path registry = ComponentRegistry.instance() with mock.patch( "streamlit.components.v1.components.os.path.isdir", side_effect=isdir ): registry.register_component( CustomComponent("test_component", path=test_path) ) self.assertEqual(test_path, registry.get_component_path("test_component")) def test_register_component_no_path(self): """It's not an error to register a component without a path.""" registry = ComponentRegistry.instance() # Return None when the component hasn't been registered self.assertIsNone(registry.get_component_path("test_component")) # And also return None when the component doesn't have a path registry.register_component( CustomComponent("test_component", url="http://not.a.url") ) self.assertIsNone(registry.get_component_path("test_component")) def test_register_invalid_path(self): """We raise an exception if a component is registered with a non-existent path. """ test_path = "/a/test/component/directory" registry = ComponentRegistry.instance() with self.assertRaises(StreamlitAPIException) as ctx: registry.register_component(CustomComponent("test_component", test_path)) self.assertIn("No such component directory", ctx.exception) def test_register_duplicate_path(self): """It's not an error to re-register a component. (This can happen during development). 
""" test_path_1 = "/a/test/component/directory" test_path_2 = "/another/test/component/directory" def isdir(path): return path in (test_path_1, test_path_2) registry = ComponentRegistry.instance() with mock.patch( "streamlit.components.v1.components.os.path.isdir", side_effect=isdir ): registry.register_component(CustomComponent("test_component", test_path_1)) registry.register_component(CustomComponent("test_component", test_path_1)) self.assertEqual(test_path_1, registry.get_component_path("test_component")) registry.register_component(CustomComponent("test_component", test_path_2)) self.assertEqual(test_path_2, registry.get_component_path("test_component")) class InvokeComponentTest(DeltaGeneratorTestCase): """Test invocation of a custom component object.""" def setUp(self): super().setUp() self.test_component = components.declare_component("test", url=URL) def test_only_json_args(self): """Test that component with only json args is marshalled correctly.""" self.test_component(foo="bar") proto = self.get_delta_from_queue().new_element.component_instance self.assertEqual(self.test_component.name, proto.component_name) self.assertJSONEqual( {"foo": "bar", "key": None, "default": None}, proto.json_args ) self.assertEqual("[]", str(proto.special_args)) def test_only_df_args(self): """Test that component with only dataframe args is marshalled correctly.""" raw_data = { "First Name": ["Jason", "Molly"], "Last Name": ["Miller", "Jacobson"], "Age": [42, 52], } df = pd.DataFrame(raw_data, columns=["First Name", "Last Name", "Age"]) self.test_component(df=df) proto = self.get_delta_from_queue().new_element.component_instance self.assertEqual(self.test_component.name, proto.component_name) self.assertJSONEqual({"key": None, "default": None}, proto.json_args) self.assertEqual(1, len(proto.special_args)) self.assertEqual(_serialize_dataframe_arg("df", df), proto.special_args[0]) def test_only_list_args(self): """Test that component with only list args is marshalled correctly.""" self.test_component(data=["foo", "bar", "baz"]) proto = self.get_delta_from_queue().new_element.component_instance self.assertJSONEqual( {"data": ["foo", "bar", "baz"], "key": None, "default": None}, proto.json_args, ) self.assertEqual("[]", str(proto.special_args)) def test_no_args(self): """Test that component with no args is marshalled correctly.""" self.test_component() proto = self.get_delta_from_queue().new_element.component_instance self.assertEqual(self.test_component.name, proto.component_name) self.assertJSONEqual({"key": None, "default": None}, proto.json_args) self.assertEqual("[]", str(proto.special_args)) def test_bytes_args(self): self.test_component(foo=b"foo", bar=b"bar") proto = self.get_delta_from_queue().new_element.component_instance self.assertJSONEqual({"key": None, "default": None}, proto.json_args) self.assertEqual(2, len(proto.special_args)) self.assertEqual( _serialize_bytes_arg("foo", b"foo"), proto.special_args[0], ) self.assertEqual( _serialize_bytes_arg("bar", b"bar"), proto.special_args[1], ) def test_mixed_args(self): """Test marshalling of a component with varied arg types.""" df = pd.DataFrame( { "First Name": ["Jason", "Molly"], "Last Name": ["Miller", "Jacobson"], "Age": [42, 52], }, columns=["First Name", "Last Name", "Age"], ) self.test_component(string_arg="string", df_arg=df, bytes_arg=b"bytes") proto = self.get_delta_from_queue().new_element.component_instance self.assertEqual(self.test_component.name, proto.component_name) self.assertJSONEqual( {"string_arg": "string", "key": None, 
"default": None}, proto.json_args, ) self.assertEqual(2, len(proto.special_args)) self.assertEqual(_serialize_dataframe_arg("df_arg", df), proto.special_args[0]) self.assertEqual( _serialize_bytes_arg("bytes_arg", b"bytes"), proto.special_args[1] ) def test_duplicate_key(self): """Two components with the same `key` should throw DuplicateWidgetID exception""" self.test_component(foo="bar", key="baz") with self.assertRaises(DuplicateWidgetID): self.test_component(key="baz") def test_key_sent_to_frontend(self): """We send the 'key' param to the frontend (even if it's None).""" # Test a string key self.test_component(key="baz") proto = self.get_delta_from_queue().new_element.component_instance self.assertJSONEqual({"key": "baz", "default": None}, proto.json_args) # Test an empty key self.test_component() proto = self.get_delta_from_queue().new_element.component_instance self.assertJSONEqual({"key": None, "default": None}, proto.json_args) def test_simple_default(self): """Test the 'default' param with a JSON value.""" return_value = self.test_component(default="baz") self.assertEqual("baz", return_value) proto = self.get_delta_from_queue().new_element.component_instance self.assertJSONEqual({"key": None, "default": "baz"}, proto.json_args) def test_bytes_default(self): """Test the 'default' param with a bytes value.""" return_value = self.test_component(default=b"bytes") self.assertEqual(b"bytes", return_value) proto = self.get_delta_from_queue().new_element.component_instance self.assertJSONEqual({"key": None}, proto.json_args) self.assertEqual( _serialize_bytes_arg("default", b"bytes"), proto.special_args[0], ) def test_df_default(self): """Test the 'default' param with a DataFrame value.""" df = pd.DataFrame( { "First Name": ["Jason", "Molly"], "Last Name": ["Miller", "Jacobson"], "Age": [42, 52], }, columns=["First Name", "Last Name", "Age"], ) return_value = self.test_component(default=df) self.assertTrue(df.equals(return_value), "df != return_value") proto = self.get_delta_from_queue().new_element.component_instance self.assertJSONEqual({"key": None}, proto.json_args) self.assertEqual( _serialize_dataframe_arg("default", df), proto.special_args[0], ) def assertJSONEqual(self, a, b): """Asserts that two JSON dicts are equal. If either arg is a string, it will be first converted to a dict with json.loads().""" # Ensure both objects are dicts. dict_a = a if isinstance(a, dict) else json.loads(a) dict_b = b if isinstance(b, dict) else json.loads(b) self.assertEqual(dict_a, dict_b) class ComponentRequestHandlerTest(tornado.testing.AsyncHTTPTestCase): """Test /component endpoint.""" def tearDown(self) -> None: ComponentRegistry._instance = None def get_app(self): self.registry = ComponentRegistry() return tornado.web.Application( [ ( "/component/(.*)", ComponentRequestHandler, dict(registry=self.registry.instance()), ) ] ) def _request_component(self, path): return self.fetch("/component/%s" % path, method="GET") def test_success_request(self): """Test request success when valid parameters are provided.""" with mock.patch("streamlit.components.v1.components.os.path.isdir"): # We don't need the return value in this case. 
declare_component("test", path=PATH) with mock.patch( "streamlit.components.v1.components.open", mock.mock_open(read_data="Test Content"), ): response = self._request_component("components_test.test") self.assertEqual(200, response.code) self.assertEqual(b"Test Content", response.body) def test_invalid_component_request(self): """Test request failure when invalid component name is provided.""" response = self._request_component("invalid_component") self.assertEqual(404, response.code) self.assertEqual(b"not found", response.body) def test_invalid_content_request(self): """Test request failure when invalid content (file) is provided.""" with mock.patch("streamlit.components.v1.components.os.path.isdir"): declare_component("test", path=PATH) with mock.patch("streamlit.components.v1.components.open") as m: m.side_effect = OSError("Invalid content") response = self._request_component("components_test.test") self.assertEqual(404, response.code) self.assertEqual( b"read error", response.body, ) def test_invalid_encoding_request(self): """Test request failure when invalid encoded file is provided.""" with mock.patch("streamlit.components.v1.components.os.path.isdir"): declare_component("test", path=PATH) with mock.patch("streamlit.components.v1.components.open") as m: m.side_effect = UnicodeDecodeError( "utf-8", b"", 9, 11, "unexpected end of data" ) response = self._request_component("components_test.test") self.assertEqual(404, response.code) self.assertEqual( b"read error", response.body, ) class IFrameTest(testutil.DeltaGeneratorTestCase): def test_iframe(self): """Test components.iframe""" components.iframe("http://not.a.url", width=200, scrolling=True) el = self.get_delta_from_queue().new_element self.assertEqual(el.iframe.src, "http://not.a.url") self.assertEqual(el.iframe.srcdoc, "") self.assertEqual(el.iframe.width, 200) self.assertTrue(el.iframe.has_width) self.assertTrue(el.iframe.scrolling) def test_html(self): """Test components.html""" html = r"<html><body>An HTML string!</body></html>" components.html(html, width=200, scrolling=True) el = self.get_delta_from_queue().new_element self.assertEqual(el.iframe.src, "") self.assertEqual(el.iframe.srcdoc, html) self.assertEqual(el.iframe.width, 200) self.assertTrue(el.iframe.has_width) self.assertTrue(el.iframe.scrolling)
the-stack_106_30920
""" Perform general agent monitoring, like: 1. Status of the agent processes 2. Status of the agent threads 3. Couchdb replication status (and status of its database) 4. Disk usage status """ from __future__ import division from future.utils import viewitems import time import logging import threading from pprint import pformat from Utils.Timers import timeFunction from Utils.Utilities import numberCouchProcess from Utils.PortForward import PortForward from WMComponent.AgentStatusWatcher.DrainStatusPoller import DrainStatusPoller from WMComponent.AnalyticsDataCollector.DataCollectAPI import WMAgentDBData, initAgentInfo from WMCore.Credential.Proxy import Proxy from WMCore.Database.CMSCouch import CouchMonitor from WMCore.Lexicon import sanitizeURL from WMCore.Services.ReqMgrAux.ReqMgrAux import isDrainMode, listDiskUsageOverThreshold from WMCore.Services.WMStats.WMStatsWriter import WMStatsWriter from WMCore.Services.WorkQueue.WorkQueue import WorkQueue as WorkQueueDS from WMCore.WorkQueue.DataStructs.WorkQueueElementsSummary import getGlobalSiteStatusSummary from WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread # CMSMonitoring modules from CMSMonitoring.StompAMQ import StompAMQ class AgentStatusPoller(BaseWorkerThread): """ Gether the summary data for request (workflow) from local queue, local job couchdb, wmbs/boss air and populate summary db for monitoring """ def __init__(self, config): """ initialize properties specified from config """ BaseWorkerThread.__init__(self) # set the workqueue service for REST call self.config = config # need to get campaign, user, owner info self.agentInfo = initAgentInfo(self.config) self.summaryLevel = config.AnalyticsDataCollector.summaryLevel proxyArgs = {'logger': logging.getLogger(), 'cleanEnvironment': True} self.proxy = Proxy(proxyArgs) self.proxyFile = self.proxy.getProxyFilename() # X509_USER_PROXY self.userCertFile = self.proxy.getUserCertFilename() # X509_USER_CERT # credential lifetime warning/error thresholds, in days self.credThresholds = {'proxy': {'error': 3, 'warning': 5}, 'certificate': {'error': 10, 'warning': 20}} # create a portForwarder to be used for rerouting the replication process self.portForwarder = PortForward(8443) # Monitoring setup self.userAMQ = getattr(config.AgentStatusWatcher, "userAMQ", None) self.passAMQ = getattr(config.AgentStatusWatcher, "passAMQ", None) self.postToAMQ = getattr(config.AgentStatusWatcher, "enableAMQ", False) self.topicAMQ = getattr(config.AgentStatusWatcher, "topicAMQ", None) self.hostPortAMQ = getattr(config.AgentStatusWatcher, "hostPortAMQ", [('cms-mb.cern.ch', 61313)]) # T0 doesn't have WorkQueue, so some monitoring/replication code has to be skipped here if hasattr(self.config, "Tier0Feeder"): self.isT0agent = True self.producer = "tier0wmagent" else: self.isT0agent = False self.producer = "wmagent" localWQUrl = config.AnalyticsDataCollector.localQueueURL self.workqueueDS = WorkQueueDS(localWQUrl) def setUpCouchDBReplication(self): self.replicatorDocs = [] # set up common replication code wmstatsSource = self.config.JobStateMachine.jobSummaryDBName wmstatsTarget = self.config.General.centralWMStatsURL wmstatsTarget = self.portForwarder(wmstatsTarget) self.replicatorDocs.append({'source': wmstatsSource, 'target': wmstatsTarget, 'filter': "WMStatsAgent/repfilter"}) if self.isT0agent: t0Source = self.config.Tier0Feeder.requestDBName t0Target = self.config.AnalyticsDataCollector.centralRequestDBURL self.replicatorDocs.append({'source': t0Source, 'target': t0Target, 'filter': 
"T0Request/repfilter"}) else: # set up workqueue replication wqfilter = 'WorkQueue/queueFilter' parentQURL = self.config.WorkQueueManager.queueParams["ParentQueueCouchUrl"] parentQURL = self.portForwarder(parentQURL) childURL = self.config.WorkQueueManager.queueParams["QueueURL"] childURL = self.portForwarder(childURL) query_params = {'childUrl': childURL, 'parentUrl': sanitizeURL(parentQURL)['url']} localQInboxURL = "%s_inbox" % self.config.AnalyticsDataCollector.localQueueURL self.replicatorDocs.append({'source': sanitizeURL(parentQURL)['url'], 'target': localQInboxURL, 'filter': wqfilter, 'query_params': query_params}) self.replicatorDocs.append({'source': sanitizeURL(localQInboxURL)['url'], 'target': parentQURL, 'filter': wqfilter, 'query_params': query_params}) # delete old replicator docs before setting up self.localCouchMonitor.deleteReplicatorDocs() for rp in self.replicatorDocs: self.localCouchMonitor.couchServer.replicate( rp['source'], rp['target'], filter=rp['filter'], query_params=rp.get('query_params', False), continuous=True) # First cicle need to be skipped since document is not updated that fast self.skipReplicationCheck = True def setup(self, parameters): """ set db connection(couchdb, wmbs) to prepare to gather information """ # interface to WMBS/BossAir db myThread = threading.currentThread() # set wmagent db data self.wmagentDB = WMAgentDBData(self.summaryLevel, myThread.dbi, myThread.logger) self.centralWMStatsCouchDB = WMStatsWriter(self.config.General.centralWMStatsURL) self.localCouchMonitor = CouchMonitor(self.config.JobStateMachine.couchurl) self.setUpCouchDBReplication() @timeFunction def algorithm(self, parameters): """ get information from wmbs, workqueue and local couch """ try: agentInfo = self.collectAgentInfo() self.checkCredLifetime(agentInfo, "proxy") self.checkCredLifetime(agentInfo, "certificate") timeSpent, wmbsInfo, _ = self.collectWMBSInfo() wmbsInfo['total_query_time'] = int(timeSpent) agentInfo["WMBS_INFO"] = wmbsInfo logging.info("WMBS data collected in: %d secs", timeSpent) if not self.isT0agent: timeSpent, localWQInfo, _ = self.collectWorkQueueInfo() localWQInfo['total_query_time'] = int(timeSpent) agentInfo["LocalWQ_INFO"] = localWQInfo logging.info("Local WorkQueue data collected in: %d secs", timeSpent) self.uploadAgentInfoToCentralWMStats(agentInfo) self.buildMonITDocs(agentInfo) except Exception as ex: logging.exception("Error occurred, will retry later.\nDetails: %s", str(ex)) @timeFunction def collectWorkQueueInfo(self): """ Collect information from local workqueue database :return: """ results = {} wqStates = ['Available', 'Acquired'] results['workByStatus'] = self.workqueueDS.getJobsByStatus() results['workByStatusAndPriority'] = self.workqueueDS.getJobsByStatusAndPriority() elements = self.workqueueDS.getElementsByStatus(wqStates) uniSites, posSites = getGlobalSiteStatusSummary(elements, status=wqStates, dataLocality=True) results['uniqueJobsPerSite'] = uniSites results['possibleJobsPerSite'] = posSites return results def collectCouchDBInfo(self): couchInfo = {'name': 'CouchServer', 'status': 'ok', 'error_message': ""} if self.skipReplicationCheck: # skipping the check this round set if False so it can be checked next round. 
self.skipReplicationCheck = False return couchInfo for rp in self.replicatorDocs: cInfo = self.localCouchMonitor.checkCouchServerStatus(rp['source'], rp['target'], checkUpdateSeq=False) if cInfo['status'] != 'ok': couchInfo['status'] = 'error' couchInfo['error_message'] = cInfo['error_message'] return couchInfo def collectAgentInfo(self): """ Monitors the general health of the agent, as: 1. status of the agent processes 2. status of the agent threads based on the database info 3. couchdb active tasks and its replications 4. check the disk usage 5. check the number of couch processes :return: a dict with all the info collected """ logging.info("Getting agent info ...") agentInfo = self.wmagentDB.getComponentStatus(self.config) agentInfo.update(self.agentInfo) agentInfo['disk_warning'] = listDiskUsageOverThreshold(self.config, updateDB=True) if isDrainMode(self.config): logging.info("Agent is in DrainMode") agentInfo['drain_mode'] = True agentInfo['drain_stats'] = DrainStatusPoller.getDrainInfo() else: agentInfo['drain_mode'] = False couchInfo = self.collectCouchDBInfo() if couchInfo['status'] != 'ok': agentInfo['down_components'].append(couchInfo['name']) agentInfo['status'] = couchInfo['status'] agentInfo['down_component_detail'].append(couchInfo) # Couch process warning couchProc = numberCouchProcess() logging.info("CouchDB is running with %d processes", couchProc) couchProcessThreshold = self.config.AnalyticsDataCollector.couchProcessThreshold if couchProc >= couchProcessThreshold: agentInfo['couch_process_warning'] = couchProc else: agentInfo['couch_process_warning'] = 0 # Change status if there is data_error, couch process maxed out or disk full problems. if agentInfo['status'] == 'ok' and (agentInfo['drain_mode'] or agentInfo['disk_warning']): agentInfo['status'] = "warning" if agentInfo['status'] == 'ok' or agentInfo['status'] == 'warning': if agentInfo.get('data_error', 'ok') != 'ok' or agentInfo.get('couch_process_warning', 0): agentInfo['status'] = "error" logging.info("List of agent components down: %s", agentInfo['down_components']) return agentInfo def uploadAgentInfoToCentralWMStats(self, agentInfo): """ Add some required fields to the document before it can get uploaded to WMStats. :param agentInfo: dict with agent stats to be posted to couchdb """ agentInfo['_id'] = agentInfo["agent_url"] agentInfo['timestamp'] = int(time.time()) agentInfo['type'] = "agent_info" # directly upload to the remote to prevent data conflict when agent is cleaned up and redeployed try: self.centralWMStatsCouchDB.updateAgentInfo(agentInfo, propertiesToKeep=["data_last_update", "data_error"]) except Exception as e: logging.error("Failed to upload agent statistics to WMStats. Error: %s", str(e)) @timeFunction def collectWMBSInfo(self): """ Fetches WMBS job information. 
In addition to WMBS, also collects RunJob info from BossAir :return: dict with the number of jobs in each status """ logging.info("Getting wmbs job info ...") results = {} # first retrieve the site thresholds results['thresholds'] = self.wmagentDB.getJobSlotInfo() logging.debug("Running and pending site thresholds: %s", results['thresholds']) # now fetch the amount of jobs in each state and the amount of created # jobs grouped by task results.update(self.wmagentDB.getAgentMonitoring()) logging.debug("Total number of jobs in WMBS sorted by status: %s", results['wmbsCountByState']) logging.debug("Total number of 'created' jobs in WMBS sorted by type: %s", results['wmbsCreatedTypeCount']) logging.debug("Total number of 'executing' jobs in WMBS sorted by type: %s", results['wmbsExecutingTypeCount']) logging.debug("Total number of active jobs in BossAir sorted by status: %s", results['activeRunJobByStatus']) logging.debug("Total number of complete jobs in BossAir sorted by status: %s", results['completeRunJobByStatus']) logging.debug("Available slots thresholds to pull work from GQ to LQ: %s", results['thresholdsGQ2LQ']) logging.debug("List of jobs pending for each site, sorted by priority: %s", results['sitePendCountByPrio']) return results def checkCredLifetime(self, agInfo, credType): """ Check the credential lifetime. Usually X509_USER_PROXY or X509_USER_CERT and raise either a warning or an error if the proxy validity is about to expire. :param agInfo: dictionary with plenty of agent monitoring information in place. :param credType: credential type, can be: "proxy" or "certificate" :return: same dictionary object plus additional keys/values if needed. """ if credType == "proxy": credFile = self.proxyFile secsLeft = self.proxy.getTimeLeft(proxy=credFile) elif credType == "certificate": credFile = self.userCertFile secsLeft = self.proxy.getUserCertTimeLeft(openSSL=True) else: logging.error("Unknown credential type. Available options are: [proxy, certificate]") return logging.debug("%s '%s' lifetime is %d seconds", credType, credFile, secsLeft) daysLeft = secsLeft / (60 * 60 * 24) if daysLeft <= self.credThresholds[credType]['error']: credWarning = True agInfo['status'] = "error" elif daysLeft <= self.credThresholds[credType]['warning']: credWarning = True if agInfo['status'] == "ok": agInfo['status'] = "warning" else: credWarning = False if credWarning: warnMsg = "Agent %s '%s' must be renewed ASAP. " % (credType, credFile) warnMsg += "Its time left is: %.2f hours;" % (secsLeft / 3600.) agInfo['proxy_warning'] = agInfo.get('proxy_warning', "") + warnMsg logging.warning(warnMsg) return def buildMonITDocs(self, dataStats): """ Convert agent statistics into MonIT-friendly documents to be posted to AMQ/ES. 
It creates 5 different type of documents: * priority information * site information * work information * agent information * agent health information Note that the internal methods are popping some metrics out of dataStats """ if not self.postToAMQ: return logging.info("Preparing documents to be posted to AMQ/MonIT..") allDocs = self._buildMonITPrioDocs(dataStats) allDocs.extend(self._buildMonITSitesDocs(dataStats)) allDocs.extend(self._buildMonITWorkDocs(dataStats)) allDocs.extend(self._buildMonITWMBSDocs(dataStats)) allDocs.extend(self._buildMonITAgentDocs(dataStats)) allDocs.extend(self._buildMonITHealthDocs(dataStats)) allDocs.extend(self._buildMonITSummaryDocs(dataStats)) # and finally post them all to AMQ logging.info("Found %d documents to post to AMQ", len(allDocs)) self.uploadToAMQ(allDocs, dataStats['agent_url'], dataStats['timestamp']) def _buildMonITPrioDocs(self, dataStats): """ Uses the `sitePendCountByPrio` metric in order to build documents reporting the site name, job priority and amount of jobs within that priority. :param dataStats: dictionary with metrics previously posted to WMStats :return: list of dictionaries with the wma_prio_info MonIT docs """ docType = "wma_prio_info" prioDocs = [] sitePendCountByPrio = dataStats['WMBS_INFO'].pop('sitePendCountByPrio', []) for site, item in viewitems(sitePendCountByPrio): # it seems sites with no jobs are also always here as "Sitename": {0: 0} if list(item) == [0]: continue for prio, jobs in viewitems(item): prioDoc = {} prioDoc['site_name'] = site prioDoc['type'] = docType prioDoc['priority'] = prio prioDoc['job_count'] = jobs prioDocs.append(prioDoc) return prioDocs def _buildMonITSitesDocs(self, dataStats): """ Uses the site thresholds and job information for each site in order to build a `site_info` document type for MonIT. :param dataStats: dictionary with metrics previously posted to WMStats :return: list of dictionaries with the wma_site_info MonIT docs """ docType = "wma_site_info" siteDocs = [] thresholds = dataStats['WMBS_INFO'].pop('thresholds', {}) thresholdsGQ2LQ = dataStats['WMBS_INFO'].pop('thresholdsGQ2LQ', {}) if self.isT0agent: possibleJobsPerSite = {} uniqueJobsPerSite = {} else: possibleJobsPerSite = dataStats['LocalWQ_INFO'].pop('possibleJobsPerSite', {}) uniqueJobsPerSite = dataStats['LocalWQ_INFO'].pop('uniqueJobsPerSite', {}) for site in sorted(thresholds): siteDoc = {} siteDoc['site_name'] = site siteDoc['type'] = docType siteDoc['thresholds'] = thresholds[site] siteDoc['state'] = siteDoc['thresholds'].pop('state', 'Unknown') siteDoc['thresholdsGQ2LQ'] = thresholdsGQ2LQ.get(site, 0) for status in possibleJobsPerSite: # make sure these keys are always present in the documents jobKey = "possible_%s_jobs" % status.lower() elemKey = "num_%s_elem" % status.lower() uniJobKey = "unique_%s_jobs" % status.lower() siteDoc[jobKey], siteDoc[elemKey], siteDoc[uniJobKey] = 0, 0, 0 if site in possibleJobsPerSite[status]: siteDoc[jobKey] = possibleJobsPerSite[status][site]['sum_jobs'] siteDoc[elemKey] = possibleJobsPerSite[status][site]['num_elem'] if site in uniqueJobsPerSite[status]: siteDoc[uniJobKey] = uniqueJobsPerSite[status][site]['sum_jobs'] siteDocs.append(siteDoc) return siteDocs def _buildMonITWorkDocs(self, dataStats): """ Uses the local workqueue information order by WQE status and build statistics for the workload in terms of workqueue elements and top level jobs. Using the WMBS data, also builds documents to show the amount of work in 'created' and 'executing' WMBS status. 
:param dataStats: dictionary with metrics previously posted to WMStats :return: list of dictionaries with the wma_work_info MonIT docs """ workDocs = [] if self.isT0agent: return workDocs docType = "wma_work_info" workByStatus = dataStats['LocalWQ_INFO'].pop('workByStatus', {}) for status, info in viewitems(workByStatus): workDoc = {} workDoc['type'] = docType workDoc['status'] = status workDoc['num_elem'] = info.get('num_elem', 0) workDoc['sum_jobs'] = info.get('sum_jobs', 0) workDocs.append(workDoc) return workDocs def _buildMonITWMBSDocs(self, dataStats): """ Using the WMBS data, builds documents to show the amount of work in 'created' and 'executing' WMBS status. It also builds a document for every single wmbs_status in the database. :param dataStats: dictionary with metrics previously posted to WMStats :return: list of dictionaries with the wma_wmbs_info and wma_wmbs_state_info docs """ docType = "wma_wmbs_info" wmbsDocs = [] wmbsCreatedTypeCount = dataStats['WMBS_INFO'].pop('wmbsCreatedTypeCount', {}) wmbsExecutingTypeCount = dataStats['WMBS_INFO'].pop('wmbsExecutingTypeCount', {}) for jobType in wmbsCreatedTypeCount: wmbsDoc = {} wmbsDoc['type'] = docType wmbsDoc['job_type'] = jobType wmbsDoc['created_jobs'] = wmbsCreatedTypeCount[jobType] wmbsDoc['executing_jobs'] = wmbsExecutingTypeCount[jobType] wmbsDocs.append(wmbsDoc) docType = "wma_wmbs_state_info" wmbsCountByState = dataStats['WMBS_INFO'].pop('wmbsCountByState', {}) for wmbsStatus in wmbsCountByState: wmbsDoc = {} wmbsDoc['type'] = docType wmbsDoc['wmbs_status'] = wmbsStatus wmbsDoc['num_jobs'] = wmbsCountByState[wmbsStatus] wmbsDocs.append(wmbsDoc) return wmbsDocs def _buildMonITAgentDocs(self, dataStats): """ Uses the BossAir and WMBS table information in order to build a view of amount of jobs in different statuses. :param dataStats: dictionary with metrics previously posted to WMStats :return: list of dictionaries with the wma_agent_info MonIT docs """ docType = "wma_agent_info" agentDocs = [] activeRunJobByStatus = dataStats['WMBS_INFO'].pop('activeRunJobByStatus', {}) completeRunJobByStatus = dataStats['WMBS_INFO'].pop('completeRunJobByStatus', {}) for schedStatus in activeRunJobByStatus: agentDoc = {} agentDoc['type'] = docType agentDoc['schedd_status'] = schedStatus agentDoc['active_jobs'] = activeRunJobByStatus[schedStatus] agentDoc['completed_jobs'] = completeRunJobByStatus[schedStatus] agentDocs.append(agentDoc) return agentDocs def _buildMonITHealthDocs(self, dataStats): """ Creates documents with specific agent information, status of each component and worker thread (similar to what is shown in wmstats) and also some very basic performance numbers. :param dataStats: dictionary with metrics previously posted to WMStats :return: list of dictionaries with the wma_health_info MonIT docs """ docType = "wma_health_info" healthDocs = [] workersStatus = dataStats.pop('workers', {}) for worker in workersStatus: healthDoc = {} healthDoc['type'] = docType healthDoc['worker_name'] = worker['name'] healthDoc['worker_state'] = worker['state'] healthDoc['worker_poll'] = worker['poll_interval'] healthDoc['worker_last_hb'] = worker['last_updated'] healthDoc['worker_cycle_time'] = worker['cycle_time'] healthDocs.append(healthDoc) return healthDocs def _buildMonITSummaryDocs(self, dataStats): """ Creates a document with the very basic agent info used in the wmstats monitoring tab. 
:param dataStats: dictionary with metrics previously posted to WMStats :return: list of dictionaries with the wma_health_info MonIT docs """ docType = "wma_summary_info" summaryDocs = [] summaryDoc = {} summaryDoc['type'] = docType summaryDoc['agent_team'] = dataStats['agent_team'] summaryDoc['agent_version'] = dataStats['agent_version'] summaryDoc['agent_status'] = dataStats['status'] if not self.isT0agent: summaryDoc['wq_query_time'] = dataStats['LocalWQ_INFO']['total_query_time'] summaryDoc['wmbs_query_time'] = dataStats['WMBS_INFO']['total_query_time'] summaryDoc['drain_mode'] = dataStats['drain_mode'] summaryDoc['down_components'] = dataStats['down_components'] summaryDocs.append(summaryDoc) return summaryDocs def uploadToAMQ(self, docs, agentUrl, timeS): """ _uploadToAMQ_ Sends data to AMQ, which ends up in the MonIT infrastructure. :param docs: list of documents/dicts to be posted """ if not docs: logging.info("There are no documents to send to AMQ") return # add mandatory information for every single document for doc in docs: doc['agent_url'] = agentUrl docType = "cms_%s_info" % self.producer notifications = [] logging.debug("Sending the following data to AMQ %s", pformat(docs)) try: stompSvc = StompAMQ(username=self.userAMQ, password=self.passAMQ, producer=self.producer, topic=self.topicAMQ, validation_schema=None, host_and_ports=self.hostPortAMQ, logger=logging) for doc in docs: singleNotif, _, _ = stompSvc.make_notification(payload=doc, docType=docType, ts=timeS, dataSubfield="payload") notifications.append(singleNotif) failures = stompSvc.send(notifications) msg = "%i out of %i documents successfully sent to AMQ" % (len(notifications) - len(failures), len(notifications)) logging.info(msg) except Exception as ex: logging.exception("Failed to send data to StompAMQ. Error %s", str(ex)) return
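# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal,
# self-contained example of the document shape produced by the _buildMonIT*
# helpers above, following the same transformation as _buildMonITWMBSDocs for
# the "wma_wmbs_state_info" document type. The job counts below are
# hypothetical and only serve to show the layout of the docs handed to
# uploadToAMQ.
# ---------------------------------------------------------------------------
def _example_wmbs_state_docs():
    """Build wma_wmbs_state_info-like documents from a hypothetical count map."""
    wmbsCountByState = {"created": 12, "executing": 40, "jobfailed": 3}  # made-up numbers
    wmbsDocs = []
    for wmbsStatus, numJobs in wmbsCountByState.items():
        wmbsDocs.append({"type": "wma_wmbs_state_info",
                         "wmbs_status": wmbsStatus,
                         "num_jobs": numJobs})
    return wmbsDocs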
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Resource service.""" from oslo_log import log import six from keystone import assignment from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications from keystone.resource.backends import base from keystone.resource.backends import sql as resource_sql from keystone.token import provider as token_provider CONF = keystone.conf.CONF LOG = log.getLogger(__name__) MEMOIZE = cache.get_memoization_decorator(group='resource') PROVIDERS = provider_api.ProviderAPIs TAG_SEARCH_FILTERS = ('tags', 'tags-any', 'not-tags', 'not-tags-any') class Manager(manager.Manager): """Default pivot point for the Resource backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.resource' _provides_api = 'resource_api' _DOMAIN = 'domain' _PROJECT = 'project' _PROJECT_TAG = 'project tag' def __init__(self): # NOTE(morgan): The resource driver must be SQL. This is because there # is a FK between identity and resource. Almost every deployment uses # SQL Identity in some form. Even if SQL Identity is not used, there # is almost no reason to have non-SQL Resource. Keystone requires # SQL in a number of ways, this simply codifies it plainly for resource # the driver_name = None simply implies we don't need to load a driver. self.driver = resource_sql.Resource() super(Manager, self).__init__(driver_name=None) def _get_hierarchy_depth(self, parents_list): return len(parents_list) + 1 def _assert_max_hierarchy_depth(self, project_id, parents_list=None): if parents_list is None: parents_list = self.list_project_parents(project_id) # NOTE(henry-nash): In upgrading to a scenario where domains are # represented as projects acting as domains, we will effectively # increase the depth of any existing project hierarchy by one. To avoid # pushing any existing hierarchies over the limit, we add one to the # maximum depth allowed, as specified in the configuration file. max_depth = CONF.max_project_tree_depth + 1 # NOTE(wxy): If the hierarchical limit enforcement model is used, the # project depth should be not greater than the model's limit as well. # # TODO(wxy): Deprecate and remove CONF.max_project_tree_depth, let the # depth check only based on the limit enforcement model. limit_model = PROVIDERS.unified_limit_api.enforcement_model if limit_model.MAX_PROJECT_TREE_DEPTH is not None: max_depth = min(max_depth, limit_model.MAX_PROJECT_TREE_DEPTH + 1) if self._get_hierarchy_depth(parents_list) > max_depth: raise exception.ForbiddenNotSecurity( _('Max hierarchy depth reached for %s branch.') % project_id) def _assert_is_domain_project_constraints(self, project_ref): """Enforce specific constraints of projects that act as domains. 
Called when is_domain is true, this method ensures that: * multiple domains are enabled * the project name is not the reserved name for a federated domain * the project is a root project :raises keystone.exception.ValidationError: If one of the constraints was not satisfied. """ if (not PROVIDERS.identity_api.multiple_domains_supported and project_ref['id'] != CONF.identity.default_domain_id and project_ref['id'] != base.NULL_DOMAIN_ID): raise exception.ValidationError( message=_('Multiple domains are not supported')) self.assert_domain_not_federated(project_ref['id'], project_ref) if project_ref['parent_id']: raise exception.ValidationError( message=_('only root projects are allowed to act as ' 'domains.')) def _assert_regular_project_constraints(self, project_ref): """Enforce regular project hierarchy constraints. Called when is_domain is false. The project must contain a valid domain_id and parent_id. The goal of this method is to check that the domain_id specified is consistent with the domain of its parent. :raises keystone.exception.ValidationError: If one of the constraints was not satisfied. :raises keystone.exception.DomainNotFound: In case the domain is not found. """ # Ensure domain_id is valid, and by inference will not be None. domain = self.get_domain(project_ref['domain_id']) parent_ref = self.get_project(project_ref['parent_id']) if parent_ref['is_domain']: if parent_ref['id'] != domain['id']: raise exception.ValidationError( message=_('Cannot create project, the parent ' '(%(parent_id)s) is acting as a domain, ' 'but this project\'s domain id (%(domain_id)s) ' 'does not match the parent\'s id.') % {'parent_id': parent_ref['id'], 'domain_id': domain['id']}) else: parent_domain_id = parent_ref.get('domain_id') if parent_domain_id != domain['id']: raise exception.ValidationError( message=_('Cannot create project, since it specifies ' 'its domain_id %(domain_id)s, but ' 'specifies a parent in a different domain ' '(%(parent_domain_id)s).') % {'domain_id': domain['id'], 'parent_domain_id': parent_domain_id}) def _enforce_project_constraints(self, project_ref): if project_ref.get('is_domain'): self._assert_is_domain_project_constraints(project_ref) else: self._assert_regular_project_constraints(project_ref) # The whole hierarchy (upwards) must be enabled parent_id = project_ref['parent_id'] parents_list = self.list_project_parents(parent_id) parent_ref = self.get_project(parent_id) parents_list.append(parent_ref) for ref in parents_list: if not ref.get('enabled', True): raise exception.ValidationError( message=_('cannot create a project in a ' 'branch containing a disabled ' 'project: %s') % ref['id']) self._assert_max_hierarchy_depth(project_ref.get('parent_id'), parents_list) def _raise_reserved_character_exception(self, entity_type, name): msg = _('%(entity)s name cannot contain the following reserved ' 'characters: %(chars)s') raise exception.ValidationError( message=msg % { 'entity': entity_type, 'chars': utils.list_url_unsafe_chars(name) }) def _generate_project_name_conflict_msg(self, project): if project['is_domain']: return _('it is not permitted to have two projects ' 'acting as domains with the same name: %s' ) % project['name'] else: return _('it is not permitted to have two projects ' 'with either the same name or same id in ' 'the same domain: ' 'name is %(name)s, project id %(id)s' ) % project def create_project(self, project_id, project, initiator=None): project = project.copy() if (CONF.resource.project_name_url_safe != 'off' and 
utils.is_not_url_safe(project['name'])): self._raise_reserved_character_exception('Project', project['name']) project.setdefault('enabled', True) project['name'] = project['name'].strip() project.setdefault('description', '') # For regular projects, the controller will ensure we have a valid # domain_id. For projects acting as a domain, the project_id # is, effectively, the domain_id - and for such projects we don't # bother to store a copy of it in the domain_id attribute. project.setdefault('domain_id', None) project.setdefault('parent_id', None) if not project['parent_id']: project['parent_id'] = project['domain_id'] project.setdefault('is_domain', False) self._enforce_project_constraints(project) # We leave enforcing name uniqueness to the underlying driver (instead # of doing it in code in the project_constraints above), so as to allow # this check to be done at the storage level, avoiding race conditions # in multi-process keystone configurations. try: ret = self.driver.create_project(project_id, project) except exception.Conflict: raise exception.Conflict( type='project', details=self._generate_project_name_conflict_msg(project)) if project.get('is_domain'): notifications.Audit.created(self._DOMAIN, project_id, initiator) else: notifications.Audit.created(self._PROJECT, project_id, initiator) if MEMOIZE.should_cache(ret): self.get_project.set(ret, self, project_id) self.get_project_by_name.set(ret, self, ret['name'], ret['domain_id']) assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() return ret def assert_domain_enabled(self, domain_id, domain=None): """Assert the Domain is enabled. :raise AssertionError: if domain is disabled. """ if domain is None: domain = self.get_domain(domain_id) if not domain.get('enabled', True): raise AssertionError(_('Domain is disabled: %s') % domain_id) def assert_domain_not_federated(self, domain_id, domain): """Assert the Domain's name and id do not match the reserved keyword. Note that the reserved keyword is defined in the configuration file, by default, it is 'Federated', it is also case insensitive. If config's option is empty the default hardcoded value 'Federated' will be used. :raise AssertionError: if domain named match the value in the config. """ # NOTE(marek-denis): We cannot create this attribute in the __init__ as # config values are always initialized to default value. federated_domain = CONF.federation.federated_domain_name.lower() if (domain.get('name') and domain['name'].lower() == federated_domain): raise AssertionError(_('Domain cannot be named %s') % domain['name']) if (domain_id.lower() == federated_domain): raise AssertionError(_('Domain cannot have ID %s') % domain_id) def assert_project_enabled(self, project_id, project=None): """Assert the project is enabled and its associated domain is enabled. :raise AssertionError: if the project or domain is disabled. """ if project is None: project = self.get_project(project_id) # If it's a regular project (i.e. 
it has a domain_id), we need to make # sure the domain itself is not disabled if project['domain_id']: self.assert_domain_enabled(domain_id=project['domain_id']) if not project.get('enabled', True): raise AssertionError(_('Project is disabled: %s') % project_id) def _assert_all_parents_are_enabled(self, project_id): parents_list = self.list_project_parents(project_id) for project in parents_list: if not project.get('enabled', True): raise exception.ForbiddenNotSecurity( _('Cannot enable project %s since it has disabled ' 'parents') % project_id) def _check_whole_subtree_is_disabled(self, project_id, subtree_list=None): if not subtree_list: subtree_list = self.list_projects_in_subtree(project_id) subtree_enabled = [ref.get('enabled', True) for ref in subtree_list] return (not any(subtree_enabled)) def _update_project(self, project_id, project, initiator=None, cascade=False): # Use the driver directly to prevent using old cached value. original_project = self.driver.get_project(project_id) project = project.copy() self._require_matching_domain_id(project, original_project) if original_project['is_domain']: domain = self._get_domain_from_project(original_project) self.assert_domain_not_federated(project_id, domain) url_safe_option = CONF.resource.domain_name_url_safe exception_entity = 'Domain' else: url_safe_option = CONF.resource.project_name_url_safe exception_entity = 'Project' project_name_changed = ('name' in project and project['name'] != original_project['name']) if (url_safe_option != 'off' and project_name_changed and utils.is_not_url_safe(project['name'])): self._raise_reserved_character_exception(exception_entity, project['name']) elif project_name_changed: project['name'] = project['name'].strip() parent_id = original_project.get('parent_id') if 'parent_id' in project and project.get('parent_id') != parent_id: raise exception.ForbiddenNotSecurity( _('Update of `parent_id` is not allowed.')) if ('is_domain' in project and project['is_domain'] != original_project['is_domain']): raise exception.ValidationError( message=_('Update of `is_domain` is not allowed.')) original_project_enabled = original_project.get('enabled', True) project_enabled = project.get('enabled', True) if not original_project_enabled and project_enabled: self._assert_all_parents_are_enabled(project_id) if original_project_enabled and not project_enabled: # NOTE(htruta): In order to disable a regular project, all its # children must already be disabled. However, to keep # compatibility with the existing domain behaviour, we allow a # project acting as a domain to be disabled irrespective of the # state of its children. Disabling a project acting as domain # effectively disables its children. if (not original_project.get('is_domain') and not cascade and not self._check_whole_subtree_is_disabled(project_id)): raise exception.ForbiddenNotSecurity( _('Cannot disable project %(project_id)s since its ' 'subtree contains enabled projects.') % {'project_id': project_id}) notifications.Audit.disabled(self._PROJECT, project_id, public=False) # Drop the computed assignments if the project is being disabled. # This ensures an accurate list of projects is returned when # listing projects/domains for a user based on role assignments. 
assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() if cascade: self._only_allow_enabled_to_update_cascade(project, original_project) self._update_project_enabled_cascade(project_id, project_enabled) try: project['is_domain'] = (project.get('is_domain') or original_project['is_domain']) ret = self.driver.update_project(project_id, project) except exception.Conflict: raise exception.Conflict( type='project', details=self._generate_project_name_conflict_msg(project)) try: self.get_project.invalidate(self, project_id) self.get_project_by_name.invalidate(self, original_project['name'], original_project['domain_id']) if ('domain_id' in project and project['domain_id'] != original_project['domain_id']): # If the project's domain_id has been updated, invalidate user # role assignments cache region, as it may be caching inherited # assignments from the old domain to the specified project assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() finally: # attempt to send audit event even if the cache invalidation raises notifications.Audit.updated(self._PROJECT, project_id, initiator) if original_project['is_domain']: notifications.Audit.updated(self._DOMAIN, project_id, initiator) # If the domain is being disabled, issue the disable # notification as well if original_project_enabled and not project_enabled: # NOTE(lbragstad): When a domain is disabled, we have to # invalidate the entire token cache. With persistent # tokens, we did something similar where all tokens for a # specific domain were deleted when that domain was # disabled. This effectively offers the same behavior for # non-persistent tokens by removing them from the cache and # requiring the authorization context to be rebuilt the # next time they're validated. token_provider.TOKENS_REGION.invalidate() notifications.Audit.disabled(self._DOMAIN, project_id, public=False) return ret def _only_allow_enabled_to_update_cascade(self, project, original_project): for attr in project: if attr != 'enabled': if project.get(attr) != original_project.get(attr): raise exception.ValidationError( message=_('Cascade update is only allowed for ' 'enabled attribute.')) def _update_project_enabled_cascade(self, project_id, enabled): subtree = self.list_projects_in_subtree(project_id) # Update enabled only if different from original value subtree_to_update = [child for child in subtree if child['enabled'] != enabled] for child in subtree_to_update: child['enabled'] = enabled if not enabled: # Does not in fact disable the project, only emits a # notification that it was disabled. The actual disablement # is done in the next line. 
notifications.Audit.disabled(self._PROJECT, child['id'], public=False) self.driver.update_project(child['id'], child) def update_project(self, project_id, project, initiator=None, cascade=False): ret = self._update_project(project_id, project, initiator, cascade) if ret['is_domain']: self.get_domain.invalidate(self, project_id) self.get_domain_by_name.invalidate(self, ret['name']) return ret def _post_delete_cleanup_project(self, project_id, project, initiator=None): try: self.get_project.invalidate(self, project_id) self.get_project_by_name.invalidate(self, project['name'], project['domain_id']) PROVIDERS.assignment_api.delete_project_assignments(project_id) # Invalidate user role assignments cache region, as it may # be caching role assignments where the target is # the specified project assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() PROVIDERS.credential_api.delete_credentials_for_project(project_id) PROVIDERS.trust_api.delete_trusts_for_project(project_id) PROVIDERS.unified_limit_api.delete_limits_for_project(project_id) finally: # attempt to send audit event even if the cache invalidation raises notifications.Audit.deleted(self._PROJECT, project_id, initiator) def delete_project(self, project_id, initiator=None, cascade=False): """Delete one project or a subtree. :param cascade: If true, the specified project and all its sub-projects are deleted. Otherwise, only the specified project is deleted. :type cascade: boolean :raises keystone.exception.ValidationError: if project is a domain :raises keystone.exception.Forbidden: if project is not a leaf """ project = self.driver.get_project(project_id) if project.get('is_domain'): self._delete_domain(project, initiator) else: self._delete_project(project, initiator, cascade) def _delete_project(self, project, initiator=None, cascade=False): project_id = project['id'] if project['is_domain'] and project['enabled']: raise exception.ValidationError( message=_('cannot delete an enabled project acting as a ' 'domain. Please disable the project %s first.') % project.get('id')) if not self.is_leaf_project(project_id) and not cascade: raise exception.ForbiddenNotSecurity( _('Cannot delete the project %s since it is not a leaf in the ' 'hierarchy. Use the cascade option if you want to delete a ' 'whole subtree.') % project_id) if cascade: # Getting reversed project's subtrees list, i.e. from the leaves # to the root, so we do not break parent_id FK. subtree_list = self.list_projects_in_subtree(project_id) subtree_list.reverse() if not self._check_whole_subtree_is_disabled( project_id, subtree_list=subtree_list): raise exception.ForbiddenNotSecurity( _('Cannot delete project %(project_id)s since its subtree ' 'contains enabled projects.') % {'project_id': project_id}) project_list = subtree_list + [project] projects_ids = [x['id'] for x in project_list] ret = self.driver.delete_projects_from_ids(projects_ids) for prj in project_list: self._post_delete_cleanup_project(prj['id'], prj, initiator) else: ret = self.driver.delete_project(project_id) self._post_delete_cleanup_project(project_id, project, initiator) reason = ( 'The token cache is being invalidate because project ' '%(project_id)s was deleted. Authorization will be recalculated ' 'and enforced accordingly the next time users authenticate or ' 'validate a token.' 
% {'project_id': project_id} ) notifications.invalidate_token_cache_notification(reason) return ret def _filter_projects_list(self, projects_list, user_id): user_projects = PROVIDERS.assignment_api.list_projects_for_user( user_id ) user_projects_ids = set([proj['id'] for proj in user_projects]) # Keep only the projects present in user_projects return [proj for proj in projects_list if proj['id'] in user_projects_ids] def _assert_valid_project_id(self, project_id): if project_id is None: msg = _('Project field is required and cannot be empty.') raise exception.ValidationError(message=msg) # Check if project_id exists self.get_project(project_id) def _include_limits(self, projects): """Modify a list of projects to include limit information. :param projects: a list of project references including an `id` :type projects: list of dictionaries """ for project in projects: hints = driver_hints.Hints() hints.add_filter('project_id', project['id']) limits = PROVIDERS.unified_limit_api.list_limits(hints) project['limits'] = limits def list_project_parents(self, project_id, user_id=None, include_limits=False): self._assert_valid_project_id(project_id) parents = self.driver.list_project_parents(project_id) # If a user_id was provided, the returned list should be filtered # against the projects this user has access to. if user_id: parents = self._filter_projects_list(parents, user_id) if include_limits: self._include_limits(parents) return parents def _build_parents_as_ids_dict(self, project, parents_by_id): # NOTE(rodrigods): we don't rely in the order of the projects returned # by the list_project_parents() method. Thus, we create a project cache # (parents_by_id) in order to access each parent in constant time and # traverse up the hierarchy. def traverse_parents_hierarchy(project): parent_id = project.get('parent_id') if not parent_id: return None parent = parents_by_id[parent_id] return {parent_id: traverse_parents_hierarchy(parent)} return traverse_parents_hierarchy(project) def get_project_parents_as_ids(self, project): """Get the IDs from the parents from a given project. The project IDs are returned as a structured dictionary traversing up the hierarchy to the top level project. For example, considering the following project hierarchy:: A | +-B-+ | | C D If we query for project C parents, the expected return is the following dictionary:: 'parents': { B['id']: { A['id']: None } } """ parents_list = self.list_project_parents(project['id']) parents_as_ids = self._build_parents_as_ids_dict( project, {proj['id']: proj for proj in parents_list}) return parents_as_ids def list_projects_in_subtree(self, project_id, user_id=None, include_limits=False): self._assert_valid_project_id(project_id) subtree = self.driver.list_projects_in_subtree(project_id) # If a user_id was provided, the returned list should be filtered # against the projects this user has access to. if user_id: subtree = self._filter_projects_list(subtree, user_id) if include_limits: self._include_limits(subtree) return subtree def _build_subtree_as_ids_dict(self, project_id, subtree_by_parent): # NOTE(rodrigods): we perform a depth first search to construct the # dictionaries representing each level of the subtree hierarchy. In # order to improve this traversal performance, we create a cache of # projects (subtree_py_parent) that accesses in constant time the # direct children of a given project. 
def traverse_subtree_hierarchy(project_id): children = subtree_by_parent.get(project_id) if not children: return None children_ids = {} for child in children: children_ids[child['id']] = traverse_subtree_hierarchy( child['id']) return children_ids return traverse_subtree_hierarchy(project_id) def get_projects_in_subtree_as_ids(self, project_id): """Get the IDs from the projects in the subtree from a given project. The project IDs are returned as a structured dictionary representing their hierarchy. For example, considering the following project hierarchy:: A | +-B-+ | | C D If we query for project A subtree, the expected return is the following dictionary:: 'subtree': { B['id']: { C['id']: None, D['id']: None } } """ def _projects_indexed_by_parent(projects_list): projects_by_parent = {} for proj in projects_list: parent_id = proj.get('parent_id') if parent_id: if parent_id in projects_by_parent: projects_by_parent[parent_id].append(proj) else: projects_by_parent[parent_id] = [proj] return projects_by_parent subtree_list = self.list_projects_in_subtree(project_id) subtree_as_ids = self._build_subtree_as_ids_dict( project_id, _projects_indexed_by_parent(subtree_list)) return subtree_as_ids def list_domains_from_ids(self, domain_ids): """List domains for the provided list of ids. :param domain_ids: list of ids :returns: a list of domain_refs. This method is used internally by the assignment manager to bulk read a set of domains given their ids. """ # Retrieve the projects acting as domains get their correspondent # domains projects = self.list_projects_from_ids(domain_ids) domains = [self._get_domain_from_project(project) for project in projects] return domains @MEMOIZE def get_domain(self, domain_id): try: # Retrieve the corresponding project that acts as a domain project = self.driver.get_project(domain_id) # the DB backend might not operate in case sensitive mode, # therefore verify for exact match of IDs if domain_id != project['id']: raise exception.DomainNotFound(domain_id=domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) # Return its correspondent domain return self._get_domain_from_project(project) @MEMOIZE def get_domain_by_name(self, domain_name): try: # Retrieve the corresponding project that acts as a domain project = self.driver.get_project_by_name(domain_name, domain_id=None) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_name) # Return its correspondent domain return self._get_domain_from_project(project) def _get_domain_from_project(self, project_ref): """Create a domain ref from a project ref. Based on the provided project ref, create a domain ref, so that the result can be returned in response to a domain API call. """ if not project_ref['is_domain']: LOG.error('Asked to convert a non-domain project into a ' 'domain - Domain: %(domain_id)s, Project ID: ' '%(id)s, Project Name: %(project_name)s', {'domain_id': project_ref['domain_id'], 'id': project_ref['id'], 'project_name': project_ref['name']}) raise exception.DomainNotFound(domain_id=project_ref['id']) domain_ref = project_ref.copy() # As well as the project specific attributes that we need to remove, # there is an old compatibility issue in that update project (as well # as extracting an extra attributes), also includes a copy of the # actual extra dict as well - something that update domain does not do. 
for k in ['parent_id', 'domain_id', 'is_domain', 'extra']: domain_ref.pop(k, None) return domain_ref def create_domain(self, domain_id, domain, initiator=None): if (CONF.resource.domain_name_url_safe != 'off' and utils.is_not_url_safe(domain['name'])): self._raise_reserved_character_exception('Domain', domain['name']) project_from_domain = base.get_project_from_domain(domain) is_domain_project = self.create_project( domain_id, project_from_domain, initiator) return self._get_domain_from_project(is_domain_project) @manager.response_truncated def list_domains(self, hints=None): projects = self.list_projects_acting_as_domain(hints) domains = [self._get_domain_from_project(project) for project in projects] return domains def update_domain(self, domain_id, domain, initiator=None): # TODO(henry-nash): We shouldn't have to check for the federated domain # here as well as _update_project, but currently our tests assume the # checks are done in a specific order. The tests should be refactored. self.assert_domain_not_federated(domain_id, domain) project = base.get_project_from_domain(domain) try: original_domain = self.driver.get_project(domain_id) project = self._update_project(domain_id, project, initiator) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) domain_from_project = self._get_domain_from_project(project) self.get_domain.invalidate(self, domain_id) self.get_domain_by_name.invalidate(self, original_domain['name']) return domain_from_project def delete_domain(self, domain_id, initiator=None): # Use the driver directly to get the project that acts as a domain and # prevent using old cached value. try: domain = self.driver.get_project(domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) self._delete_domain(domain, initiator) def _delete_domain(self, domain, initiator=None): # To help avoid inadvertent deletes, we insist that the domain # has been previously disabled. This also prevents a user deleting # their own domain since, once it is disabled, they won't be able # to get a valid token to issue this delete. if domain['enabled']: raise exception.ForbiddenNotSecurity( _('Cannot delete a domain that is enabled, please disable it ' 'first.')) domain_id = domain['id'] self._delete_domain_contents(domain_id) notifications.Audit.internal( notifications.DOMAIN_DELETED, domain_id ) self._delete_project(domain, initiator) try: self.get_domain.invalidate(self, domain_id) self.get_domain_by_name.invalidate(self, domain['name']) # Delete any database stored domain config PROVIDERS.domain_config_api.delete_config_options(domain_id) PROVIDERS.domain_config_api.release_registration(domain_id) finally: # attempt to send audit event even if the cache invalidation raises notifications.Audit.deleted(self._DOMAIN, domain_id, initiator) def _delete_domain_contents(self, domain_id): """Delete the contents of a domain. Before we delete a domain, we need to remove all the entities that are owned by it, i.e. Projects. To do this we call the delete function for these entities, which are themselves responsible for deleting any credentials and role grants associated with them as well as revoking any relevant tokens. 
""" def _delete_projects(project, projects, examined): if project['id'] in examined: msg = ('Circular reference or a repeated entry found ' 'projects hierarchy - %(project_id)s.') LOG.error(msg, {'project_id': project['id']}) return examined.add(project['id']) children = [proj for proj in projects if proj.get('parent_id') == project['id']] for proj in children: _delete_projects(proj, projects, examined) try: self._delete_project(project, initiator=None) except exception.ProjectNotFound: LOG.debug(('Project %(projectid)s not found when ' 'deleting domain contents for %(domainid)s, ' 'continuing with cleanup.'), {'projectid': project['id'], 'domainid': domain_id}) proj_refs = self.list_projects_in_domain(domain_id) # Deleting projects recursively roots = [x for x in proj_refs if x.get('parent_id') == domain_id] examined = set() for project in roots: _delete_projects(project, proj_refs, examined) @manager.response_truncated def list_projects(self, hints=None): if hints: tag_filters = {} # Handle project tag filters separately for f in list(hints.filters): if f['name'] in TAG_SEARCH_FILTERS: tag_filters[f['name']] = f['value'] hints.filters.remove(f) if tag_filters: tag_refs = self.driver.list_projects_by_tags(tag_filters) project_refs = self.driver.list_projects(hints) ref_ids = [ref['id'] for ref in tag_refs] return [ref for ref in project_refs if ref['id'] in ref_ids] return self.driver.list_projects(hints or driver_hints.Hints()) # NOTE(henry-nash): list_projects_in_domain is actually an internal method # and not exposed via the API. Therefore there is no need to support # driver hints for it. def list_projects_in_domain(self, domain_id): return self.driver.list_projects_in_domain(domain_id) def list_projects_acting_as_domain(self, hints=None): return self.driver.list_projects_acting_as_domain( hints or driver_hints.Hints()) @MEMOIZE def get_project(self, project_id): return self.driver.get_project(project_id) @MEMOIZE def get_project_by_name(self, project_name, domain_id): return self.driver.get_project_by_name(project_name, domain_id) def _require_matching_domain_id(self, new_ref, orig_ref): """Ensure the current domain ID matches the reference one, if any. Provided we want domain IDs to be immutable, check whether any domain_id specified in the ref dictionary matches the existing domain_id for this entity. :param new_ref: the dictionary of new values proposed for this entity :param orig_ref: the dictionary of original values proposed for this entity :raises: :class:`keystone.exception.ValidationError` """ if 'domain_id' in new_ref: if new_ref['domain_id'] != orig_ref['domain_id']: raise exception.ValidationError(_('Cannot change Domain ID')) def create_project_tag(self, project_id, tag, initiator=None): """Create a new tag on project. :param project_id: ID of a project to create a tag for :param tag: The string value of a tag to add :returns: The value of the created tag """ project = self.driver.get_project(project_id) tag_name = tag.strip() project['tags'].append(tag_name) self.update_project(project_id, {'tags': project['tags']}) notifications.Audit.created( self._PROJECT_TAG, tag_name, initiator) return tag_name def get_project_tag(self, project_id, tag_name): """Return information for a single tag on a project. 
:param project_id: ID of a project to retrive a tag from :param tag_name: Name of a tag to return :raises keystone.exception.ProjectTagNotFound: If the tag name does not exist on the project :returns: The tag value """ project = self.driver.get_project(project_id) if tag_name not in project.get('tags'): raise exception.ProjectTagNotFound(project_tag=tag_name) return tag_name def list_project_tags(self, project_id): """List all tags on project. :param project_id: The ID of a project :returns: A list of tags from a project """ project = self.driver.get_project(project_id) return project.get('tags', []) def update_project_tags(self, project_id, tags, initiator=None): """Update all tags on a project. :param project_id: The ID of the project to update :param tags: A list of tags to update on the project :returns: A list of tags """ self.driver.get_project(project_id) tag_list = [t.strip() for t in tags] project = {'tags': tag_list} self.update_project(project_id, project) return tag_list def delete_project_tag(self, project_id, tag): """Delete single tag from project. :param project_id: The ID of the project :param tag: The tag value to delete :raises keystone.exception.ProjectTagNotFound: If the tag name does not exist on the project """ project = self.driver.get_project(project_id) try: project['tags'].remove(tag) except ValueError: raise exception.ProjectTagNotFound(project_tag=tag) self.update_project(project_id, project) notifications.Audit.deleted(self._PROJECT_TAG, tag) def check_project_depth(self, max_depth=None): """Check project depth whether greater than input or not.""" if max_depth: exceeded_project_ids = self.driver.check_project_depth(max_depth) if exceeded_project_ids: raise exception.LimitTreeExceedError(exceeded_project_ids, max_depth) MEMOIZE_CONFIG = cache.get_memoization_decorator(group='domain_config') class DomainConfigManager(manager.Manager): """Default pivot point for the Domain Config backend.""" # NOTE(henry-nash): In order for a config option to be stored in the # standard table, it must be explicitly whitelisted. Options marked as # sensitive are stored in a separate table. Attempting to store options # that are not listed as either whitelisted or sensitive will raise an # exception. # # Only those options that affect the domain-specific driver support in # the identity manager are supported. driver_namespace = 'keystone.resource.domain_config' _provides_api = 'domain_config_api' # We explicitly state each whitelisted option instead of pulling all ldap # options from CONF and selectively pruning them to prevent a security # lapse. That way if a new ldap CONF key/value were to be added it wouldn't # automatically be added to the whitelisted options unless that is what was # intended. In which case, we explicitly add it to the list ourselves. 
whitelisted_options = { 'identity': ['driver', 'list_limit'], 'ldap': [ 'url', 'user', 'suffix', 'query_scope', 'page_size', 'alias_dereferencing', 'debug_level', 'chase_referrals', 'user_tree_dn', 'user_filter', 'user_objectclass', 'user_id_attribute', 'user_name_attribute', 'user_mail_attribute', 'user_description_attribute', 'user_pass_attribute', 'user_enabled_attribute', 'user_enabled_invert', 'user_enabled_mask', 'user_enabled_default', 'user_attribute_ignore', 'user_default_project_id_attribute', 'user_enabled_emulation', 'user_enabled_emulation_dn', 'user_enabled_emulation_use_group_config', 'user_additional_attribute_mapping', 'group_tree_dn', 'group_filter', 'group_objectclass', 'group_id_attribute', 'group_name_attribute', 'group_members_are_ids', 'group_member_attribute', 'group_desc_attribute', 'group_attribute_ignore', 'group_additional_attribute_mapping', 'tls_cacertfile', 'tls_cacertdir', 'use_tls', 'tls_req_cert', 'use_pool', 'pool_size', 'pool_retry_max', 'pool_retry_delay', 'pool_connection_timeout', 'pool_connection_lifetime', 'use_auth_pool', 'auth_pool_size', 'auth_pool_connection_lifetime' ] } sensitive_options = { 'identity': [], 'ldap': ['password'] } def __init__(self): super(DomainConfigManager, self).__init__(CONF.domain_config.driver) def _assert_valid_config(self, config): """Ensure the options in the config are valid. This method is called to validate the request config in create and update manager calls. :param config: config structure being created or updated """ # Something must be defined in the request if not config: raise exception.InvalidDomainConfig( reason=_('No options specified')) # Make sure the groups/options defined in config itself are valid for group in config: if (not config[group] or not isinstance(config[group], dict)): msg = _('The value of group %(group)s specified in the ' 'config should be a dictionary of options') % { 'group': group} raise exception.InvalidDomainConfig(reason=msg) for option in config[group]: self._assert_valid_group_and_option(group, option) def _assert_valid_group_and_option(self, group, option): """Ensure the combination of group and option is valid. :param group: optional group name, if specified it must be one we support :param option: optional option name, if specified it must be one we support and a group must also be specified """ if not group and not option: # For all calls, it's OK for neither to be defined, it means you # are operating on all config options for that domain. return if not group and option: # Our API structure should prevent this from ever happening, so if # it does, then this is coding error. 
            msg = _('Option %(option)s found with no group specified while '
                    'checking domain configuration request') % {
                        'option': option}
            raise exception.UnexpectedError(exception=msg)

        if (group and group not in self.whitelisted_options and
                group not in self.sensitive_options):
            msg = _('Group %(group)s is not supported '
                    'for domain specific configurations') % {'group': group}
            raise exception.InvalidDomainConfig(reason=msg)

        if option:
            if (option not in self.whitelisted_options[group] and
                    option not in self.sensitive_options[group]):
                msg = _('Option %(option)s in group %(group)s is not '
                        'supported for domain specific configurations') % {
                            'group': group, 'option': option}
                raise exception.InvalidDomainConfig(reason=msg)

    def _is_sensitive(self, group, option):
        return option in self.sensitive_options[group]

    def _config_to_list(self, config):
        """Build list of options for use by backend drivers."""
        option_list = []
        for group in config:
            for option in config[group]:
                option_list.append({
                    'group': group, 'option': option,
                    'value': config[group][option],
                    'sensitive': self._is_sensitive(group, option)})

        return option_list

    def _option_dict(self, group, option):
        group_attr = getattr(CONF, group)
        return {'group': group, 'option': option,
                'value': getattr(group_attr, option)}

    def _list_to_config(self, whitelisted, sensitive=None, req_option=None):
        """Build config dict from a list of option dicts.

        :param whitelisted: list of dicts containing options and their groups,
                            this has already been filtered to only contain
                            those options to include in the output.
        :param sensitive: list of dicts containing sensitive options and their
                          groups, this has already been filtered to only
                          contain those options to include in the output.
        :param req_option: the individual option requested

        :returns: a config dict, including sensitive if specified

        """
        the_list = whitelisted + (sensitive or [])
        if not the_list:
            return {}

        if req_option:
            # The request was specific to an individual option, so
            # no need to include the group in the output. We first check that
            # there is only one option in the answer (and that it's the right
            # one) - if not, something has gone wrong and we raise an error
            if len(the_list) > 1 or the_list[0]['option'] != req_option:
                LOG.error('Unexpected results in response for domain '
                          'config - %(count)s responses, first option is '
                          '%(option)s, expected option %(expected)s',
                          {'count': len(the_list),
                           'option': the_list[0]['option'],
                           'expected': req_option})
                raise exception.UnexpectedError(
                    _('An unexpected error occurred when retrieving domain '
                      'configs'))
            return {the_list[0]['option']: the_list[0]['value']}

        config = {}
        for option in the_list:
            config.setdefault(option['group'], {})
            config[option['group']][option['option']] = option['value']

        return config

    def create_config(self, domain_id, config):
        """Create config for a domain.

        :param domain_id: the domain in question
        :param config: the dict of config groups/options to assign to the
                       domain

        Creates a new config, overwriting any previous config (no Conflict
        error will be generated).

        :returns: a dict of group dicts containing the options, with any that
                  are sensitive removed
        :raises keystone.exception.InvalidDomainConfig: when the config
                contains options we do not support

        """
        self._assert_valid_config(config)
        option_list = self._config_to_list(config)
        self.create_config_options(domain_id, option_list)
        # Since we are caching on the full substituted config, we just
        # invalidate here, rather than try and create the right result to
        # cache.
self.get_config_with_sensitive_info.invalidate(self, domain_id) return self._list_to_config(self.list_config_options(domain_id)) def get_config(self, domain_id, group=None, option=None): """Get config, or partial config, for a domain. :param domain_id: the domain in question :param group: an optional specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the whitelisted options, filtered by group and option specified :raises keystone.exception.DomainConfigNotFound: when no config found that matches domain_id, group and option specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support An example response:: { 'ldap': { 'url': 'myurl' 'user_tree_dn': 'OU=myou'}, 'identity': { 'driver': 'ldap'} } """ self._assert_valid_group_and_option(group, option) whitelisted = self.list_config_options(domain_id, group, option) if whitelisted: return self._list_to_config(whitelisted, req_option=option) if option: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option} elif group: msg = _('group %(group)s') % {'group': group} else: msg = _('any options') raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) def get_security_compliance_config(self, domain_id, group, option=None): r"""Get full or partial security compliance config from configuration. :param domain_id: the domain in question :param group: a specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the whitelisted options, filtered by group and option specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support An example response:: { 'security_compliance': { 'password_regex': '^(?=.*\d)(?=.*[a-zA-Z]).{7,}$' 'password_regex_description': 'A password must consist of at least 1 letter, ' '1 digit, and have a minimum length of 7 characters' } } """ if domain_id != CONF.identity.default_domain_id: msg = _('Reading security compliance information for any domain ' 'other than the default domain is not allowed or ' 'supported.') raise exception.InvalidDomainConfig(reason=msg) config_list = [] readable_options = ['password_regex', 'password_regex_description'] if option and option not in readable_options: msg = _('Reading security compliance values other than ' 'password_regex and password_regex_description is not ' 'allowed.') raise exception.InvalidDomainConfig(reason=msg) elif option and option in readable_options: config_list.append(self._option_dict(group, option)) elif not option: for op in readable_options: config_list.append(self._option_dict(group, op)) # We already validated that the group is the security_compliance group # so we can move along and start validating the options return self._list_to_config(config_list, req_option=option) def update_config(self, domain_id, config, group=None, option=None): """Update config, or partial config, for a domain. 
:param domain_id: the domain in question :param config: the config dict containing and groups/options being updated :param group: an optional specific group of options, which if specified must appear in config, with no other groups :param option: an optional specific option within the group, which if specified must appear in config, with no other options The contents of the supplied config will be merged with the existing config for this domain, updating or creating new options if these did not previously exist. If group or option is specified, then the update will be limited to those specified items and the inclusion of other options in the supplied config will raise an exception, as will the situation when those options do not already exist in the current config. :returns: a dict of groups containing all whitelisted options :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support or one that does not exist in the original config """ def _assert_valid_update(domain_id, config, group=None, option=None): """Ensure the combination of config, group and option is valid.""" self._assert_valid_config(config) self._assert_valid_group_and_option(group, option) # If a group has been specified, then the request is to # explicitly only update the options in that group - so the config # must not contain anything else. Further, that group must exist in # the original config. Likewise, if an option has been specified, # then the group in the config must only contain that option and it # also must exist in the original config. if group: if len(config) != 1 or (option and len(config[group]) != 1): if option: msg = _('Trying to update option %(option)s in group ' '%(group)s, so that, and only that, option ' 'must be specified in the config') % { 'group': group, 'option': option} else: msg = _('Trying to update group %(group)s, so that, ' 'and only that, group must be specified in ' 'the config') % {'group': group} raise exception.InvalidDomainConfig(reason=msg) # So we now know we have the right number of entries in the # config that align with a group/option being specified, but we # must also make sure they match. 
if group not in config: msg = _('request to update group %(group)s, but config ' 'provided contains group %(group_other)s ' 'instead') % { 'group': group, 'group_other': list(config.keys())[0]} raise exception.InvalidDomainConfig(reason=msg) if option and option not in config[group]: msg = _('Trying to update option %(option)s in group ' '%(group)s, but config provided contains option ' '%(option_other)s instead') % { 'group': group, 'option': option, 'option_other': list(config[group].keys())[0]} raise exception.InvalidDomainConfig(reason=msg) # Finally, we need to check if the group/option specified # already exists in the original config - since if not, to keep # with the semantics of an update, we need to fail with # a DomainConfigNotFound if not self._get_config_with_sensitive_info(domain_id, group, option): if option: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) else: msg = _('group %(group)s') % {'group': group} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) update_config = config if group and option: # The config will just be a dict containing the option and # its value, so make it look like a single option under the # group in question update_config = {group: config} _assert_valid_update(domain_id, update_config, group, option) option_list = self._config_to_list(update_config) self.update_config_options(domain_id, option_list) self.get_config_with_sensitive_info.invalidate(self, domain_id) return self.get_config(domain_id) def delete_config(self, domain_id, group=None, option=None): """Delete config, or partial config, for the domain. :param domain_id: the domain in question :param group: an optional specific group of options :param option: an optional specific option within the group If group and option are None, then the entire config for the domain is deleted. If group is not None, then just that group of options will be deleted. If group and option are both specified, then just that option is deleted. :raises keystone.exception.InvalidDomainConfig: when group/option parameters specify an option we do not support or one that does not exist in the original config. """ self._assert_valid_group_and_option(group, option) if group: # As this is a partial delete, then make sure the items requested # are valid and exist in the current config current_config = self._get_config_with_sensitive_info(domain_id) # Raise an exception if the group/options specified don't exist in # the current config so that the delete method provides the # correct error semantics. current_group = current_config.get(group) if not current_group: msg = _('group %(group)s') % {'group': group} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) if option and not current_group.get(option): msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) self.delete_config_options(domain_id, group, option) self.get_config_with_sensitive_info.invalidate(self, domain_id) def _get_config_with_sensitive_info(self, domain_id, group=None, option=None): """Get config for a domain/group/option with sensitive info included. This is only used by the methods within this class, which may need to check individual groups or options. 
""" whitelisted = self.list_config_options(domain_id, group, option) sensitive = self.list_config_options(domain_id, group, option, sensitive=True) # Check if there are any sensitive substitutions needed. We first try # and simply ensure any sensitive options that have valid substitution # references in the whitelisted options are substituted. We then check # the resulting whitelisted option and raise a warning if there # appears to be an unmatched or incorrectly constructed substitution # reference. To avoid the risk of logging any sensitive options that # have already been substituted, we first take a copy of the # whitelisted option. # Build a dict of the sensitive options ready to try substitution sensitive_dict = {s['option']: s['value'] for s in sensitive} for each_whitelisted in whitelisted: if not isinstance(each_whitelisted['value'], six.string_types): # We only support substitutions into string types, if its an # integer, list etc. then just continue onto the next one continue # Store away the original value in case we need to raise a warning # after substitution. original_value = each_whitelisted['value'] warning_msg = '' try: each_whitelisted['value'] = ( each_whitelisted['value'] % sensitive_dict) except KeyError: warning_msg = ( 'Found what looks like an unmatched config option ' 'substitution reference - domain: %(domain)s, group: ' '%(group)s, option: %(option)s, value: %(value)s. Perhaps ' 'the config option to which it refers has yet to be ' 'added?') except (ValueError, TypeError): warning_msg = ( 'Found what looks like an incorrectly constructed ' 'config option substitution reference - domain: ' '%(domain)s, group: %(group)s, option: %(option)s, ' 'value: %(value)s.') if warning_msg: LOG.warning(warning_msg, { 'domain': domain_id, 'group': each_whitelisted['group'], 'option': each_whitelisted['option'], 'value': original_value}) return self._list_to_config(whitelisted, sensitive) @MEMOIZE_CONFIG def get_config_with_sensitive_info(self, domain_id): """Get config for a domain with sensitive info included. This method is not exposed via the public API, but is used by the identity manager to initialize a domain with the fully formed config options. """ return self._get_config_with_sensitive_info(domain_id) def get_config_default(self, group=None, option=None): """Get default config, or partial default config. :param group: an optional specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the default options, filtered by group and option if specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support (or one that is not whitelisted). 
An example response:: { 'ldap': { 'url': 'myurl', 'user_tree_dn': 'OU=myou', ....}, 'identity': { 'driver': 'ldap'} } """ self._assert_valid_group_and_option(group, option) config_list = [] if group: if option: if option not in self.whitelisted_options[group]: msg = _('Reading the default for option %(option)s in ' 'group %(group)s is not supported') % { 'option': option, 'group': group} raise exception.InvalidDomainConfig(reason=msg) config_list.append(self._option_dict(group, option)) else: for each_option in self.whitelisted_options[group]: config_list.append(self._option_dict(group, each_option)) else: for each_group in self.whitelisted_options: for each_option in self.whitelisted_options[each_group]: config_list.append( self._option_dict(each_group, each_option) ) return self._list_to_config(config_list, req_option=option)
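# A note on the sensitive-option substitution performed in
# _get_config_with_sensitive_info above: it is plain %-style string formatting
# against a dict of the sensitive values. The minimal standalone sketch below
# uses made-up option names and values purely for illustration; it is not a
# real keystone configuration.
whitelisted = [
    {'group': 'ldap', 'option': 'url', 'value': 'ldap://ldap.example.com'},
    {'group': 'ldap', 'option': 'user', 'value': 'cn=admin,password=%(password)s'},
]
sensitive = [
    {'group': 'ldap', 'option': 'password', 'value': 'secret123'},
]

sensitive_dict = {s['option']: s['value'] for s in sensitive}

for entry in whitelisted:
    if not isinstance(entry['value'], str):
        # Substitution is only attempted for string values, as above.
        continue
    try:
        entry['value'] = entry['value'] % sensitive_dict
    except KeyError:
        # An unmatched %(...)s reference; the real method logs a warning here.
        pass

print(whitelisted[1]['value'])  # -> cn=admin,password=secret123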
the-stack_106_30922
""" This module lets you practice one form of the ACCUMULATOR pattern, namely, the "IN GRAPHICS" form which features: -- DRAWING OBJECTS via ACCUMULATING positions and/or sizes, as in: x = x + pixels Additionally, it emphasizes that you must ** DO A CONCRETE EXAMPLE BY HAND ** before you can implement a solution to the problem in Python. Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays, Aaron Wilkin, their colleagues, and Eddie Mannan. """ # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE. import rosegraphics as rg # ----------------------------------------------------------------------------- # Students: As you work each of these problems, ask yourself: # 1. Do I need a loop? # If so, HOW MANY LOOPS? # # 2. Where I need a loop, what needs to happen: # -- BEFORE the loop? # -- IN the loop? # -- AFTER the loop? # ----------------------------------------------------------------------------- def main(): """ Calls the TEST functions in this module. """ run_test_draw_squares_from_circle() run_test_draw_circles_from_rectangle() run_test_draw_lines_from_rectangles() def run_test_draw_squares_from_circle(): """ Tests the draw_squares_from_circle function. """ print() print('--------------------------------------------------') print('Testing the draw_squares_from_circle function:') print(' See the graphics windows that pop up.') print('--------------------------------------------------') # ------------------------------------------------------------------------- # TWO tests on ONE window. # ------------------------------------------------------------------------- title = 'Tests 1 and 2 of DRAW_SQUARES_FROM_CIRCLE: ' title = title + ' 7 little squares from green circle, 4 big squares' window1 = rg.RoseWindow(650, 350, title) # Test 1: circle = rg.Circle(rg.Point(100, 100), 20) circle.fill_color = 'green' draw_squares_from_circle(7, circle, window1) # Test 2: circle = rg.Circle(rg.Point(350, 70), 50) draw_squares_from_circle(4, circle, window1) window1.close_on_mouse_click() # ------------------------------------------------------------------------- # A third test on ANOTHER window. # ------------------------------------------------------------------------- title = 'Test 3 of DRAW_SQUARES_FROM_CIRCLE: ' title += ' 20 teeny squares from blue circle!' window2 = rg.RoseWindow(525, 300, title) # Test 3: circle = rg.Circle(rg.Point(50, 50), 10) circle.fill_color = 'blue' draw_squares_from_circle(20, circle, window2) window2.close_on_mouse_click() def draw_squares_from_circle(n, circle, window): """ What comes in: Three arguments: -- A positive integer n. -- An rg.Circle. -- An rg.RoseWindow. What goes out: Nothing (i.e., None). Side effects: See draw_squares_from_circle.pdf in this project for pictures that may help you better understand the following specification: First draws the given rg.Circle on the given rg.RoseWindow. Then draws n rg.Squares on the given rg.RoseWindow, such that: -- The first rg.Square circumscribes the given rg.Circle. -- Each subsequent rg.Square has its upper-left quarter on top of the lower-right quarter of the previous rg.Square, so that the squares form an overlapping sequence that goes down and to the right. Must ** render ** but ** NOT close ** the window. 
Type hints: :type n: int :type circle: rg.Circle :type window: rg.RoseWindow """ circle.attach_to(window) square = rg.Square(circle.center, (circle.radius * 2)) square.attach_to(window) for k in range(n - 1): square = rg.Square(rg.Point(square.center.x + circle.radius, square.center.y + circle.radius), (circle.radius * 2)) square.attach_to(window) window.render() # ------------------------------------------------------------------------- # DONE: 2. Implement and test this function. # Tests have been written for you (above). # # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern, # as in draw_row_of_circles in m1e, # instead of directly using the loop variable. # ########################################################################### # HINT: To figure out the code that computes the necessary # positions of each square, # ** FIRST DO A CONCRETE EXAMPLE BY HAND! ** ########################################################################### # ------------------------------------------------------------------------- def run_test_draw_circles_from_rectangle(): """ Tests the draw_circles_from_rectangle function. """ print() print('--------------------------------------------------') print('Testing the draw_circles_from_rectangle function:') print(' See the graphics windows that pop up.') print('--------------------------------------------------') title = 'Tests 1 and 2 of CIRCLES_FROM_RECTANGLE: ' title = title + ' 4 Green Circles to Left, 5 Circles Up, 8 Blue Circles to Left, 3 Circles Up' window1 = rg.RoseWindow(650, 350, title) # Test 1: rectangle = rg.Rectangle(rg.Point(200, 200), rg.Point(250, 250)) rectangle.fill_color = 'green' draw_circles_from_rectangle(4, 5, rectangle, window1) # Test 2: rectangle = rg.Rectangle(rg.Point(350, 350), rg.Point(370, 370)) rectangle.fill_color = 'blue' draw_circles_from_rectangle(8, 3, rectangle, window1) window1.close_on_mouse_click() # ------------------------------------------------------------------------- # A third test on ANOTHER window. # ------------------------------------------------------------------------- title = 'Test 3 of CIRCLES_FROM-RECTANGLE: ' title += ' 6 Yellow Circles Left, 10 Circles Up' window2 = rg.RoseWindow(525, 300, title) # Test 3: rectangle = rg.Rectangle(rg.Point(200, 200), rg.Point(250, 250)) rectangle.fill_color = 'yellow' draw_circles_from_rectangle(6, 10, rectangle, window2) window2.close_on_mouse_click() # ------------------------------------------------------------------------- # DONE: 3. Implement this TEST function. # It TESTS the draw_circles_from_rectangle function # defined below. Include at least ** 3 ** tests, of which # *** at least TWO tests are on ONE window and # *** at least ONE test is on a DIFFERENT window. # ########################################################################### # HINT: Consider using the same test cases as suggested by the # pictures in draw_circles_from_rectangle.pdf in this project. # Follow the same form as the example in a previous problem. ########################################################################### # ------------------------------------------------------------------------- def draw_circles_from_rectangle(m, n, rectangle, window): """ What comes in: Four arguments: -- Positive integers m and n. -- An rg.Rectangle. -- An rg.RoseWindow. What goes out: Nothing (i.e., None). 
Side effects: See draw_circles_from_rectangle.pdf in this project for pictures that may help you better understand the following specification: First draws the given rg.Rectangle on the given rg.RoseWindow. Then draws m rg.Circles on the given rg.RoseWindow, such that: -- The diameter of each rg.Circle is the same as the height of the given rg.Rectangle. -- The first rg.Circle is immediately to the left of the given rg.Rectangle -- Each subsequent rg.Circle is immediately to the left of the previous rg.Circle, so that the circles form a row that goes to the left. -- Each rg. Circle has the same fill_color as the given rg.Rectangle (and has no outline_color). Then draws n rg.Circles on the given RoseWindow, such that: -- The diameter of each rg.Circle is the same as the width of the given rg.Rectangle. -- The first rg.Circle is immediately above the given rg.Rectangle -- Each subsequent rg.Circle is immediately above the previous rg.Circle, so that the circles form a column that goes up. -- Each rg.Circle has the same outline_color as the given rg.Rectangle (and has no fill_color). Must ** render ** but ** NOT close ** the window. Type hints: :type m: int :type n: int :type rectangle: rg.Rectangle :type window: rg.RoseWindow """ rectangle.attach_to(window) circle1 = rg.Circle(rg.Point(rectangle.get_center().x, rectangle.corner_1.y - ((rectangle.get_width())/2)), (rectangle.get_width()/2)) circle1.attach_to(window) for k in range(n - 1): circle1 = rg.Circle(rg.Point(circle1.center.x, (circle1.center.y - (circle1.radius * 2))), circle1.radius) circle1.attach_to(window) radius = rectangle.get_height()/2 center = rg.Point(rectangle.get_center().x - (radius * 2), rectangle.get_center().y) circle2 = rg.Circle(center, radius) circle2.fill_color = 'green' circle2.attach_to(window) for k in range(m - 1): circle2 = rg.Circle(rg.Point(circle2.center.x - (radius * 2), circle2.center.y), radius) circle2.fill_color = 'green' circle2.attach_to(window) window.render() # ------------------------------------------------------------------------- # DONE: 4. Implement and test this function. # Tests have been written for you (above). # # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern, # as in draw_row_of_circles in m1e, # instead of directly using the loop variable. # ########################################################################### # HINT: To figure out the code that computes the necessary # positions of each circle, # ** FIRST DO A CONCRETE EXAMPLE BY HAND! ** ########################################################################### # ------------------------------------------------------------------------- def run_test_draw_lines_from_rectangles(): """ Tests the draw_lines_from_rectangles function. """ print() print('--------------------------------------------------') print('Testing the draw_lines_from_rectangles function:') print(' See the graphics windows that pop up.') print('--------------------------------------------------') # TWO tests on ONE window. title = 'Tests 1 & 2 of DRAW_LINES_FROM_RECTANGLES:' title += ' 5 lines, 8 lines!' 
window1 = rg.RoseWindow(900, 400, title) rectangle1 = rg.Rectangle(rg.Point(100, 25), rg.Point(150, 125)) rectangle2 = rg.Rectangle(rg.Point(300, 150), rg.Point(400, 175)) rectangle1.outline_color = 'red' rectangle2.outline_color = 'blue' draw_lines_from_rectangles(rectangle1, rectangle2, 5, window1) rectangle1 = rg.Rectangle(rg.Point(870, 30), rg.Point(750, 100)) rectangle2 = rg.Rectangle(rg.Point(700, 90), rg.Point(650, 60)) rectangle2.outline_color = 'green' draw_lines_from_rectangles(rectangle1, rectangle2, 8, window1) window1.close_on_mouse_click() # A third test on ANOTHER window. title = 'Test 3 of DRAW_LINES_FROM_RECTANGLES: 11 lines!' window2 = rg.RoseWindow(700, 700, title) rectangle1 = rg.Rectangle(rg.Point(550, 200), rg.Point(650, 100)) rectangle2 = rg.Rectangle(rg.Point(600, 50), rg.Point(650, 75)) rectangle1.outline_color = 'brown' rectangle2.outline_color = 'cyan' rectangle2.outline_thickness = 10 draw_lines_from_rectangles(rectangle1, rectangle2, 11, window2) window2.close_on_mouse_click() def draw_lines_from_rectangles(rectangle1, rectangle2, n, window): """ What comes in: Four arguments: -- Two rg.Rectangles. -- A positive integer n. -- An rg.RoseWindow. What goes out: Nothing (i.e., None). Side effects: See draw_lines_from_rectangles.pdf in this project for pictures that may help you better understand the following specification: First draws the given rg.Rectangles on the given rg.RoseWindow. Then draws n rg.Lines on the given rg.RoseWindow, such that: -- The 1st rg.Line goes from the center of one of the 1st rg.Rectangle to the center of the 2nd rg.Rectangle. -- The 2nd rg.Line goes from the lower-left corner of the 1st rg.Rectangle and is parallel to the 1st rg.Line, with the same length and direction as the 1st rg.Line. -- Subsequent rg.Lines are shifted from the previous rg.Line in the same way that the 2nd rg.Line is shifted from the 1st. -- Each of the rg.Lines has thickness 5. -- The colors of the rg.Lines alternate, as follows: - The 1st, 3rd, 5th, ... rg.Line has color R1_color - The 2nd, 4th, 6th, ... rg.Line has color R2_color where - R1_color is the outline color of the 1st rg.Rectangle - R2_color is the outline color of the 2nd rg.Rectangle Must ** render ** but ** NOT close ** the window. Type hints: :type rectangle1: rg.Rectangle :type rectangle2: rg.Rectangle :type n: int :type window: rg.RoseWindow """ # ------------------------------------------------------------------------- # TODO: 5. Implement and test this function. # Tests have been written for you (above). # # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern, # as in draw_row_of_circles in m1e, # instead of directly using the loop variable. # ########################################################################### # HINT: To figure out the code that computes the necessary # endpoints for each line, # ** FIRST DO A CONCRETE EXAMPLE BY HAND! ** ########################################################################### # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Calls main to start the ball rolling. # ----------------------------------------------------------------------------- main()
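# The draw_lines_from_rectangles stub above (TODO 5) is left unimplemented.
# The sketch below is one possible outline, not the course's reference
# solution. It assumes the rosegraphics API already used in this module
# (rg.Point, rectangle.corner_1/corner_2, get_center(), outline_color) plus an
# rg.Line(p1, p2) object with 'thickness' and 'color' attributes; it is
# untested.
def draw_lines_from_rectangles_sketch(rectangle1, rectangle2, n, window):
    rectangle1.attach_to(window)
    rectangle2.attach_to(window)

    # The first line runs center-to-center.
    start = rectangle1.get_center()
    end = rectangle2.get_center()

    # Every later line is shifted by the offset from rectangle1's center to
    # its lower-left corner (min x, max y, since y grows downward on screen).
    lower_left_x = min(rectangle1.corner_1.x, rectangle1.corner_2.x)
    lower_left_y = max(rectangle1.corner_1.y, rectangle1.corner_2.y)
    dx = lower_left_x - start.x
    dy = lower_left_y - start.y

    for k in range(n):
        line = rg.Line(rg.Point(start.x + k * dx, start.y + k * dy),
                       rg.Point(end.x + k * dx, end.y + k * dy))
        line.thickness = 5
        # Colors alternate, starting with rectangle1's outline color.
        line.color = rectangle1.outline_color if k % 2 == 0 else rectangle2.outline_color
        line.attach_to(window)

    window.render()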
the-stack_106_30924
#!/usr/bin/env python3
from setuptools import setup, find_packages

long_description = """
tropohelper is a library to speed up creating resources using troposphere
and cloudformation on AWS. Troposphere makes it much easier, but it can
really make a file for creating a stack large and repetitive. Using these
helper functions keeps things much more DRY.
"""

setup(
    name='tropohelper',
    version="1.4.0",
    description='tropohelper is a collection of troposphere helpers to promote DRY.',
    long_description=long_description,
    author='Michael Gorman',
    author_email='[email protected]',
    url='https://github.com/mjgorman/tropohelper',
    packages=find_packages(),
    install_requires=['troposphere==2.4.6', 'awacs>=0.7.2'],
    test_suite='nose.collector',
    tests_require=['nose<2.0']
)
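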
the-stack_106_30925
from random import * from PD_Naive.naive_pd import * # in1 = [(0, 000001), (0, 000010), (0, 000011), (1, 000100), (3, 000101), (3, 000110), (4, 000111), (4, 001000)] inp_1 = [(0, '00001'), (0, '00010'), (0, '00011'), (1, '00100'), (3, '00101'), (3, '00110'), (4, '00111'), (4, '01000')] def coin(): return randint(0, 1) # # in1q = [0, 0, 0, 1, 3, 3, 4, 4] # in1 = [(in1q[i], to_bin_with_leading_zeros(i+1, 5)) for i in range(len(in1q))] # print(in1) # len(in1q) # in1 = [(0, to_bin_with_leading_zeros(0))] # in1 = [(0, to_bin_with_leading_zeros(0))] def to_bin_with_leading_zeros(n: int, length: int) -> str: s = bin(n)[2:] diff = length - len(s) if diff > 0: return "0" * diff + s elif diff == 0: return s else: assert False def t1(m: int, f: int, l: int): """ :param m Quotients (q_i) range interval. (forall q_i, q_i in [m]) :param f Number of elements in PD. :param l Remainder (r_i) length. (|r_i| = l) :return: """ assert f <= m quotient = list(range(f)) remainder = [to_bin_with_leading_zeros(i, l) for i in range(f)] d = naive_pd(m, f, l) for i in range(f): q, r = quotient[i], remainder[i] d.insert(q, r) temp = d.lookup(q, r) if not temp: print("Failed in {:} lookup".format(i)) print(d) return False print("Passed t1.") return True def t2(m: int, f: int, l: int): assert f <= m quotient = list(range(f)) remainder = [to_bin_with_leading_zeros(i, l) for i in range(f)] d = naive_pd(m, f, l) for i in range(f): q, r = quotient[i], remainder[i] d.insert(q, r) temp = d.lookup(q, r) if not temp: print("Failed in {:} lookup".format(i)) print(d) return False d.remove(q, r) temp = d.lookup(q, r) if temp: print("Failed in {:} lookup after deletion".format(i)) print(d) return False print("Passed t2.") return True # random insertion and deletion def t3(reps: int = 1 << 7): f = randint(8, 128) m = randint(f + 1, f * 2) l = randint(11, 31) assert f <= m def single_key(): return randint(0, m), to_bin_with_leading_zeros(randint(1, (1 << l) - 1), l) elements = {single_key() for _ in range(f)} inserted_elements = set() assert len(elements) == f # quotient=sample(range(m), f) # assert len(quotient) == len(set(quotient)) # remainder=[to_bin_with_leading_zeros(randint, l) for i in range(f)] d = naive_pd(m, f, l) for i in range(reps): # print(d.get_head_as_runs()) if coin(): if not elements: continue temp = sample(elements, 1)[0] elements.remove(temp) d.insert(*temp) inserted_elements.add(temp) if not d.lookup(*temp): print("Failed in {:} lookup. case 1.".format(i)) # print(d) return False else: if not inserted_elements: continue temp = sample(inserted_elements, 1)[0] if not d.lookup(*temp): print("Failed in {:} lookup. case 2.".format(i)) # print(d) return False inserted_elements.remove(temp) d.remove(*temp) elements.add(temp) if d.lookup(*temp): print("Failed in {:} lookup. 
False positive".format(i)) # print(d) return False # # q, r = quotient[i], remainder[i] # d.insert(q, r) # temp = d.lookup(q, r) # if not temp: # print("Failed in {:} lookup".format(i)) # print(d) # return False # d.remove(q, r) # temp = d.lookup(q, r) # if temp: # print("Failed in {:} lookup after deletion".format(i)) # print(d) # return False # print("Passed t3.") return True int("10"*4,2) int("110"*2 +"00",2) bi = lambda n:bin(n)[2:] s = bi(170) bi(170) bi(216) s [i for i in range(len(s)) if s[i] == '0'] 170 & () s1 = "1110100110110" len(s1) t1(16, 3, 5) t1(16, 9, 5) t1(16, 15, 5) # t2(16, 3, 5) t2(16, 9, 5) t2(16, 15, 5) # sample(list(range(4)), 2) t3(1 << 9) for i in range(5): assert t3(1 << 9) for i in range(1 << 8): assert t3() print(i, end="\t")
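# The to_bin_with_leading_zeros helper above is just fixed-width binary
# formatting. The equivalent below, using str.zfill from the standard
# library, is shown only to document what the helper produces; the asserts
# match the values the tests above rely on.
def to_bin_with_zfill(n, length):
    s = bin(n)[2:]
    assert len(s) <= length
    return s.zfill(length)

assert to_bin_with_zfill(1, 5) == '00001'
assert to_bin_with_zfill(8, 5) == '01000'
assert to_bin_with_zfill(31, 5) == '11111'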
the-stack_106_30927
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
import re
import os

import pytest
from xdist.dsession import LoadScopeScheduling

from dosagelib.scraper import scrapers


def get_test_scrapers():
    """Return scrapers that should be tested."""
    if 'TESTALL' in os.environ:
        # test all comics (this will take some time)
        # ignore mangadex for now (site is temporarily down)
        scraper_pattern = '^(?!MangaDex)'
    elif 'TESTCOMICS' in os.environ:
        scraper_pattern = os.environ['TESTCOMICS']
    else:
        # Get a limited number of scraper tests by default
        testscrapernames = [
            # "classic" _BasicScraper
            'AbstruseGoose',
            # complex _ParserScraper
            'GoComics/CalvinAndHobbes',
            # _WordPressScraper
            'GrrlPower',
        ]
        scraper_pattern = '^(' + '|'.join(testscrapernames) + ')$'

    matcher = re.compile(scraper_pattern)
    return [
        scraperobj for scraperobj in scrapers.get()
        if matcher.match(scraperobj.name)
    ]


def pytest_generate_tests(metafunc):
    if 'scraperobj' in metafunc.fixturenames:
        scrapers = get_test_scrapers()
        scraperids = [x.name for x in scrapers]
        metafunc.parametrize('scraperobj', scrapers, ids=scraperids)


class LoadModScheduling(LoadScopeScheduling):
    """Implement load scheduling for comic modules. See xdist for details."""

    def _split_scope(self, nodeid):
        mod, test = nodeid.split("::", 1)
        return mod + "::" + test.split("/", 1)[0]


@pytest.mark.trylast
def pytest_xdist_make_scheduler(config, log):
    return LoadModScheduling(config, log)
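# To make the custom scheduler's grouping concrete: _split_scope keys each
# test on everything before the first '/' in its parametrized id, so all
# strips of one comic module land on the same xdist worker. The node ids
# below are hypothetical, not taken from the real dosage test suite.
def _split_scope_demo(nodeid):
    # Same logic as LoadModScheduling._split_scope above.
    mod, test = nodeid.split("::", 1)
    return mod + "::" + test.split("/", 1)[0]

examples = [
    "tests/modules/check_comics.py::test_comicmodule[AbstruseGoose/0]",
    "tests/modules/check_comics.py::test_comicmodule[AbstruseGoose/1]",
    "tests/modules/check_comics.py::test_comicmodule[GrrlPower/0]",
]
scopes = {nodeid: _split_scope_demo(nodeid) for nodeid in examples}
# The first two node ids share a scope; the third gets its own.
assert scopes[examples[0]] == scopes[examples[1]]
assert scopes[examples[0]] != scopes[examples[2]]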
the-stack_106_30928
import os

import telegram
from telegram.ext import Updater, CommandHandler
from telegram import InlineKeyboardMarkup, InlineKeyboardButton


def start(update, context):
    Button1 = InlineKeyboardButton(
        text='Github',
        url='https://github.com/drewdev02'
    )
    Button2 = InlineKeyboardButton(
        text='Me',
        url='https://t.me/Adrewdev'
    )
    update.message.reply_text(
        text='Today is a great day, my love; use "/more" for something special',
        reply_markup=InlineKeyboardMarkup([
            [Button1, Button2]
        ])
    )


def more(update, context):
    Button3 = InlineKeyboardButton(
        text='Tap me, My love',
        url='https://telegra.ph/De-mi-para-ti-por-ser-tu-d%C3%ADa-especial-03-08'  # make a post on telegraph
    )
    update.message.reply_text(
        text='Happy Birthday!!!',
        reply_markup=InlineKeyboardMarkup([
            [Button3]
        ])
    )


if __name__ == '__main__':
    token = os.environ['TOKEN']
    bot = telegram.Bot(token=token)
    updater = Updater(token=token, use_context=True)
    dp = updater.dispatcher
    dp.add_handler(CommandHandler('start', start))
    dp.add_handler(CommandHandler('more', more))
    updater.start_polling()
    print('bot is polling')
    updater.idle()
the-stack_106_30930
from leapp.libraries.actor import library
from leapp import reporting
from leapp.libraries.common.testutils import create_report_mocked


class extract_tgz64_mocked(object):
    def __init__(self):
        self.called = 0
        self.s = None

    def __call__(self, s):
        self.called += 1
        self.s = s


class enable_service_mocked(object):
    def __init__(self):
        self.called = 0
        self.names = []

    def __call__(self, name):
        self.called += 1
        self.names.append(name)


class write_file_mocked(object):
    def __init__(self):
        self.called = 0
        self.name = None
        self.content = None

    def __call__(self, name, content):
        self.called += 1
        self.name = name
        self.content = content


class ntp2chrony_mocked(object):
    def __init__(self, lines):
        self.called = 0
        self.ignored_lines = lines
        self.args = None

    def __call__(self, *args):
        self.called += 1
        self.args = args
        return self.ignored_lines * ['a line']


def test_migration(monkeypatch):
    for ntp_services, chrony_services, ignored_lines in [
            ([], [], 0),
            (['ntpd'], ['chronyd'], 0),
            (['ntpdate'], ['chronyd'], 1),
            (['ntp-wait'], ['chrony-wait'], 0),
            (['ntpd', 'ntpdate', 'ntp-wait'],
             ['chronyd', 'chronyd', 'chrony-wait'], 1),
            ]:
        monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
        monkeypatch.setattr(library, 'extract_tgz64', extract_tgz64_mocked())
        monkeypatch.setattr(library, 'enable_service', enable_service_mocked())
        monkeypatch.setattr(library, 'write_file', write_file_mocked())
        monkeypatch.setattr(library, 'ntp2chrony', ntp2chrony_mocked(ignored_lines))

        library.migrate_ntp(ntp_services, 'abcdef')

        if ntp_services:
            assert reporting.create_report.called == 1
            if ignored_lines > 0:
                assert 'configuration partially migrated to chrony' in \
                    reporting.create_report.report_fields['title']
            else:
                assert 'configuration migrated to chrony' in \
                    reporting.create_report.report_fields['title']

            assert library.extract_tgz64.called == 1
            assert library.extract_tgz64.s == 'abcdef'
            assert library.enable_service.called == len(chrony_services)
            assert library.enable_service.names == chrony_services
            assert library.write_file.called == (0 if 'ntpd' in ntp_services else 1)
            if library.write_file.called:
                assert library.write_file.name == '/etc/ntp.conf.nosources'
                assert 'without ntp configuration' in library.write_file.content
            assert library.ntp2chrony.called == 1
            assert library.ntp2chrony.args == (
                '/',
                '/etc/ntp.conf' if 'ntpd' in ntp_services else '/etc/ntp.conf.nosources',
                '/etc/ntp/step-tickers' if 'ntpdate' in ntp_services else '')
        else:
            assert reporting.create_report.called == 0
            assert library.extract_tgz64.called == 0
            assert library.enable_service.called == 0
            assert library.write_file.called == 0
            assert library.ntp2chrony.called == 0
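# The hand-rolled loop in test_migration re-applies the monkeypatches for
# every case inside a single test. The same table of cases could instead be
# fed through pytest.mark.parametrize, which reports one test id per tuple.
# This is only a sketch of that alternative shape, not how the leapp
# repository actually structures the test; the assertions shown are a subset
# of the ones above.
import pytest

MIGRATION_CASES = [
    ([], [], 0),
    (['ntpd'], ['chronyd'], 0),
    (['ntpdate'], ['chronyd'], 1),
    (['ntp-wait'], ['chrony-wait'], 0),
    (['ntpd', 'ntpdate', 'ntp-wait'], ['chronyd', 'chronyd', 'chrony-wait'], 1),
]


@pytest.mark.parametrize('ntp_services,chrony_services,ignored_lines', MIGRATION_CASES)
def test_migration_parametrized(monkeypatch, ntp_services, chrony_services, ignored_lines):
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    monkeypatch.setattr(library, 'extract_tgz64', extract_tgz64_mocked())
    monkeypatch.setattr(library, 'enable_service', enable_service_mocked())
    monkeypatch.setattr(library, 'write_file', write_file_mocked())
    monkeypatch.setattr(library, 'ntp2chrony', ntp2chrony_mocked(ignored_lines))

    library.migrate_ntp(ntp_services, 'abcdef')

    assert library.ntp2chrony.called == (1 if ntp_services else 0)
    assert library.enable_service.names == chrony_services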
the-stack_106_30933
# Zulip's OpenAPI-based API documentation system is documented at # https://zulip.readthedocs.io/en/latest/documentation/api.html # # This file contains helper functions for generating cURL examples # based on Zulip's OpenAPI definitions, as well as test setup and # fetching of appropriate parameter values to use when running the # cURL examples as part of the tools/test-api test suite. from functools import wraps from typing import Any, Callable, Dict, List, Optional, Set, Tuple from django.utils.timezone import now as timezone_now from zerver.lib.actions import ( do_add_linkifier, do_add_reaction, do_add_realm_playground, do_create_user, update_user_presence, ) from zerver.lib.events import do_events_register from zerver.lib.initial_password import initial_password from zerver.lib.test_classes import ZulipTestCase from zerver.lib.upload import upload_message_file from zerver.lib.users import get_api_key from zerver.models import Client, Message, UserGroup, UserPresence, get_realm, get_user GENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {} REGISTERED_GENERATOR_FUNCTIONS: Set[str] = set() CALLED_GENERATOR_FUNCTIONS: Set[str] = set() # This is a List rather than just a string in order to make it easier # to write to it from another module. AUTHENTICATION_LINE: List[str] = [""] helpers = ZulipTestCase() def openapi_param_value_generator( endpoints: List[str], ) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]: """This decorator is used to register OpenAPI param value genarator functions with endpoints. Example usage: @openapi_param_value_generator(["/messages/render:post"]) def ... """ def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]: @wraps(generator_func) def _record_calls_wrapper() -> Dict[str, object]: CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__) return generator_func() REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__) for endpoint in endpoints: GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper return _record_calls_wrapper return wrapper def assert_all_helper_functions_called() -> None: """Throws an exception if any registered helpers were not called by tests""" if REGISTERED_GENERATOR_FUNCTIONS == CALLED_GENERATOR_FUNCTIONS: return uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS) raise Exception(f"Registered curl API generators were not called: {uncalled_functions}") def patch_openapi_example_values( entry: str, params: List[Dict[str, Any]], request_body: Optional[Dict[str, Any]] = None, ) -> Tuple[List[Dict[str, object]], Optional[Dict[str, object]]]: if entry not in GENERATOR_FUNCTIONS: return params, request_body func = GENERATOR_FUNCTIONS[entry] realm_example_values: Dict[str, object] = func() for param in params: param_name = param["name"] if param_name in realm_example_values: if "content" in param: param["content"]["application/json"]["example"] = realm_example_values[param_name] else: param["example"] = realm_example_values[param_name] if request_body is not None: properties = request_body["content"]["multipart/form-data"]["schema"]["properties"] for key, property in properties.items(): if key in realm_example_values: property["example"] = realm_example_values[key] return params, request_body @openapi_param_value_generator(["/fetch_api_key:post"]) def fetch_api_key() -> Dict[str, object]: email = helpers.example_email("iago") password = initial_password(email) return { "username": email, "password": password, } 
@openapi_param_value_generator( [ "/messages/{message_id}:get", "/messages/{message_id}/history:get", "/messages/{message_id}:patch", "/messages/{message_id}:delete", ] ) def iago_message_id() -> Dict[str, object]: return { "message_id": helpers.send_stream_message(helpers.example_user("iago"), "Denmark"), } @openapi_param_value_generator(["/messages/{message_id}/reactions:delete"]) def add_emoji_to_message() -> Dict[str, object]: user_profile = helpers.example_user("iago") # from OpenAPI format data in zulip.yaml message_id = 43 emoji_name = "octopus" emoji_code = "1f419" reaction_type = "unicode_emoji" message = Message.objects.select_related().get(id=message_id) do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type) return {} @openapi_param_value_generator(["/messages/flags:post"]) def update_flags_message_ids() -> Dict[str, object]: stream_name = "Venice" helpers.subscribe(helpers.example_user("iago"), stream_name) messages = [] for _ in range(3): messages.append(helpers.send_stream_message(helpers.example_user("iago"), stream_name)) return { "messages": messages, } @openapi_param_value_generator(["/mark_stream_as_read:post", "/users/me/{stream_id}/topics:get"]) def get_venice_stream_id() -> Dict[str, object]: return { "stream_id": helpers.get_stream_id("Venice"), } @openapi_param_value_generator(["/streams/{stream_id}:patch"]) def update_stream() -> Dict[str, object]: stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 1") return { "stream_id": stream.id, } @openapi_param_value_generator(["/streams/{stream_id}:delete"]) def create_temp_stream_and_get_id() -> Dict[str, object]: stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 2") return { "stream_id": stream.id, } @openapi_param_value_generator(["/mark_topic_as_read:post"]) def get_denmark_stream_id_and_topic() -> Dict[str, object]: stream_name = "Denmark" topic_name = "Tivoli Gardens" helpers.subscribe(helpers.example_user("iago"), stream_name) helpers.send_stream_message(helpers.example_user("hamlet"), stream_name, topic_name=topic_name) return { "stream_id": helpers.get_stream_id(stream_name), "topic_name": topic_name, } @openapi_param_value_generator(["/users/me/subscriptions/properties:post"]) def update_subscription_data() -> Dict[str, object]: profile = helpers.example_user("iago") helpers.subscribe(profile, "Verona") helpers.subscribe(profile, "social") return { "subscription_data": [ {"stream_id": helpers.get_stream_id("Verona"), "property": "pin_to_top", "value": True}, {"stream_id": helpers.get_stream_id("social"), "property": "color", "value": "#f00f00"}, ], } @openapi_param_value_generator(["/users/me/subscriptions:delete"]) def delete_subscription_data() -> Dict[str, object]: iago = helpers.example_user("iago") zoe = helpers.example_user("ZOE") helpers.subscribe(iago, "Verona") helpers.subscribe(iago, "social") helpers.subscribe(zoe, "Verona") helpers.subscribe(zoe, "social") return {} @openapi_param_value_generator(["/events:get"]) def get_events() -> Dict[str, object]: profile = helpers.example_user("iago") helpers.subscribe(profile, "Verona") client = Client.objects.create(name="curl-test-client-1") response = do_events_register(profile, client, event_types=["message", "realm_emoji"]) helpers.send_stream_message(helpers.example_user("hamlet"), "Verona") return { "queue_id": response["queue_id"], "last_event_id": response["last_event_id"], } @openapi_param_value_generator(["/events:delete"]) def delete_event_queue() -> Dict[str, object]: profile = 
helpers.example_user("iago") client = Client.objects.create(name="curl-test-client-2") response = do_events_register(profile, client, event_types=["message"]) return { "queue_id": response["queue_id"], "last_event_id": response["last_event_id"], } @openapi_param_value_generator(["/users/{user_id_or_email}/presence:get"]) def get_user_presence() -> Dict[str, object]: iago = helpers.example_user("iago") client = Client.objects.create(name="curl-test-client-3") update_user_presence(iago, client, timezone_now(), UserPresence.ACTIVE, False) return {} @openapi_param_value_generator(["/users:post"]) def create_user() -> Dict[str, object]: return { "email": helpers.nonreg_email("test"), } @openapi_param_value_generator(["/user_groups/create:post"]) def create_user_group_data() -> Dict[str, object]: return { "members": [helpers.example_user("hamlet").id, helpers.example_user("othello").id], } @openapi_param_value_generator( ["/user_groups/{user_group_id}:patch", "/user_groups/{user_group_id}:delete"] ) def get_temp_user_group_id() -> Dict[str, object]: user_group, _ = UserGroup.objects.get_or_create(name="temp", realm=get_realm("zulip")) return { "user_group_id": user_group.id, } @openapi_param_value_generator(["/realm/filters/{filter_id}:delete"]) def remove_realm_filters() -> Dict[str, object]: filter_id = do_add_linkifier( get_realm("zulip"), "#(?P<id>[0-9]{2,8})", "https://github.com/zulip/zulip/pull/%(id)s" ) return { "filter_id": filter_id, } @openapi_param_value_generator(["/realm/emoji/{emoji_name}:post", "/user_uploads:post"]) def upload_custom_emoji() -> Dict[str, object]: return { "filename": "zerver/tests/images/animated_img.gif", } @openapi_param_value_generator(["/realm/playgrounds:post"]) def add_realm_playground() -> Dict[str, object]: return { "name": "Python2 playground", "pygments_language": "Python2", "url_prefix": "https://python2.example.com", } @openapi_param_value_generator(["/realm/playgrounds/{playground_id}:delete"]) def remove_realm_playground() -> Dict[str, object]: playground_info = dict( name="Python playground", pygments_language="Python", url_prefix="https://python.example.com", ) playground_id = do_add_realm_playground(get_realm("zulip"), **playground_info) return { "playground_id": playground_id, } @openapi_param_value_generator(["/users/{user_id}:delete"]) def deactivate_user() -> Dict[str, object]: user_profile = do_create_user( email="[email protected]", password=None, full_name="test_user", realm=get_realm("zulip"), acting_user=None, ) return {"user_id": user_profile.id} @openapi_param_value_generator(["/users/me:delete"]) def deactivate_own_user() -> Dict[str, object]: test_user_email = "[email protected]" deactivate_test_user = do_create_user( test_user_email, "secret", get_realm("zulip"), "Mr. Delete", role=200, acting_user=None, ) realm = get_realm("zulip") test_user = get_user(test_user_email, realm) test_user_api_key = get_api_key(test_user) # change authentication line to allow test_client to delete itself. AUTHENTICATION_LINE[0] = f"{deactivate_test_user.email}:{test_user_api_key}" return {} @openapi_param_value_generator(["/attachments/{attachment_id}:delete"]) def remove_attachment() -> Dict[str, object]: user_profile = helpers.example_user("iago") url = upload_message_file("dummy.txt", len(b"zulip!"), "text/plain", b"zulip!", user_profile) attachment_id = url.replace("/user_uploads/", "").split("/")[0] return {"attachment_id": attachment_id}
the-stack_106_30934
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.package import *


class Lmdb(MakefilePackage):
    """Symas LMDB is an extraordinarily fast, memory-efficient database we
    developed for the Symas OpenLDAP Project. With memory-mapped files, it
    has the read performance of a pure in-memory database while retaining
    the persistence of standard disk-based databases."""

    homepage = "https://lmdb.tech/"
    url = "https://github.com/LMDB/lmdb/archive/LMDB_0.9.21.tar.gz"

    version('0.9.29', sha256='22054926b426c66d8f2bc22071365df6e35f3aacf19ad943bc6167d4cae3bebb')
    version('0.9.24', sha256='44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26')
    version('0.9.22', sha256='f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28')
    version('0.9.21', sha256='1187b635a4cc415bb6972bba346121f81edd996e99b8f0816151d4090f90b559')
    version('0.9.16', sha256='49d7b40949f2ced9bc8b23ea6a89e75471a1c9126537a8b268c318a00b84322b')

    build_directory = 'libraries/liblmdb'

    @property
    def build_targets(self):
        return ['CC={0}'.format(spack_cc)]

    @property
    def install_targets(self):
        return ['prefix={0}'.format(self.prefix), 'install']

    @run_after('install')
    def install_pkgconfig(self):
        mkdirp(self.prefix.lib.pkgconfig)

        with open(join_path(self.prefix.lib.pkgconfig, 'lmdb.pc'), 'w') as f:
            f.write('prefix={0}\n'.format(self.prefix))
            f.write('exec_prefix=${prefix}\n')
            f.write('libdir={0}\n'.format(self.prefix.lib))
            f.write('includedir={0}\n'.format(self.prefix.include))
            f.write('\n')
            f.write('Name: LMDB\n')
            f.write('Description: Symas LMDB is an extraordinarily fast, '
                    'memory-efficient database.\n')
            f.write('Version: {0}\n'.format(self.spec.version))
            f.write('Cflags: -I${includedir}\n')
            f.write('Libs: -L${libdir} -llmdb\n')
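# For reference, the install_pkgconfig step above produces a pkg-config file
# along these lines; the prefix shown is a hypothetical Spack install path,
# printed with plain Python only so the generated layout is easy to see.
prefix = '/opt/spack/opt/lmdb-0.9.29'   # hypothetical; Spack picks the real one
version = '0.9.29'

lines = [
    'prefix={0}'.format(prefix),
    'exec_prefix=${prefix}',
    'libdir={0}/lib'.format(prefix),
    'includedir={0}/include'.format(prefix),
    '',
    'Name: LMDB',
    'Description: Symas LMDB is an extraordinarily fast, memory-efficient database.',
    'Version: {0}'.format(version),
    'Cflags: -I${includedir}',
    'Libs: -L${libdir} -llmdb',
]
print('\n'.join(lines))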
the-stack_106_30935
# basic functions are inspired by Tinygrad's own implementation import numpy as np from .function import Function def unbroadcast(out, in_shape): """Sum the gradients of the output in the case that broadcasting was performed during the calculation of a result. This effectively avoids explicitly splitting a broadcasting operation into several clone modules beforehand. """ # if the input is a scalar, sum over every dimension if in_shape == (1,): sum_axis = None return out.sum(axis=sum_axis).reshape(in_shape) original_in_shape = in_shape # if it's an (n,) shape vector change its shape to mimic (1, n, 1, ...) according to output shape if len(in_shape) == 1: n = in_shape[0] index = out.shape[::-1].index(n) temp_axis = [n if i == index else 1 for i in range(len(out.shape))] in_shape = temp_axis[::-1] # finally, sum the axis where broadcasting took place sum_axis = tuple([dim for dim in range(len(in_shape)) if in_shape[dim]==1 and out.shape[dim]>1]) return out.sum(axis=sum_axis).reshape(original_in_shape) # basic tensor operations class Add(Function): @staticmethod def forward(context, x1, x2): context.save_for_backward(x1.shape, x2.shape) return x1 + x2 @staticmethod def backward(context, output_grads): # y = x1 + x2 ||| dy/dx1 = dy/dx2 = 1 # the local gradient of the sum operator will be 1 for both inputs. Now just multiply the # local gradient with the incoming gradient to get the gradient of the target function # w.r.t. the inputs and keep the chain rule going x1_shape, x2_shape = context.saved_data return unbroadcast(output_grads, x1_shape), unbroadcast(output_grads, x2_shape) class Sub(Function): @staticmethod def forward(context, x1, x2): context.save_for_backward(x1.shape, x2.shape) return x1 - x2 @staticmethod def backward(context, output_grads): # y = x1 - x2 ||| dy/x1 = 1 dy/x2 = -1 x1_shape, x2_shape = context.saved_data return unbroadcast(output_grads, x1_shape), unbroadcast(-output_grads, x2_shape) class Mul(Function): @staticmethod def forward(context, x1, x2): context.save_for_backward(x1, x2) return x1 * x2 @staticmethod def backward(context, output_grads): # y = x1 * x2 ||| dy/x1 = x2 dy/x2 = x1 x1, x2 = context.saved_data return unbroadcast(x2 * output_grads, x1.shape), unbroadcast(x1 * output_grads, x2.shape) class Div(Function): @staticmethod def forward(context, x1, x2): context.save_for_backward(x1, x2) return x1 / x2 @staticmethod def backward(context, output_grads): # y = x1 / x2 ||| dy/x1 = (1x2) dy/x2 = x1 * d(1/x2)/x2 = x1 * -(1/x2**2) x1, x2 = context.saved_data return (unbroadcast((1/x2) * output_grads, x1.shape), unbroadcast(x1 * (-1/x2**2) * output_grads, x2.shape)) class Pow(Function): @staticmethod def forward(context, x, y): context.save_for_backward(x, y) return x ** y @staticmethod def backward(context, output_grads): x, y = context.saved_data x_non_negative = x.copy() x_non_negative[x_non_negative < 0] = np.nan return (unbroadcast(y * (x**(y-1.0)) * output_grads, x.shape), unbroadcast((x**y) * np.log(x_non_negative) * output_grads, y.shape)) class Matmul(Function): @staticmethod def forward(context, x, y): context.save_for_backward(x, y) return x @ y @staticmethod def backward(context, output_grads): x, y = context.saved_data x_shape = x.shape y_shape = y.shape if len(x.shape) == 1: x = np.expand_dims(x, axis=0) if len(y.shape) == 1: y = np.expand_dims(y, axis=1) if len(output_grads.shape) == 1: output_grads = np.expand_dims(output_grads, axis=0) x_grad = unbroadcast([email protected], x.shape) y_grad = unbroadcast(x.T@output_grads, y.shape) return 
x_grad.reshape(x_shape), y_grad.reshape(y_shape) class Mean(Function): @staticmethod def forward(context, array): div_coeff = 1 / array.size context.save_for_backward(div_coeff, array.shape) pre_sum = array * div_coeff return pre_sum.sum() @staticmethod def backward(context, output_grads): div_coeff, input_shape = context.saved_data weighted_grads = output_grads * div_coeff return np.ones(input_shape) * weighted_grads class Reshape(Function): @staticmethod def forward(context, array, shape): shape = shape.astype('int') context.save_for_backward(array.shape) return array.reshape(shape) @staticmethod def backward(context, output_grads): input_shape, = context.saved_data return output_grads.reshape(input_shape) class Transpose(Function): @staticmethod def forward(context, array, order): if np.isnan(order).all(): order = None else: order = order.astype('int') context.save_for_backward(order) return array.transpose(order) @staticmethod def backward(context, output_grads): order, = context.saved_data if order is None: return output_grads.transpose() un_transpose = [order[idx] for idx in order] return output_grads.transpose(un_transpose) class SumSelf(Function): @staticmethod def forward(context, array, axis=None, keepdims=False): context.save_for_backward(axis, array.shape, keepdims) return array.sum(axis, keepdims=keepdims, dtype='float32') @staticmethod def backward(context, output_grads): # the dimensions of the output grad are the ones from the original input # regardless of keepdims axis, input_shape, keepdims = context.saved_data if not keepdims and input_shape != (1,): output_grads = np.expand_dims(output_grads, axis) grads = np.zeros(input_shape, dtype='float32') + output_grads return grads.reshape(input_shape) class Exp(Function): @staticmethod def forward(context, array): result = np.exp(array) context.save_for_backward(result) return result @staticmethod def backward(context, output_grads): forward_result, = context.saved_data return forward_result * output_grads class Log(Function): @staticmethod def forward(context, array): context.save_for_backward(array) return np.log(array) @staticmethod def backward(context, output_grads): EPSILON = 1e-9 forward_input, = context.saved_data return (1/(forward_input+EPSILON)) * output_grads # nn functions class ReLU(Function): @staticmethod def forward(context, array): mask = array > 0 context.save_for_backward(mask) return array * mask @staticmethod def backward(context, output_grads): mask, = context.saved_data return output_grads * mask class SoftMax(Function): @staticmethod def forward(context, array): # if there are problems, look into the numerically stable implementation input_shape = array.shape n_dims = len(input_shape) # treat all vectors as column vectors if n_dims == 1: array = np.expand_dims(array, axis=0) n_dims = 2 exp = np.exp(array) result = exp / np.sum(exp, axis=(n_dims-1), keepdims=True) context.save_for_backward(input_shape, result) return result.reshape(input_shape) @staticmethod def backward(context, output_grads): input_shape, forward_result = context.saved_data # great further explanation from https://stackoverflow.com/a/36280783 # compute J[i, j] for i != j resulting in -softmax_i * softmax_j jacobian = -forward_result[..., np.newaxis] * forward_result[:, np.newaxis, :] # get the diagonal indices (i=j) and fill them with softmax_i * (1 - softmax_i) idx_y, idx_x = np.diag_indices_from(jacobian[0]) jacobian[:, idx_y, idx_x] = forward_result * (1. - forward_result) # reduce the jacobian down to a gradient w.r.t. 
the inputs: # a column of the jacobian tells you how every output is affected by a particular input, # output_grads tell you how every output affects the target function, # so by multiplying output_grads by column j and summing the result # you will get the total influence of input j over all the outputs output_grads = output_grads[..., np.newaxis, :] return (output_grads @ jacobian).reshape(input_shape) class CrossEntropy(Function): @staticmethod def forward(context, in_tensor, targets): # targets will be used as indices so integers are required targets = targets.astype('int') context.save_for_backward(in_tensor, targets) # select only the inputs that will affect the loss n = in_tensor.shape[0] inputs_in_target_indices = in_tensor[range(n), targets] # apply cross-entropy loss to those inputs and return the average log_result = -np.log(inputs_in_target_indices) return np.sum(log_result) * (1/n) @staticmethod def backward(context, output_grads): EPSILON = 1e-9 in_tensor, targets = context.saved_data n = in_tensor.shape[0] # every local gradient will be 0, except the ones corresponding to the inputs # used to calculate the forward pass, those will have regular -1/x grad local_grads = np.zeros_like(in_tensor) local_grads[range(n), targets] = -1/(in_tensor[range(n), targets]+EPSILON) local_grads *= (1/n) return local_grads * output_grads # nn module operations class Linear(Function): # i = out_features # j = in_features # m = number of examples in the batch @staticmethod def forward(context, array, weight, bias): context.save_for_backward(array, weight, bias.shape) return array @ weight.T + bias # Y[mxi] = X[mxj] @ W.T[jxi] + b[1xi] @staticmethod def backward(context, output_grads): array, weight, bias_shape = context.saved_data dX = output_grads @ weight # dJ/dX[mxj] = dJ/dY[mxi] @ W[ixj] dW = output_grads.T @ array # dJ/dW[ixj] = dJ/dY.T[ixm] @ X[mxj] db = unbroadcast(output_grads, bias_shape) # dJ/db[ix1] = unbroadcast(dJ/db, b.shape) return dX, dW, db class NaiveConv2d(Function): @staticmethod def forward(context, array, weight, stride, padding): pass @staticmethod def backward(context, output_grads): pass class Conv2d(Function): @staticmethod def forward(context, array, weight, stride, padding): pass @staticmethod def backward(context, output_grads): pass
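# The comment in SoftMax.backward above compresses a fair amount of calculus.
# The small self-contained NumPy check below verifies the Jacobian formula it
# uses against finite differences, independently of the Function/context
# machinery in this file.
import numpy as np


def softmax_check(x):
    e = np.exp(x)
    return e / e.sum()


x = np.array([0.2, -1.3, 0.7, 0.05])
s = softmax_check(x)

# Analytic Jacobian: J[i, j] = s[i] * (delta_ij - s[j]); it is symmetric here.
jacobian = np.diag(s) - np.outer(s, s)

# Numerical Jacobian via central differences.
eps = 1e-6
numerical = np.zeros((x.size, x.size))
for j in range(x.size):
    bump = np.zeros_like(x)
    bump[j] = eps
    numerical[:, j] = (softmax_check(x + bump) - softmax_check(x - bump)) / (2 * eps)

assert np.allclose(jacobian, numerical, atol=1e-6)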
the-stack_106_30936
def special_for(iterable):
    # Hand-rolled equivalent of a for loop, built on the iterator protocol.
    iterator = iter(iterable)
    while True:
        try:
            next(iterator)
        except StopIteration:
            break


class MyGen:
    current = 0

    def __init__(self, first, last):
        self.first = first
        self.last = last
        # Using the class attribute as the cursor lets __next__ start the
        # iteration from the first number we were given.
        MyGen.current = self.first

    def __iter__(self):
        return self

    def __next__(self):
        if MyGen.current < self.last:
            num = MyGen.current
            MyGen.current += 1
            return num
        raise StopIteration


gen = MyGen(1, 100)
for i in gen:
    print(i)
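# For comparison, the 1..99 sequence MyGen produces can also be written as a
# generator function. Note that MyGen keeps its cursor in the class attribute
# MyGen.current, so two live instances would share state; the generator keeps
# the cursor local. This is only a side-by-side illustration.
def my_gen(first, last):
    current = first
    while current < last:
        yield current
        current += 1


assert list(my_gen(1, 100)) == list(range(1, 100))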
the-stack_106_30937
import torch

import utility
import data
import model
import loss
from option import args
from trainer import Trainer

torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)


def main():
    global model
    global loss
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            model = model.Model(args, checkpoint)
            loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, model, loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()


if __name__ == '__main__':
    main()
the-stack_106_30938
import logging import os from airflow.models import BaseOperator from airflow.exceptions import AirflowException from airflow.operators.bash_operator import BashOperator from airflow.utils.decorators import apply_defaults from subprocess import check_output, CalledProcessError class JavaOperator(BaseOperator): """ :param maven_coordinate: Metadata of the jar in Maven you want to download :type maven_coordinate: list :param repositories: Where the jar is located in Maven. :type repositories: list :param main_class: The location of user-defined main class you want to execute :type main_class: string :type op_args: list :param op_args: a list of positional arguments that will get unpacked in the order you provided when executing your jar :param op_kwargs: a dictionary of keyword arguments that will get unpacked in an arbitary but deterministic order when executing your jar after the positional arguments :type op_kwargs: dict :param fetch_mode: Mode to use when downloading the jars By default, it will fetch things missing from cache. Here is a list of modes available <offline|update-changing|update|missing|force> :type fetch_mode: string :param cache_directory: The location of where the jars are cached. By default, they are located at your user folder under '.coursier/cache/v1' :type cache_directory: string :param extra_coursier_params: a list of strings that can be args or kwargs :type extra_coursier_params: list """ template_fields = ('main_class', 'repositories', 'op_args', 'op_kwargs', 'fetch_mode', 'cache_directory', 'extra_coursier_params') ui_color = '#F5C957' @apply_defaults def __init__(self, maven_coordinates, repositories=None, main_class=None, op_args=None, op_kwargs=None, fetch_mode='missing', cache_directory=os.path.join(os.path.expanduser('~'), '.coursier/cache/v1'), extra_coursier_params=None, *args, **kwargs): super(JavaOperator, self).__init__(*args, **kwargs) self.maven_coordinates = maven_coordinates self.repositories = repositories self.main_class = main_class self.op_args = op_args or [] self.op_kwargs = op_kwargs or {} self.fetch_mode = fetch_mode self.cache_directory = cache_directory self.extra_coursier_params = extra_coursier_params or [] def run_coursier(self): """ Builds a bash command to download all transitive dependencies of a maven coordinate. It can return java -cp compatible output for executing. This is done through coursier. Find more information at: https://github.com/coursier/coursier. """ cmd = ['coursier', 'fetch'] cmd.extend(self.extra_coursier_params) cmd.extend(['--mode', self.fetch_mode]) cmd.extend(['--cache', self.cache_directory]) if self.main_class: cmd.extend(['--classpath']) for repo in self.repositories: cmd.extend(['--repository', repo]) for coordinate in self.maven_coordinates: if not isinstance(coordinate, MavenCoordinate): raise AirflowException('Please use the MavenCoordinate class. Current type: {0}, current value: {1}' .format(type(coordinate), coordinate)) cmd.extend([coordinate.get_coordinate(), '--artifact-type', coordinate.packaging]) logging.info('Executing %s', cmd) try: return check_output(cmd) except CalledProcessError: raise AirflowException("Failed to fetch requested maven coordinates") def execute(self, context): """ Runs the coursier command. Returns jvm exit code instead of main's exit code. When an exception is caught, the exit code returned will be 0 meaning a false positive result. It is best to include `System.exit()` with some non-zero value when an exception happens if that is the intended flow. 
""" output = self.run_coursier() if self.main_class: cmd = ['java', '-cp', '"'+output+'"', self.main_class] cmd.extend(self.op_args) for k, v in self.op_kwargs.items(): cmd.extend([k, v]) bash_command = ' '.join(cmd) BashOperator(bash_command=bash_command, task_id='inner_bash').execute(self) else: logging.info(output) class MavenCoordinate: """ For accuracy, copy/paste information direct from Nexus. Generally, leave packaging as default. Find more information here: https://maven.apache.org/pom.html#Maven_Coordinates :param group_id: Unique identification amongst an organization or a project :type group_id: string :param artifact_id: The name that the project is known by :type artifact_id: string :param version: Version of project :type version: string :param packaging: Type of project :type packaging: string """ def __init__(self, group_id, artifact_id, version, packaging='jar,bundle' ): self.group_id = group_id self.artifact_id = artifact_id self.version = version self.packaging = packaging def __repr__(self): return self.get_coordinate() def get_coordinate(self): return ':'.join([self.group_id, self.artifact_id, self.version])
the-stack_106_30940
""" EDIT NOTICE File edited from original in https://github.com/castorini/hedwig by Bernal Jimenez Gutierrez ([email protected]) in May 2020 """ import csv import sys import numpy as np from nltk.tokenize import sent_tokenize import torch class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id class InputFeaturesText(object): """A single set of features of data.""" def __init__(self, input_ids, label_id): self.input_ids = input_ids self.label_id = label_id class BertProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """ Gets a collection of `InputExample`s for the train set :param data_dir: :return: """ raise NotImplementedError() def get_dev_examples(self, data_dir): """ Gets a collection of `InputExample`s for the dev set :param data_dir: :return: """ raise NotImplementedError() def get_test_examples(self, data_dir): """ Gets a collection of `InputExample`s for the test set :param data_dir: :return: """ raise NotImplementedError() def get_labels(self): """ Gets a list of possible labels in the dataset :return: """ raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """ Reads a Tab Separated Values (TSV) file :param input_file: :param quotechar: :return: """ import sys csv.field_size_limit(sys.maxsize) with open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(str(cell, 'utf-8') for cell in line) lines.append(line) return lines def convert_examples_to_features(examples, max_seq_length, tokenizer, print_examples=False): """ Loads a data file into a list of InputBatch objects :param examples: :param max_seq_length: :param tokenizer: :param print_examples: :return: a list of InputBatch objects """ features = [] for (ex_index, example) in enumerate(examples): #Replacing new lines with [SEP] tokens text_a = example.text_a.replace('\\n','[SEP]') tokens_a = tokenizer.tokenize(text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . 
[SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambigiously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = [float(x) for x in example.label] if print_examples and ex_index < 5: print("tokens: %s" % " ".join([str(x) for x in tokens])) print("input_ids: %s" % " ".join([str(x) for x in input_ids])) print("input_mask: %s" % " ".join([str(x) for x in input_mask])) print("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) print("label: %s" % example.label) features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features def convert_examples_to_features_long(examples, max_seq_length, tokenizer, print_examples=False, model_type='longformer'): """ Loads a data file into a list of InputBatch objects :param examples: :param max_seq_length: :param tokenizer: :param print_examples: :return: a list of InputBatch objects """ features = [] encoded_out = tokenizer.batch_encode_plus([example.text_a.replace('\\n','</s>') for example in examples], add_special_tokens=True, max_length=max_seq_length, pad_to_max_length=True, return_token_type_ids=True) input_ids = encoded_out['input_ids'] attention_masks = encoded_out['attention_mask'] segment_ids = encoded_out['token_type_ids'] for example,ids, masks, segments in zip(examples,input_ids, attention_masks, segment_ids): if model_type == 'longformer': masks[0] = 2 label_id = [float(x) for x in example.label] features.append(InputFeatures(input_ids=ids, input_mask=masks, segment_ids=segments, label_id=label_id)) return features def convert_examples_to_hierarchical_features(examples, max_seq_length, tokenizer, print_examples=False): """ Loads a data file into a list of InputBatch objects :param examples: :param max_seq_length: :param tokenizer: :param print_examples: :return: a list of InputBatch objects """ features = [] for (ex_index, example) in enumerate(examples): tokens_a = [tokenizer.tokenize(line) for line in sent_tokenize(example.text_a)] tokens_b = None if example.text_b: tokens_b = [tokenizer.tokenize(line) for line in sent_tokenize(example.text_b)] # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length # Account for [CLS], [SEP], [SEP] _truncate_seq_pair(tokens_a, tokens_b, 
max_seq_length - 3) else: # Account for [CLS] and [SEP] for i0 in range(len(tokens_a)): if len(tokens_a[i0]) > max_seq_length - 2: tokens_a[i0] = tokens_a[i0][:(max_seq_length - 2)] tokens = [["[CLS]"] + line + ["[SEP]"] for line in tokens_a] segment_ids = [[0] * len(line) for line in tokens] if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = list() for line in tokens: input_ids.append(tokenizer.convert_tokens_to_ids(line)) # Input mask has 1 for real tokens and 0 for padding tokens input_mask = [[1] * len(line_ids) for line_ids in input_ids] # Zero-pad up to the sequence length. padding = [[0] * (max_seq_length - len(line_ids)) for line_ids in input_ids] for i0 in range(len(input_ids)): input_ids[i0] += padding[i0] input_mask[i0] += padding[i0] segment_ids[i0] += padding[i0] label_id = [float(x) for x in example.label] if print_examples and ex_index < 5: print("tokens: %s" % " ".join([str(x) for x in tokens])) print("input_ids: %s" % " ".join([str(x) for x in input_ids])) print("input_mask: %s" % " ".join([str(x) for x in input_mask])) print("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) print("label: %s" % example.label) features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """ Truncates a sequence pair in place to the maximum length :param tokens_a: :param tokens_b: :param max_length: :return: """ # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop()
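# Hedged usage sketch for convert_examples_to_features above. It assumes a
# WordPiece tokenizer exposing tokenize() and convert_tokens_to_ids(), here a
# Hugging Face transformers BertTokenizer; the model name, guids, texts and
# labels are illustrative placeholders. InputExample, InputFeatures and torch
# come from this module.
def _build_feature_tensors():
    from transformers import BertTokenizer  # assumption: transformers is installed

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    examples = [
        InputExample(guid='train-0', text_a='The dog is hairy.', label=[0, 1]),
        InputExample(guid='train-1', text_a='Is this Jacksonville?', label=[1, 0]),
    ]
    features = convert_examples_to_features(
        examples, max_seq_length=32, tokenizer=tokenizer, print_examples=True)

    # stack the per-example features into tensors ready for a DataLoader
    input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
    return input_ids, input_mask, segment_ids, label_ids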
the-stack_106_30941
# Copyright 2011 OpenStack Foundation. # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exception related utilities. """ import logging import sys import time import traceback import six from egodocker.common.gettextutils import _LE class save_and_reraise_exception(object): """Save current exception, run some code and then re-raise. In some cases the exception context can be cleared, resulting in None being attempted to be re-raised after an exception handler is run. This can happen when eventlet switches greenthreads or when running an exception handler, code raises and catches an exception. In both cases the exception context will be cleared. To work around this, we save the exception state, run handler code, and then re-raise the original exception. If another exception occurs, the saved exception is logged and the new exception is re-raised. In some cases the caller may not want to re-raise the exception, and for those circumstances this context provides a reraise flag that can be used to suppress the exception. For example:: except Exception: with save_and_reraise_exception() as ctxt: decide_if_need_reraise() if not should_be_reraised: ctxt.reraise = False If another exception occurs and reraise flag is False, the saved exception will not be logged. If the caller wants to raise new exception during exception handling he/she sets reraise to False initially with an ability to set it back to True if needed:: except Exception: with save_and_reraise_exception(reraise=False) as ctxt: [if statements to determine whether to raise a new exception] # Not raising a new exception, so reraise ctxt.reraise = True """ def __init__(self, reraise=True): self.reraise = reraise def __enter__(self): self.type_, self.value, self.tb, = sys.exc_info() return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: if self.reraise: logging.error(_LE('Original exception being dropped: %s'), traceback.format_exception(self.type_, self.value, self.tb)) return False if self.reraise: six.reraise(self.type_, self.value, self.tb) def forever_retry_uncaught_exceptions(infunc): def inner_func(*args, **kwargs): last_log_time = 0 last_exc_message = None exc_count = 0 while True: try: return infunc(*args, **kwargs) except Exception as exc: this_exc_message = six.u(str(exc)) if this_exc_message == last_exc_message: exc_count += 1 else: exc_count = 1 # Do not log any more frequently than once a minute unless # the exception message changes cur_time = int(time.time()) if (cur_time - last_log_time > 60 or this_exc_message != last_exc_message): logging.exception( _LE('Unexpected exception occurred %d time(s)... ' 'retrying.') % exc_count) last_log_time = cur_time last_exc_message = this_exc_message exc_count = 0 # This should be a very rare event. In case it isn't, do # a sleep. time.sleep(1) return inner_func
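# Hedged usage sketch for save_and_reraise_exception above. The RuntimeError
# and the recoverable flag are placeholders for real handler logic. The context
# manager snapshots sys.exc_info() on entry, so it must be entered inside an
# except block; setting ctxt.reraise = False suppresses re-raising the saved
# exception.
def _handle_with_cleanup(recoverable=True):
    try:
        raise RuntimeError('boom')  # stand-in for the real failing operation
    except Exception:
        with save_and_reraise_exception() as ctxt:
            # run handler code; if the error turns out to be recoverable,
            # flip the flag so the original exception is swallowed
            if recoverable:
                ctxt.reraise = False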
the-stack_106_30942
#!/usr/bin/env python3 """ This module handles all interaction with NCBI's BLAST API, including launching new remote searches, polling for completion status, and retrieval of results. """ import re import time import logging import requests from cblaster import helpers from cblaster.classes import Hit LOG = logging.getLogger(__name__) BLAST_API_URL = "https://blast.ncbi.nlm.nih.gov/Blast.cgi" def start( sequences=None, query_file=None, query_ids=None, database="nr", program="blastp", megablast=False, filtering="F", evalue=10, nucl_reward=None, nucl_penalty=None, gap_costs="11 1", matrix="BLOSUM62", hitlist_size=5000, threshold=11, word_size=6, comp_based_stats=2, entrez_query=None, ): """Launch a remote BLAST search using NCBI BLAST API. Note that the HITLIST_SIZE, ALIGNMENTS and DESCRIPTIONS parameters must all be set together in order to mimic max_target_seqs behaviour. Usage guidelines: 1. Don't contact server more than once every 10 seconds 2. Don't poll for a single RID more than once a minute 3. Use URL parameter email/tool 4. Run scripts weekends or 9pm-5am Eastern time on weekdays if >50 searches For a full description of the parameters, see: 1. `BLAST API documentation<https://ncbi.github.io/blast-cloud/dev/api.html>` 2. `BLAST documentation <https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=BlastHelp>` Parameters: sequences (dict): Query sequence dict generated by helpers.get_sequences() query_file (str): Path to a query FASTA file query_ids (list): Collection of NCBI sequence identifiers database (str): Target NCBI BLAST database program (str): BLAST variant to run megablast (bool): Enable megaBLAST option (only with BLASTn) filtering (str): Low complexity filtering evalue (float): E-value cutoff nucl_reward (int): Reward for matching bases (only with BLASTN/megaBLAST) nucl_penalty (int): Penalty for mismatched bases (only with BLASTN/megaBLAST) gap_costs (str): Gap existence and extension costs matrix (str): Scoring matrix name hitlist_size (int): Number of database sequences to keep threshold (int): Neighbouring score for initial words word_size (int): Size of word for initial matches comp_based_stats (int): Composition based statistics algorithm entrez_query (str): NCBI Entrez search term for pre-filtering the BLAST database Returns: rid (str): Request Identifier (RID) assigned to the search rtoe (int): Request Time Of Execution (RTOE), estimated run time of the search """ if not sequences: sequences = helpers.get_sequences( query_file=query_file, query_ids=query_ids ) query = helpers.sequences_to_fasta(sequences) parameters = { "CMD": "PUT", "DATABASE": database, "PROGRAM": program, "FILTER": filtering, "EXPECT": evalue, "GAPCOSTS": gap_costs, "MATRIX": matrix, "HITLIST_SIZE": hitlist_size, "ALIGNMENTS": hitlist_size, "DESCRIPTIONS": hitlist_size, "WORD_SIZE": word_size, "COMPOSITION_BASED_STATISTICS": comp_based_stats, } if entrez_query: parameters["ENTREZ_QUERY"] = entrez_query if program == "blastn": if megablast: parameters["MEGABLAST"] = "on" if nucl_reward: parameters["NUCL_REWARD"] = nucl_reward if nucl_penalty: parameters["NUCL_PENALTY"] = nucl_penalty else: # Does not apply to blastn parameters["THRESHOLD"] = threshold response = requests.post(BLAST_API_URL, files={"QUERY": query}, params=parameters) LOG.debug("Search parameters: %s", parameters) LOG.debug("Search URL: %s", response.url) rid, rtoe = re.findall(r"(?:RID|RTOE) = (.+?)[\n\s]", response.text) return rid, int(rtoe) def check(rid): """Check completion status of a BLAST search 
given a Request Identifier (RID). Arguments: rid (str): NCBI BLAST search request identifier (RID) Returns: bool: Search has completed successfully and hits were reported Raises: ValueError: Search has failed. This is caused either by program error (in which case, NCBI requests you submit an error report with the RID) or expiration of the RID (only stored for 24 hours). ValueError: Search has completed successfully, but no hits were reported. """ parameters = {"CMD": "Get", "RID": rid, "FORMAT_OBJECT": "SearchInfo"} response = requests.get(BLAST_API_URL, params=parameters) LOG.debug(response.url) search = re.findall(r"(?:Status|ThereAreHits)=(.+?)[\n\s]", response.text) if len(search) == 1: status = search[0] if status in ("UNKNOWN", "FAILED"): raise ValueError(f"Search {rid} failed (status={status})") if status == "WAITING": return False if search == ["READY", "yes"]: return True raise ValueError("Search completed, but found no hits") def retrieve(rid, hitlist_size=5000): """Retrieve BLAST results corresponding to a given Request Identifier (RID). Arguments: rid (str): NCBI BLAST search request identifiers (RID) hitlist_size (int): Total number of hits to retrieve Returns: list: BLAST search results split by newline, with HTML parts removed """ parameters = { "CMD": "Get", "RID": rid, "FORMAT_TYPE": "Tabular", "FORMAT_OBJECT": "Alignment", "HITLIST_SIZE": hitlist_size, "ALIGNMENTS": hitlist_size, "DESCRIPTIONS": hitlist_size, "NCBI_GI": "F", } LOG.debug(parameters) response = requests.get(BLAST_API_URL, params=parameters) LOG.debug(response.url) # Remove HTML junk and info lines # BLAST results are stored inside <PRE></PRE> tags return [ line for line in re.search("<PRE>(.+?)</PRE>", response.text, re.DOTALL) .group(1) .split("\n") if line and not line.startswith("#") ] def poll(rid, delay=60, max_retries=-1): """Poll BLAST API with given Request Identifier (RID) until results are returned. As per NCBI usage guidelines, this function will only poll once per minute; this is calculated each time such that wait is constant (i.e. accounts for differing response time on the status check). Arguments: rid (str): NCBI BLAST search request identifier (RID) delay (int): Total delay (seconds) between polling max_retries (int): Maximum number of polling attempts (-1 for unlimited) Returns: list: BLAST search results split by newline """ if delay < 60: raise ValueError("Delay must be at least 60s") retries, previous = 0, 0 while True: current = time.time() wait = previous - current + delay if wait > 0: time.sleep(wait) previous = current + wait else: previous = current LOG.info("Checking search status...") if check(rid): LOG.info("Search has completed successfully!") return if max_retries > 0 and retries == max_retries: raise ValueError(f"Reached maximum retry limit {max_retries}") retries += 1 def parse( handle, sequences=None, query_file=None, query_ids=None, max_evalue=0.01, min_identity=30, min_coverage=50, ): """Parse Tabular results from remote BLAST search performed via API. Since the API provides no option for returning query coverage, which is a metric we want to use for filtering hits, query sequences must be passed to this function so that their lengths can be compared to the alignment length. Arguments: handle (list): File handle (or file handle-like) object corresponding to BLAST results. 
Note that this function expects an iterable of tab-delimited lines and performs no validation/error checking sequences (dict): Query sequences query_file (str): Path to FASTA format query file query_ids (list): NCBI sequence identifiers max_evalue (float): Maximum e-value min_identity (float): Minimum percent identity min_coverage (float): Minimum percent query coverage Returns: list: Hit objects corresponding to criteria passing BLAST hits """ if not sequences: sequences = helpers.get_sequences(query_file, query_ids) hits = [] for line in handle: qid, sid, pident, *_, qstart, qend, _, _, evalue, score, _ = line.split("\t") # Manually calculate query coverage coverage = (int(qend) - int(qstart) + 1) / len(sequences[qid]) * 100 hit = Hit( query=qid, subject=sid, identity=pident, coverage=coverage, evalue=evalue, bitscore=score, ) if ( float(hit.identity) > min_identity and float(hit.coverage) > min_coverage and hit.evalue < max_evalue ): hits.append(hit) if len(hits) == 0: raise ValueError("No results found") return hits def search( rid=None, sequences=None, query_file=None, query_ids=None, min_identity=0.3, min_coverage=0.5, max_evalue=0.01, blast_file=None, **kwargs, ): """Perform a remote BLAST search via the NCBI's BLAST API. This function launches a new search given a query FASTA file or list of valid NCBI identifiers, polls the API to check the completion status of the search, then retrieves and parses the results. It is also possible to call other BLAST variants using the program argument. Arguments: rid (str): NCBI BLAST search request identifier (RID) sequences (dict): Query sequences query_file (str): Path to FASTA format query file query_ids (list): NCBI sequence identifiers min_identity (float): Minimum percent identity min_coverage (float): Minimum percent query coverage max_evalue (float): Maximum e-value blast_file (TextIOWrapper): file blast results are written to Returns: list: Hit objects corresponding to criteria passing BLAST hits """ if not rid: LOG.info("Launching new search") # Start search, get request identifier (RID) and execution ETA (RTOE) rid, rtoe = start( sequences=sequences, query_file=query_file, query_ids=query_ids, **kwargs ) LOG.info("Request Identifier (RID): %s", rid) LOG.info("Request Time Of Execution (RTOE): %ss", rtoe) # Wait the RTOE (sec) before bothering to poll time.sleep(rtoe) LOG.info("Polling NCBI for completion status") poll(rid) LOG.info("Retrieving results for search %s", rid) results = retrieve(rid) if blast_file: LOG.info("Writing BLAST hit table to %s", blast_file.name) blast = "\n".join(results) blast_file.write(blast) # Parse results for hits LOG.info("Parsing results...") results = parse( results, sequences=sequences, query_file=query_file, query_ids=query_ids, max_evalue=max_evalue, min_identity=min_identity, min_coverage=min_coverage, ) return rid, results
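# Hedged usage sketch for search() above. The accession is a made-up
# placeholder, and a real invocation blocks while poll() waits for NCBI
# (at least one minute between status checks, per this module's limits).
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    rid, hits = search(
        query_ids=['ABC12345.1'],  # hypothetical NCBI protein accession
        database='nr',
        program='blastp',
        min_identity=30,           # percent identity cutoff applied in parse()
        min_coverage=50,           # percent query coverage cutoff
        max_evalue=0.01,
    )
    for hit in hits:
        print(hit.identity, hit.coverage, hit.evalue)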
the-stack_106_30943
from typing import Set, Callable, Dict, List, Any

from structures import Point


class Vertex:
    __position: Point
    __children: Set['Vertex']
    __parents: Set['Vertex']
    __connectivity: Dict['Vertex', 'Vertex']
    __aux: Dict[Any, Any]

    def __init__(self, position: Point, store_connectivity: bool = False) -> None:
        self.__position = position
        self.__children = set()
        self.__parents = set()
        self.__connectivity = {self: self}
        self.__store_connectivity = store_connectivity
        self.__cost = None
        self.__aux = {}

    def __add_connectivity(self, vertex_added: 'Vertex'):
        if vertex_added is self:
            return

        # update connectivity of this vertex
        self.__connectivity[vertex_added] = vertex_added
        connectivity_keys = self.connectivity.keys()
        vertex_added_connectivity_keys = vertex_added.connectivity.keys()
        new_connection_keys = vertex_added_connectivity_keys - connectivity_keys

        for vertex_key in new_connection_keys:
            if vertex_key:
                self.__connectivity[vertex_key] = vertex_added

        # get connectivity of all connections correct
        connectivity_keys = self.connectivity.keys()
        vertex_added.connectivity[self] = self

        for vertex_to_update in self.__connectivity:
            vertex_connectivity_keys = vertex_to_update.connectivity.keys()
            new_connection_keys = connectivity_keys - vertex_connectivity_keys

            for vertex_key in new_connection_keys:
                if self in vertex_to_update.connectivity:
                    new_vertex_path_target = self
                else:
                    new_vertex_path_target = vertex_added
                new_vertex_path_step = vertex_to_update.connectivity[new_vertex_path_target]
                vertex_to_update.connectivity[vertex_key] = new_vertex_path_step

    # Adding #

    def add_child(self, child: 'Vertex') -> None:
        self.__children.add(child)
        if self.__store_connectivity:
            self.__add_connectivity(child)

    def add_parent(self, parent: 'Vertex') -> None:
        self.__parents.add(parent)
        if self.__store_connectivity:
            self.__add_connectivity(parent)

    # Removing #

    def remove_child(self, child: 'Vertex') -> None:
        self.__children.remove(child)
        # ToDo: removing connectivity is not implemented, as it is not trivial

    def remove_parent(self, parent: 'Vertex') -> None:
        self.__parents.remove(parent)
        # removing connectivity is not implemented, as it is not trivial

    # Setting #

    def set_child(self, child: 'Vertex'):
        self.__children.clear()  # ToDo: removing connectivity is not implemented, as it is not trivial
        self.__children.add(child)
        if self.__store_connectivity:
            child.__add_connectivity(self)
            self.__add_connectivity(child)

    def set_parent(self, parent: 'Vertex'):
        self.__parents.clear()  # ToDo: removing connectivity is not implemented, as it is not trivial
        self.__parents.add(parent)
        if self.__store_connectivity:
            parent.__add_connectivity(self)
            self.__add_connectivity(parent)

    # Visiting #

    def visit_children(self, f: Callable[['Vertex'], bool]) -> None:
        # post-order: visit all children first, then apply f to this vertex
        for child in self.__children:
            child.visit_children(f)

        if not f(self):
            return

    def visit_parents(self, f: Callable[['Vertex'], bool]) -> None:
        # post-order: visit all parents first, then apply f to this vertex
        for parent in self.__parents:
            parent.visit_parents(f)

        if not f(self):
            return

    # Properties #

    @property
    def cost(self) -> float:
        return self.__cost

    @property
    def position(self) -> Point:
        return self.__position

    @property
    def children(self) -> Set['Vertex']:
        return self.__children

    @property
    def parents(self) -> Set['Vertex']:
        return self.__parents

    @property
    def connectivity(self) -> Dict['Vertex', 'Vertex']:
        return self.__connectivity

    @property
    def aux(self) -> Dict[Any, Any]:
        return self.__aux

    # Setters #

    @cost.setter
    def cost(self, val: float):
        self.__cost = val
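# Hedged usage sketch for Vertex above. Point(0, 0) assumes structures.Point
# accepts x/y coordinates; the demo wires a parent/child pair both ways and
# walks the tree with visit_children (the callback returns True to keep going).
def _vertex_demo():
    root = Vertex(Point(0, 0), store_connectivity=True)
    child = Vertex(Point(1, 0), store_connectivity=True)

    root.add_child(child)
    child.add_parent(root)

    # connectivity maps each reachable vertex to the next hop towards it
    assert child in root.connectivity

    visited = []
    root.visit_children(lambda v: visited.append(v.position) or True)
    return visited  # child's position first (post-order), then root's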
the-stack_106_30944
__version__ = '2021.12'
__url__ = 'https://github.com/Paradoxis/Flask-Unsign-Wordlist'
__author__ = 'Luke Paris (Paradoxis)'

import os, sys

from flask_unsign_wordlist.exceptions import NoSuchWordlist


def get(name: str = 'all') -> str:
    """
    Get the path to a flask-unsign wordlist
    :param name: Wordlist name ('.txt' is implied)
    :return: Absolute path to a wordlist
    """
    cwd = os.path.dirname(__file__)
    path = os.path.join(cwd, 'wordlists', name + '.txt')

    if not os.path.isfile(path):
        raise NoSuchWordlist(f'No known wordlist found with name: {name!r}')

    return path


def main():
    """CLI entry point"""
    try:
        path = get(sys.argv[1] if len(sys.argv) != 1 else 'all')
        sys.stdout.write(path)
    except NoSuchWordlist as e:
        print(str(e), file=sys.stderr)
        return 1


if __name__ == '__main__':
    exit(main() or 0)
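# Hedged usage sketch for get() above: resolve the bundled 'all' wordlist and
# load its entries; reading the file line-by-line here is illustrative only.
def _load_default_wordlist():
    path = get('all')
    with open(path, encoding='utf-8', errors='ignore') as handle:
        secrets = [line.rstrip('\n') for line in handle if line.strip()]
    print(f'{len(secrets)} candidate secrets loaded from {path}')
    return secrets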
the-stack_106_30945
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ from collections import defaultdict, namedtuple from collections.abc import Mapping import re import warnings import inspect import json import types from functools import partial, reduce import sys from itertools import zip_longest, chain from types import TracebackType from typing import ( Any, Callable, Dict, Generic, IO, Iterable, Iterator, List, Optional, Sequence, Tuple, Type, Union, cast, no_type_check, TYPE_CHECKING, ) import datetime import numpy as np import pandas as pd from pandas.api.types import ( # type: ignore[attr-defined] is_bool_dtype, is_list_like, is_dict_like, is_scalar, ) from pandas.tseries.frequencies import DateOffset, to_offset if TYPE_CHECKING: from pandas.io.formats.style import Styler from pandas.core.dtypes.common import infer_dtype_from_object from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.inference import is_sequence from pyspark import StorageLevel from pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F from pyspark.sql.functions import pandas_udf from pyspark.sql.types import ( ArrayType, BooleanType, DataType, DoubleType, NumericType, Row, StringType, StructField, StructType, DecimalType, TimestampType, TimestampNTZType, ) from pyspark.sql.window import Window from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm. 
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, Label, Name, Scalar, T from pyspark.pandas.accessors import PandasOnSparkFrameMethods from pyspark.pandas.config import option_context, get_option from pyspark.pandas.spark import functions as SF from pyspark.pandas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods from pyspark.pandas.utils import ( align_diff_frames, column_labels_level, combine_frames, default_session, is_name_like_tuple, is_name_like_value, is_testing, name_like_string, same_anchor, scol_for, validate_arguments_and_invoke_function, validate_axis, validate_bool_kwarg, validate_how, validate_mode, verify_temp_column_name, log_advice, ) from pyspark.pandas.generic import Frame from pyspark.pandas.internal import ( InternalField, InternalFrame, HIDDEN_COLUMNS, NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT, SPARK_DEFAULT_INDEX_NAME, SPARK_DEFAULT_SERIES_NAME, SPARK_INDEX_NAME_PATTERN, ) from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame from pyspark.pandas.ml import corr from pyspark.pandas.typedef.typehints import ( as_spark_type, infer_return_type, pandas_on_spark_type, spark_type_to_pandas_dtype, DataFrameType, SeriesType, ScalarType, create_tuple_for_frame_type, ) from pyspark.pandas.plot import PandasOnSparkPlotAccessor if TYPE_CHECKING: from pyspark.sql._typing import OptionalPrimitiveType from pyspark.pandas.groupby import DataFrameGroupBy from pyspark.pandas.resample import DataFrameResampler from pyspark.pandas.indexes import Index from pyspark.pandas.series import Series # These regular expression patterns are complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ and _repr_html_ in DataFrame. # Two patterns basically seek the footer string from Pandas' REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$" ) _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`. Parameters ---------- other : scalar Any single data Returns ------- DataFrame Result of the arithmetic operation. Examples -------- >>> df = ps.DataFrame({{'angles': [0, 3, 4], ... 'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle'], ... columns=['angles', 'degrees']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. Also reverse version. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(df) angles degrees circle 0 720 triangle 6 360 rectangle 8 720 >>> df + df + df angles degrees circle 0 1080 triangle 9 540 rectangle 12 1080 >>> df.radd(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide and true divide by constant with reverse version. 
>>> df / 10 angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 >>> df.truediv(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rtruediv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract by constant with reverse version. >>> df - 1 angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.sub(1) angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.rsub(1) angles degrees circle 1 -359 triangle -2 -179 rectangle -3 -359 Multiply by constant with reverse version. >>> df * 1 angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.mul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.rmul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Floor Divide by constant with reverse version. >>> df // 10 angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.floordiv(10) angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.rfloordiv(10) # doctest: +SKIP angles degrees circle inf 0.0 triangle 3.0 0.0 rectangle 2.0 0.0 Mod by constant with reverse version. >>> df % 2 angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.mod(2) angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.rmod(2) angles degrees circle NaN 2 triangle 2.0 2 rectangle 2.0 2 Power by constant with reverse version. >>> df ** 2 angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.pow(2) angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.rpow(2) angles degrees circle 1.0 2.348543e+108 triangle 8.0 1.532496e+54 rectangle 16.0 2.348543e+108 """ class DataFrame(Frame, Generic[T]): """ pandas-on-Spark DataFrame that corresponds to pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _internal: an internal immutable Frame to manage metadata. :type _internal: InternalFrame Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \ or pandas-on-Spark Series Dict can contain Series, arrays, constants, or list-like objects Note that if `data` is a pandas DataFrame, a Spark DataFrame, and a pandas-on-Spark Series, other arguments should not be used. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ps.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from pandas DataFrame >>> df = ps.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2'])) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. 
>>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ps.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ps.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__( # type: ignore[no-untyped-def] self, data=None, index=None, columns=None, dtype=None, copy=False ): if isinstance(data, InternalFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = data elif isinstance(data, SparkDataFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = InternalFrame(spark_frame=data, index_spark_columns=None) elif isinstance(data, ps.Series): assert index is None assert columns is None assert dtype is None assert not copy data = data.to_frame() internal = data._internal else: if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy pdf = data else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) internal = InternalFrame.from_pandas(pdf) object.__setattr__(self, "_internal_frame", internal) @property def _pssers(self) -> Dict[Label, "Series"]: """Return a dict of column label -> Series which anchors `self`.""" from pyspark.pandas.series import Series if not hasattr(self, "_psseries"): object.__setattr__( self, "_psseries", {label: Series(data=self, index=label) for label in self._internal.column_labels}, ) else: psseries = cast(Dict[Label, Series], self._psseries) # type: ignore[has-type] assert len(self._internal.column_labels) == len(psseries), ( len(self._internal.column_labels), len(psseries), ) if any(self is not psser._psdf for psser in psseries.values()): # Refresh the dict to contain only Series anchoring `self`. self._psseries = { label: ( psseries[label] if self is psseries[label]._psdf else Series(data=self, index=label) ) for label in self._internal.column_labels } return self._psseries @property def _internal(self) -> InternalFrame: return cast(InternalFrame, self._internal_frame) # type: ignore[has-type] def _update_internal_frame( self, internal: InternalFrame, requires_same_anchor: bool = True ) -> None: """ Update InternalFrame with the given one. If the column_label is changed or the new InternalFrame is not the same `anchor`, disconnect the link to the Series and create a new one. If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy, updating the underlying Spark DataFrame which need to combine a different Spark DataFrame. 
:param internal: the new InternalFrame :param requires_same_anchor: whether checking the same anchor """ from pyspark.pandas.series import Series if hasattr(self, "_psseries"): psseries = {} for old_label, new_label in zip_longest( self._internal.column_labels, internal.column_labels ): if old_label is not None: psser = self._pssers[old_label] renamed = old_label != new_label not_same_anchor = requires_same_anchor and not same_anchor(internal, psser) if renamed or not_same_anchor: psdf: DataFrame = DataFrame(self._internal.select_column(old_label)) psser._update_anchor(psdf) psser = None else: psser = None if new_label is not None: if psser is None: psser = Series(data=self, index=new_label) psseries[new_label] = psser self._psseries = psseries self._internal_frame = internal if hasattr(self, "_repr_pandas_cache"): del self._repr_pandas_cache @property def ndim(self) -> int: """ Return an int representing the number of array dimensions. return 2 for DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', None], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 NaN 7 8 >>> df.ndim 2 """ return 2 @property def axes(self) -> List: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def _reduce_for_stat_function( self, sfun: Callable[["Series"], Column], name: str, axis: Optional[Axis] = None, numeric_only: bool = True, skipna: bool = True, **kwargs: Any, ) -> "Series": """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. Parameters ---------- sfun : either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. axis: used only for sanity check because series only support index axis. name : original pandas API name. axis : axis to apply. 0 or 1, or 'index' or 'columns. numeric_only : bool, default True Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter currently. skipna : bool, default True Exclude NA/null values when computing the result. """ from pyspark.pandas.series import Series, first_series axis = validate_axis(axis) if axis == 0: min_count = kwargs.get("min_count", 0) exprs = [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] new_column_labels = [] for label in self._internal.column_labels: psser = self._psser_for(label) is_numeric_or_boolean = isinstance( psser.spark.data_type, (NumericType, BooleanType) ) keep_column = not numeric_only or is_numeric_or_boolean if keep_column: if not skipna and get_option("compute.eager_check") and psser.hasnans: scol = F.first(F.lit(np.nan)) else: scol = sfun(psser) if min_count > 0: scol = F.when(Frame._count_expr(psser) >= min_count, scol) exprs.append(scol.alias(name_like_string(label))) new_column_labels.append(label) if len(exprs) == 1: return Series([]) sdf = self._internal.spark_frame.select(*exprs) # The data is expected to be small so it's fine to transpose/use default index. 
with ps.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) else: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs) if len(pdf) <= limit: return Series(pser) @pandas_udf(returnType=as_spark_type(pser.dtype.type)) # type: ignore[call-overload] def calculate_columns_axis(*cols: pd.Series) -> pd.Series: return getattr(pd.concat(cols, axis=1), name)( axis=axis, numeric_only=numeric_only, **kwargs ) column_name = verify_temp_column_name( self._internal.spark_frame.select(self._internal.index_spark_columns), "__calculate_columns_axis__", ) sdf = self._internal.spark_frame.select( self._internal.index_spark_columns + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)] ) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], index_names=self._internal.index_names, index_fields=self._internal.index_fields, ) return first_series(DataFrame(internal)).rename(pser.name) def _psser_for(self, label: Label) -> "Series": """ Create Series with a proper column label. The given label must be verified to exist in `InternalFrame.column_labels`. For example, in some method, self is like: >>> self = ps.range(3) `self._psser_for(label)` can be used with `InternalFrame.column_labels`: >>> self._psser_for(self._internal.column_labels[0]) 0 0 1 1 2 2 Name: id, dtype: int64 `self._psser_for(label)` must not be used directly with user inputs. In that case, `self[label]` should be used instead, which checks the label exists or not: >>> self['id'] 0 0 1 1 2 2 Name: id, dtype: int64 """ return self._pssers[label] def _apply_series_op( self, op: Callable[["Series"], Union["Series", Column]], should_resolve: bool = False ) -> "DataFrame": applied = [] for label in self._internal.column_labels: applied.append(op(self._psser_for(label))) internal = self._internal.with_new_columns(applied) if should_resolve: internal = internal.resolved_copy return DataFrame(internal) # Arithmetic Operators def _map_series_op(self, op: str, other: Any) -> "DataFrame": from pyspark.pandas.base import IndexOpsMixin if not isinstance(other, DataFrame) and ( isinstance(other, IndexOpsMixin) or is_sequence(other) ): raise TypeError( "%s with a sequence is currently not supported; " "however, got %s." 
% (op, type(other).__name__) ) if isinstance(other, DataFrame): if self._internal.column_labels_level != other._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") if not same_anchor(self, other): # Different DataFrames def apply_op( psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label], ) -> Iterator[Tuple["Series", Label]]: for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( getattr(psdf._psser_for(this_label), op)( psdf._psser_for(that_label) ).rename(this_label), this_label, ) return align_diff_frames(apply_op, self, other, fillna=True, how="full") else: applied = [] column_labels = [] for label in self._internal.column_labels: if label in other._internal.column_labels: applied.append(getattr(self._psser_for(label), op)(other._psser_for(label))) else: applied.append( SF.lit(None) .cast(self._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) for label in other._internal.column_labels: if label not in column_labels: applied.append( SF.lit(None) .cast(other._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) internal = self._internal.with_new_columns(applied, column_labels=column_labels) return DataFrame(internal) else: return self._apply_series_op(lambda psser: getattr(psser, op)(other)) def __add__(self, other: Any) -> "DataFrame": return self._map_series_op("add", other) def __radd__(self, other: Any) -> "DataFrame": return self._map_series_op("radd", other) def __truediv__(self, other: Any) -> "DataFrame": return self._map_series_op("truediv", other) def __rtruediv__(self, other: Any) -> "DataFrame": return self._map_series_op("rtruediv", other) def __mul__(self, other: Any) -> "DataFrame": return self._map_series_op("mul", other) def __rmul__(self, other: Any) -> "DataFrame": return self._map_series_op("rmul", other) def __sub__(self, other: Any) -> "DataFrame": return self._map_series_op("sub", other) def __rsub__(self, other: Any) -> "DataFrame": return self._map_series_op("rsub", other) def __pow__(self, other: Any) -> "DataFrame": return self._map_series_op("pow", other) def __rpow__(self, other: Any) -> "DataFrame": return self._map_series_op("rpow", other) def __mod__(self, other: Any) -> "DataFrame": return self._map_series_op("mod", other) def __rmod__(self, other: Any) -> "DataFrame": return self._map_series_op("rmod", other) def __floordiv__(self, other: Any) -> "DataFrame": return self._map_series_op("floordiv", other) def __rfloordiv__(self, other: Any) -> "DataFrame": return self._map_series_op("rfloordiv", other) def __abs__(self) -> "DataFrame": return self._apply_series_op(lambda psser: abs(psser)) def __neg__(self) -> "DataFrame": return self._apply_series_op(lambda psser: -psser) def add(self, other: Any) -> "DataFrame": return self + other # create accessor for plot plot = CachedAccessor("plot", PandasOnSparkPlotAccessor) # create accessor for Spark related methods. spark = CachedAccessor("spark", SparkFrameMethods) # create accessor for pandas-on-Spark specific methods. pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkFrameMethods) # keep the name "koalas" for backward compatibility. 
koalas = CachedAccessor("koalas", PandasOnSparkFrameMethods) @no_type_check def hist(self, bins=10, **kwds): return self.plot.hist(bins, **kwds) hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__ @no_type_check def boxplot(self, **kwds): return self.plot.box(**kwds) boxplot.__doc__ = PandasOnSparkPlotAccessor.box.__doc__ @no_type_check def kde(self, bw_method=None, ind=None, **kwds): return self.plot.kde(bw_method, ind, **kwds) kde.__doc__ = PandasOnSparkPlotAccessor.kde.__doc__ add.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd" ) def radd(self, other: Any) -> "DataFrame": return other + self radd.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="other + dataframe", reverse="add" ) def div(self, other: Any) -> "DataFrame": return self / other div.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv" ) divide = div def rdiv(self, other: Any) -> "DataFrame": return other / self rdiv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div" ) def truediv(self, other: Any) -> "DataFrame": return self / other truediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv" ) def rtruediv(self, other: Any) -> "DataFrame": return other / self rtruediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv" ) def mul(self, other: Any) -> "DataFrame": return self * other mul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul" ) multiply = mul def rmul(self, other: Any) -> "DataFrame": return other * self rmul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul" ) def sub(self, other: Any) -> "DataFrame": return self - other sub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub" ) subtract = sub def rsub(self, other: Any) -> "DataFrame": return other - self rsub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub" ) def mod(self, other: Any) -> "DataFrame": return self % other mod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod" ) def rmod(self, other: Any) -> "DataFrame": return other % self rmod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod" ) def pow(self, other: Any) -> "DataFrame": return self ** other pow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow" ) def rpow(self, other: Any) -> "DataFrame": return other ** self rpow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power", op_name="**", equiv="other ** dataframe", reverse="pow" ) def floordiv(self, other: Any) -> "DataFrame": return self // other floordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv" ) def rfloordiv(self, other: Any) -> "DataFrame": return other // self rfloordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv" ) # Comparison Operators def __eq__(self, other: Any) -> "DataFrame": # type: ignore[override] return self._map_series_op("eq", other) 
def __ne__(self, other: Any) -> "DataFrame": # type: ignore[override] return self._map_series_op("ne", other) def __lt__(self, other: Any) -> "DataFrame": return self._map_series_op("lt", other) def __le__(self, other: Any) -> "DataFrame": return self._map_series_op("le", other) def __ge__(self, other: Any) -> "DataFrame": return self._map_series_op("ge", other) def __gt__(self, other: Any) -> "DataFrame": return self._map_series_op("gt", other) def eq(self, other: Any) -> "DataFrame": """ Compare if the current value is equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.eq(1) a b a True True b False False c False True d False False """ return self == other equals = eq def gt(self, other: Any) -> "DataFrame": """ Compare if the current value is greater than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.gt(2) a b a False False b False False c True False d True False """ return self > other def ge(self, other: Any) -> "DataFrame": """ Compare if the current value is greater than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ge(1) a b a True True b True False c True True d True False """ return self >= other def lt(self, other: Any) -> "DataFrame": """ Compare if the current value is less than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.lt(1) a b a False False b False False c False False d False False """ return self < other def le(self, other: Any) -> "DataFrame": """ Compare if the current value is less than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.le(2) a b a True True b True False c False True d False False """ return self <= other def ne(self, other: Any) -> "DataFrame": """ Compare if the current value is not equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ne(1) a b a False False b True True c True False d True True """ return self != other def applymap(self, func: Callable[[Any], Any]) -> "DataFrame": """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> def str_len(x) -> int: ... return len(str(x)) >>> df.applymap(str_len) 0 1 0 3 4 1 5 5 >>> def power(x) -> float: ... return x ** 2 >>> df.applymap(power) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 You can omit the type hint and let pandas-on-Spark infer its type. 
>>> df.applymap(lambda x: x ** 2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # TODO: We can implement shortcut theoretically since it creates new DataFrame # anyway and we don't have to worry about operations on different DataFrames. return self._apply_series_op(lambda psser: psser.apply(func)) # TODO: not all arguments are implemented comparing to pandas' for now. def aggregate(self, func: Union[List[str], Dict[Name, List[str]]]) -> "DataFrame": """Aggregate using one or more operations over the specified axis. Parameters ---------- func : dict or a list a dict mapping from column name (string) to aggregate functions (list of strings). If a list is given, the aggregation is performed against all columns. Returns ------- DataFrame Notes ----- `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Invoke function on DataFrame. DataFrame.transform : Only perform transforming type operations. DataFrame.groupby : Perform operations over groups. Series.aggregate : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) >>> df A B C 0 1.0 2.0 3.0 1 4.0 5.0 6.0 2 7.0 8.0 9.0 3 NaN NaN NaN Aggregate these functions over the rows. >>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index() A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index() A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN For multi-index columns: >>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) >>> df.agg(['sum', 'min'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index() X Y A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 >>> aggregated = df.agg({("X", "A") : ['sum', 'min'], ("X", "B") : ['min', 'max']}) >>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE X A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN """ from pyspark.pandas.groupby import GroupBy if isinstance(func, list): if all((isinstance(f, str) for f in func)): func = dict([(column, func) for column in self.columns]) else: raise ValueError( "If the given function is a list, it " "should only contains function names as strings." ) if not isinstance(func, dict) or not all( is_name_like_value(key) and ( isinstance(value, str) or (isinstance(value, list) and all(isinstance(v, str) for v in value)) ) for key, value in func.items() ): raise ValueError( "aggs must be a dict mapping from column name to aggregate " "functions (string or list of strings)." ) with option_context("compute.default_index_type", "distributed"): psdf: DataFrame = DataFrame(GroupBy._spark_groupby(self, func)) # The codes below basically converts: # # A B # sum min min max # 0 12.0 1.0 2.0 8.0 # # to: # A B # max NaN 8.0 # min 1.0 2.0 # sum 12.0 NaN # # Aggregated output is usually pretty much small. return psdf.stack().droplevel(0)[list(func.keys())] agg = aggregate def corr(self, method: str = "pearson") -> "DataFrame": """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : DataFrame See Also -------- Series.corr Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... 
columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between pandas-on-Spark and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. pandas-on-Spark will return an error. * pandas-on-Spark doesn't support the following argument(s). * `min_periods` argument is not supported """ return cast(DataFrame, ps.from_pandas(corr(self, method))) # TODO: add axis parameter and support more methods def corrwith( self, other: DataFrameOrSeries, drop: bool = False, method: str = "pearson" ) -> "Series": """ Compute pairwise correlation. Pairwise correlation is computed between rows or columns of DataFrame with rows or columns of Series or DataFrame. DataFrames are first aligned along both axes before computing the correlations. .. versionadded:: 3.4.0 Parameters ---------- other : DataFrame, Series Object with which to compute correlations. drop : bool, default False Drop missing indices from result. method : str, default 'pearson' Method of correlation, one of: * pearson : standard correlation coefficient Returns ------- Series Pairwise correlations. See Also -------- DataFrame.corr : Compute pairwise correlation of columns. Examples -------- >>> df1 = ps.DataFrame({ ... "A":[1, 5, 7, 8], ... "X":[5, 8, 4, 3], ... "C":[10, 4, 9, 3]}) >>> df1.corrwith(df1[["X", "C"]]) X 1.0 C 1.0 A NaN dtype: float64 >>> df2 = ps.DataFrame({ ... "A":[5, 3, 6, 4], ... "B":[11, 2, 4, 3], ... "C":[4, 3, 8, 5]}) >>> with ps.option_context("compute.ops_on_diff_frames", True): ... df1.corrwith(df2) A -0.041703 C 0.395437 X NaN B NaN dtype: float64 >>> with ps.option_context("compute.ops_on_diff_frames", True): ... 
df2.corrwith(df1.X) A -0.597614 B -0.151186 C -0.642857 dtype: float64 """ from pyspark.pandas.series import Series, first_series if (method is not None) and (method not in ["pearson"]): raise NotImplementedError("corrwith currently works only for method='pearson'") if not isinstance(other, (DataFrame, Series)): raise TypeError("unsupported type: {}".format(type(other).__name__)) right_is_series = isinstance(other, Series) if same_anchor(self, other): combined = self this = self that = other else: combined = combine_frames(self, other, how="inner") this = combined["this"] that = combined["that"] this_numeric_column_labels: List[Label] = [] for column_label in this._internal.column_labels: if isinstance(this._internal.spark_type_for(column_label), (NumericType, BooleanType)): this_numeric_column_labels.append(column_label) that_numeric_column_labels: List[Label] = [] for column_label in that._internal.column_labels: if isinstance(that._internal.spark_type_for(column_label), (NumericType, BooleanType)): that_numeric_column_labels.append(column_label) intersect_numeric_column_labels: List[Label] = [] diff_numeric_column_labels: List[Label] = [] corr_scols = [] if right_is_series: intersect_numeric_column_labels = this_numeric_column_labels that_scol = that._internal.spark_column_for(that_numeric_column_labels[0]) for numeric_column_label in intersect_numeric_column_labels: this_scol = this._internal.spark_column_for(numeric_column_label) corr_scols.append( F.corr(this_scol.cast("double"), that_scol.cast("double")).alias( name_like_string(numeric_column_label) ) ) else: for numeric_column_label in this_numeric_column_labels: if numeric_column_label in that_numeric_column_labels: intersect_numeric_column_labels.append(numeric_column_label) else: diff_numeric_column_labels.append(numeric_column_label) for numeric_column_label in that_numeric_column_labels: if numeric_column_label not in this_numeric_column_labels: diff_numeric_column_labels.append(numeric_column_label) for numeric_column_label in intersect_numeric_column_labels: this_scol = this._internal.spark_column_for(numeric_column_label) that_scol = that._internal.spark_column_for(numeric_column_label) corr_scols.append( F.corr(this_scol.cast("double"), that_scol.cast("double")).alias( name_like_string(numeric_column_label) ) ) corr_labels: List[Label] = intersect_numeric_column_labels if not drop: for numeric_column_label in diff_numeric_column_labels: corr_scols.append( SF.lit(None).cast("double").alias(name_like_string(numeric_column_label)) ) corr_labels.append(numeric_column_label) sdf = combined._internal.spark_frame.select( *[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *corr_scols ).limit( 1 ) # limit(1) to avoid returning more than 1 row when intersection is empty # The data is expected to be small so it's fine to transpose/use default index. with ps.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=corr_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) def iteritems(self) -> Iterator[Tuple[Name, "Series"]]: """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. 
Examples -------- >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ return ( (label if len(label) > 1 else label[0], self._psser_for(label)) for label in self._internal.column_labels ) def iterrows(self) -> Iterator[Tuple[Name, pd.Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : pandas.Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns internal_index_columns = self._internal.index_spark_column_names internal_data_columns = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]: k = ( row[internal_index_columns[0]] if len(internal_index_columns) == 1 else tuple(row[c] for c in internal_index_columns) ) v = [row[c] for c in internal_data_columns] return k, v for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator() ): s = pd.Series(v, index=columns, name=k) yield k, s def itertuples( self, index: bool = True, name: Optional[str] = "PandasOnSpark" ) -> Iterator[Tuple]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "PandasOnSpark" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. On python versions < 3.7 regular tuples are returned for DataFrames with a large number of columns (>254). Examples -------- >>> df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... 
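# --- Editor's usage sketch (not part of the original source): DataFrame.iterrows ---
# Hedged illustration of the caveat documented above: each yielded row is a pandas
# Series, so mixed dtypes are coerced (here the int column comes back as float).
import pyspark.pandas as ps

df = ps.DataFrame([[1, 1.5]], columns=["int", "float"])
for idx, row in df.iterrows():
    print(idx, row["int"], row.dtype)   # dtype is float64, while df['int'].dtype is int64

# To keep per-column values un-coerced (and iterate faster), prefer itertuples().
# -----------------------------------------------------------------------------------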
PandasOnSpark(Index='dog', num_legs=4, num_wings=0) PandasOnSpark(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... PandasOnSpark(num_legs=4, num_wings=0) PandasOnSpark(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ fields = list(self.columns) if index: fields.insert(0, "Index") index_spark_column_names = self._internal.index_spark_column_names data_spark_column_names = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]: k = ( row[index_spark_column_names[0]] if len(index_spark_column_names) == 1 else tuple(row[c] for c in index_spark_column_names) ) v = [row[c] for c in data_spark_column_names] return k, v can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255 if name is not None and can_return_named_tuples: itertuple = namedtuple(name, fields, rename=True) # type: ignore[misc] for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator(), ): yield itertuple._make(([k] if index else []) + list(v)) else: for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator(), ): yield tuple(([k] if index else []) + list(v)) def items(self) -> Iterator[Tuple[Name, "Series"]]: """This is an alias of ``iteritems``.""" return self.iteritems() def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None: """ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none See Also -------- read_clipboard : Read text from clipboard. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 This function also works for Series: >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... 
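# --- Editor's usage sketch (not part of the original source): DataFrame.itertuples ---
# Hedged example of the options documented above: namedtuples by default, plain tuples
# when name=None, and no leading Index field when index=False.
import pyspark.pandas as ps

df = ps.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])

for row in df.itertuples(name="Animal"):
    print(row.Index, row.num_legs, row.num_wings)

for row in df.itertuples(index=False, name=None):
    print(row)   # plain tuples such as (4, 0) and (2, 2)
# -------------------------------------------------------------------------------------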
# 6, 7 """ args = locals() psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args ) def to_html( self, buf: Optional[IO[str]] = None, columns: Optional[Sequence[Name]] = None, col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None, header: bool = True, index: bool = True, na_rep: str = "NaN", formatters: Optional[ Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]] ] = None, float_format: Optional[Callable[[float], str]] = None, sparsify: Optional[bool] = None, index_names: bool = True, justify: Optional[str] = None, max_rows: Optional[int] = None, max_cols: Optional[int] = None, show_dimensions: bool = False, decimal: str = ".", bold_rows: bool = True, classes: Optional[Union[str, list, tuple]] = None, escape: bool = True, notebook: bool = False, border: Optional[int] = None, table_id: Optional[str] = None, render_links: bool = False, ) -> Optional[str]: """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. 
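# --- Editor's note with sketch (not part of the original source) ---
# to_clipboard() above, like the to_html/to_string/to_dict/to_latex methods that follow,
# captures its arguments with locals(), collects the frame to the driver as a pandas
# DataFrame, and delegates to the matching pandas method. Since the collected data ends
# up in driver memory, a cautious pattern is to bound it explicitly first, e.g.:
import pyspark.pandas as ps

psdf = ps.range(1000)                 # stand-in for a large frame
pdf = psdf.head(100).to_pandas()      # collect only a bounded prefix to the driver
print(pdf.to_string())                # then any pandas output method can be used directly
# --------------------------------------------------------------------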
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args ) def to_string( self, buf: Optional[IO[str]] = None, columns: Optional[Sequence[Name]] = None, col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None, header: bool = True, index: bool = True, na_rep: str = "NaN", formatters: Optional[ Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]] ] = None, float_format: Optional[Callable[[float], str]] = None, sparsify: Optional[bool] = None, index_names: bool = True, justify: Optional[str] = None, max_rows: Optional[int] = None, max_cols: Optional[int] = None, show_dimensions: bool = False, decimal: str = ".", line_width: Optional[int] = None, ) -> Optional[str]: """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. 
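# --- Editor's usage sketch (not part of the original source): DataFrame.to_html ---
# Hedged example of to_html() as documented above. Passing max_rows also caps how much
# data is collected to the driver (the implementation only takes head(max_rows) then).
import pyspark.pandas as ps

df = ps.DataFrame({"col1": [1, 2, 3], "col2": [4, 5, 6]})
html = df.to_html(max_rows=2, index=False, border=0)
print(html)   # a plain <table> with at most two data rows
# ----------------------------------------------------------------------------------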
See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args ) def to_dict(self, orient: str = "dict", into: Type = dict) -> Union[List, Mapping]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. 
>>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args ) def to_latex( self, buf: Optional[IO[str]] = None, columns: Optional[List[Name]] = None, col_space: Optional[int] = None, header: bool = True, index: bool = True, na_rep: str = "NaN", formatters: Optional[ Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]] ] = None, float_format: Optional[Callable[[float], str]] = None, sparsify: Optional[bool] = None, index_names: bool = True, bold_rows: bool = False, column_format: Optional[str] = None, longtable: Optional[bool] = None, escape: Optional[bool] = None, encoding: Optional[str] = None, decimal: str = ".", multicolumn: Optional[bool] = None, multicolumn_format: Optional[str] = None, multirow: Optional[bool] = None, ) -> Optional[str]: r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. 
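# --- Editor's usage sketch (not part of the original source): DataFrame.to_dict ---
# Hedged example of the orientations documented above; everything is collected to the
# driver first, so keep the frame small.
import pyspark.pandas as ps

df = ps.DataFrame({"col1": [1, 2], "col2": [0.5, 0.75]}, index=["row1", "row2"])

print(df.to_dict())             # {'col1': {'row1': 1, 'row2': 2}, 'col2': {...}}
print(df.to_dict("records"))    # [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
print(df.to_dict("split"))      # {'index': [...], 'columns': [...], 'data': [...]}
# ----------------------------------------------------------------------------------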
encoding : str, optional A string representing the encoding to use in the output file, defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3. decimal : str, default ‘.’ Character recognized as decimal separator, e.g. ‘,’ in Europe. multicolumn : bool, default True Use multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default ‘l’ The alignment for multicolumns, similar to column_format The default will be read from the config module. multirow : bool, default False Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. Returns ------- str or None If buf is None, returns the resulting LateX format as a string. Otherwise returns None. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}, ... columns=['name', 'mask', 'weapon']) >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE \begin{tabular}{lll} \toprule name & mask & weapon \\ \midrule Raphael & red & sai \\ Donatello & purple & bo staff \\ \bottomrule \end{tabular} <BLANKLINE> """ args = locals() psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args ) # TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic # when creating arrays) def transpose(self) -> "DataFrame": """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from pyspark.pandas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ps.DataFrame({'a': range(1001)}).transpose() Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Returns ------- DataFrame The transposed DataFrame. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the coerced dtype. For instance, if int and float have to be placed in same column, it becomes float. If type coercion is not possible, it fails. Also, note that the values in index should be unique because they become unique column names. In addition, if Spark 2.3 is used, the types should always be exactly same. 
Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2']) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP >>> df1_transposed # doctest: +SKIP 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes # doctest: +SKIP 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'score': [9.5, 8], ... 'kids': [0, 0], ... 'age': [12, 22]} >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age']) >>> df2 score kids age 0 9.5 0 12 1 8.0 0 22 >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP >>> df2_transposed # doctest: +SKIP 0 1 age 12.0 22.0 kids 0.0 0.0 score 9.5 8.0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the coerced dtype: >>> df2.dtypes score float64 kids int64 age int64 dtype: object >>> df2_transposed.dtypes # doctest: +SKIP 0 float64 1 float64 dtype: object """ max_compute_count = get_option("compute.max_rows") if max_compute_count is not None: pdf = self.head(max_compute_count + 1)._to_internal_pandas() if len(pdf) > max_compute_count: raise ValueError( "Current DataFrame has more then the given limit {0} rows. " "Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' " "to retrieve to retrieve more than {0} rows. Note that, before changing the " "'compute.max_rows', this operation is considerably expensive.".format( max_compute_count ) ) return DataFrame(pdf.transpose()) # Explode the data to be pairs. # # For instance, if the current input DataFrame is as below: # # +------+------+------+------+------+ # |index1|index2|(a,x1)|(a,x2)|(b,x3)| # +------+------+------+------+------+ # | y1| z1| 1| 0| 0| # | y2| z2| 0| 50| 0| # | y3| z3| 3| 2| 1| # +------+------+------+------+------+ # # Output of `exploded_df` becomes as below: # # +-----------------+-----------------+-----------------+-----+ # | index|__index_level_0__|__index_level_1__|value| # +-----------------+-----------------+-----------------+-----+ # |{"a":["y1","z1"]}| a| x1| 1| # |{"a":["y1","z1"]}| a| x2| 0| # |{"a":["y1","z1"]}| b| x3| 0| # |{"a":["y2","z2"]}| a| x1| 0| # |{"a":["y2","z2"]}| a| x2| 50| # |{"a":["y2","z2"]}| b| x3| 0| # |{"a":["y3","z3"]}| a| x1| 3| # |{"a":["y3","z3"]}| a| x2| 2| # |{"a":["y3","z3"]}| b| x3| 1| # +-----------------+-----------------+-----------------+-----+ pairs = F.explode( F.array( *[ F.struct( *[ SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label) ], *[self._internal.spark_column_for(label).alias("value")], ) for label in self._internal.column_labels ] ) ) exploded_df = self._internal.spark_frame.withColumn("pairs", pairs).select( [ F.to_json( F.struct( F.array(*[scol for scol in self._internal.index_spark_columns]).alias("a") ) ).alias("index"), F.col("pairs.*"), ] ) # After that, executes pivot with key and its index column. # Note that index column should contain unique values since column names # should be unique. 
internal_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index") transposed_df = pivoted_df.agg(F.first(F.col("value"))) new_data_columns = list( filter(lambda x: x not in internal_index_columns, transposed_df.columns) ) column_labels = [ None if len(label) == 1 and label[0] is None else label for label in (tuple(json.loads(col)["a"]) for col in new_data_columns) ] internal = InternalFrame( spark_frame=transposed_df, index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns], index_names=self._internal.column_label_names, column_labels=column_labels, data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns], column_label_names=self._internal.index_names, ) return DataFrame(internal) T = property(transpose) def apply( self, func: Callable, axis: Axis = 0, args: Sequence[Any] = (), **kwds: Any ) -> Union["Series", "DataFrame", "Index"]: """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: when `axis` is 0 or 'index', the `func` is unable to access to the whole input series. pandas-on-Spark internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole series but of the batch internally ... # used. ... def length(s) -> int: ... return len(s) ... >>> df = ps.DataFrame({'A': range(1000)}) >>> df.apply(length, axis=0) # doctest: +SKIP 0 83 1 83 2 83 ... 10 83 11 83 dtype: int32 .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify the return type as `Series` or scalar value in ``func``, for instance, as below: >>> def square(s) -> ps.Series[np.int32]: ... return s ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. In case when axis is 1, it requires to specify `DataFrame` or scalar value with type hints as below: >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]: ... return x + 1 If the return type is specified as `DataFrame`, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. To specify the column names, you can assign them in a pandas friendly style as below: >>> def plus_one(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]: ... return x + 1 >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}) >>> def plus_one(x) -> ps.DataFrame[ ... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]: ... return x + 1 Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. 
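# --- Editor's illustration sketch (not part of the original source) ---
# A hedged, simplified rendering of the explode-then-pivot mechanism that transpose()
# uses above, written directly against the Spark DataFrame API for a single-level column
# index. The column names and tiny dataset are invented for the example.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([("r0", 1, 3), ("r1", 2, 4)], ["idx", "col1", "col2"])

# 1) Explode each row into (idx, column-name, value) pairs.
pairs = F.explode(F.array(
    F.struct(F.lit("col1").alias("name"), F.col("col1").alias("value")),
    F.struct(F.lit("col2").alias("name"), F.col("col2").alias("value")),
))
exploded = sdf.select("idx", pairs.alias("pair")).select("idx", "pair.*")

# 2) Pivot on the original index so index values become the new column names;
#    this is why transpose() requires the index values to be unique.
transposed = exploded.groupBy("name").pivot("idx").agg(F.first("value"))
transposed.show()   # rows col1/col2, columns r0/r1
# ----------------------------------------------------------------------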
Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap : For elementwise operations. DataFrame.aggregate : Only perform aggregating type operations. DataFrame.transform : Only perform transforming type operations. Series.apply : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> def sqrt(x) -> ps.Series[float]: ... return np.sqrt(x) ... >>> df.apply(sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 You can omit the type hint and let pandas-on-Spark infer its type. >>> df.apply(np.sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 When `axis` is 1 or 'columns', it applies the function for each row. >>> def summation(x) -> np.int64: ... return np.sum(x) ... >>> df.apply(summation, axis=1) 0 13 1 13 2 13 dtype: int64 Likewise, you can omit the type hint and let pandas-on-Spark infer its type. >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 >>> df.apply(max, axis=1) 0 9 1 9 2 9 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object In order to specify the types when `axis` is '1', it should use DataFrame[...] annotation. In this case, the column names are automatically generated. >>> def identify(x) -> ps.DataFrame[('index', int), [('A', np.int64), ('B', np.int64)]]: ... return x ... >>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE A B index 0 4 9 1 4 9 2 4 9 You can also specify extra arguments. >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]: ... return a + b + c ... >>> df.apply(plus_two, axis=1, args=(1,), c=3) c0 c1 0 8 13 1 8 13 2 8 13 """ from pyspark.pandas.groupby import GroupBy from pyspark.pandas.series import first_series if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." f = func # Note that the return type hint specified here affects actual return # type in Spark (e.g., infer_return_type). And, MyPy does not allow # redefinition of a function. func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731 axis = validate_axis(axis) should_return_series = False spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None should_retain_index = should_infer_schema def apply_func(pdf: pd.DataFrame) -> pd.DataFrame: pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type] if isinstance(pdf_or_pser, pd.Series): return pdf_or_pser.to_frame() else: return pdf_or_pser self_applied: DataFrame = DataFrame(self._internal.resolved_copy) column_labels: Optional[List[Label]] = None if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. log_advice( "If the type hints is not specified for `apply`, " "it is expensive to infer the data type internally." 
) limit = get_option("compute.shortcut_limit") pdf = self_applied.head(limit + 1)._to_internal_pandas() applied = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type] psser_or_psdf = ps.from_pandas(applied) if len(pdf) <= limit: return psser_or_psdf psdf = psser_or_psdf if isinstance(psser_or_psdf, ps.Series): should_return_series = True psdf = psser_or_psdf._psdf index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields] data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields] return_schema = StructType([field.struct_field for field in index_fields + data_fields]) output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=should_retain_index ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) # If schema is inferred, we can restore indexes too. internal = psdf._internal.with_new_sdf( spark_frame=sdf, index_fields=index_fields, data_fields=data_fields ) else: return_type = infer_return_type(func) require_index_axis = isinstance(return_type, SeriesType) require_column_axis = isinstance(return_type, DataFrameType) index_fields = None if require_index_axis: if axis != 0: raise TypeError( "The given function should specify a scalar or a series as its type " "hints when axis is 0 or 'index'; however, the return type " "was %s" % return_sig ) dtype = cast(SeriesType, return_type).dtype spark_type = cast(SeriesType, return_type).spark_type data_fields = [ InternalField( dtype=dtype, struct_field=StructField(name=name, dataType=spark_type) ) for name in self_applied.columns ] return_schema = StructType([field.struct_field for field in data_fields]) elif require_column_axis: if axis != 1: raise TypeError( "The given function should specify a scalar or a frame as its type " "hints when axis is 1 or 'column'; however, the return type " "was %s" % return_sig ) index_fields = cast(DataFrameType, return_type).index_fields should_retain_index = len(index_fields) > 0 data_fields = cast(DataFrameType, return_type).data_fields return_schema = cast(DataFrameType, return_type).spark_type else: # any axis is fine. 
should_return_series = True spark_type = cast(ScalarType, return_type).spark_type dtype = cast(ScalarType, return_type).dtype data_fields = [ InternalField( dtype=dtype, struct_field=StructField( name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type ), ) ] return_schema = StructType([field.struct_field for field in data_fields]) column_labels = [None] output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=should_retain_index ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) index_spark_columns = None index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None if should_retain_index: index_spark_columns = [ scol_for(sdf, index_field.struct_field.name) for index_field in index_fields ] if not any( [ SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name) for index_field in index_fields ] ): index_names = [(index_field.struct_field.name,) for index_field in index_fields] internal = InternalFrame( spark_frame=sdf, index_names=index_names, index_spark_columns=index_spark_columns, index_fields=index_fields, data_fields=data_fields, column_labels=column_labels, ) result: DataFrame = DataFrame(internal) if should_return_series: return first_series(result) else: return result def transform( self, func: Callable[..., "Series"], axis: Axis = 0, *args: Any, **kwargs: Any ) -> "DataFrame": """ Call ``func`` on self producing a Series with transformed values and that has the same length as its input. See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> ps.Series[np.int32]: ... return x ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. .. note:: the series within ``func`` is actually multiple pandas series as the segments of the whole pandas-on-Spark series; therefore, the length of each series is not guaranteed. As an example, an aggregation against each series does work as a global aggregation but an aggregation of each segment. See below: >>> def func(x) -> ps.Series[np.int32]: ... return x + sum(x) Parameters ---------- func : function Function to use for transforming the data. It must work when pandas Series is passed. axis : int, default 0 or 'index' Can only be set to 0 at the moment. *args Positional arguments to pass to func. **kwargs Keyword arguments to pass to func. Returns ------- DataFrame A DataFrame that must have the same length as self. Raises ------ Exception : If the returned DataFrame has a different length than self. See Also -------- DataFrame.aggregate : Only perform aggregating type operations. DataFrame.apply : Invoke function on DataFrame. Series.transform : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B']) >>> df A B 0 0 1 1 1 2 2 2 3 >>> def square(x) -> ps.Series[np.int32]: ... return x ** 2 >>> df.transform(square) A B 0 0 1 1 1 4 2 4 9 You can omit the type hint and let pandas-on-Spark infer its type. 
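# --- Editor's usage sketch (not part of the original source): DataFrame.apply ---
# Hedged example of the return-type hints documented above. With a hint, pandas-on-Spark
# skips the extra pass it would otherwise run to infer the schema; remember that with
# axis=0 each call only sees a batch of the column, not the whole series.
import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame([[4, 9]] * 3, columns=["A", "B"])

def sqrt(s) -> ps.Series[float]:
    return np.sqrt(s)

print(df.apply(sqrt, axis=0))

def summation(row) -> np.int64:     # scalar hint: one value per row when axis=1
    return np.sum(row)

print(df.apply(summation, axis=1))
# --------------------------------------------------------------------------------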
>>> df.transform(lambda x: x ** 2) A B 0 0 1 1 1 4 2 4 9 For multi-index columns: >>> df.columns = [('X', 'A'), ('X', 'B')] >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 4 2 4 9 >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 2 2 2 3 You can also specify extra arguments. >>> def calculation(x, y, z) -> ps.Series[int]: ... return x ** y + z >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE X A B 0 20 21 1 21 1044 2 1044 59069 """ if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." f = func # Note that the return type hint specified here affects actual return # type in Spark (e.g., infer_return_type). And, MyPy does not allow # redefinition of a function. func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731 axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. log_advice( "If the type hints is not specified for `transform`, " "it is expensive to infer the data type internally." ) limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() transformed = pdf.transform(func, axis, *args, **kwargs) # type: ignore[arg-type] psdf: DataFrame = DataFrame(transformed) if len(pdf) <= limit: return psdf applied = [] data_fields = [] for input_label, output_label in zip( self._internal.column_labels, psdf._internal.column_labels ): psser = self._psser_for(input_label) field = psdf._internal.field_for(output_label).normalize_spark_type() data_fields.append(field) return_schema = field.spark_type applied.append( psser.pandas_on_spark._transform_batch( func=lambda c: func(c, *args, **kwargs), return_type=SeriesType(field.dtype, return_schema), ) ) internal = self._internal.with_new_columns(applied, data_fields=data_fields) return DataFrame(internal) else: return self._apply_series_op( lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs) ) def pop(self, item: Name) -> "DataFrame": """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Label of column to be popped. Returns ------- Series Examples -------- >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN Also support for MultiIndex >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... 
columns=('name', 'class', 'max_speed')) >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df a b name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('a') name class 0 falcon bird 1 parrot bird 2 lion mammal 3 monkey mammal >>> df b max_speed 0 389.0 1 24.0 2 80.5 3 NaN """ result = self[item] self._update_internal_frame(self.drop(columns=item)._internal) return result # TODO: add axis parameter can work when '1' or 'columns' def xs(self, key: Name, axis: Axis = 0, level: Optional[int] = None) -> DataFrameOrSeries: """ Return cross-section from the DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : 0 or 'index', default 0 Axis to retrieve cross-section on. currently only support 0 or 'index' level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. Returns ------- DataFrame or Series Cross-section from the original DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = ps.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings locomotion walks 4 0 >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class locomotion mammal walks 4 0 """ from pyspark.pandas.series import first_series if not is_name_like_value(key): raise TypeError("'key' should be a scalar value or tuple that contains scalar values") if level is not None and is_name_like_tuple(key): raise KeyError(key) axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if not is_name_like_tuple(key): key = (key,) if len(key) > self._internal.index_level: raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(key), self._internal.index_level ) ) if level is None: level = 0 rows = [ self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level) ] internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows)) if len(key) == self._internal.index_level: psdf: DataFrame = DataFrame(internal) pdf = psdf.head(2)._to_internal_pandas() if len(pdf) == 0: raise KeyError(key) elif len(pdf) > 1: return psdf else: return 
first_series(DataFrame(pdf.transpose())) else: index_spark_columns = ( internal.index_spark_columns[:level] + internal.index_spark_columns[level + len(key) :] ) index_names = internal.index_names[:level] + internal.index_names[level + len(key) :] index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :] internal = internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, ).resolved_copy return DataFrame(internal) def between_time( self, start_time: Union[datetime.time, str], end_time: Union[datetime.time, str], include_start: bool = True, include_end: bool = True, axis: Axis = 0, ) -> "DataFrame": """ Select values between particular times of the day (example: 9:00-9:30 AM). By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. include_start : bool, default True Whether the start time needs to be included in the result. include_end : bool, default True Whether the end time needs to be included in the result. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. Returns ------- DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx) >>> psdf A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> psdf.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> psdf.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError("between_time currently only works for axis=0") if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("Index must be DatetimeIndex") psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, "__index_name__") return_types = [psdf.index.dtype] + list(psdf.dtypes) def pandas_between_time( # type: ignore[no-untyped-def] pdf, ) -> ps.DataFrame[return_types]: # type: ignore[valid-type] return pdf.between_time(start_time, end_time, include_start, include_end).reset_index() # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach a # default index, which will never be used. So use "distributed" index as a dummy to # avoid overhead. 
with option_context("compute.default_index_type", "distributed"): psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time) return DataFrame( self._internal.copy( spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:], ) ) # TODO: implement axis=1 def at_time( self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0 ) -> "DataFrame": """ Select values at particular time of day (example: 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx) >>> psdf A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> psdf.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if asof: raise NotImplementedError("'asof' argument is not supported") axis = validate_axis(axis) if axis != 0: raise NotImplementedError("at_time currently only works for axis=0") if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("Index must be DatetimeIndex") psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, "__index_name__") return_types = [psdf.index.dtype] + list(psdf.dtypes) def pandas_at_time( # type: ignore[no-untyped-def] pdf, ) -> ps.DataFrame[return_types]: # type: ignore[valid-type] return pdf.at_time(time, asof, axis).reset_index() # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach # a default index, which will never be used. So use "distributed" index as a dummy # to avoid overhead. with option_context("compute.default_index_type", "distributed"): psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time) return DataFrame( self._internal.copy( spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:], ) ) def where( self, cond: DataFrameOrSeries, other: Union[DataFrameOrSeries, Any] = np.nan, axis: Axis = None, ) -> "DataFrame": """ Replace values where the condition is False. Parameters ---------- cond : boolean DataFrame Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is False are replaced with corresponding value from other. axis : int, default None Can only be set to 0 at the moment for compatibility with pandas. 
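# --- Editor's usage sketch (not part of the original source): between_time / at_time ---
# Hedged example of the two time filters documented above; both require a DatetimeIndex
# and currently only support axis=0.
import pandas as pd
import pyspark.pandas as ps

idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
psdf = ps.DataFrame({"A": [1, 2, 3, 4]}, index=idx)

print(psdf.between_time("0:15", "0:45"))   # the 00:20 and 00:40 rows
print(psdf.between_time("0:45", "0:15"))   # swapped bounds select the complement
print(psdf.at_time("0:20"))                # rows whose time-of-day is exactly 00:20
# ---------------------------------------------------------------------------------------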
Returns ------- DataFrame Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.where(df1 > 0).sort_index() A B 0 NaN 100.0 1 1.0 200.0 2 2.0 300.0 3 3.0 400.0 4 4.0 500.0 >>> df1.where(df1 > 1, 10).sort_index() A B 0 10 100 1 10 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df1 + 100).sort_index() A B 0 100 100 1 101 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df2).sort_index() A B 0 0 100 1 -1 200 2 2 300 3 3 400 4 4 500 When the column name of cond is different from self, it treats all values are False >>> cond = ps.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0 >>> cond C D 0 True False 1 False True 2 False False 3 True False 4 False True >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN When the type of cond is Series, it just check boolean regardless of column name >>> cond = ps.Series([1, 2]) > 1 >>> cond 0 False 1 True dtype: bool >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 1.0 200.0 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> reset_option("compute.ops_on_diff_frames") """ from pyspark.pandas.series import Series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') tmp_cond_col_name = "__tmp_cond_col_{}__".format tmp_other_col_name = "__tmp_other_col_{}__".format psdf = self.copy() tmp_cond_col_names = [ tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(cond, DataFrame): cond = cond[ [ ( cond._internal.spark_column_for(label) if label in cond._internal.column_labels else SF.lit(False) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_cond_col_names) ] ] psdf[tmp_cond_col_names] = cond elif isinstance(cond, Series): cond = cond.to_frame() cond = cond[ [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names] ] psdf[tmp_cond_col_names] = cond else: raise TypeError("type of cond must be a DataFrame or Series") tmp_other_col_names = [ tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(other, DataFrame): other = other[ [ ( other._internal.spark_column_for(label) if label in other._internal.column_labels else SF.lit(np.nan) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_other_col_names) ] ] psdf[tmp_other_col_names] = other elif isinstance(other, Series): other = other.to_frame() other = other[ [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names] ] psdf[tmp_other_col_names] = other else: for label in self._internal.column_labels: psdf[tmp_other_col_name(name_like_string(label))] = other # above logic make spark dataframe looks like below: # +-----------------+---+---+------------------+-------------------+------------------+--... # |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__... # +-----------------+---+---+------------------+-------------------+------------------+--... # | 0| 0|100| true| 0| false| ... # | 1| 1|200| false| -1| false| ... # | 3| 3|400| true| -3| false| ... # | 2| 2|300| false| -2| true| ... # | 4| 4|500| false| -4| false| ... 
# +-----------------+---+---+------------------+-------------------+------------------+--... data_spark_columns = [] for label in self._internal.column_labels: data_spark_columns.append( F.when( psdf[tmp_cond_col_name(name_like_string(label))].spark.column, psdf._internal.spark_column_for(label), ) .otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column) .alias(psdf._internal.spark_column_name_for(label)) ) return DataFrame( psdf._internal.with_new_columns( data_spark_columns, column_labels=self._internal.column_labels # TODO: dtypes? ) ) def mask( self, cond: DataFrameOrSeries, other: Union[DataFrameOrSeries, Any] = np.nan ) -> "DataFrame": """ Replace values where the condition is True. Parameters ---------- cond : boolean DataFrame Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is True are replaced with corresponding value from other. Returns ------- DataFrame Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.mask(df1 > 0).sort_index() A B 0 0.0 NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> df1.mask(df1 > 1, 10).sort_index() A B 0 0 10 1 1 10 2 10 10 3 10 10 4 10 10 >>> df1.mask(df1 > 1, df1 + 100).sort_index() A B 0 0 200 1 1 300 2 102 400 3 103 500 4 104 600 >>> df1.mask(df1 > 1, df2).sort_index() A B 0 0 -100 1 1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> reset_option("compute.ops_on_diff_frames") """ from pyspark.pandas.series import Series if not isinstance(cond, (DataFrame, Series)): raise TypeError("type of cond must be a DataFrame or Series") cond_inversed = cond._apply_series_op(lambda psser: ~psser) return self.where(cond_inversed, other) @property def index(self) -> "Index": """The index (row labels) Column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index """ from pyspark.pandas.indexes.base import Index return Index._new_instance(self) @property def empty(self) -> bool: """ Returns true if the current DataFrame is empty. Otherwise, returns false. Examples -------- >>> ps.range(10).empty False >>> ps.range(0).empty True >>> ps.DataFrame({}, index=list('abc')).empty True """ return ( len(self._internal.column_labels) == 0 or self._internal.resolved_copy.spark_frame.rdd.isEmpty() ) @property def style(self) -> "Styler": """ Property returning a Styler object containing methods for building a styled HTML representation for the DataFrame. .. note:: currently it collects top 1000 rows and return its pandas `pandas.io.formats.style.Styler` instance. Examples -------- >>> ps.range(1001).style # doctest: +SKIP <pandas.io.formats.style.Styler object at ...> """ max_results = get_option("compute.max_rows") pdf = self.head(max_results + 1)._to_internal_pandas() if len(pdf) > max_results: warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning) return pdf.head(max_results).style def set_index( self, keys: Union[Name, List[Name]], drop: bool = True, append: bool = False, inplace: bool = False, ) -> Optional["DataFrame"]: """Set the DataFrame index (row labels) using one or more existing columns. 
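# --- Editor's usage sketch (not part of the original source): where() vs. mask() ---
# Hedged illustration of the relationship visible in the code above: mask(cond, other)
# inverts the condition and delegates to where(), so for non-null data the two calls
# below give the same result.
import pyspark.pandas as ps

df = ps.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})

with ps.option_context("compute.ops_on_diff_frames", True):
    masked = df.mask(df > 1, 10).sort_index()
    via_where = df.where(df <= 1, 10).sort_index()
    print(masked)
    print(via_where)   # identical: every value greater than 1 replaced by 10
# -----------------------------------------------------------------------------------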
Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ps.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 """ inplace = validate_bool_kwarg(inplace, "inplace") key_list: List[Label] if is_name_like_tuple(keys): key_list = [cast(Label, keys)] elif is_name_like_value(keys): key_list = [(keys,)] else: key_list = [key if is_name_like_tuple(key) else (key,) for key in keys] columns = set(self._internal.column_labels) for key in key_list: if key not in columns: raise KeyError(name_like_string(key)) if drop: column_labels = [ label for label in self._internal.column_labels if label not in key_list ] else: column_labels = self._internal.column_labels if append: index_spark_columns = self._internal.index_spark_columns + [ self._internal.spark_column_for(label) for label in key_list ] index_names = self._internal.index_names + key_list index_fields = self._internal.index_fields + [ self._internal.field_for(label) for label in key_list ] else: index_spark_columns = [self._internal.spark_column_for(label) for label in key_list] index_names = key_list index_fields = [self._internal.field_for(label) for label in key_list] internal = self._internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels], data_fields=[self._internal.field_for(label) for label in column_labels], ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def reset_index( self, level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None, drop: bool = False, inplace: bool = False, col_level: int = 0, col_fill: str = "", ) -> Optional["DataFrame"]: """Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. 
Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. Examples -------- >>> df = ps.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = ps.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df # doctest: +NORMALIZE_WHITESPACE speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, ... 
col_fill='genus') # doctest: +NORMALIZE_WHITESPACE genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") multi_index = self._internal.index_level > 1 def rename(index: int) -> Label: if multi_index: return ("level_{}".format(index),) else: if ("index",) not in self._internal.column_labels: return ("index",) else: return ("level_{}".format(index),) if level is None: new_column_labels = [ name if name is not None else rename(i) for i, name in enumerate(self._internal.index_names) ] new_data_spark_columns = [ scol.alias(name_like_string(label)) for scol, label in zip(self._internal.index_spark_columns, new_column_labels) ] new_data_fields = self._internal.index_fields index_spark_columns = [] index_names = [] index_fields = [] else: if is_list_like(level): level = list(cast(Sequence[Union[int, Name]], level)) if isinstance(level, int) or is_name_like_tuple(level): level_list = [cast(Union[int, Label], level)] elif is_name_like_value(level): level_list = [(level,)] else: level_list = [ lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,) for lvl in level ] if all(isinstance(lvl, int) for lvl in level_list): int_level_list = cast(List[int], level_list) for lev in int_level_list: if lev >= self._internal.index_level: raise IndexError( "Too many levels: Index has only {} level, not {}".format( self._internal.index_level, lev + 1 ) ) idx = int_level_list elif all(is_name_like_tuple(lev) for lev in level_list): idx = [] for label in cast(List[Label], level_list): try: i = self._internal.index_names.index(label) idx.append(i) except ValueError: if multi_index: raise KeyError("Level unknown not found") else: raise KeyError( "Level unknown must be same as name ({})".format( name_like_string(self._internal.index_names[0]) ) ) else: raise ValueError("Level should be all int or all string.") idx.sort() new_column_labels = [] new_data_spark_columns = [] new_data_fields = [] index_spark_columns = self._internal.index_spark_columns.copy() index_names = self._internal.index_names.copy() index_fields = self._internal.index_fields.copy() for i in idx[::-1]: name = index_names.pop(i) new_column_labels.insert(0, name if name is not None else rename(i)) scol = index_spark_columns.pop(i) new_data_spark_columns.insert(0, scol.alias(name_like_string(name))) new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name))) if drop: new_data_spark_columns = [] new_column_labels = [] new_data_fields = [] for label in new_column_labels: if label in self._internal.column_labels: raise ValueError("cannot insert {}, already exists".format(name_like_string(label))) if self._internal.column_labels_level > 1: column_depth = len(self._internal.column_labels[0]) if col_level >= column_depth: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( column_depth, col_level + 1 ) ) if any(col_level + len(label) > column_depth for label in new_column_labels): raise ValueError("Item must have length equal to number of levels.") new_column_labels = [ tuple( ([col_fill] * col_level) + list(label) + ([col_fill] * (column_depth - (len(label) + col_level))) ) for label in new_column_labels ] internal = self._internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=new_column_labels + self._internal.column_labels, data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns, 
data_fields=new_data_fields + self._internal.data_fields, ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def isnull(self) -> "DataFrame": """ Detects missing values for items in the current Dataframe. Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. See Also -------- DataFrame.notnull Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ return self._apply_series_op(lambda psser: psser.isnull()) isna = isnull def notnull(self) -> "DataFrame": """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- DataFrame.isnull Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ return self._apply_series_op(lambda psser: psser.notnull()) notna = notnull def insert( self, loc: int, column: Name, value: Union[Scalar, "Series", Iterable], allow_duplicates: bool = False, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : int, Series, or array-like allow_duplicates : bool, optional Examples -------- >>> psdf = ps.DataFrame([1, 2, 3]) >>> psdf.sort_index() 0 0 1 1 2 2 3 >>> psdf.insert(0, 'x', 4) >>> psdf.sort_index() x 0 0 4 1 1 4 2 2 4 3 >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> psdf.insert(1, 'y', [5, 6, 7]) >>> psdf.sort_index() x y 0 0 4 5 1 1 4 6 2 2 4 7 3 >>> psdf.insert(2, 'z', ps.Series([8, 9, 10])) >>> psdf.sort_index() x y z 0 0 4 5 8 1 1 4 6 9 2 2 4 7 10 3 >>> reset_option("compute.ops_on_diff_frames") """ if not isinstance(loc, int): raise TypeError("loc must be int") assert 0 <= loc <= len(self.columns) assert allow_duplicates is False if not is_name_like_value(column): raise TypeError( '"column" should be a scalar value or tuple that contains scalar values' ) # TODO(SPARK-37723): Support tuple for non-MultiIndex column name. if is_name_like_tuple(column): if self._internal.column_labels_level > 1: if len(column) != len(self.columns.levels): # type: ignore[attr-defined] # To be consistent with pandas raise ValueError('"column" must have length equal to number of column levels.') else: raise NotImplementedError( "Assigning column name as tuple is only supported for MultiIndex columns " "for now." 
) if column in self.columns: raise ValueError("cannot insert %s, already exists" % str(column)) psdf = self.copy() psdf[column] = value columns = psdf.columns[:-1].insert(loc, psdf.columns[-1]) psdf = psdf[columns] self._update_internal_frame(psdf._internal) # TODO: add frep and axis parameter def shift(self, periods: int = 1, fill_value: Optional[Any] = None) -> "DataFrame": """ Shift DataFrame by desired number of periods. .. note:: the current implementation of shift uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of self. For numeric data, np.nan is used. Returns ------- Copy of input DataFrame, shifted. Examples -------- >>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45], ... 'Col2': [13, 23, 18, 33, 48], ... 'Col3': [17, 27, 22, 37, 52]}, ... columns=['Col1', 'Col2', 'Col3']) >>> df.shift(periods=3) Col1 Col2 Col3 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 10.0 13.0 17.0 4 20.0 23.0 27.0 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 0 0 0 0 1 0 0 0 2 0 0 0 3 10 13 17 4 20 23 27 """ return self._apply_series_op( lambda psser: psser._shift(periods, fill_value), should_resolve=True ) # TODO: axis should support 1 or 'columns' either at this moment def diff(self, periods: int = 1, axis: Axis = 0) -> "DataFrame": """ First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). .. note:: the current implementation of diff uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : int, default 0 or 'index' Can only be set to 0 at the moment. Returns ------- diffed : DataFrame Examples -------- >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') return self._apply_series_op(lambda psser: psser._diff(periods), should_resolve=True) # TODO: axis should support 1 or 'columns' either at this moment def nunique( self, axis: Axis = 0, dropna: bool = True, approx: bool = False, rsd: float = 0.05, ) -> "Series": """ Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- axis : int, default 0 or 'index' Can only be set to 0 at the moment. 
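# NOTE (illustrative sketch, not part of this module): as the notes on `shift`
# and `diff` above say, both are built on Spark window functions without a
# partition specification, so all rows are pulled into a single partition.
# Conceptually they boil down to F.lag over an ordered window; the real code
# orders by an internal natural-order column, while this sketch orders by an
# explicit "idx" column for simplicity:

from pyspark.sql import SparkSession, Window, functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([(0, 10), (1, 20), (2, 15), (3, 30)], ["idx", "Col1"])

# No partitionBy here, mirroring the single-partition caveat above.
w = Window.orderBy("idx")
shifted = sdf.withColumn("Col1_shifted", F.lag("Col1", 1).over(w))
diffed = shifted.withColumn("Col1_diff", F.col("Col1") - F.col("Col1_shifted"))
diffed.show()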
dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and return the exact number of unique. If True, it uses the HyperLogLog approximate algorithm, which is significantly faster for large amount of data. Note: This parameter is specific to pandas-on-Spark and is not found in pandas. rsd: float, default 0.05 Maximum estimation error allowed in the HyperLogLog algorithm. Note: Just like ``approx`` this parameter is specific to pandas-on-Spark. Returns ------- The number of unique values per column as a pandas-on-Spark Series. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(dropna=False) A 3 B 2 dtype: int64 On big data, we recommend using the approximate algorithm to speed up this function. The result will be very close to the exact unique count. >>> df.nunique(approx=True) A 3 B 1 dtype: int64 """ from pyspark.pandas.series import first_series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select( [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + [ self._psser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels ] ) # The data is expected to be small so it's fine to transpose/use default index. with ps.option_context("compute.max_rows", 1): internal = self._internal.copy( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], index_names=[None], index_fields=[None], data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], data_fields=None, ) return first_series(DataFrame(internal).transpose()) def round(self, decimals: Union[int, Dict[Name, int], "Series"] = 0) -> "DataFrame": """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. .. note:: If `decimals` is a Series, it is expected to be small, as all the data is loaded into the driver's memory. Returns ------- DataFrame See Also -------- Series.round Examples -------- >>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076], ... 'B':[0.992815, 0.645646, 0.149370], ... 'C':[0.173891, 0.577595, 0.491027]}, ... columns=['A', 'B', 'C'], ... 
index=['first', 'second', 'third']) >>> df A B C first 0.028208 0.992815 0.173891 second 0.038683 0.645646 0.577595 third 0.877076 0.149370 0.491027 >>> df.round(2) A B C first 0.03 0.99 0.17 second 0.04 0.65 0.58 third 0.88 0.15 0.49 >>> df.round({'A': 1, 'C': 2}) A B C first 0.0 0.992815 0.17 second 0.0 0.645646 0.58 third 0.9 0.149370 0.49 >>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C']) >>> df.round(decimals) A B C first 0.0 1.0 0.17 second 0.0 1.0 0.58 third 0.9 0.0 0.49 """ if isinstance(decimals, ps.Series): decimals_dict = { k if isinstance(k, tuple) else (k,): v for k, v in decimals._to_internal_pandas().items() } elif isinstance(decimals, dict): decimals_dict = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()} elif isinstance(decimals, int): decimals_dict = {k: decimals for k in self._internal.column_labels} else: raise TypeError("decimals must be an integer, a dict-like or a Series") def op(psser: ps.Series) -> Union[ps.Series, Column]: label = psser._column_label if label in decimals_dict: return F.round(psser.spark.column, decimals_dict[label]) else: return psser return self._apply_series_op(op) def _mark_duplicates( self, subset: Optional[Union[Name, List[Name]]] = None, keep: Union[bool, str] = "first", ) -> Tuple[SparkDataFrame, str]: if subset is None: subset_list = self._internal.column_labels else: if is_name_like_tuple(subset): subset_list = [cast(Label, subset)] elif is_name_like_value(subset): subset_list = [(subset,)] else: subset_list = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset] diff = set(subset_list).difference(set(self._internal.column_labels)) if len(diff) > 0: raise KeyError(", ".join([name_like_string(d) for d in diff])) group_cols = [self._internal.spark_column_name_for(label) for label in subset_list] sdf = self._internal.resolved_copy.spark_frame column = verify_temp_column_name(sdf, "__duplicated__") if keep == "first" or keep == "last": if keep == "first": ord_func = F.asc else: ord_func = F.desc window = ( Window.partitionBy(*group_cols) .orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME)) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) sdf = sdf.withColumn(column, F.row_number().over(window) > 1) elif not keep: window = Window.partitionBy(*group_cols).rowsBetween( Window.unboundedPreceding, Window.unboundedFollowing ) sdf = sdf.withColumn(column, F.count("*").over(window) > 1) else: raise ValueError("'keep' only supports 'first', 'last' and False") return sdf, column def duplicated( self, subset: Optional[Union[Name, List[Name]]] = None, keep: Union[bool, str] = "first", ) -> "Series": """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : Series Examples -------- >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]}, ... columns = ['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 1 1 1 2 1 1 1 3 3 4 5 >>> df.duplicated().sort_index() 0 False 1 True 2 True 3 False dtype: bool Mark duplicates as ``True`` except for the last occurrence. 
>>> df.duplicated(keep='last').sort_index() 0 True 1 True 2 False 3 False dtype: bool Mark all duplicates as ``True``. >>> df.duplicated(keep=False).sort_index() 0 True 1 True 2 True 3 False dtype: bool """ from pyspark.pandas.series import first_series sdf, column = self._mark_duplicates(subset, keep) sdf = sdf.select( self._internal.index_spark_columns + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)] ) return first_series( DataFrame( InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], index_names=self._internal.index_names, index_fields=self._internal.index_fields, column_labels=[None], data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)], ) ) ) # TODO: support other as DataFrame or array-like def dot(self, other: "Series") -> "Series": """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series It can also be called using ``self @ other`` in Python >= 3.5. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from pyspark.pandas.config import option_context >>> with option_context( ... 'compute.max_rows', 1000, "compute.ops_on_diff_frames", True ... ): # doctest: +NORMALIZE_WHITESPACE ... psdf = ps.DataFrame({'a': range(1001)}) ... psser = ps.Series([2], index=['a']) ... psdf.dot(psser) Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Parameters ---------- other : Series The other object to compute the matrix product with. Returns ------- Series Return the matrix product between self and other as a Series. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> psser = ps.Series([1, 1, 2, 1]) >>> psdf.dot(psser) 0 -4 1 5 dtype: int64 Note how shuffling of the objects does not change the result. >>> psser2 = psser.reindex([1, 0, 2, 3]) >>> psdf.dot(psser2) 0 -4 1 5 dtype: int64 >>> psdf @ psser2 0 -4 1 5 dtype: int64 >>> reset_option("compute.ops_on_diff_frames") """ if not isinstance(other, ps.Series): raise TypeError("Unsupported type {}".format(type(other).__name__)) else: return cast(ps.Series, other.dot(self.transpose())).rename(None) def __matmul__(self, other: "Series") -> "Series": """ Matrix multiplication using binary `@` operator in Python>=3.5. 
""" return self.dot(other) def to_table( self, name: str, format: Optional[str] = None, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: Any, ) -> None: if index_col is None: log_advice( "If `index_col` is not specified for `to_table`, " "the existing index is lost when converting to table." ) mode = validate_mode(mode) return self.spark.to_table(name, format, mode, partition_cols, index_col, **options) to_table.__doc__ = SparkFrameMethods.to_table.__doc__ def to_delta( self, path: str, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: "OptionalPrimitiveType", ) -> None: """ Write the DataFrame out as a Delta Lake table. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Delta Lake. See Also -------- read_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 Create a new Delta Lake table, partitioned by one column: >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') # doctest: +SKIP Partitioned by two columns: >>> df.to_delta('%s/to_delta/bar' % path, ... partition_cols=['date', 'country']) # doctest: +SKIP Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta: >>> df.to_delta('%s/to_delta/bar' % path, ... mode='overwrite', replaceWhere='date >= "2012-01-01"') # doctest: +SKIP """ if index_col is None: log_advice( "If `index_col` is not specified for `to_delta`, " "the existing index is lost when converting to Delta." ) if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore[assignment] mode = validate_mode(mode) self.spark.to_spark_io( path=path, mode=mode, format="delta", partition_cols=partition_cols, index_col=index_col, **options, ) def to_parquet( self, path: str, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, compression: Optional[str] = None, index_col: Optional[Union[str, List[str]]] = None, **options: Any, ) -> None: """ Write the DataFrame out as a Parquet file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. 
- 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'} Compression codec to use when saving to file. If None is set, it uses the value specified in `spark.sql.parquet.compression.codec`. index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_parquet DataFrame.to_delta DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date') >>> df.to_parquet( ... '%s/to_parquet/foo.parquet' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country']) """ if index_col is None: log_advice( "If `index_col` is not specified for `to_parquet`, " "the existing index is lost when converting to Parquet." ) if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") mode = validate_mode(mode) builder = self.to_spark(index_col=index_col).write.mode(mode) if partition_cols is not None: builder.partitionBy(partition_cols) if compression is not None: builder.option("compression", compression) builder.options(**options).format("parquet").save(path) def to_orc( self, path: str, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: "OptionalPrimitiveType", ) -> None: """ Write the DataFrame out as a ORC file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_orc DataFrame.to_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date') >>> df.to_orc( ... '%s/to_orc/foo.orc' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country']) """ if index_col is None: log_advice( "If `index_col` is not specified for `to_orc`, " "the existing index is lost when converting to ORC." ) if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore[assignment] mode = validate_mode(mode) self.spark.to_spark_io( path=path, mode=mode, format="orc", partition_cols=partition_cols, index_col=index_col, **options, ) def to_spark_io( self, path: Optional[str] = None, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: "OptionalPrimitiveType", ) -> None: """An alias for :func:`DataFrame.spark.to_spark_io`. See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`. .. deprecated:: 3.2.0 Use :func:`DataFrame.spark.to_spark_io` instead. """ warnings.warn("Deprecated in 3.2, Use DataFrame.spark.to_spark_io instead.", FutureWarning) return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options) to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__ def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame: if index_col is None: log_advice( "If `index_col` is not specified for `to_spark`, " "the existing index is lost when converting to Spark DataFrame." ) return self._to_spark(index_col) to_spark.__doc__ = SparkFrameMethods.__doc__ def _to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame: """ Same as `to_spark()`, without issueing the advice log when `index_col` is not specified for internal usage. """ return self.spark.frame(index_col) def to_pandas(self) -> pd.DataFrame: """ Return a pandas DataFrame. .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ log_advice( "`to_pandas` loads all data into the driver's memory. " "It should only be used if the resulting pandas DataFrame is expected to be small." ) return self._to_pandas() def _to_pandas(self) -> pd.DataFrame: """ Same as `to_pandas()`, without issueing the advice log for internal usage. """ return self._internal.to_pandas_frame.copy() def assign(self, **kwargs: Any) -> "DataFrame": """ Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable, Series or Index} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas-on-Spark doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. 
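# NOTE (illustrative sketch, not part of this module): the writers above
# (`to_table`, `to_delta`, `to_parquet`, `to_orc`, `to_spark`) all log advice
# that the pandas-on-Spark index is lost unless `index_col` is passed. One way
# to keep it across a Spark round trip, assuming Spark >= 3.2 where
# `pyspark.sql.DataFrame.pandas_api` is available:

import pyspark.pandas as ps

psdf = ps.DataFrame({"a": [1, 2, 3]}, index=["x", "y", "z"])

# Materialize the index as a regular column named "idx" in the Spark frame.
sdf = psdf.to_spark(index_col="idx")

# Restore it as the index when converting back to pandas-on-Spark.
restored = sdf.pandas_api(index_col="idx")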
Examples -------- >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15, ... temp_idx=df.index) >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']] temp_c temp_f temp_k temp_idx Portland 17.0 62.6 290.15 Portland Berkeley 25.0 77.0 298.15 Berkeley Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in pandas-on-Spark. In pandas-on-Spark, all items are computed first, and then assigned. """ return self._assign(kwargs) def _assign(self, kwargs: Any) -> "DataFrame": assert isinstance(kwargs, dict) from pyspark.pandas.indexes import MultiIndex from pyspark.pandas.series import IndexOpsMixin for k, v in kwargs.items(): is_invalid_assignee = ( not (isinstance(v, (IndexOpsMixin, Column)) or callable(v) or is_scalar(v)) ) or isinstance(v, MultiIndex) if is_invalid_assignee: raise TypeError( "Column assignment doesn't support type " "{0}".format(type(v).__name__) ) if callable(v): kwargs[k] = v(self) pairs = { (k if is_name_like_tuple(k) else (k,)): ( (v.spark.column, v._internal.data_fields[0]) if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex) else (v, None) if isinstance(v, Column) else (SF.lit(v), None) ) for k, v in kwargs.items() } scols = [] data_fields = [] for label in self._internal.column_labels: for i in range(len(label)): if label[: len(label) - i] in pairs: scol, field = pairs[label[: len(label) - i]] name = self._internal.spark_column_name_for(label) scol = scol.alias(name) if field is not None: field = field.copy(name=name) break else: scol = self._internal.spark_column_for(label) field = self._internal.field_for(label) scols.append(scol) data_fields.append(field) column_labels = self._internal.column_labels.copy() for label, (scol, field) in pairs.items(): if label not in set(i[: len(label)] for i in self._internal.column_labels): name = name_like_string(label) scols.append(scol.alias(name)) if field is not None: field = field.copy(name=name) data_fields.append(field) column_labels.append(label) level = self._internal.column_labels_level column_labels = [ tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels ] internal = self._internal.with_new_columns( scols, column_labels=column_labels, data_fields=data_fields ) return DataFrame(internal) @staticmethod def from_records( data: Union[np.ndarray, List[tuple], dict, pd.DataFrame], index: Union[str, list, np.ndarray] = None, exclude: list = None, columns: list = None, coerce_float: bool = False, nrows: int = None, ) -> "DataFrame": """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. 
If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- df : DataFrame Examples -------- Use dict as input >>> ps.DataFrame.from_records({'A': [1, 2, 3]}) A 0 1 1 2 2 3 Use list of tuples as input >>> ps.DataFrame.from_records([(1, 2), (3, 4)]) 0 1 0 1 2 1 3 4 Use NumPy array as input >>> ps.DataFrame.from_records(np.eye(3)) 0 1 2 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ return DataFrame( pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows) ) def to_records( self, index: bool = True, column_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None, index_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None, ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ps.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')]) """ args = locals() psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args ) def copy(self, deep: bool = True) -> "DataFrame": """ Make a copy of this object's indices and data. 
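# NOTE (illustrative sketch, not part of this module): `copy` above just
# re-wraps the current immutable internal frame, so subsequent column
# assignments on either object do not leak into the other:

import pyspark.pandas as ps

df = ps.DataFrame({"x": [1, 2]})
df_copy = df.copy()
df_copy["y"] = df_copy["x"] * 10  # only df_copy gains the new column

print(df.columns.tolist())       # ['x']
print(df_copy.columns.tolist())  # ['x', 'y']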
Parameters ---------- deep : bool, default True this parameter is not supported but just dummy parameter to match pandas. Returns ------- copy : DataFrame Examples -------- >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df_copy = df.copy() >>> df_copy x y z w 0 1 3 5 7 1 2 4 6 8 """ return DataFrame(self._internal) def dropna( self, axis: Axis = 0, how: str = "any", thresh: Optional[int] = None, subset: Optional[Union[Name, List[Name]]] = None, inplace: bool = False, ) -> Optional["DataFrame"]: """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ps.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, "inplace") if thresh is None: if how is None: raise TypeError("must specify how or thresh") elif how not in ("any", "all"): raise ValueError("invalid how option: {h}".format(h=how)) labels: Optional[List[Label]] if subset is not None: if isinstance(subset, str): labels = [(subset,)] elif isinstance(subset, tuple): labels = [subset] else: labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] else: labels = None if axis == 0: if labels is not None: invalids = [label for label in labels if label not in self._internal.column_labels] if len(invalids) > 0: raise KeyError(invalids) else: labels = self._internal.column_labels cnt = reduce( lambda x, y: x + y, [ F.when(self._psser_for(label).notna().spark.column, 1).otherwise(0) for label in labels ], SF.lit(0), ) if thresh is not None: pred = cnt >= SF.lit(int(thresh)) elif how == "any": pred = cnt == SF.lit(len(labels)) elif how == "all": pred = cnt > SF.lit(0) internal = self._internal.with_filter(pred) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) else: assert axis == 1 internal = self._internal.resolved_copy if labels is not None: if any(len(lbl) != internal.index_level for lbl in labels): raise ValueError( "The length of each subset must be the same as the index size." ) cond = reduce( lambda x, y: x | y, [ reduce( lambda x, y: x & y, [ scol == SF.lit(part) for part, scol in zip(lbl, internal.index_spark_columns) ], ) for lbl in labels ], ) internal = internal.with_filter(cond) psdf: DataFrame = DataFrame(internal) null_counts = [] for label in internal.column_labels: psser = psdf._psser_for(label) cond = psser.isnull().spark.column null_counts.append( F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label)) ) counts = internal.spark_frame.select(null_counts + [F.count("*")]).head() if thresh is not None: column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) >= int(thresh) ] elif how == "any": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) == counts[-1] ] elif how == "all": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0 ] psdf = self[column_labels] if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf # TODO: add 'limit' when value parameter exists def fillna( self, value: Optional[Union[Any, Dict[Name, Any]]] = None, method: Optional[str] = None, axis: Optional[Axis] = None, inplace: bool = False, limit: Optional[int] = None, ) -> Optional["DataFrame"]: """Fill NA/NaN values. .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : {0 or `index`} 1 and `columns` are not supported. 
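# NOTE (illustrative sketch, not part of this module): for axis=0, `dropna`
# above builds a per-row count of non-null columns with F.when(...) and then
# filters on that count (e.g. count >= thresh). A bare PySpark version of the
# same idea, assuming a local SparkSession:

from functools import reduce

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame(
    [("Alfred", None, None), ("Batman", "Batmobile", "1940-04-25")],
    ["name", "toy", "born"],
)

# Count how many of the listed columns are non-null in each row.
non_null_cnt = reduce(
    lambda x, y: x + y,
    [F.when(F.col(c).isNotNull(), 1).otherwise(0) for c in ["name", "toy", "born"]],
    F.lit(0),
)

# Keep rows with at least 2 non-null values, like dropna(thresh=2).
sdf.filter(non_null_cnt >= F.lit(2)).show()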
inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ps.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is not None: if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value).__name__) if limit is not None: raise ValueError("limit parameter for value is not support now") if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v).__name__) value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()} def op(psser: ps.Series) -> ps.Series: label = psser._column_label for k, v in value.items(): if k == label[: len(k)]: return psser._fillna( value=value[k], method=method, axis=axis, limit=limit ) else: return psser else: def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) elif method is not None: def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) else: raise ValueError("Must specify a fillna 'value' or 'method' parameter.") psdf = self._apply_series_op(op, should_resolve=(method is not None)) inplace = validate_bool_kwarg(inplace, "inplace") if inplace: self._update_internal_frame(psdf._internal, requires_same_anchor=False) return None else: return psdf def interpolate( self, method: str = "linear", limit: Optional[int] = None, limit_direction: Optional[str] = None, limit_area: Optional[str] = None, ) -> "DataFrame": if method not in ["linear"]: raise NotImplementedError("interpolate currently works only for method='linear'") if (limit is not None) and (not limit > 0): raise ValueError("limit must be > 0.") if (limit_direction is not None) and ( limit_direction not in ["forward", "backward", "both"] ): raise ValueError("invalid limit_direction: '{}'".format(limit_direction)) if (limit_area is not None) and (limit_area not in ["inside", "outside"]): raise ValueError("invalid limit_area: '{}'".format(limit_area)) numeric_col_names = [] for label in self._internal.column_labels: psser = self._psser_for(label) if 
isinstance(psser.spark.data_type, (NumericType, BooleanType)): numeric_col_names.append(psser.name) psdf = self[numeric_col_names] return psdf._apply_series_op( lambda psser: psser._interpolate( method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area ), should_resolve=True, ) def replace( self, to_replace: Optional[Union[Any, List, Tuple, Dict]] = None, value: Optional[Any] = None, inplace: bool = False, limit: Optional[int] = None, regex: bool = False, method: str = "pad", ) -> Optional["DataFrame"]: """ Returns a new DataFrame replacing a value with another value. Parameters ---------- to_replace : int, float, string, list, tuple or dict Value to be replaced. value : int, float, string, list or tuple Value to use to replace holes. The replacement value must be an int, float, or string. If value is a list or tuple, value should be of the same length with to_replace. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame Object after replacement. Examples -------- >>> df = ps.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'], ... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']}, ... columns=['name', 'weapon']) >>> df name weapon 0 Ironman Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash Scalar `to_replace` and `value` >>> df.replace('Ironman', 'War-Machine') name weapon 0 War-Machine Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash List like `to_replace` and `value` >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True) >>> df name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Mjolnir 3 Hulk Smash Dicts can be used to specify different replacement values for different existing values To use a dict in this way the value parameter should be None >>> df.replace({'Mjolnir': 'Stormbuster'}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Dict can specify that different values should be replaced in different columns The value parameter should not be None in this case >>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster') name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Nested dictionaries The value parameter should be None to use a nested dict in this way >>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash """ if method != "pad": raise NotImplementedError("replace currently works only for method='pad") if limit is not None: raise NotImplementedError("replace currently works only when limit=None") if regex is not False: raise NotImplementedError("replace currently doesn't supports regex") inplace = validate_bool_kwarg(inplace, "inplace") if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)): raise TypeError("Unsupported type {}".format(type(value).__name__)) if to_replace is not None and not isinstance( to_replace, (int, float, str, list, tuple, dict) ): raise TypeError("Unsupported type {}".format(type(to_replace).__name__)) if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)): if len(value) != len(to_replace): raise ValueError("Length of to_replace and value must be same") if isinstance(to_replace, dict) and ( value is not None or all(isinstance(i, dict) for i in to_replace.values()) ): to_replace_dict = to_replace def op(psser: ps.Series) -> ps.Series: if psser.name in to_replace_dict: return psser.replace( to_replace=to_replace_dict[psser.name], 
value=value, regex=regex ) else: return psser else: def op(psser: ps.Series) -> ps.Series: return psser.replace(to_replace=to_replace, value=value, regex=regex) psdf = self._apply_series_op(op) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame": """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- DataFrame DataFrame with the values outside the clip boundaries replaced. Examples -------- >>> ps.DataFrame({'A': [0, 2, 4]}).clip(1, 3) A 0 1 1 2 2 3 Notes ----- One difference between this implementation and pandas is that running pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ps.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original DataFrame, simply ignoring the incompatible types. """ if is_list_like(lower) or is_list_like(upper): raise TypeError( "List-like value are not supported for 'lower' and 'upper' at the " + "moment" ) if lower is None and upper is None: return self return self._apply_series_op(lambda psser: psser.clip(lower=lower, upper=upper)) def head(self, n: int = 5) -> "DataFrame": """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ if n < 0: n = len(self) + n if n <= 0: return DataFrame(self._internal.with_filter(SF.lit(False))) else: sdf = self._internal.resolved_copy.spark_frame if get_option("compute.ordered_head"): sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME) return DataFrame(self._internal.with_new_sdf(sdf.limit(n))) def last(self, offset: Union[str, DateOffset]) -> "DataFrame": """ Select final periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the last few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- DataFrame A subset of the caller. 
Raises ------ TypeError If the index is not a :class:`DatetimeIndex` Examples -------- >>> index = pd.date_range('2018-04-09', periods=4, freq='2D') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index) >>> psdf A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> psdf.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ # Check index type should be format DateTime if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert offset_ is not None from_date = cast(datetime.datetime, self.index.max()) - offset_ # type: ignore[operator] return cast(DataFrame, self.loc[from_date:]) def first(self, offset: Union[str, DateOffset]) -> "DataFrame": """ Select first periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the first 3 days. Returns ------- DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` Examples -------- >>> index = pd.date_range('2018-04-09', periods=4, freq='2D') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index) >>> psdf A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> psdf.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 observed days in the dataset, and therefore data for 2018-04-13 was not returned. """ # Check index type should be format DatetimeIndex if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert offset_ is not None to_date = cast(datetime.datetime, self.index.min()) + offset_ # type: ignore[operator] return cast(DataFrame, self.loc[:to_date]) # type: ignore[misc] def pivot_table( self, values: Optional[Union[Name, List[Name]]] = None, index: Optional[List[Name]] = None, columns: Optional[Name] = None, aggfunc: Union[str, Dict[Name, str]] = "mean", fill_value: Optional[Any] = None, ) -> "DataFrame": """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- values : column to aggregate. They should be either a list less than three or a string. index : column (string) or list of columns If an array is passed, it must be the same length as the data. The list should contain string. columns : column Columns used in the pivot operation. Only one column is supported and it should be a string. aggfunc : function (string), dict, default mean If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with. Returns ------- table : DataFrame Examples -------- >>> df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... 
"small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}, ... columns=['A', 'B', 'C', 'D', 'E']) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum') >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4.0 5 two 7.0 6 foo one 4.0 1 two NaN 6 We can also fill missing values using the `fill_value` parameter. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum', fill_value=0) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 We can also calculate multiple types of aggregations for any given value column. >>> table = df.pivot_table(values=['D'], index =['C'], ... columns="A", aggfunc={'D': 'mean'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D A bar foo C large 5.5 2.000000 small 5.5 2.333333 The next example aggregates on multiple values. >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'], ... aggfunc={'D': 'mean', 'E': 'sum'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D E A bar foo bar foo C large 5.5 2.000000 15 9 small 5.5 2.333333 17 13 """ if not is_name_like_value(columns): raise TypeError("columns should be one column name.") if not is_name_like_value(values) and not ( isinstance(values, list) and all(is_name_like_value(v) for v in values) ): raise TypeError("values should be one column or list of columns.") if not isinstance(aggfunc, str) and ( not isinstance(aggfunc, dict) or not all( is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items() ) ): raise TypeError( "aggfunc must be a dict mapping from column name " "to aggregate functions (string)." ) if isinstance(aggfunc, dict) and index is None: raise NotImplementedError( "pivot_table doesn't support aggfunc" " as dict and without index." 
) if isinstance(values, list) and index is None: raise NotImplementedError("values can't be a list without index.") if columns not in self.columns: raise ValueError("Wrong columns {}.".format(name_like_string(columns))) if not is_name_like_tuple(columns): columns = (columns,) if isinstance(values, list): values = [col if is_name_like_tuple(col) else (col,) for col in values] if not all( isinstance(self._internal.spark_type_for(col), NumericType) for col in values ): raise TypeError("values should be a numeric type.") else: values = values if is_name_like_tuple(values) else (values,) if not isinstance(self._internal.spark_type_for(values), NumericType): raise TypeError("values should be a numeric type.") if isinstance(aggfunc, str): if isinstance(values, list): agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(value), aggfunc ) ) for value in values ] else: agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(values), aggfunc ) ) ] elif isinstance(aggfunc, dict): aggfunc = { key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items() } agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value) ) for key, value in aggfunc.items() ] agg_columns = [key for key, _ in aggfunc.items()] if set(agg_columns) != set(values): raise ValueError("Columns in aggfunc must be the same as values.") sdf = self._internal.resolved_copy.spark_frame if index is None: sdf = ( sdf.groupBy() .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) elif isinstance(index, list): index = [label if is_name_like_tuple(label) else (label,) for label in index] sdf = ( sdf.groupBy([self._internal.spark_column_name_for(label) for label in index]) .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) else: raise TypeError("index should be a None or a list of columns.") if fill_value is not None and isinstance(fill_value, (int, float)): sdf = sdf.fillna(fill_value) psdf: DataFrame if index is not None: index_columns = [self._internal.spark_column_name_for(label) for label in index] index_fields = [self._internal.field_for(label) for label in index] if isinstance(values, list): data_columns = [column for column in sdf.columns if column not in index_columns] if len(values) > 1: # If we have two values, Spark will return column's name # in this format: column_values, where column contains # their values in the DataFrame and values is # the column list passed to the pivot_table(). # E.g. if column is b and values is ['b','e'], # then ['2_b', '2_e', '3_b', '3_e']. # We sort the columns of Spark DataFrame by values. 
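# Hedged sketch (the concrete names are hypothetical and only mirror the
# "{pivot_value}_{value_column}" pattern described in the comment above):
#     >>> sorted(['2_b', '2_e', '3_b', '3_e'], key=lambda x: x.split("_", 1)[1])
#     ['2_b', '3_b', '2_e', '3_e']
# i.e. the split("_", 1)[1] key groups the pivoted Spark columns by their original
# value column, which is what the sort below relies on.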
data_columns.sort(key=lambda x: x.split("_", 1)[1]) sdf = sdf.select(index_columns + data_columns) column_name_to_index = dict( zip(self._internal.data_spark_column_names, self._internal.column_labels) ) column_labels = [ tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]]) for name in data_columns ] column_label_names = ( [cast(Optional[Name], None)] * column_labels_level(values) ) + [columns] internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) psdf = DataFrame(internal) else: column_labels = [tuple(list(values[0]) + [column]) for column in data_columns] column_label_names = ([cast(Optional[Name], None)] * len(values[0])) + [columns] internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) psdf = DataFrame(internal) else: internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_label_names=[columns], ) psdf = DataFrame(internal) else: if isinstance(values, list): index_values = values[-1] else: index_values = values index_map: Dict[str, Optional[Label]] = {} for i, index_value in enumerate(index_values): colname = SPARK_INDEX_NAME_FORMAT(i) sdf = sdf.withColumn(colname, SF.lit(index_value)) index_map[colname] = None internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()], index_names=list(index_map.values()), column_label_names=[columns], ) psdf = DataFrame(internal) psdf_columns = psdf.columns if isinstance(psdf_columns, pd.MultiIndex): psdf.columns = psdf_columns.set_levels( psdf_columns.levels[-1].astype( # type: ignore[index] spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type) ), level=-1, ) else: psdf.columns = psdf_columns.astype( spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type) ) return psdf def pivot( self, index: Optional[Name] = None, columns: Optional[Name] = None, values: Optional[Name] = None, ) -> "DataFrame": """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation. Parameters ---------- index : string, optional Column to use to make new frame's index. If None, uses existing index. columns : string Column to use to make new frame's columns. values : string, object or a list of the previous Column(s) to use for populating new frame's values. Returns ------- DataFrame Returns reshaped DataFrame. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. Examples -------- >>> df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}, ... 
columns=['foo', 'bar', 'baz', 'zoo']) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE bar A B C 0 1.0 NaN NaN 1 NaN 2.0 NaN 2 NaN NaN 3.0 3 4.0 NaN NaN 4 NaN 5.0 NaN 5 NaN NaN 6.0 Notice that, unlike pandas raises an ValueError when duplicated values are found, pandas-on-Spark's pivot still works with its first value it meets during operation because pivot is an expensive operation and it is preferred to permissively execute over failing fast when processing large data. >>> df = ps.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz']) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1.0 NaN NaN two NaN 3.0 4.0 It also support multi-index and multi-index column. >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')]) >>> df = df.set_index(('a', 'bar'), append=True) >>> df # doctest: +NORMALIZE_WHITESPACE a b foo baz (a, bar) 0 A one 1 1 A one 2 2 B two 3 3 C two 4 >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index() ... # doctest: +NORMALIZE_WHITESPACE ('a', 'foo') one two (a, bar) 0 A 1.0 NaN 1 A 2.0 NaN 2 B NaN 3.0 3 C NaN 4.0 """ if columns is None: raise ValueError("columns should be set.") if values is None: raise ValueError("values should be set.") should_use_existing_index = index is not None if should_use_existing_index: df = self index_labels = [index] else: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. 
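# Hedged sketch of what the block below effectively does (psdf is a stand-in name):
#     >>> from pyspark.pandas.config import option_context
#     >>> with option_context("compute.default_index_type", "distributed"):
#     ...     tmp = psdf.reset_index()  # old index becomes ordinary data columns
# A "distributed" default index is cheap to attach because it needs no global
# ordering, and the throwaway index created here is discarded again right after the
# pivot_table call, so its values never surface to the user.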
with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index_labels = df._internal.column_labels[: self._internal.index_level] df = df.pivot_table(index=index_labels, columns=columns, values=values, aggfunc="first") if should_use_existing_index: return df else: internal = df._internal.copy(index_names=self._internal.index_names) return DataFrame(internal) @property def columns(self) -> pd.Index: """The column labels of the DataFrame.""" names = [ name if name is None or len(name) > 1 else name[0] for name in self._internal.column_label_names ] if self._internal.column_labels_level > 1: columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names) else: columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0]) return columns @columns.setter def columns(self, columns: Union[pd.Index, List[Name]]) -> None: if isinstance(columns, pd.MultiIndex): column_labels = columns.tolist() else: column_labels = [ col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns ] if len(self._internal.column_labels) != len(column_labels): raise ValueError( "Length mismatch: Expected axis has {} elements, " "new values have {} elements".format( len(self._internal.column_labels), len(column_labels) ) ) column_label_names: Optional[List] if isinstance(columns, pd.Index): column_label_names = [ name if is_name_like_tuple(name) else (name,) for name in columns.names ] else: column_label_names = None pssers = [ self._psser_for(label).rename(name) for label, name in zip(self._internal.column_labels, column_labels) ] self._update_internal_frame( self._internal.with_new_columns(pssers, column_label_names=column_label_names) ) @property def dtypes(self) -> pd.Series: """Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ps.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series( [self._psser_for(label).dtype for label in self._internal.column_labels], index=pd.Index( [label if len(label) > 1 else label[0] for label in self._internal.column_labels] ), ) def select_dtypes( self, include: Optional[Union[str, List[str]]] = None, exclude: Optional[Union[str, List[str]]] = None, ) -> "DataFrame": """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. It also takes Spark SQL DDL type strings, for instance, 'string' and 'date'. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes() Traceback (most recent call last): ... 

ValueError: at least one of include or exclude must be nonempty * If ``include`` and ``exclude`` have overlapping elements >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes(include='a', exclude='a') Traceback (most recent call last): ... ValueError: include and exclude overlap on {'a'} Notes ----- * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` Examples -------- >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3, ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 True 1.0 a 1 2 False 2.0 b 2 1 True 1.0 a 3 2 False 2.0 b 4 1 True 1.0 a 5 2 False 2.0 b >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64'], exclude=['int']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c d 0 True 1.0 a 1 False 2.0 b 2 True 1.0 a 3 False 2.0 b 4 True 1.0 a 5 False 2.0 b Spark SQL DDL type strings can be used as well. >>> df.select_dtypes(exclude=['string']) a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 """ from pyspark.sql.types import _parse_datatype_string include_list: List[str] if not is_list_like(include): include_list = [cast(str, include)] if include is not None else [] else: include_list = list(include) exclude_list: List[str] if not is_list_like(exclude): exclude_list = [cast(str, exclude)] if exclude is not None else [] else: exclude_list = list(exclude) if not any((include_list, exclude_list)): raise ValueError("at least one of include or exclude must be " "nonempty") # can't both include AND exclude! if set(include_list).intersection(set(exclude_list)): raise ValueError( "include and exclude overlap on {inc_ex}".format( inc_ex=set(include_list).intersection(set(exclude_list)) ) ) # Handle Spark types include_spark_type = [] for inc in include_list: try: include_spark_type.append(_parse_datatype_string(inc)) except BaseException: pass exclude_spark_type = [] for exc in exclude_list: try: exclude_spark_type.append(_parse_datatype_string(exc)) except BaseException: pass # Handle pandas types include_numpy_type = [] for inc in include_list: try: include_numpy_type.append(infer_dtype_from_object(inc)) except BaseException: pass exclude_numpy_type = [] for exc in exclude_list: try: exclude_numpy_type.append(infer_dtype_from_object(exc)) except BaseException: pass column_labels = [] for label in self._internal.column_labels: if len(include_list) > 0: should_include = ( infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type or self._internal.spark_type_for(label) in include_spark_type ) else: should_include = not ( infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type or self._internal.spark_type_for(label) in exclude_spark_type ) if should_include: column_labels.append(label) return DataFrame( self._internal.with_new_columns([self._psser_for(label) for label in column_labels]) ) def droplevel( self, level: Union[int, Name, List[Union[int, Name]]], axis: Axis = 0 ) -> "DataFrame": """ Return DataFrame with requested index / column level(s) removed. Parameters ---------- level: int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis: {0 or ‘index’, 1 or ‘columns’}, default 0 Returns ------- DataFrame with requested index / column level(s) removed. 
Examples -------- >>> df = ps.DataFrame( ... [[3, 4], [7, 8], [11, 12]], ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]), ... ) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ axis = validate_axis(axis) if axis == 0: if not isinstance(level, (tuple, list)): # normalize a single level to a list level = [level] names = self.index.names nlevels = self._internal.index_level int_level = set() for n in level: if isinstance(n, int): if n < 0: n = n + nlevels if n < 0: raise IndexError( "Too many levels: Index has only {} levels, " "{} is not a valid level number".format(nlevels, (n - nlevels)) ) if n >= nlevels: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( nlevels, (n + 1) ) ) else: if n not in names: raise KeyError("Level {} not found".format(n)) n = names.index(n) int_level.add(n) if len(level) >= nlevels: raise ValueError( "Cannot remove {} levels from an index with {} levels: " "at least one level must be left.".format(len(level), nlevels) ) index_spark_columns, index_names, index_fields = zip( *[ item for i, item in enumerate( zip( self._internal.index_spark_columns, self._internal.index_names, self._internal.index_fields, ) ) if i not in int_level ] ) internal = self._internal.copy( index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_fields=list(index_fields), ) return DataFrame(internal) else: psdf = self.copy() psdf.columns = psdf.columns.droplevel(level) # type: ignore[arg-type] return psdf def drop( self, labels: Optional[Union[Name, List[Name]]] = None, axis: Optional[Axis] = 0, index: Union[Name, List[Name]] = None, columns: Union[Name, List[Name]] = None, ) -> "DataFrame": """ Drop specified labels from columns. Remove rows and/or columns by specifying label names and corresponding axis, or by specifying directly index and/or column names. Dropping rows of a MultiIndex DataFrame is not supported yet. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionchanged:: 3.3 Set dropping by index by default. index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionchanged:: 3.3 Added dropping rows by 'index'. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 >>> df.drop(index=[0, 1], columns='A') B C D 2 9 10 11 Also supports dropping columns for a MultiIndex >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ...
columns=['x', 'y', 'z', 'w']) >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df # doctest: +NORMALIZE_WHITESPACE a b x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE b z w 0 5 7 1 6 8 Notes ----- Currently, dropping rows of a MultiIndex DataFrame is not supported yet. """ if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis = validate_axis(axis) if axis == 1: return self.drop(index=index, columns=labels) else: return self.drop(index=labels, columns=columns) else: if index is None and columns is None: raise ValueError("Need to specify at least one of 'labels' or 'columns' or 'index'") internal = self._internal if index is not None: if is_name_like_tuple(index) or is_name_like_value(index): index = [index] if len(index) > 0: if internal.index_level == 1: internal = internal.resolved_copy if len(index) <= ps.get_option("compute.isin_limit"): self_index_type = self.index.spark.data_type cond = ~internal.index_spark_columns[0].isin( [SF.lit(label).cast(self_index_type) for label in index] ) internal = internal.with_filter(cond) else: index_sdf_col = "__index" index_sdf = default_session().createDataFrame( pd.DataFrame({index_sdf_col: index}) ) joined_sdf = internal.spark_frame.join( other=F.broadcast(index_sdf), on=( internal.index_spark_columns[0] == scol_for(index_sdf, index_sdf_col) ), how="anti", ) internal = internal.with_new_sdf(joined_sdf) else: raise NotImplementedError( "Drop rows of MultiIndex DataFrame is not supported yet" ) if columns is not None: if is_name_like_tuple(columns): columns = [columns] elif is_name_like_value(columns): columns = [(columns,)] else: columns = [col if is_name_like_tuple(col) else (col,) for col in columns] if len(columns) > 0: drop_column_labels = set( label for label in internal.column_labels for col in columns if label[: len(col)] == col ) if len(drop_column_labels) == 0: raise KeyError(columns) keep_columns_and_labels = [ (column, label) for column, label in zip( self._internal.data_spark_column_names, self._internal.column_labels ) if label not in drop_column_labels ] cols, labels = ( zip(*keep_columns_and_labels) if len(keep_columns_and_labels) > 0 else ([], []) ) internal = internal.with_new_columns( [self._psser_for(label) for label in labels] ) return DataFrame(internal) def _prepare_sort_by_scols(self, by: Union[Name, List[Name]]) -> List[Column]: if is_name_like_value(by): by = [by] else: assert is_list_like(by), type(by) new_by = [] for colname in by: ser = self[colname] if not isinstance(ser, ps.Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." 
% name_like_string(colname) ) new_by.append(ser.spark.column) return new_by def _sort( self, by: List[Column], ascending: Union[bool, List[bool]], na_position: str, keep: str = "first", ) -> "DataFrame": if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError( "Length of ascending ({}) != length of by ({})".format(len(ascending), len(by)) ) if na_position not in ("first", "last"): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination mapper = { (True, "first"): Column.asc_nulls_first, (True, "last"): Column.asc_nulls_last, (False, "first"): Column.desc_nulls_first, (False, "last"): Column.desc_nulls_last, } by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)] natural_order_scol = F.col(NATURAL_ORDER_COLUMN_NAME) if keep == "last": natural_order_scol = Column.desc(natural_order_scol) elif keep == "all": raise NotImplementedError("`keep`=all is not implemented yet.") elif keep != "first": raise ValueError('keep must be either "first", "last" or "all".') sdf = self._internal.resolved_copy.spark_frame.sort(*by, natural_order_scol) return DataFrame(self._internal.with_new_sdf(sdf)) def sort_values( self, by: Union[Name, List[Name]], ascending: Union[bool, List[bool]] = True, inplace: bool = False, na_position: str = "last", ignore_index: bool = False, ) -> Optional["DataFrame"]: """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ps.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3'], ... index=['a', 'b', 'c', 'd', 'e']) >>> df col1 col2 col3 a A 2 0 b B 9 9 c None 8 4 d D 7 2 e C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 a A 2 0 b B 9 9 e C 4 3 d D 7 2 c None 8 4 Ignore index for the resulting axis >>> df.sort_values(by=['col1'], ignore_index=True) col1 col2 col3 0 A 2 0 1 B 9 9 2 C 4 3 3 D 7 2 4 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 d D 7 2 e C 4 3 b B 9 9 a A 2 0 c None 8 4 Sort by multiple columns >>> df = ps.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... 
columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ inplace = validate_bool_kwarg(inplace, "inplace") new_by = self._prepare_sort_by_scols(by) psdf = self._sort(by=new_by, ascending=ascending, na_position=na_position) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return psdf.reset_index(drop=True) if ignore_index else psdf def sort_index( self, axis: Axis = 0, level: Optional[Union[int, List[int]]] = None, ascending: bool = True, inplace: bool = False, kind: str = None, na_position: str = "last", ignore_index: bool = False, ) -> Optional["DataFrame"]: """ Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None pandas-on-Spark does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 3.4.0 Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan]) >>> df.sort_index() A a 1.0 b 2.0 NaN NaN >>> df.sort_index(ascending=False) A b 2.0 a 1.0 NaN NaN >>> df.sort_index(na_position='first') A NaN NaN a 1.0 b 2.0 >>> df.sort_index(ignore_index=True) A 0 1.0 1 2.0 2 NaN >>> df.sort_index(inplace=True) >>> df A a 1.0 b 2.0 NaN NaN >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]}, ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], ... columns=['A', 'B']) >>> df.sort_index() A B a 0 3 0 1 2 1 b 0 1 2 1 0 3 >>> df.sort_index(level=1) # doctest: +SKIP A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 >>> df.sort_index(level=[1, 0]) A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 >>> df.sort_index(ignore_index=True) A B 0 3 0 1 2 1 2 1 2 3 0 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = validate_axis(axis) if axis != 0: raise NotImplementedError("No other axis than 0 are supported at the moment") if kind is not None: raise NotImplementedError( "Specifying the sorting algorithm is not supported at the moment." ) if level is None or (is_list_like(level) and len(level) == 0): # type: ignore[arg-type] by = self._internal.index_spark_columns elif is_list_like(level): by = [ self._internal.index_spark_columns[lvl] for lvl in level # type: ignore[union-attr] ] else: by = [self._internal.index_spark_columns[level]] # type: ignore[index] psdf = self._sort(by=by, ascending=ascending, na_position=na_position) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return psdf.reset_index(drop=True) if ignore_index else psdf def swaplevel( self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, axis: Axis = 0 ) -> "DataFrame": """ Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to swap levels on. 
0 or 'index' for row-wise, 1 or 'columns' for column-wise. Returns ------- DataFrame DataFrame with levels swapped in MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_arrays( ... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size']) >>> midx # doctest: +SKIP MultiIndex([( 'red', 1, 's'), ('blue', 2, 'm')], names=['color', 'number', 'size']) Swap levels in a MultiIndex on index. >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx) >>> psdf # doctest: +NORMALIZE_WHITESPACE x y color number size red 1 s 5 5 blue 2 m 6 6 >>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 >>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE x y number color size 1 red s 5 5 2 blue m 6 6 >>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 Swap levels in a MultiIndex on columns. >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}) >>> psdf.columns = midx >>> psdf color red blue number 1 2 size s m 0 5 5 1 6 6 >>> psdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> psdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> psdf.swaplevel(0, 1, axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6 >>> psdf.swaplevel('number', 'color', axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6 """ axis = validate_axis(axis) if axis == 0: internal = self._swaplevel_index(i, j) else: assert axis == 1 internal = self._swaplevel_columns(i, j) return DataFrame(internal) def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> "DataFrame": """ Interchange axes and swap values axes appropriately. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from pyspark.pandas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1) Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Parameters ---------- i: {0 or 'index', 1 or 'columns'}. The axis to swap. j: {0 or 'index', 1 or 'columns'}. The axis to swap. copy : bool, default True. Returns ------- DataFrame Examples -------- >>> psdf = ps.DataFrame( ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c'] ... 
) >>> psdf a b c x 1 2 3 y 4 5 6 z 7 8 9 >>> psdf.swapaxes(i=1, j=0) x y z a 1 4 7 b 2 5 8 c 3 6 9 >>> psdf.swapaxes(i=1, j=1) a b c x 1 2 3 y 4 5 6 z 7 8 9 """ assert copy is True i = validate_axis(i) j = validate_axis(j) return self.copy() if i == j else self.transpose() def _swaplevel_columns(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame: assert isinstance(self.columns, pd.MultiIndex) for index in (i, j): if not isinstance(index, int) and index not in self.columns.names: raise KeyError("Level %s not found" % index) i = i if isinstance(i, int) else self.columns.names.index(i) j = j if isinstance(j, int) else self.columns.names.index(j) for index in (i, j): if index >= len(self.columns) or index < -len(self.columns): raise IndexError( "Too many levels: Columns have only %s levels, " "%s is not a valid level number" % (self._internal.index_level, index) ) column_label_names = self._internal.column_label_names.copy() column_label_names[i], column_label_names[j], = ( column_label_names[j], column_label_names[i], ) column_labels = self._internal._column_labels column_label_list = [list(label) for label in column_labels] for label_list in column_label_list: label_list[i], label_list[j] = label_list[j], label_list[i] column_labels = [tuple(x) for x in column_label_list] internal = self._internal.copy( column_label_names=list(column_label_names), column_labels=list(column_labels) ) return internal def _swaplevel_index(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame: assert isinstance(self.index, ps.MultiIndex) for index in (i, j): if not isinstance(index, int) and index not in self.index.names: raise KeyError("Level %s not found" % index) i = i if isinstance(i, int) else self.index.names.index(i) j = j if isinstance(j, int) else self.index.names.index(j) for index in (i, j): if index >= self._internal.index_level or index < -self._internal.index_level: raise IndexError( "Too many levels: Index has only %s levels, " "%s is not a valid level number" % (self._internal.index_level, index) ) index_map = list( zip( self._internal.index_spark_columns, self._internal.index_names, self._internal.index_fields, ) ) index_map[i], index_map[j] = index_map[j], index_map[i] index_spark_columns, index_names, index_fields = zip(*index_map) internal = self._internal.copy( index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_fields=list(index_fields), ) return internal def nlargest( self, n: int, columns: Union[Name, List[Name]], keep: str = "first" ) -> "DataFrame": """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in pandas. In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet. Determines which duplicates (if any) to keep. - ``first`` : Keep the first occurrence. - ``last`` : Keep the last occurrence. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. 
DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "X". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 To order by the largest values in column "Y" and then "X", we can specify multiple columns like in the next example. >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 The examples below show how ties are resolved, which is decided by `keep`. >>> tied_df = ps.DataFrame({'X': [1, 2, 2, 3, 3]}, index=['a', 'b', 'c', 'd', 'e']) >>> tied_df X a 1 b 2 c 2 d 3 e 3 When using keep='first' (by default), ties are resolved in order: >>> tied_df.nlargest(3, 'X') X d 3 e 3 b 2 >>> tied_df.nlargest(3, 'X', keep='first') X d 3 e 3 b 2 When using keep='last', ties are resolved in reverse order: >>> tied_df.nlargest(3, 'X', keep='last') X e 3 d 3 c 2 """ by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=False, na_position="last", keep=keep).head(n=n) def nsmallest( self, n: int, columns: Union[Name, List[Name]], keep: str = "first" ) -> "DataFrame": """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet. Determines which duplicates (if any) to keep. - ``first`` : Keep the first occurrence. - ``last`` : Keep the last occurrence. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "X". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the smallest values in column "Y" and then "X", we can specify multiple columns like in the next example. >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 The examples below show how ties are resolved, which is decided by `keep`. 
>>> tied_df = ps.DataFrame({'X': [1, 1, 2, 2, 3]}, index=['a', 'b', 'c', 'd', 'e']) >>> tied_df X a 1 b 1 c 2 d 2 e 3 When using keep='first' (by default), ties are resolved in order: >>> tied_df.nsmallest(3, 'X') X a 1 b 1 c 2 >>> tied_df.nsmallest(3, 'X', keep='first') X a 1 b 1 c 2 When using keep='last', ties are resolved in reverse order: >>> tied_df.nsmallest(3, 'X', keep='last') X b 1 a 1 d 2 """ by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=True, na_position="last", keep=keep).head(n=n) def isin(self, values: Union[List, Dict]) -> "DataFrame": """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns)) ) data_spark_columns = [] if isinstance(values, dict): for i, col in enumerate(self.columns): if col in values: item = values[col] item = item.tolist() if isinstance(item, np.ndarray) else list(item) scol = self._internal.spark_column_for(self._internal.column_labels[i]).isin( [SF.lit(v) for v in item] ) scol = F.coalesce(scol, F.lit(False)) else: scol = SF.lit(False) data_spark_columns.append(scol.alias(self._internal.data_spark_column_names[i])) elif is_list_like(values): values = ( cast(np.ndarray, values).tolist() if isinstance(values, np.ndarray) else list(values) ) for label in self._internal.column_labels: scol = self._internal.spark_column_for(label).isin([SF.lit(v) for v in values]) scol = F.coalesce(scol, F.lit(False)) data_spark_columns.append(scol.alias(self._internal.spark_column_name_for(label))) else: raise TypeError("Values should be iterable, Series, DataFrame or dict.") return DataFrame( self._internal.with_new_columns( data_spark_columns, data_fields=[ field.copy(dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False) for field in self._internal.data_fields ], ) ) @property def shape(self) -> Tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 
'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge( self, right: "DataFrame", how: str = "inner", on: Optional[Union[Name, List[Name]]] = None, left_on: Optional[Union[Name, List[Name]]] = None, right_on: Optional[Union[Name, List[Name]]] = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ("_x", "_y"), ) -> "DataFrame": """ Merge DataFrame objects with a database-style join. The index of the resulting DataFrame will be one of the following: - 0...n if no index is used for merging - Index of the left DataFrame if merged only on the index of the right DataFrame - Index of the right DataFrame if merged only on the index of the left DataFrame - All involved indices if merged using the indices of both DataFrames e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will be an index (x, a, b) Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {'left', 'right', 'outer', 'inner'}, default 'inner' left: use only keys from left frame, similar to a SQL left outer join; not preserve key order unlike pandas. right: use only keys from right frame, similar to a SQL right outer join; not preserve key order unlike pandas. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; not preserve the order of the left keys unlike pandas. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on: Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on: Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- DataFrame.join : Join columns of another DataFrame. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}, ... columns=['lkey', 'value']) >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}, ... columns=['rkey', 'value']) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. 
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey') >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS lkey value_x rkey value_y ...bar 2 bar 6 ...baz 3 baz 7 ...foo 1 foo 5 ...foo 1 foo 8 ...foo 5 foo 5 ...foo 5 foo 8 >>> left_psdf = ps.DataFrame({'A': [1, 2]}) >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index() A B 1 2 x >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index() A B 0 1 None 1 2 x >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index() A B 1 2.0 x 2 NaN y >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index() A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. """ def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]: if os is None: return [] elif is_name_like_tuple(os): return [cast(Label, os)] elif is_name_like_value(os): return [(os,)] else: return [o if is_name_like_tuple(o) else (o,) for o in os] if isinstance(right, ps.Series): right = right.to_frame() if on: if left_on or right_on: raise ValueError( 'Can only pass argument "on" OR "left_on" and "right_on", ' "not a combination of both." ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(on))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(on))) else: # TODO: need special handling for multi-index. if left_index: left_key_names = self._internal.index_spark_column_names else: left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on))) if right_index: right_key_names = right._internal.index_spark_column_names else: right_key_names = list( map(right._internal.spark_column_name_for, to_list(right_on)) ) if left_key_names and not right_key_names: raise ValueError("Must pass right_on or right_index=True") if right_key_names and not left_key_names: raise ValueError("Must pass left_on or left_index=True") if not left_key_names and not right_key_names: common = list(self.columns.intersection(right.columns)) if len(common) == 0: raise ValueError( "No common columns to perform merge on. Merge options: " "left_on=None, right_on=None, left_index=False, right_index=False" ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(common))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(common))) if len(left_key_names) != len(right_key_names): raise ValueError("len(left_keys) must equal len(right_keys)") # We should distinguish the name to avoid ambiguous column name after merging. 
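# Hedged illustration (hypothetical column names): with the "__right_" prefix, a
# right-hand join key whose Spark column is named 'value' (a name that may also
# exist on the left) is referenced as '__right_value' from here on:
#     >>> right_prefix = "__right_"
#     >>> [right_prefix + name for name in ["value", "rkey"]]
#     ['__right_value', '__right_rkey']
# so the join condition never compares two unqualified columns with the same name.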
right_prefix = "__right_" right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names] how = validate_how(how) def resolve(internal: InternalFrame, side: str) -> InternalFrame: def rename(col: str) -> str: return "__{}_{}".format(side, col) internal = internal.resolved_copy sdf = internal.spark_frame sdf = sdf.select( *[ scol_for(sdf, col).alias(rename(col)) for col in sdf.columns if col not in HIDDEN_COLUMNS ], *HIDDEN_COLUMNS, ) return internal.copy( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.index_spark_column_names ], index_fields=[ field.copy(name=rename(field.name)) for field in internal.index_fields ], data_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.data_spark_column_names ], data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields], ) left_internal = self._internal.resolved_copy right_internal = resolve(right._internal, "right") left_table = left_internal.spark_frame.alias("left_table") right_table = right_internal.spark_frame.alias("right_table") left_key_columns = [scol_for(left_table, label) for label in left_key_names] right_key_columns = [scol_for(right_table, label) for label in right_key_names] join_condition = reduce( lambda x, y: x & y, [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)], ) joined_table = left_table.join(right_table, join_condition, how=how) # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels) exprs = [] data_columns = [] column_labels = [] def left_scol_for(label: Label) -> Column: return scol_for(left_table, left_internal.spark_column_name_for(label)) def right_scol_for(label: Label) -> Column: return scol_for(right_table, right_internal.spark_column_name_for(label)) for label in left_internal.column_labels: col = left_internal.spark_column_name_for(label) scol = left_scol_for(label) if label in duplicate_columns: spark_column_name = left_internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and (right_prefix + spark_column_name) in right_key_names ): right_scol = right_scol_for(label) if how == "right": scol = right_scol.alias(col) elif how == "full": scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col) else: pass else: col = col + left_suffix scol = scol.alias(col) label = tuple([str(label[0]) + left_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) for label in right_internal.column_labels: # recover `right_prefix` here. 
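# Hedged sketch (hypothetical name): undoing the "__right_" prefix added above so
# the user-visible column name comes out clean:
#     >>> right_prefix = "__right_"
#     >>> "__right_value"[len(right_prefix):]
#     'value'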
col = right_internal.spark_column_name_for(label)[len(right_prefix) :] scol = right_scol_for(label).alias(col) if label in duplicate_columns: spark_column_name = left_internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and (right_prefix + spark_column_name) in right_key_names ): continue else: col = col + right_suffix scol = scol.alias(col) label = tuple([str(label[0]) + right_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) left_index_scols = left_internal.index_spark_columns right_index_scols = right_internal.index_spark_columns # Retain indices if they are used for joining if left_index: if right_index: if how in ("inner", "left"): exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names elif how == "right": exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names else: index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names for col, left_scol, right_scol in zip( index_spark_column_names, left_index_scols, right_index_scols ): scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol) exprs.append(scol.alias(col)) else: exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names elif right_index: exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names else: index_spark_column_names = [] index_names = [] selected_columns = joined_table.select(*exprs) internal = InternalFrame( spark_frame=selected_columns, index_spark_columns=[ scol_for(selected_columns, col) for col in index_spark_column_names ], index_names=index_names, column_labels=column_labels, data_spark_columns=[scol_for(selected_columns, col) for col in data_columns], ) return DataFrame(internal) def join( self, right: "DataFrame", on: Optional[Union[Name, List[Name]]] = None, how: str = "left", lsuffix: str = "", rsuffix: str = "", ) -> "DataFrame": """ Join columns of another DataFrame. Join columns with `right` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- right: DataFrame, Series on: str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `right`, otherwise joins index-on-index. If multiple values given, the `right` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how: {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use `left` frame’s index (or column if on is specified). * right: use `right`’s index. * outer: form union of `left` frame’s index (or column if on is specified) with right’s index, and sort it. lexicographically. * inner: form intersection of `left` frame’s index (or column if on is specified) with `right`’s index, preserving the order of the `left`’s one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from `right` frame's overlapping columns. Returns ------- DataFrame A dataframe containing columns from both the `left` and `right`. 
See Also -------- DataFrame.merge: For column(s)-on-columns(s) operations. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Notes ----- Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame objects. Examples -------- >>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], ... 'A': ['A0', 'A1', 'A2', 'A3']}, ... columns=['key', 'A']) >>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}, ... columns=['key', 'B']) >>> psdf1 key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 >>> psdf2 key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right') >>> join_psdf.sort_values(by=join_psdf.columns) key_left A key_right B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 None None If we want to join using the key columns, we need to set key to be the index in both df and right. The joined DataFrame will have key as its index. >>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key')) >>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 None Another option to join using the key columns is to use the on parameter. DataFrame.join always uses right’s index but we can use any column in df. This method not preserve the original DataFrame’s index in the result unlike pandas. >>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key') >>> join_psdf.index Int64Index([0, 1, 2, 3], dtype='int64') """ if isinstance(right, ps.Series): common = list(self.columns.intersection([right.name])) else: common = list(self.columns.intersection(right.columns)) if len(common) > 0 and not lsuffix and not rsuffix: raise ValueError( "columns overlap but no suffix specified: " "{rename}".format(rename=common) ) need_set_index = False if on: if not is_list_like(on): on = [on] if len(on) != right._internal.index_level: raise ValueError( 'len(left_on) must equal the number of levels in the index of "right"' ) need_set_index = len(set(on) & set(self.index.names)) == 0 if need_set_index: self = self.set_index(on) join_psdf = self.merge( right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix) ) return join_psdf.reset_index() if need_set_index else join_psdf def combine_first(self, other: "DataFrame") -> "DataFrame": """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. .. versionadded:: 3.3.0 Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. 
Returns ------- DataFrame Examples -------- >>> ps.set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = ps.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2).sort_index() A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in other >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = ps.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2).sort_index() A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 >>> ps.reset_option("compute.ops_on_diff_frames") """ if not isinstance(other, DataFrame): raise TypeError("`combine_first` only allows `DataFrame` for parameter `other`") if same_anchor(self, other): combined = self this = self that = other else: combined = combine_frames(self, other) this = combined["this"] that = combined["that"] intersect_column_labels = set(self._internal.column_labels).intersection( set(other._internal.column_labels) ) column_labels, data_spark_columns = [], [] for column_label in this._internal.column_labels: this_scol = this._internal.spark_column_for(column_label) if column_label in intersect_column_labels: that_scol = that._internal.spark_column_for(column_label) this_scol_name = this._internal.spark_column_name_for(column_label) combined_scol = ( F.when(this_scol.isNull(), that_scol).otherwise(this_scol).alias(this_scol_name) ) data_spark_columns.append(combined_scol) else: data_spark_columns.append(this_scol) column_labels.append(column_label) for column_label in that._internal.column_labels: if column_label not in intersect_column_labels: that_scol = that._internal.spark_column_for(column_label) data_spark_columns.append(that_scol) column_labels.append(column_label) internal = combined._internal.copy( column_labels=column_labels, data_spark_columns=data_spark_columns, data_fields=None, # TODO: dtype? column_label_names=self._internal.column_label_names, ) return DataFrame(internal) def append( self, other: "DataFrame", ignore_index: bool = False, verify_integrity: bool = False, sort: bool = False, ) -> "DataFrame": """ Append rows of other to the end of caller, returning a new object. Columns in other that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default False Currently not supported. 
Returns ------- appended : DataFrame Examples -------- >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df.append(df) A B 0 1 2 1 3 4 0 1 2 1 3 4 >>> df.append(df, ignore_index=True) A B 0 1 2 1 3 4 2 1 2 3 3 4 """ if isinstance(other, ps.Series): raise TypeError("DataFrames.append() does not support appending Series to DataFrames") if sort: raise NotImplementedError("The 'sort' parameter is currently not supported") if not ignore_index: index_scols = self._internal.index_spark_columns if len(index_scols) != other._internal.index_level: raise ValueError("Both DataFrames have to have the same number of index levels") if verify_integrity and len(index_scols) > 0: if ( self._internal.spark_frame.select(index_scols) .intersect( other._internal.spark_frame.select(other._internal.index_spark_columns) ) .count() ) > 0: raise ValueError("Indices have overlapping values") # Lazy import to avoid circular dependency issues from pyspark.pandas.namespace import concat return cast(DataFrame, concat([self, other], ignore_index=ignore_index)) # TODO: add 'filter_func' and 'errors' parameter def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True) -> None: """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or Series join : 'left', default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. Returns ------- None : method directly changes calling object See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. DataFrame.join : Join columns of another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df.sort_index() A B 0 a d 1 b y 2 c e If `other` contains None the corresponding values are not updated in the original dataframe. 
>>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4.0 1 2 500.0 2 3 6.0 """ if join != "left": raise NotImplementedError("Only left join is supported") if isinstance(other, ps.Series): other = other.to_frame() update_columns = list( set(self._internal.column_labels).intersection(set(other._internal.column_labels)) ) update_sdf = self.join( other[update_columns], rsuffix="_new" )._internal.resolved_copy.spark_frame data_fields = self._internal.data_fields.copy() for column_labels in update_columns: column_name = self._internal.spark_column_name_for(column_labels) old_col = scol_for(update_sdf, column_name) new_col = scol_for( update_sdf, other._internal.spark_column_name_for(column_labels) + "_new" ) if overwrite: update_sdf = update_sdf.withColumn( column_name, F.when(new_col.isNull(), old_col).otherwise(new_col) ) else: update_sdf = update_sdf.withColumn( column_name, F.when(old_col.isNull(), new_col).otherwise(old_col) ) data_fields[self._internal.column_labels.index(column_labels)] = None sdf = update_sdf.select( *[scol_for(update_sdf, col) for col in self._internal.spark_column_names], *HIDDEN_COLUMNS, ) internal = self._internal.with_new_sdf(sdf, data_fields=data_fields) self._update_internal_frame(internal, requires_same_anchor=False) # TODO: ddof should be implemented. def cov(self, min_periods: Optional[int] = None) -> "DataFrame": """ Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. .. versionadded:: 3.3.0 Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. Examples -------- >>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = ps.DataFrame(np.random.randn(1000, 5), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... 
columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> sdf = ps.from_pandas(df) >>> sdf.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202 """ min_periods = 1 if min_periods is None else min_periods # Only compute covariance for Boolean and Numeric except Decimal psdf = self[ [ col for col in self.columns if isinstance(self[col].spark.data_type, BooleanType) or ( isinstance(self[col].spark.data_type, NumericType) and not isinstance(self[col].spark.data_type, DecimalType) ) ] ] num_cols = len(psdf.columns) cov = np.zeros([num_cols, num_cols]) if num_cols == 0: return DataFrame() if len(psdf) < min_periods: cov.fill(np.nan) return DataFrame(cov, columns=psdf.columns, index=psdf.columns) data_cols = psdf._internal.data_spark_column_names cov_scols = [] count_not_null_scols = [] # Count number of null row between two columns # Example: # a b c # 0 1 1 1 # 1 NaN 2 2 # 2 3 NaN 3 # 3 4 4 4 # # a b c # a count(a, a) count(a, b) count(a, c) # b count(b, b) count(b, c) # c count(c, c) # # count_not_null_scols = # [F.count(a, a), F.count(a, b), F.count(a, c), F.count(b, b), F.count(b, c), F.count(c, c)] for r in range(0, num_cols): for c in range(r, num_cols): count_not_null_scols.append( F.count( F.when(F.col(data_cols[r]).isNotNull() & F.col(data_cols[c]).isNotNull(), 1) ) ) count_not_null = ( psdf._internal.spark_frame.replace(float("nan"), None) .select(*count_not_null_scols) .head(1)[0] ) # Calculate covariance between two columns # Example: # with min_periods = 3 # a b c # 0 1 1 1 # 1 NaN 2 2 # 2 3 NaN 3 # 3 4 4 4 # # a b c # a cov(a, a) None cov(a, c) # b cov(b, b) cov(b, c) # c cov(c, c) # # cov_scols = [F.cov(a, a), None, F.cov(a, c), F.cov(b, b), F.cov(b, c), F.cov(c, c)] step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov_scols.append( F.covar_samp( F.col(data_cols[r]).cast("double"), F.col(data_cols[c]).cast("double") ) if count_not_null[r * num_cols + c - step] >= min_periods else F.lit(None) ) pair_cov = psdf._internal.spark_frame.select(*cov_scols).head(1)[0] # Convert from row to 2D array # Example: # pair_cov = [cov(a, a), None, cov(a, c), cov(b, b), cov(b, c), cov(c, c)] # # cov = # # a b c # a cov(a, a) None cov(a, c) # b cov(b, b) cov(b, c) # c cov(c, c) step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov[r][c] = pair_cov[r * num_cols + c - step] # Copy values # Example: # cov = # a b c # a cov(a, a) None cov(a, c) # b None cov(b, b) cov(b, c) # c cov(a, c) cov(b, c) cov(c, c) cov = cov + cov.T - np.diag(np.diag(cov)) return DataFrame(cov, columns=psdf.columns, index=psdf.columns) def sample( self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None, ignore_index: bool = False, ) -> "DataFrame": """ Return a random sample of items from an axis of object. Please call this function using named argument by specifying the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will be fixed. The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. 
This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 3.4.0 Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 A random 50% sample of the ``DataFrame``, while ignoring the index. >>> df.sample(frac=0.5, random_state=1, ignore_index=True) # doctest: +SKIP num_legs num_wings num_specimen_seen 0 4 0 2 1 8 0 1 2 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. >>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ... """ # Note: we don't run any of the doctests because the result can change depending on the # system's core count. if n is not None: raise NotImplementedError( "Function sample currently does not support specifying " "exact number of items to return. Use frac instead." ) if frac is None: raise ValueError("frac must be specified.") sdf = self._internal.resolved_copy.spark_frame.sample( withReplacement=replace, fraction=frac, seed=random_state ) if ignore_index: return DataFrame(sdf.drop(*self._internal.index_spark_column_names)) else: return DataFrame(self._internal.with_new_sdf(sdf)) def astype(self, dtype: Union[str, Dtype, Dict[Name, Union[str, Dtype]]]) -> "DataFrame": """ Cast a pandas-on-Spark object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. 
Examples -------- >>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3 """ applied = [] if is_dict_like(dtype): dtype_dict = cast(Dict[Name, Union[str, Dtype]], dtype) for col_name in dtype_dict.keys(): if col_name not in self.columns: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument." ) for col_name, col in self.items(): if col_name in dtype_dict: applied.append(col.astype(dtype=dtype_dict[col_name])) else: applied.append(col) else: for col_name, col in self.items(): applied.append(col.astype(dtype=cast(Union[str, Dtype], dtype))) return DataFrame(self._internal.with_new_columns(applied)) def add_prefix(self, prefix: str) -> "DataFrame": """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(prefix, str) return self._apply_series_op( lambda psser: psser.rename(tuple([prefix + i for i in psser._column_label])) ) def add_suffix(self, suffix: str) -> "DataFrame": """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(suffix, str) return self._apply_series_op( lambda psser: psser.rename(tuple([i + suffix for i in psser._column_label])) ) # TODO: include, and exclude should be implemented. def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame": """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75] A list of percentiles to be computed. Returns ------- DataFrame Summary statistics of the Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. 
DataFrame.std: Standard deviation of the observations. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``. For object data (e.g. strings or timestamps), the result’s index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value’s frequency. Timestamps also include the ``first`` and ``last`` items. Examples -------- Describing a numeric ``Series``. >>> s = ps.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 dtype: float64 Describing a ``DataFrame``. Only numeric fields are returned. >>> df = ps.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0], ... 'object': ['a', 'b', 'c'] ... }, ... columns=['numeric1', 'numeric2', 'object']) >>> df.describe() numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 For multi-index columns: >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')] >>> df.describe() # doctest: +NORMALIZE_WHITESPACE num a b count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 >>> df[('num', 'b')].describe() count 3.0 mean 5.0 std 1.0 min 4.0 25% 4.0 50% 5.0 75% 6.0 max 6.0 Name: (num, b), dtype: float64 Describing a ``DataFrame`` and selecting custom percentiles. >>> df = ps.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0] ... }, ... columns=['numeric1', 'numeric2']) >>> df.describe(percentiles = [0.85, 0.15]) numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 15% 1.0 4.0 50% 2.0 5.0 85% 3.0 6.0 max 3.0 6.0 Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric1.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: numeric1, dtype: float64 Describing a column from a ``DataFrame`` by accessing it as an attribute and selecting custom percentiles. 
        >>> df.numeric1.describe(percentiles = [0.85, 0.15])
        count    3.0
        mean     2.0
        std      1.0
        min      1.0
        15%      1.0
        50%      2.0
        85%      3.0
        max      3.0
        Name: numeric1, dtype: float64
        """
        psser_numeric: List[Series] = []
        psser_string: List[Series] = []
        psser_timestamp: List[Series] = []
        spark_data_types: List[DataType] = []
        column_labels: Optional[List[Label]] = []
        column_names: List[str] = []
        for label in self._internal.column_labels:
            psser = self._psser_for(label)
            spark_data_type = psser.spark.data_type
            if isinstance(spark_data_type, NumericType):
                psser_numeric.append(psser)
                column_labels.append(label)
                spark_data_types.append(spark_data_type)
            elif isinstance(spark_data_type, (TimestampType, TimestampNTZType)):
                psser_timestamp.append(psser)
                column_labels.append(label)
                spark_data_types.append(spark_data_type)
            else:
                psser_string.append(psser)
                column_names.append(self._internal.spark_column_name_for(label))

        if percentiles is not None:
            if any((p < 0.0) or (p > 1.0) for p in percentiles):
                raise ValueError("Percentiles should all be in the interval [0, 1]")
            # appending 50% if not in percentiles already
            percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
        else:
            percentiles = [0.25, 0.5, 0.75]

        # Identify the cases
        is_all_string_type = (
            len(psser_numeric) == 0 and len(psser_timestamp) == 0 and len(psser_string) > 0
        )
        is_all_numeric_type = len(psser_numeric) > 0 and len(psser_timestamp) == 0
        has_timestamp_type = len(psser_timestamp) > 0
        has_numeric_type = len(psser_numeric) > 0

        if is_all_string_type:
            # Handling string type columns
            # We will retrieve the `count`, `unique`, `top` and `freq`.
            internal = self._internal.resolved_copy
            exprs_string = [
                internal.spark_column_for(psser._column_label) for psser in psser_string
            ]
            sdf = internal.spark_frame.select(*exprs_string)

            # Get `count` & `unique` for each column
            counts, uniques = map(lambda x: x[1:], sdf.summary("count", "count_distinct").take(2))
            # Handling Empty DataFrame
            if len(counts) == 0 or counts[0] == "0":
                data = dict()
                for psser in psser_string:
                    data[psser.name] = [0, 0, np.nan, np.nan]
                return DataFrame(data, index=["count", "unique", "top", "freq"])

            # Get `top` & `freq` for each column
            tops = []
            freqs = []
            # TODO(SPARK-37711): We should do it in a single pass since invoking a Spark job
            # for every column is too expensive.
            for column in exprs_string:
                top, freq = sdf.groupby(column).count().sort("count", ascending=False).first()
                tops.append(str(top))
                freqs.append(str(freq))

            stats = [counts, uniques, tops, freqs]
            stats_names = ["count", "unique", "top", "freq"]

            result: DataFrame = DataFrame(
                data=stats,
                index=stats_names,
                columns=column_names,
            )
        elif is_all_numeric_type:
            # Handling numeric columns
            exprs_numeric = [
                psser._dtype_op.nan_to_null(psser).spark.column for psser in psser_numeric
            ]
            formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
            stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]

            # In this case, we can simply use `summary` to calculate the stats.
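            # Illustrative shape of the intermediate result (hypothetical columns "a" and "b",
            # default percentiles): `summary(*stats)` yields a Spark DataFrame such as
            #   +-------+----+----+
            #   |summary|   a|   b|
            #   +-------+----+----+
            #   |  count|   3|   3|
            #   |   mean| 2.0| 5.0|
            #   | stddev| 1.0| 1.0|
            #   |    min| 1.0| 4.0|
            #   |    ...| ...| ...|
            #   +-------+----+----+
            # The "summary" column becomes the index of the returned pandas-on-Spark DataFrame,
            # with "stddev" renamed to "std" below to match pandas.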
sdf = self._internal.spark_frame.select(*exprs_numeric).summary(*stats) sdf = sdf.replace("stddev", "std", subset=["summary"]) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, "summary")], column_labels=column_labels, data_spark_columns=[ scol_for(sdf, self._internal.spark_column_name_for(label)) for label in column_labels ], ) result = DataFrame(internal).astype("float64") elif has_timestamp_type: internal = self._internal.resolved_copy column_names = [ internal.spark_column_name_for(column_label) for column_label in column_labels ] column_length = len(column_labels) # Apply stat functions for each column. count_exprs = map(F.count, column_names) min_exprs = map(F.min, column_names) # Here we try to flat the multiple map into single list that contains each calculated # percentile using `chain`. # e.g. flat the `[<map object at 0x7fc1907dc280>, <map object at 0x7fc1907dcc70>]` # to `[Column<'percentile_approx(A, 0.2, 10000)'>, # Column<'percentile_approx(B, 0.2, 10000)'>, # Column<'percentile_approx(A, 0.5, 10000)'>, # Column<'percentile_approx(B, 0.5, 10000)'>]` perc_exprs = chain( *[ map(F.percentile_approx, column_names, [percentile] * column_length) for percentile in percentiles ] ) max_exprs = map(F.max, column_names) mean_exprs = [] for column_name, spark_data_type in zip(column_names, spark_data_types): mean_exprs.append(F.mean(column_name).astype(spark_data_type)) exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs] formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)] stats_names = ["count", "mean", "min", *formatted_perc, "max"] # If not all columns are timestamp type, # we also need to calculate the `std` for numeric columns if has_numeric_type: std_exprs = [] for label, spark_data_type in zip(column_labels, spark_data_types): column_name = label[0] if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): std_exprs.append(F.lit(None).alias("stddev_samp({})".format(column_name))) else: std_exprs.append(F.stddev(column_name)) exprs.extend(std_exprs) stats_names.append("std") # Select stats for all columns at once. sdf = internal.spark_frame.select(exprs) stat_values = sdf.first() num_stats = int(len(exprs) / column_length) # `column_name_stats_kv` is key-value store that has column name as key, and # the stats as values e.g. {"A": [{count_value}, {min_value}, ...], # "B": [{count_value}, {min_value} ...]} column_name_stats_kv: Dict[str, List[str]] = defaultdict(list) for i, column_name in enumerate(column_names): for first_stat_idx in range(num_stats): column_name_stats_kv[column_name].append( stat_values[(first_stat_idx * column_length) + i] ) # For timestamp type columns, we should cast the column type to string. for key, spark_data_type in zip(column_name_stats_kv, spark_data_types): if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): column_name_stats_kv[key] = [str(value) for value in column_name_stats_kv[key]] result: DataFrame = DataFrame( # type: ignore[no-redef] data=column_name_stats_kv, index=stats_names, columns=column_names, ) else: # Empty DataFrame without column raise ValueError("Cannot describe a DataFrame without columns") return result def drop_duplicates( self, subset: Optional[Union[Name, List[Name]]] = None, keep: Union[bool, str] = "first", inplace: bool = False, ignore_index: bool = False, ) -> Optional["DataFrame"]: """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. 
Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy. ignore_index : boolean, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. Returns ------- DataFrame DataFrame with duplicates removed or None if ``inplace=True``. >>> df = ps.DataFrame( ... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b']) >>> df a b 0 1 a 1 2 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates().sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates(ignore_index=True).sort_index() a b 0 1 a 1 2 a 2 2 c 3 3 d >>> df.drop_duplicates('a').sort_index() a b 0 1 a 1 2 a 4 3 d >>> df.drop_duplicates(['a', 'b']).sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep='last').sort_index() a b 0 1 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep=False).sort_index() a b 0 1 a 3 2 c 4 3 d """ inplace = validate_bool_kwarg(inplace, "inplace") sdf, column = self._mark_duplicates(subset, keep) sdf = sdf.where(~scol_for(sdf, column)).drop(column) internal = self._internal.with_new_sdf(sdf) psdf: DataFrame = DataFrame(internal) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return psdf.reset_index(drop=True) if ignore_index else psdf def reindex( self, labels: Optional[Sequence[Any]] = None, index: Optional[Union["Index", Sequence[Any]]] = None, columns: Optional[Union[pd.Index, Sequence[Any]]] = None, axis: Optional[Axis] = None, copy: Optional[bool] = True, fill_value: Optional[Any] = None, ) -> "DataFrame": """ Conform DataFrame to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- labels: array-like, optional New labels / index to conform the axis specified by ‘axis’ to. index, columns: array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data axis: int or str, optional Axis to target. Can be either the axis name (‘index’, ‘columns’) or number (0, 1). copy : bool, default True Return a new object, even if the passed indexes are the same. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. Returns ------- DataFrame with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = ps.DataFrame({ ... 'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, ... index=index, ... 
columns=['http_status', 'response_time']) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index).sort_index() http_status response_time Chrome 200.0 0.02 Comodo Dragon NaN NaN IE10 404.0 0.08 Iceweasel NaN NaN Safari 404.0 0.07 We can fill in the missing values by passing a value to the keyword ``fill_value``. >>> df.reindex(new_index, fill_value=0, copy=False).sort_index() http_status response_time Chrome 200 0.02 Comodo Dragon 0 0.00 IE10 404 0.08 Iceweasel 0 0.00 Safari 404 0.07 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']).sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = ps.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, ... index=date_index) >>> df2.sort_index() prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2).sort_index() prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN """ if axis is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.") if labels is not None: axis = validate_axis(axis) if axis == 0: index = labels elif axis == 1: columns = labels if index is not None and not is_list_like(index): raise TypeError( "Index must be called with a collection of some kind, " "%s was passed" % type(index) ) if columns is not None and not is_list_like(columns): raise TypeError( "Columns must be called with a collection of some kind, " "%s was passed" % type(columns) ) df = self if index is not None: df = df._reindex_index(index, fill_value) if columns is not None: df = df._reindex_columns(columns, fill_value) # Copy if copy and df is self: return df.copy() else: return df def _reindex_index( self, index: Optional[Union["Index", Sequence[Any]]], fill_value: Optional[Any] ) -> "DataFrame": # When axis is index, we can mimic pandas' by a right outer join. nlevels = self._internal.index_level assert nlevels <= 1 or ( isinstance(index, ps.MultiIndex) and nlevels == index.nlevels ), "MultiIndex DataFrame can only be reindexed with a similar pandas-on-Spark MultiIndex." 
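        # Sketch of the approach (illustrative): reindexing on the index is implemented as a right
        # outer join of the current frame against the requested labels, e.g.
        #   frame.join(labels, on=index_columns, how="right")
        # so every requested label survives and labels absent from `frame` come back as nulls,
        # matching pandas' reindex semantics; the `fill_value` branch below replaces those nulls
        # only for rows that did not exist in the original frame.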
index_columns = self._internal.index_spark_column_names frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME) if isinstance(index, ps.Index): if nlevels != index.nlevels: return DataFrame(index._internal.with_new_columns([])).reindex( columns=self.columns, fill_value=fill_value ) index_names = index._internal.index_names scols = index._internal.index_spark_columns labels = index._internal.spark_frame.select( [scol.alias(index_column) for scol, index_column in zip(scols, index_columns)] ) else: index = ps.Index(list(index)) labels = index._internal.spark_frame.select(index.spark.column.alias(index_columns[0])) index_names = self._internal.index_names if fill_value is not None: frame_index_columns = [ verify_temp_column_name(frame, "__frame_index_column_{}__".format(i)) for i in range(nlevels) ] index_scols = [ scol_for(frame, index_col).alias(frame_index_col) for index_col, frame_index_col in zip(index_columns, frame_index_columns) ] scols = self._internal.resolved_copy.data_spark_columns frame = frame.select(index_scols + scols) temp_fill_value = verify_temp_column_name(frame, "__fill_value__") labels = labels.withColumn(temp_fill_value, SF.lit(fill_value)) frame_index_scols = [scol_for(frame, col) for col in frame_index_columns] labels_index_scols = [scol_for(labels, col) for col in index_columns] joined_df = frame.join( labels, on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)], how="right", ) joined_df = joined_df.select( *labels_index_scols, *[ F.when( reduce( lambda c1, c2: c1 & c2, [ fcol.isNull() & lcol.isNotNull() for fcol, lcol in zip(frame_index_scols, labels_index_scols) ], ), scol_for(joined_df, temp_fill_value), ) .otherwise(scol_for(joined_df, col)) .alias(col) for col in self._internal.data_spark_column_names ], ) data_fields = None else: joined_df = frame.join(labels, on=index_columns, how="right") data_fields = [field.copy(nullable=True) for field in self._internal.data_fields] sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME) internal = self._internal.copy( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], index_names=index_names, index_fields=[ field.copy(name=name) for field, name in zip( index._internal.index_fields, self._internal.index_spark_column_names ) ], data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], data_fields=data_fields, ) return DataFrame(internal) def _reindex_columns( self, columns: Optional[Union[pd.Index, Sequence[Any]]], fill_value: Optional[Any] ) -> "DataFrame": level = self._internal.column_labels_level if level > 1: label_columns = list(columns) for col in label_columns: if not isinstance(col, tuple): raise TypeError("Expected tuple, got {}".format(type(col).__name__)) else: label_columns = [(col,) for col in columns] for col in label_columns: if len(col) != level: raise ValueError( "shape (1,{}) doesn't match the shape (1,{})".format(len(col), level) ) fill_value = np.nan if fill_value is None else fill_value scols_or_pssers: List[Union[Series, Column]] = [] labels = [] for label in label_columns: if label in self._internal.column_labels: scols_or_pssers.append(self._psser_for(label)) else: scols_or_pssers.append(SF.lit(fill_value).alias(name_like_string(label))) labels.append(label) if isinstance(columns, pd.Index): column_label_names = [ name if is_name_like_tuple(name) else (name,) for name in columns.names ] internal = self._internal.with_new_columns( scols_or_pssers, 
column_labels=labels, column_label_names=column_label_names ) else: internal = self._internal.with_new_columns(scols_or_pssers, column_labels=labels) return DataFrame(internal) def reindex_like(self, other: "DataFrame", copy: bool = True) -> "DataFrame": """ Return a DataFrame with matching indices as other object. Conform the object to the same index on all axes. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : DataFrame Its row and column indices are used to define the new indices of this object. copy : bool, default True Return a new object, even if the passed indexes are the same. Returns ------- DataFrame DataFrame with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = ps.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = ps.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... '2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN None 2014-02-15 35.1 NaN medium """ if isinstance(other, DataFrame): return self.reindex(index=other.index, columns=other.columns, copy=copy) else: raise TypeError("other must be a pandas-on-Spark DataFrame") def melt( self, id_vars: Optional[Union[Name, List[Name]]] = None, value_vars: Optional[Union[Name, List[Name]]] = None, var_name: Optional[Union[str, List[str]]] = None, value_name: str = "value", ) -> "DataFrame": """ Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar, default 'variable' Name to use for the 'variable' column. If None it uses `frame.columns.name` or ‘variable’. value_name : scalar, default 'value' Name to use for the 'value' column. Returns ------- DataFrame Unpivoted DataFrame. Examples -------- >>> df = ps.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}, ... 
columns=['A', 'B', 'C']) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> ps.melt(df) variable value 0 A a 1 B 1 2 C 2 3 A b 4 B 3 5 C 4 6 A c 7 B 5 8 C 6 >>> df.melt(id_vars='A') A variable value 0 a B 1 1 a C 2 2 b B 3 3 b C 4 4 c B 5 5 c C 6 >>> df.melt(value_vars='A') variable value 0 A a 1 A b 2 A c >>> ps.melt(df, id_vars=['A', 'B']) A B variable value 0 a 1 C 2 1 b 3 C 4 2 c 5 C 6 >>> df.melt(id_vars=['A'], value_vars=['C']) A variable value 0 a C 2 1 b C 4 2 c C 6 The names of 'variable' and 'value' columns can be customized: >>> ps.melt(df, id_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 """ column_labels = self._internal.column_labels if id_vars is None: id_vars = [] else: if isinstance(id_vars, tuple): if self._internal.column_labels_level == 1: id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars] else: raise ValueError( "id_vars must be a list of tuples" " when columns are a MultiIndex" ) elif is_name_like_value(id_vars): id_vars = [(id_vars,)] else: id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars] non_existence_col = [idv for idv in id_vars if idv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'id_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if value_vars is None: value_vars = [] else: if isinstance(value_vars, tuple): if self._internal.column_labels_level == 1: value_vars = [ valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars ] else: raise ValueError( "value_vars must be a list of tuples" " when columns are a MultiIndex" ) elif is_name_like_value(value_vars): value_vars = [(value_vars,)] else: value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars] non_existence_col = [valv for valv in value_vars if valv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'value_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if len(value_vars) == 0: value_vars = column_labels column_labels = [label for label in column_labels if label not in id_vars] sdf = self._internal.spark_frame if var_name is None: if ( self._internal.column_labels_level == 1 and self._internal.column_label_names[0] is None ): var_name = ["variable"] else: var_name = [ name_like_string(name) if name is not None else "variable_{}".format(i) for i, name in enumerate(self._internal.column_label_names) ] elif isinstance(var_name, str): var_name = [var_name] pairs = F.explode( F.array( *[ F.struct( *[SF.lit(c).alias(name) for c, name in zip(label, var_name)], *[self._internal.spark_column_for(label).alias(value_name)], ) for label in column_labels if label in value_vars ] ) ) columns = ( [ self._internal.spark_column_for(label).alias(name_like_string(label)) for label in id_vars ] + [F.col("pairs.`%s`" % name) for name in var_name] + [F.col("pairs.`%s`" % value_name)] ) exploded_df = sdf.withColumn("pairs", pairs).select(columns) return DataFrame( 
InternalFrame( spark_frame=exploded_df, index_spark_columns=None, column_labels=( [label if len(label) == 1 else (name_like_string(label),) for label in id_vars] + [(name,) for name in var_name] + [(value_name,)] ), ) ) def stack(self) -> DataFrameOrSeries: """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack().sort_index() cat height 1 weight 0 dog height 3 weight 2 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack().sort_index() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. 
Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN """ from pyspark.pandas.series import first_series if len(self._internal.column_labels) == 0: return DataFrame( self._internal.copy( column_label_names=self._internal.column_label_names[:-1] ).with_filter(SF.lit(False)) ) column_labels: Dict[Label, Dict[Any, Column]] = defaultdict(dict) index_values = set() should_returns_series = False for label in self._internal.column_labels: new_label = label[:-1] if len(new_label) == 0: new_label = None should_returns_series = True value = label[-1] scol = self._internal.spark_column_for(label) column_labels[new_label][value] = scol index_values.add(value) column_labels = dict(sorted(column_labels.items(), key=lambda x: x[0])) index_name = self._internal.column_label_names[-1] column_label_names = self._internal.column_label_names[:-1] if len(column_label_names) == 0: column_label_names = [None] index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level) data_columns = [name_like_string(label) for label in column_labels] structs = [ F.struct( *[SF.lit(value).alias(index_column)], *[ ( column_labels[label][value] if value in column_labels[label] else SF.lit(None) ).alias(name) for label, name in zip(column_labels, data_columns) ], ).alias(value) for value in index_values ] pairs = F.explode(F.array(*structs)) sdf = self._internal.spark_frame.withColumn("pairs", pairs) sdf = sdf.select( self._internal.index_spark_columns + [sdf["pairs"][index_column].alias(index_column)] + [sdf["pairs"][name].alias(name) for name in data_columns] ) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in (self._internal.index_spark_column_names + [index_column]) ], index_names=self._internal.index_names + [index_name], index_fields=self._internal.index_fields + [None], column_labels=list(column_labels), data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) psdf: DataFrame = DataFrame(internal) if should_returns_series: return first_series(psdf) else: return psdf def unstack(self) -> DataFrameOrSeries: """ Pivot the (necessarily hierarchical) index labels. Returns a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series. .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and it could cause a serious performance degradation since Spark partitions it row based. Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack). Examples -------- >>> df = ps.DataFrame({"A": {"0": "a", "1": "b", "2": "c"}, ... "B": {"0": "1", "1": "3", "2": "5"}, ... "C": {"0": "2", "1": "4", "2": "6"}}, ... columns=["A", "B", "C"]) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() A 0 a 1 b 2 c B 0 1 1 3 2 5 C 0 2 1 4 2 6 dtype: object >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')]) >>> df.unstack().sort_index() X A 0 a 1 b 2 c B 0 1 1 3 2 5 Y C 0 2 1 4 2 6 dtype: object For MultiIndex case: >>> df = ps.DataFrame({"A": ["a", "b", "c"], ... "B": [1, 3, 5], ... "C": [2, 4, 6]}, ... 
columns=["A", "B", "C"]) >>> df = df.set_index('A', append=True) >>> df # doctest: +NORMALIZE_WHITESPACE B C A 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE B C A a b c a b c 0 1.0 NaN NaN 2.0 NaN NaN 1 NaN 3.0 NaN NaN 4.0 NaN 2 NaN NaN 5.0 NaN NaN 6.0 """ from pyspark.pandas.series import first_series if self._internal.index_level > 1: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: self._internal.index_level - 1] columns = df.columns[self._internal.index_level - 1] df = df.pivot_table( index=index, columns=columns, values=self._internal.column_labels, aggfunc="first" ) internal = df._internal.copy( index_names=self._internal.index_names[:-1], index_fields=df._internal.index_fields[: self._internal.index_level - 1], column_label_names=( df._internal.column_label_names[:-1] + [ None if self._internal.index_names[-1] is None else df._internal.column_label_names[-1] ] ), ) return DataFrame(internal) # TODO: Codes here are similar with melt. Should we deduplicate? column_labels = self._internal.column_labels ser_name = SPARK_DEFAULT_SERIES_NAME sdf = self._internal.spark_frame new_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] new_index_map = list(zip_longest(new_index_columns, self._internal.column_label_names, [])) pairs = F.explode( F.array( *[ F.struct( *[SF.lit(c).alias(name) for c, name in zip(idx, new_index_columns)], *[self._internal.spark_column_for(idx).alias(ser_name)], ) for idx in column_labels ] ) ) columns = [ F.col("pairs.%s" % name) for name in new_index_columns[: self._internal.column_labels_level] ] + [F.col("pairs.%s" % ser_name)] new_index_len = len(new_index_columns) existing_index_columns = [] for i, (index_name, index_field) in enumerate( zip(self._internal.index_names, self._internal.index_fields) ): name = SPARK_INDEX_NAME_FORMAT(i + new_index_len) new_index_map.append((name, index_name, index_field.copy(name=name))) existing_index_columns.append(self._internal.index_spark_columns[i].alias(name)) exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns) index_spark_column_names, index_names, index_fields = zip(*new_index_map) return first_series( DataFrame( InternalFrame( exploded_df, index_spark_columns=[ scol_for(exploded_df, col) for col in index_spark_column_names ], index_names=list(index_names), index_fields=list(index_fields), column_labels=[None], ) ) ) # TODO: axis, level and **kwargs should be implemented. def all( self, axis: Axis = 0, bool_only: Optional[bool] = None, skipna: bool = True ) -> "Series": """ Return whether all elements are True. Returns True unless there is at least one element within a series that is False or equivalent (e.g. zero or empty) Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. bool_only : bool, default None Include only boolean columns. If None, will attempt to use everything, then use only boolean data. skipna : boolean, default True Exclude NA values, such as None or numpy.NaN. If an entire row/column is NA values and `skipna` is True, then the result will be True, as for an empty row/column. 
            If `skipna` is False, numpy.NaNs are treated as True because they are
            not equal to zero, while Nones are treated as False.

        Returns
        -------
        Series

        Examples
        --------
        Create a dataframe from a dictionary.

        >>> df = ps.DataFrame({
        ...    'col1': [True, True, True],
        ...    'col2': [True, False, False],
        ...    'col3': [0, 0, 0],
        ...    'col4': [1, 2, 3],
        ...    'col5': [True, True, None],
        ...    'col6': [True, False, None]},
        ...    columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])

        Default behaviour checks if column-wise values all return True.

        >>> df.all()
        col1     True
        col2    False
        col3    False
        col4     True
        col5     True
        col6    False
        dtype: bool

        Include NA values by setting `skipna=False`.

        >>> df[['col5', 'col6']].all(skipna=False)
        col5    False
        col6    False
        dtype: bool

        Include only boolean columns by setting `bool_only=True`.

        >>> df.all(bool_only=True)
        col1     True
        col2    False
        dtype: bool
        """
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError('axis should be either 0 or "index" currently.')

        column_labels = self._internal.column_labels
        if bool_only:
            column_labels = self._bool_column_labels(column_labels)
        if len(column_labels) == 0:
            return ps.Series([], dtype=bool)

        applied = []
        for label in column_labels:
            scol = self._internal.spark_column_for(label)
            if isinstance(self._internal.spark_type_for(label), NumericType) or skipna:
                # np.nan has no effect on the result; None has no effect when `skipna` is True.
                all_col = F.min(F.coalesce(scol.cast("boolean"), SF.lit(True)))
            else:
                # Treat None as False when `skipna` is False.
                all_col = F.min(
                    F.when(scol.isNull(), SF.lit(False)).otherwise(scol.cast("boolean"))
                )
            applied.append(F.when(all_col.isNull(), True).otherwise(all_col))

        return self._result_aggregated(column_labels, applied)

    # TODO: axis, skipna, level and **kwargs should be implemented.
    def any(self, axis: Axis = 0, bool_only: Optional[bool] = None) -> "Series":
        """
        Return whether any element is True.

        Returns False unless there is at least one element within a series that is
        True or equivalent (e.g. non-zero or non-empty).

        Parameters
        ----------
        axis : {0 or 'index'}, default 0
            Indicate which axis or axes should be reduced.

            * 0 / 'index' : reduce the index, return a Series whose index is the
              original column labels.

        bool_only : bool, default None
            Include only boolean columns. If None, will attempt to use everything,
            then use only boolean data.

        Returns
        -------
        Series

        Examples
        --------
        Create a dataframe from a dictionary.

        >>> df = ps.DataFrame({
        ...    'col1': [False, False, False],
        ...    'col2': [True, False, False],
        ...    'col3': [0, 0, 1],
        ...    'col4': [0, 1, 2],
        ...    'col5': [False, False, None],
        ...    'col6': [True, False, None]},
        ...    columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])

        Default behaviour checks whether any column-wise value is True.

        >>> df.any()
        col1    False
        col2     True
        col3     True
        col4     True
        col5    False
        col6     True
        dtype: bool

        Include only boolean columns by setting `bool_only=True`.
>>> df.any(bool_only=True) col1 False col2 True dtype: bool """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') column_labels = self._internal.column_labels if bool_only: column_labels = self._bool_column_labels(column_labels) if len(column_labels) == 0: return ps.Series([], dtype=bool) applied = [] for label in column_labels: scol = self._internal.spark_column_for(label) any_col = F.max(F.coalesce(scol.cast("boolean"), SF.lit(False))) applied.append(F.when(any_col.isNull(), False).otherwise(any_col)) return self._result_aggregated(column_labels, applied) def _bool_column_labels(self, column_labels: List[Label]) -> List[Label]: """ Filter column labels of boolean columns (without None). """ bool_column_labels = [] for label in column_labels: psser = self._psser_for(label) if is_bool_dtype(psser): # Rely on dtype rather than spark type because # columns that consist of bools and Nones should be excluded # if bool_only is True bool_column_labels.append(label) return bool_column_labels def _result_aggregated(self, column_labels: List[Label], scols: List[Column]) -> "Series": """ Given aggregated Spark columns and respective column labels from the original pandas-on-Spark DataFrame, construct the result Series. """ from pyspark.pandas.series import first_series cols = [] result_scol_name = "value" for label, applied_col in zip(column_labels, scols): cols.append( F.struct( *[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)], *[applied_col.alias(result_scol_name)], ) ) # Statements under this comment implement spark frame transformations as below: # From: # +-------------------------------------------------------------------------------------+ # |arrays | # +-------------------------------------------------------------------------------------+ # |[{col1, true}, {col2, true}, {col3, false}, {col4, true}]| # +-------------------------------------------------------------------------------------+ # To: # +-------------+ # |col | # +-------------+ # |{col1, true} | # |{col2, true} | # |{col3, false}| # |{col4, true} | # +-------------+ # To: # +-----------------+-----+ # |__index_level_0__|value| # +-----------------+-----+ # |col1 |true | # |col2 |true | # |col3 |false| # |col4 |true | # +-----------------+-----+ sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select( F.explode(F.col("arrays")) ) sdf = sdf.selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i)) for i in range(self._internal.column_labels_level) ], index_names=self._internal.column_label_names, column_labels=[None], data_spark_columns=[scol_for(sdf, result_scol_name)], ) # (cont.) The result Series should look as below: # col1 False # col2 True # col3 True # col4 True # dtype: bool return first_series(DataFrame(internal)) # TODO: add axis, pct, na_option parameter def rank( self, method: str = "average", ascending: bool = True, numeric_only: Optional[bool] = None ) -> "DataFrame": """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values. .. note:: the current implementation of rank uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. 
        Parameters
        ----------
        method : {'average', 'min', 'max', 'first', 'dense'}
            * average: average rank of group
            * min: lowest rank in group
            * max: highest rank in group
            * first: ranks assigned in the order they appear in the array
            * dense: like 'min', but rank always increases by 1 between groups
        ascending : boolean, default True
            False for ranks by high (1) to low (N)
        numeric_only : bool, optional
            For DataFrame objects, rank only numeric columns if set to True.

        Returns
        -------
        ranks : same type as caller

        Examples
        --------
        >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B'])
        >>> df
           A  B
        0  1  4
        1  2  3
        2  2  2
        3  3  1

        >>> df.rank().sort_index()
             A    B
        0  1.0  4.0
        1  2.5  3.0
        2  2.5  2.0
        3  4.0  1.0

        If method is set to 'min', it uses the lowest rank in the group.

        >>> df.rank(method='min').sort_index()
             A    B
        0  1.0  4.0
        1  2.0  3.0
        2  2.0  2.0
        3  4.0  1.0

        If method is set to 'max', it uses the highest rank in the group.

        >>> df.rank(method='max').sort_index()
             A    B
        0  1.0  4.0
        1  3.0  3.0
        2  3.0  2.0
        3  4.0  1.0

        If method is set to 'dense', it leaves no gaps in the ranking.

        >>> df.rank(method='dense').sort_index()
             A    B
        0  1.0  4.0
        1  2.0  3.0
        2  2.0  2.0
        3  3.0  1.0

        If numeric_only is set to True, only numeric columns are ranked.

        >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns=['A', 'B'])
        >>> df
           A  B
        0  1  a
        1  2  b
        2  2  d
        3  3  c

        >>> df.rank(numeric_only=True)
             A
        0  1.0
        1  2.5
        2  2.5
        3  4.0
        """
        if numeric_only:
            numeric_col_names = []
            for label in self._internal.column_labels:
                psser = self._psser_for(label)
                if isinstance(psser.spark.data_type, (NumericType, BooleanType)):
                    numeric_col_names.append(psser.name)

        psdf = self[numeric_col_names] if numeric_only else self
        return psdf._apply_series_op(
            lambda psser: psser._rank(method=method, ascending=ascending), should_resolve=True
        )

    def filter(
        self,
        items: Optional[Sequence[Any]] = None,
        like: Optional[str] = None,
        regex: Optional[str] = None,
        axis: Optional[Axis] = None,
    ) -> "DataFrame":
        """
        Subset rows or columns of dataframe according to labels in the specified index.

        Note that this routine does not filter a dataframe on its contents.
        The filter is applied to the labels of the index.

        Parameters
        ----------
        items : list-like
            Keep labels from axis which are in items.
        like : string
            Keep labels from axis for which "like in label == True".
        regex : string (regular expression)
            Keep labels from axis for which re.search(regex, label) == True.
        axis : int or string axis name
            The axis to filter on. By default this is the info axis,
            'index' for Series, 'columns' for DataFrame.

        Returns
        -------
        same type as input object

        See Also
        --------
        DataFrame.loc

        Notes
        -----
        The ``items``, ``like``, and ``regex`` parameters are
        enforced to be mutually exclusive.

        ``axis`` defaults to the info axis that is used when indexing with ``[]``.

        Examples
        --------
        >>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
        ...                   index=['mouse', 'rabbit'],
        ...
columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 For a Series, >>> # select rows by name >>> df.one.filter(items=['rabbit']) rabbit 4 Name: one, dtype: int64 >>> # select rows by regular expression >>> df.one.filter(regex='e$') mouse 1 Name: one, dtype: int64 >>> # select rows containing 'bbi' >>> df.one.filter(like='bbi') rabbit 4 Name: one, dtype: int64 """ if sum(x is not None for x in (items, like, regex)) > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) axis = validate_axis(axis, none_axis=1) index_scols = self._internal.index_spark_columns if items is not None: if is_list_like(items): items = list(items) else: raise ValueError("items should be a list-like object.") if axis == 0: if len(index_scols) == 1: if len(items) <= ps.get_option("compute.isin_limit"): col = index_scols[0].isin([SF.lit(item) for item in items]) return DataFrame(self._internal.with_filter(col)) else: item_sdf_col = verify_temp_column_name( self._internal.spark_frame, "__item__" ) item_sdf = default_session().createDataFrame( pd.DataFrame({item_sdf_col: items}) ) joined_sdf = self._internal.spark_frame.join( other=F.broadcast(item_sdf), on=(index_scols[0] == scol_for(item_sdf, item_sdf_col)), how="semi", ) return DataFrame(self._internal.with_new_sdf(joined_sdf)) else: # for multi-index col = None for item in items: if not isinstance(item, tuple): raise TypeError("Unsupported type {}".format(type(item).__name__)) if not item: raise ValueError("The item should not be empty.") midx_col = None for i, element in enumerate(item): if midx_col is None: midx_col = index_scols[i] == SF.lit(element) else: midx_col = midx_col & (index_scols[i] == SF.lit(element)) if col is None: col = midx_col else: col = col | midx_col return DataFrame(self._internal.with_filter(col)) else: return self[items] elif like is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.contains(like) else: col = col | index_scol.contains(like) return DataFrame(self._internal.with_filter(col)) else: column_labels = self._internal.column_labels output_labels = [label for label in column_labels if any(like in i for i in label)] return self[output_labels] elif regex is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.rlike(regex) else: col = col | index_scol.rlike(regex) return DataFrame(self._internal.with_filter(col)) else: column_labels = self._internal.column_labels matcher = re.compile(regex) output_labels = [ label for label in column_labels if any(matcher.search(i) is not None for i in label) ] return self[output_labels] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def rename( self, mapper: Optional[Union[Dict, Callable[[Any], Any]]] = None, index: Optional[Union[Dict, Callable[[Any], Any]]] = None, columns: Optional[Union[Dict, Callable[[Any], Any]]] = None, axis: Axis = "index", inplace: bool = False, level: Optional[int] = None, errors: str = "ignore", ) -> Optional["DataFrame"]: """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don’t throw an error. 
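        For example (an illustrative sketch; output normalized), a key that matches
        no existing label is silently ignored under the default ``errors='ignore'``:

        >>> ps.DataFrame({"A": [1, 2]}).rename(columns={"Z": "z"})  # doctest: +NORMALIZE_WHITESPACE
           A
        0  1
        1  2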
Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis’ values. Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index` and `columns`. index : dict-like or function Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper"). columns : dict-like or function Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper"). axis : int or str, default 'index' Axis to target with mapper. Can be either the axis name ('index', 'columns') or number (0, 1). inplace : bool, default False Whether to return a new DataFrame. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame with the renamed axis labels. Raises ------ `KeyError` If any of the labels is not found in the selected axis and "errors='raise'". Examples -------- >>> psdf1 = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> psdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE a c 0 1 4 1 2 5 2 3 6 >>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> def str_lower(s) -> str: ... return str.lower(s) >>> psdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE a b 0 1 4 1 2 5 2 3 6 >>> def mul10(x) -> int: ... return x * 10 >>> psdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]) >>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx) >>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE x y A B C D 0 1 2 3 4 1 5 6 7 8 >>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab')) >>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE a b x a 1 2 b 3 4 y c 5 6 d 7 8 """ def gen_mapper_fn( mapper: Union[Dict, Callable[[Any], Any]], skip_return_type: bool = False ) -> Tuple[Callable[[Any], Any], Dtype, DataType]: if isinstance(mapper, dict): mapper_dict = mapper type_set = set(map(lambda x: type(x), mapper_dict.values())) if len(type_set) > 1: raise ValueError("Mapper dict should have the same value type.") dtype, spark_return_type = pandas_on_spark_type(list(type_set)[0]) def mapper_fn(x: Any) -> Any: if x in mapper_dict: return mapper_dict[x] else: if errors == "raise": raise KeyError("Index include value which is not in the `mapper`") return x return mapper_fn, dtype, spark_return_type elif callable(mapper): mapper_callable = cast(Callable, mapper) def mapper_fn(x: Any) -> Any: return mapper_callable(x) if skip_return_type: return mapper_fn, None, None else: return_type = cast(ScalarType, infer_return_type(mapper)) dtype = return_type.dtype spark_return_type = return_type.spark_type return mapper_fn, dtype, spark_return_type else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." 
) index_mapper_fn = None index_mapper_ret_stype = None columns_mapper_fn = None inplace = validate_bool_kwarg(inplace, "inplace") if mapper: axis = validate_axis(axis) if axis == 0: index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn( mapper ) elif axis == 1: columns_mapper_fn, _, _ = gen_mapper_fn(mapper) else: if index: index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn( index ) if columns: columns_mapper_fn, _, _ = gen_mapper_fn(columns, skip_return_type=True) if not index and not columns: raise ValueError("Either `index` or `columns` should be provided.") psdf = self.copy() if index_mapper_fn: # rename index labels, if `level` is None, rename all index columns, otherwise only # rename the corresponding level index. # implement this by transform the underlying spark dataframe, # Example: # suppose the psdf index column in underlying spark dataframe is "index_0", "index_1", # if rename level 0 index labels, will do: # ``psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))`` # if rename all index labels (`level` is None), then will do: # ``` # psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0")) # .withColumn("index_1", mapper_fn_udf(col("index_1")) # ``` index_columns = psdf._internal.index_spark_column_names num_indices = len(index_columns) if level: if level < 0 or level >= num_indices: raise ValueError("level should be an integer between [0, num_indices)") @pandas_udf(returnType=index_mapper_ret_stype) # type: ignore[call-overload] def index_mapper_udf(s: pd.Series) -> pd.Series: return s.map(index_mapper_fn) index_spark_columns = psdf._internal.index_spark_columns.copy() index_fields = psdf._internal.index_fields.copy() if level is None: for i in range(num_indices): index_spark_columns[i] = index_mapper_udf(index_spark_columns[i]).alias( index_columns[i] ) index_fields[i] = index_fields[i].copy( dtype=index_mapper_ret_dtype, spark_type=index_mapper_ret_stype, nullable=True, ) else: index_spark_columns[level] = index_mapper_udf(index_spark_columns[level]).alias( index_columns[level] ) index_fields[level] = index_fields[level].copy( dtype=index_mapper_ret_dtype, spark_type=index_mapper_ret_stype, nullable=True, ) psdf = DataFrame( psdf._internal.copy( index_spark_columns=index_spark_columns, index_fields=index_fields ) ) if columns_mapper_fn: # rename column name. # Will modify the `_internal._column_labels` and transform underlying spark dataframe # to the same column name with `_internal._column_labels`. 
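        # Illustrative sketch of the relabeling below (comments only, not executed):
        # with MultiIndex columns [('X', 'A'), ('X', 'B')] and columns=str.lower,
        #   level=None -> [('x', 'a'), ('x', 'b')]  (the mapper is applied to every level)
        #   level=0    -> [('x', 'A'), ('x', 'B')]  (only the requested level is mapped)
        # The new labels are then attached by renaming each underlying Series and
        # rebuilding the internal frame with those renamed columns.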
if level: if level < 0 or level >= psdf._internal.column_labels_level: raise ValueError("level should be an integer between [0, column_labels_level)") def gen_new_column_labels_entry(column_labels_entry: Label) -> Label: if level is None: # rename all level columns return tuple(map(columns_mapper_fn, column_labels_entry)) else: # only rename specified level column entry_list = list(column_labels_entry) entry_list[level] = columns_mapper_fn(entry_list[level]) return tuple(entry_list) new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels)) new_data_pssers = [ psdf._psser_for(old_label).rename(new_label) for old_label, new_label in zip(psdf._internal.column_labels, new_column_labels) ] psdf = DataFrame(psdf._internal.with_new_columns(new_data_pssers)) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf def rename_axis( self, mapper: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None, index: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None, columns: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None, axis: Optional[Axis] = 0, inplace: Optional[bool] = False, ) -> Optional["DataFrame"]: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional A scalar, list-like, dict-like or functions transformations to apply to the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. inplace : bool, default False Modifies the object directly, instead of creating a new DataFrame. Returns ------- DataFrame, or None if `inplace` is True. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. The second calling convention will modify the names of the corresponding index specified by axis. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- >>> df = ps.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... index=["dog", "cat", "monkey"], ... columns=["num_legs", "num_arms"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal").sort_index() >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_arms animal cat 4 0 dog 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns").sort_index() >>> df # doctest: +NORMALIZE_WHITESPACE limbs num_legs num_arms animal cat 4 0 dog 4 0 monkey 2 2 **MultiIndex** >>> index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... names=['type', 'name']) >>> df = ps.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... index=index, ... 
columns=["num_legs", "num_arms"]) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE num_legs num_arms class name mammal cat 4 0 dog 4 0 monkey 2 2 >>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE num_legs num_arms TYPE NAME mammal cat 4 0 dog 4 0 monkey 2 2 """ def gen_names( v: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]], curnames: List[Name], ) -> List[Label]: newnames: List[Name] if is_scalar(v): newnames = [cast(Name, v)] elif is_list_like(v) and not is_dict_like(v): newnames = list(cast(Sequence[Name], v)) elif is_dict_like(v): v_dict = cast(Dict[Name, Name], v) newnames = [v_dict[name] if name in v_dict else name for name in curnames] elif callable(v): v_callable = cast(Callable[[Name], Name], v) newnames = [v_callable(name) for name in curnames] else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." ) if len(newnames) != len(curnames): raise ValueError( "Length of new names must be {}, got {}".format(len(curnames), len(newnames)) ) return [name if is_name_like_tuple(name) else (name,) for name in newnames] if mapper is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.") if mapper is not None: axis = validate_axis(axis) if axis == 0: index = mapper elif axis == 1: columns = mapper column_label_names = ( gen_names(columns, self.columns.names) if columns is not None else self._internal.column_label_names ) index_names = ( gen_names(index, self.index.names) if index is not None else self._internal.index_names ) internal = self._internal.copy( index_names=index_names, column_label_names=column_label_names ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def keys(self) -> pd.Index: """ Return alias for columns. Returns ------- Index Columns of the DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 >>> df.keys() Index(['max_speed', 'shield'], dtype='object') """ return self.columns def pct_change(self, periods: int = 1) -> "DataFrame": """ Percentage change between the current and a prior element. .. note:: the current implementation of this API uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. Returns ------- DataFrame Examples -------- Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = ps.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... 
index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 You can set periods to shift for forming percent change >>> df.pct_change(2) FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 NaN NaN NaN 1980-03-01 0.067912 0.073814 0.06883 """ window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods) def op(psser: ps.Series) -> Column: prev_row = F.lag(psser.spark.column, periods).over(window) return ((psser.spark.column - prev_row) / prev_row).alias( psser._internal.data_spark_column_names[0] ) return self._apply_series_op(op, should_resolve=True) # TODO: axis = 1 def idxmax(self, axis: Axis = 0) -> "Series": """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with maximum value using `to_pandas()` because we suppose the number of rows with max values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmax Examples -------- >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> psdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmax() a 2 b 0 c 2 dtype: int64 For Multi-column Index >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> psdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmax() a x 2 b y 0 c z 2 dtype: int64 """ max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns) sdf_max = self._internal.spark_frame.select(*max_cols).head() # `sdf_max` looks like below # +------+------+------+ # |(a, x)|(b, y)|(c, z)| # +------+------+------+ # | 3| 4.0| 400| # +------+------+------+ conds = ( scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max) ) cond = reduce(lambda x, y: x | y, conds) psdf: DataFrame = DataFrame(self._internal.with_filter(cond)) return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax())) # TODO: axis = 1 def idxmin(self, axis: Axis = 0) -> "Series": """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with minimum value using `to_pandas()` because we suppose the number of rows with min values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmin Examples -------- >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> psdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmin() a 0 b 3 c 1 dtype: int64 For Multi-column Index >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 
'c': [300, 200, 400, 200]}) >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> psdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmin() a x 0 b y 3 c z 1 dtype: int64 """ min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns) sdf_min = self._internal.spark_frame.select(*min_cols).head() conds = ( scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min) ) cond = reduce(lambda x, y: x | y, conds) psdf: DataFrame = DataFrame(self._internal.with_filter(cond)) return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin())) def info( self, verbose: Optional[bool] = None, buf: Optional[IO[str]] = None, max_cols: Optional[int] = None, null_counts: Optional[bool] = None, ) -> None: """ Print a concise summary of a DataFrame. This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. null_counts : bool, optional Whether to show the non-null counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = ps.DataFrame( ... {"int_col": int_values, "text_col": text_values, "float_col": float_values}, ... columns=['int_col', 'text_col', 'float_col']) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) # doctest: +SKIP <class 'pyspark.pandas.frame.DataFrame'> Index: 5 entries, 0 to 4 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_col 5 non-null int64 1 text_col 5 non-null object 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) # doctest: +SKIP <class 'pyspark.pandas.frame.DataFrame'> Index: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open('%s/info.txt' % path, "w", ... encoding="utf-8") as f: ... _ = f.write(s) >>> with open('%s/info.txt' % path) as f: ... f.readlines() # doctest: +SKIP ["<class 'pyspark.pandas.frame.DataFrame'>\\n", 'Index: 5 entries, 0 to 4\\n', 'Data columns (total 3 columns):\\n', ' # Column Non-Null Count Dtype \\n', '--- ------ -------------- ----- \\n', ' 0 int_col 5 non-null int64 \\n', ' 1 text_col 5 non-null object \\n', ' 2 float_col 5 non-null float64\\n', 'dtypes: float64(1), int64(1), object(1)'] """ # To avoid pandas' existing config affects pandas-on-Spark. # TODO: should we have corresponding pandas-on-Spark configs? 
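        # Descriptive note on the block below: pandas' own `DataFrame.info` is reused
        # as-is by temporarily making this object look like a pandas DataFrame:
        #   * `_data` is set so that pandas' internals can introspect the object, and
        #   * `count` is swapped for a wrapper that materializes the per-column
        #     non-null counts (a distributed Spark job) as a pandas object.
        # Both patches are undone in the `finally` block.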
with pd.option_context( "display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize ): try: # hack to use pandas' info as is. object.__setattr__(self, "_data", self) count_func = self.count self.count = ( # type: ignore[assignment] lambda: count_func()._to_pandas() # type: ignore[assignment, misc, union-attr] ) return pd.DataFrame.info( self, # type: ignore[arg-type] verbose=verbose, buf=buf, max_cols=max_cols, memory_usage=False, null_counts=null_counts, ) finally: del self._data self.count = count_func # type: ignore[assignment] # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas' def quantile( self, q: Union[float, Iterable[float]] = 0.5, axis: Axis = 0, numeric_only: bool = True, accuracy: int = 10000, ) -> DataFrameOrSeries: """ Return value at the given quantile. .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile based upon approximate percentile computation because computing quantile across a large dataset is extremely expensive. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. axis : int or str, default 0 or 'index' Can only be set to 0 at the moment. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. Can only be set to True at the moment. accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. Examples -------- >>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]}) >>> psdf a b 0 1 6 1 2 7 2 3 8 3 4 9 4 5 0 >>> psdf.quantile(.5) a 3.0 b 7.0 Name: 0.5, dtype: float64 >>> psdf.quantile([.25, .5, .75]) a b 0.25 2.0 6.0 0.50 3.0 7.0 0.75 4.0 8.0 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if not isinstance(accuracy, int): raise TypeError( "accuracy must be an integer; however, got [%s]" % type(accuracy).__name__ ) qq: Union[float, List[float]] = list(q) if isinstance(q, Iterable) else q for v in qq if isinstance(qq, list) else [qq]: if not isinstance(v, float): raise TypeError( "q must be a float or an array of floats; however, [%s] found." % type(v) ) if v < 0.0 or v > 1.0: raise ValueError("percentiles should all be in the interval [0, 1].") def quantile(psser: "Series") -> Column: spark_type = psser.spark.data_type spark_column = psser.spark.column if isinstance(spark_type, (BooleanType, NumericType)): return F.percentile_approx(spark_column.cast(DoubleType()), qq, accuracy) else: raise TypeError( "Could not convert {} ({}) to numeric".format( spark_type_to_pandas_dtype(spark_type), spark_type.simpleString() ) ) if isinstance(qq, list): # First calculate the percentiles from all columns and map it to each `quantiles` # by creating each entry as a struct. 
So, it becomes an array of structs as below: # # +-----------------------------------------+ # | arrays| # +-----------------------------------------+ # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]| # +-----------------------------------------+ percentile_cols: List[Column] = [] percentile_col_names: List[str] = [] column_labels: List[Label] = [] for label, column in zip( self._internal.column_labels, self._internal.data_spark_column_names ): psser = self._psser_for(label) is_numeric_or_boolean = isinstance( psser.spark.data_type, (NumericType, BooleanType) ) keep_column = not numeric_only or is_numeric_or_boolean if keep_column: percentile_col = quantile(psser) percentile_cols.append(percentile_col.alias(column)) percentile_col_names.append(column) column_labels.append(label) if len(percentile_cols) == 0: return DataFrame(index=qq) sdf = self._internal.spark_frame.select(percentile_cols) # Here, after select percentile cols, a spark_frame looks like below: # +---------+---------+ # | a| b| # +---------+---------+ # |[2, 3, 4]|[6, 7, 8]| # +---------+---------+ cols_dict: Dict[str, List[Column]] = {} for column in percentile_col_names: cols_dict[column] = list() for i in range(len(qq)): cols_dict[column].append(scol_for(sdf, column)[i].alias(column)) internal_index_column = SPARK_DEFAULT_INDEX_NAME cols = [] for i, col in enumerate(zip(*cols_dict.values())): cols.append(F.struct(SF.lit(qq[i]).alias(internal_index_column), *col)) sdf = sdf.select(F.array(*cols).alias("arrays")) # And then, explode it and manually set the index. # +-----------------+---+---+ # |__index_level_0__| a| b| # +-----------------+---+---+ # | 0.25| 2| 6| # | 0.5| 3| 7| # | 0.75| 4| 8| # +-----------------+---+---+ sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, internal_index_column)], column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names], ) return DataFrame(internal) else: return self._reduce_for_stat_function( quantile, name="quantile", numeric_only=numeric_only ).rename(qq) def query(self, expr: str, inplace: bool = False) -> Optional["DataFrame"]: """ Query the columns of a DataFrame with a boolean expression. .. note:: Internal columns that starting with a '__' prefix are able to access, however, they are not supposed to be accessed. .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the pandas specific syntax such as `@` is not supported. If you want the pandas syntax, you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should be aware that `query_func` will be executed at different nodes in a distributed manner. So, for example, to use `@` syntax, make sure the variable is serialized by, for example, putting it within the closure as below. >>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)}) >>> def query_func(pdf): ... num = 1995 ... return pdf.query('A > @num') >>> df.pandas_on_spark.apply_batch(query_func) A B 1996 1996 1996 1997 1997 1997 1998 1998 1998 1999 1999 1999 Parameters ---------- expr : str The query string to evaluate. You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. 
Returns ------- DataFrame DataFrame resulting from the provided query expression. Examples -------- >>> df = ps.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ if isinstance(self.columns, pd.MultiIndex): raise TypeError("Doesn't support for MultiIndex columns") if not isinstance(expr, str): raise TypeError( "expr must be a string to be evaluated, {} given".format(type(expr).__name__) ) inplace = validate_bool_kwarg(inplace, "inplace") data_columns = [label[0] for label in self._internal.column_labels] sdf = self._internal.spark_frame.select( self._internal.index_spark_columns + [ scol.alias(col) for scol, col in zip(self._internal.data_spark_columns, data_columns) ] ).filter(expr) internal = self._internal.with_new_sdf(sdf, data_columns=data_columns) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def take(self, indices: List[int], axis: Axis = 0, **kwargs: Any) -> "DataFrame": """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]).sort_index() name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. 
>>> df.take([-1, -2]).sort_index() name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ axis = validate_axis(axis) if not is_list_like(indices) or isinstance(indices, (dict, set)): raise TypeError("`indices` must be a list-like except dict or set") if axis == 0: return cast(DataFrame, self.iloc[indices, :]) else: return cast(DataFrame, self.iloc[:, indices]) def eval(self, expr: str, inplace: bool = False) -> Optional[DataFrameOrSeries]: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. Returns ------- The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Examples -------- >>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pyspark.pandas.series import first_series if isinstance(self.columns, pd.MultiIndex): raise TypeError("`eval` is not supported for multi-index columns") inplace = validate_bool_kwarg(inplace, "inplace") should_return_series = False series_name = None should_return_scalar = False # Since `eval_func` doesn't have a type hint, inferring the schema is always preformed # in the `apply_batch`. Hence, the variables `should_return_series`, `series_name`, # and `should_return_scalar` can be updated. def eval_func(pdf): # type: ignore[no-untyped-def] nonlocal should_return_series nonlocal series_name nonlocal should_return_scalar result_inner = pdf.eval(expr, inplace=inplace) if inplace: result_inner = pdf if isinstance(result_inner, pd.Series): should_return_series = True series_name = result_inner.name result_inner = result_inner.to_frame() elif is_scalar(result_inner): should_return_scalar = True result_inner = pd.Series(result_inner).to_frame() return result_inner result = self.pandas_on_spark.apply_batch(eval_func) if inplace: # Here, the result is always a frame because the error is thrown during schema inference # from pandas. self._update_internal_frame(result._internal, requires_same_anchor=False) return None elif should_return_series: return first_series(result).rename(series_name) elif should_return_scalar: return first_series(result)[0] else: # Returns a frame return result def explode(self, column: Name, ignore_index: bool = False) -> "DataFrame": """ Transform each element of a list-like to a row, replicating index values. Parameters ---------- column : str or tuple Column to explode. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. 
Returns ------- DataFrame Exploded lists to rows of the subset columns; index will be duplicated for these rows. See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) index labels. DataFrame.melt : Unpivot a DataFrame from wide format to long format. Examples -------- >>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1}) >>> df A B 0 [1, 2, 3] 1 1 [] 1 2 [3, 4] 1 >>> df.explode('A') A B 0 1.0 1 0 2.0 1 0 3.0 1 1 NaN 1 2 3.0 1 2 4.0 1 >>> df.explode('A', ignore_index=True) A B 0 1.0 1 1 2.0 1 2 3.0 1 3 NaN 1 4 3.0 1 5 4.0 1 """ from pyspark.pandas.series import Series if not is_name_like_value(column): raise TypeError("column must be a scalar") psdf: DataFrame = DataFrame(self._internal.resolved_copy) psser = psdf[column] if not isinstance(psser, Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." % name_like_string(column) ) if not isinstance(psser.spark.data_type, ArrayType): return self.copy() sdf = psdf._internal.spark_frame.withColumn( psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column) ) data_fields = psdf._internal.data_fields.copy() idx = psdf._internal.column_labels.index(psser._column_label) field = data_fields[idx] spark_type = cast(ArrayType, field.spark_type).elementType dtype = spark_type_to_pandas_dtype(spark_type) data_fields[idx] = field.copy(dtype=dtype, spark_type=spark_type, nullable=True) internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields) result_df: DataFrame = DataFrame(internal) return result_df.reset_index(drop=True) if ignore_index else result_df def mad(self, axis: Axis = 0) -> "Series": """ Return the mean absolute deviation of values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. Examples -------- >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) >>> df.mad() a 0.666667 b 0.066667 dtype: float64 >>> df.mad(axis=1) 0 0.45 1 0.90 2 1.35 3 NaN dtype: float64 """ from pyspark.pandas.series import first_series axis = validate_axis(axis) if axis == 0: def get_spark_column(psdf: DataFrame, label: Label) -> Column: scol = psdf._internal.spark_column_for(label) col_type = psdf._internal.spark_type_for(label) if isinstance(col_type, BooleanType): scol = scol.cast("integer") return scol new_column_labels: List[Label] = [] for label in self._internal.column_labels: # Filtering out only columns of numeric and boolean type column. dtype = self._psser_for(label).spark.data_type if isinstance(dtype, (NumericType, BooleanType)): new_column_labels.append(label) new_columns = [ F.avg(get_spark_column(self, label)).alias(name_like_string(label)) for label in new_column_labels ] mean_data = self._internal.spark_frame.select(*new_columns).first() new_columns = [ F.avg( F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)]) ).alias(name_like_string(label)) for label in new_column_labels ] sdf = self._internal.spark_frame.select( *[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *new_columns ) # The data is expected to be small so it's fine to transpose/use default index. 
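        # Descriptive note: the axis=0 branch computes MAD in two Spark passes --
        # a first select of `F.avg` yields one row of per-column means, and a second
        # select averages the absolute deviations from those means. The single
        # result row (keyed by a dummy NULL index) is transposed below into the
        # returned Series.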
with ps.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) else: @pandas_udf(returnType=DoubleType()) # type: ignore[call-overload] def calculate_columns_axis(*cols: pd.Series) -> pd.Series: return pd.concat(cols, axis=1).mad(axis=1) internal = self._internal.copy( column_labels=[None], data_spark_columns=[ calculate_columns_axis(*self._internal.data_spark_columns).alias( SPARK_DEFAULT_SERIES_NAME ) ], data_fields=[None], column_label_names=None, ) return first_series(DataFrame(internal)) def tail(self, n: int = 5) -> "DataFrame": """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `n` rows, equivalent to ``df[n:]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() # doctest: +SKIP animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) # doctest: +SKIP animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) # doctest: +SKIP animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if not isinstance(n, int): raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__)) if n < 0: n = len(self) + n if n <= 0: return ps.DataFrame(self._internal.with_filter(SF.lit(False))) # Should use `resolved_copy` here for the case like `(psdf + 1).tail()` sdf = self._internal.resolved_copy.spark_frame rows = sdf.tail(n) new_sdf = default_session().createDataFrame(rows, sdf.schema) return DataFrame(self._internal.with_new_sdf(new_sdf)) def align( self, other: DataFrameOrSeries, join: str = "outer", axis: Optional[Axis] = None, copy: bool = True, ) -> Tuple["DataFrame", DataFrameOrSeries]: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. Returns ------- (left, right) : (DataFrame, type of other) Aligned objects. 
Examples -------- >>> ps.set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30]) >>> df2 = ps.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12]) Align both axis: >>> aligned_l, aligned_r = df1.align(df2) >>> aligned_l.sort_index() a b c 10 1.0 a NaN 11 NaN None NaN 12 NaN None NaN 20 2.0 b NaN 30 3.0 c NaN >>> aligned_r.sort_index() a b c 10 4.0 NaN d 11 5.0 NaN e 12 6.0 NaN f 20 NaN NaN None 30 NaN NaN None Align only axis=0 (index): >>> aligned_l, aligned_r = df1.align(df2, axis=0) >>> aligned_l.sort_index() a b 10 1.0 a 11 NaN None 12 NaN None 20 2.0 b 30 3.0 c >>> aligned_r.sort_index() a c 10 4.0 d 11 5.0 e 12 6.0 f 20 NaN None 30 NaN None Align only axis=1 (column): >>> aligned_l, aligned_r = df1.align(df2, axis=1) >>> aligned_l.sort_index() a b c 10 1 a NaN 20 2 b NaN 30 3 c NaN >>> aligned_r.sort_index() a b c 10 4 NaN d 11 5 NaN e 12 6 NaN f Align with the join type "inner": >>> aligned_l, aligned_r = df1.align(df2, join="inner") >>> aligned_l.sort_index() a 10 1 >>> aligned_r.sort_index() a 10 4 Align with a Series: >>> s = ps.Series([7, 8, 9], index=[10, 11, 12]) >>> aligned_l, aligned_r = df1.align(s, axis=0) >>> aligned_l.sort_index() a b 10 1.0 a 11 NaN None 12 NaN None 20 2.0 b 30 3.0 c >>> aligned_r.sort_index() 10 7.0 11 8.0 12 9.0 20 NaN 30 NaN dtype: float64 >>> ps.reset_option("compute.ops_on_diff_frames") """ from pyspark.pandas.series import Series, first_series if not isinstance(other, (DataFrame, Series)): raise TypeError("unsupported type: {}".format(type(other).__name__)) how = validate_how(join) axis = validate_axis(axis, None) right_is_series = isinstance(other, Series) if right_is_series: if axis is None: raise ValueError("Must specify axis=0 or 1") elif axis != 0: raise NotImplementedError( "align currently only works for axis=0 when right is Series" ) left = self right = other if (axis is None or axis == 0) and not same_anchor(left, right): combined = combine_frames(left, right, how=how) left = combined["this"] right = combined["that"] if right_is_series: right = first_series(cast(DataFrame[Any], right)).rename(other.name) if ( axis is None or axis == 1 ) and left._internal.column_labels != right._internal.column_labels: if left._internal.column_labels_level != right._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") left = left.copy() right = right.copy() if how == "full": column_labels = sorted( list(set(left._internal.column_labels) | set(right._internal.column_labels)) ) elif how == "inner": column_labels = sorted( list(set(left._internal.column_labels) & set(right._internal.column_labels)) ) elif how == "left": column_labels = left._internal.column_labels else: column_labels = right._internal.column_labels for label in column_labels: if label not in left._internal.column_labels: left[label] = SF.lit(None).cast(DoubleType()) left = left[column_labels] for label in column_labels: if label not in right._internal.column_labels: right[label] = SF.lit(None).cast(DoubleType()) right = right[column_labels] return (left.copy(), right.copy()) if copy else (left, right) @staticmethod def from_dict( data: Dict[Name, Sequence[Any]], orient: str = "columns", dtype: Union[str, Dtype] = None, columns: Optional[List[Name]] = None, ) -> "DataFrame": """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. 
Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]} >>> ps.DataFrame.from_dict(data) col_1 col_2 0 3 10 1 2 20 2 1 30 3 0 40 Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]} >>> ps.DataFrame.from_dict(data, orient='index').sort_index() 0 1 2 3 row_1 3 2 1 0 row_2 10 20 30 40 When using the 'index' orientation, the column names can be specified manually: >>> ps.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']).sort_index() A B C D row_1 3 2 1 0 row_2 10 20 30 40 """ return DataFrame( pd.DataFrame.from_dict( data, orient=orient, dtype=dtype, columns=columns # type: ignore[arg-type] ) ) # Override the `groupby` to specify the actual return type annotation. def groupby( self, by: Union[Name, "Series", List[Union[Name, "Series"]]], axis: Axis = 0, as_index: bool = True, dropna: bool = True, ) -> "DataFrameGroupBy": return cast( "DataFrameGroupBy", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna) ) groupby.__doc__ = Frame.groupby.__doc__ def _build_groupby( self, by: List[Union["Series", Label]], as_index: bool, dropna: bool ) -> "DataFrameGroupBy": from pyspark.pandas.groupby import DataFrameGroupBy return DataFrameGroupBy._build(self, by, as_index=as_index, dropna=dropna) def resample( self, rule: str, closed: Optional[str] = None, label: Optional[str] = None, on: Optional["Series"] = None, ) -> "DataFrameResampler": """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (only support `DatetimeIndex` for now), or the caller must pass the label of a datetime-like series/index to the ``on`` keyword parameter. .. versionadded:: 3.4.0 Parameters ---------- rule : str The offset string or object representing target conversion. Currently, supported units are {'Y', 'A', 'M', 'D', 'H', 'T', 'MIN', 'S'}. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'A', 'Y' and 'M' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'A', 'Y' and 'M' which all have a default of 'right'. on : Series, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. Returns ------- DataFrameResampler See Also -------- Series.resample : Resample a Series. groupby : Group by mapping, function, label, or list of labels. 
""" from pyspark.pandas.indexes import DatetimeIndex from pyspark.pandas.resample import DataFrameResampler if on is None and not isinstance(self.index, DatetimeIndex): raise NotImplementedError("resample currently works only for DatetimeIndex") if on is not None and not isinstance(as_spark_type(on.dtype), TimestampType): raise NotImplementedError("resample currently works only for TimestampType") agg_columns: List[ps.Series] = [] for column_label in self._internal.column_labels: if isinstance(self._internal.spark_type_for(column_label), (NumericType, BooleanType)): agg_columns.append(self._psser_for(column_label)) if len(agg_columns) == 0: raise ValueError("No available aggregation columns!") return DataFrameResampler( psdf=self, resamplekey=on, rule=rule, closed=closed, label=label, agg_columns=agg_columns, ) def _to_internal_pandas(self) -> pd.DataFrame: """ Return a pandas DataFrame directly from _internal to avoid overhead of copy. This method is for internal use only. """ return self._internal.to_pandas_frame def _get_or_create_repr_pandas_cache(self, n: int) -> Union[pd.DataFrame, pd.Series]: if not hasattr(self, "_repr_pandas_cache") or n not in self._repr_pandas_cache: object.__setattr__( self, "_repr_pandas_cache", {n: self.head(n + 1)._to_internal_pandas()} ) return self._repr_pandas_cache[n] def __repr__(self) -> str: max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_string() pdf = cast("DataFrame", self._get_or_create_repr_pandas_cache(max_display_count)) pdf_length = len(pdf) pdf = cast("DataFrame", pdf.iloc[:max_display_count]) if pdf_length > max_display_count: repr_string = pdf.to_string(show_dimensions=True) match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format( nrows=nrows, ncols=ncols ) return REPR_PATTERN.sub(footer, repr_string) return pdf.to_string() def _repr_html_(self) -> str: max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_html(notebook=True) pdf = self._get_or_create_repr_pandas_cache(max_display_count) pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_html = pdf.to_html(show_dimensions=True, notebook=True) match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ( "\n<p>Showing only the first {rows} rows " "{by} {cols} columns</p>\n</div>".format(rows=nrows, by=by, cols=ncols) ) return REPR_HTML_PATTERN.sub(footer, repr_html) return pdf.to_html(notebook=True) def __getitem__(self, key: Any) -> Any: from pyspark.pandas.series import Series if key is None: raise KeyError("none key") elif isinstance(key, Series): return self.loc[key.astype(bool)] elif isinstance(key, slice): if any(type(n) == int or None for n in [key.start, key.stop]): # Seems like pandas Frame always uses int as positional search when slicing # with ints. 
return self.iloc[key] return self.loc[key] elif is_name_like_value(key): return self.loc[:, key] elif is_list_like(key): return self.loc[:, list(key)] raise NotImplementedError(key) def __setitem__(self, key: Any, value: Any) -> None: from pyspark.pandas.series import Series if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self): # Different Series or DataFrames level = self._internal.column_labels_level key = DataFrame._index_normalized_label(level, key) value = DataFrame._index_normalized_frame(level, value) def assign_columns( psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label] ) -> Iterator[Tuple["Series", Label]]: assert len(key) == len(that_column_labels) # Note that here intentionally uses `zip_longest` that combine # that_columns. for k, this_label, that_label in zip_longest( key, this_column_labels, that_column_labels ): yield (psdf._psser_for(that_label), tuple(["that", *k])) if this_label is not None and this_label[1:] != k: yield (psdf._psser_for(this_label), this_label) psdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left") elif isinstance(value, list): if len(self) != len(value): raise ValueError("Length of values does not match length of index") # TODO: avoid using default index? with option_context( "compute.default_index_type", "distributed-sequence", "compute.ops_on_diff_frames", True, ): psdf = self.reset_index() psdf[key] = ps.DataFrame(value) psdf = psdf.set_index(psdf.columns[: self._internal.index_level]) psdf.index.names = self.index.names elif isinstance(key, list): assert isinstance(value, DataFrame) # Same DataFrames. field_names = value.columns psdf = self._assign({k: value[c] for k, c in zip(key, field_names)}) else: # Same Series. psdf = self._assign({key: value}) self._update_internal_frame(psdf._internal) @staticmethod def _index_normalized_label(level: int, labels: Union[Name, Sequence[Name]]) -> List[Label]: """ Returns a label that is normalized against the current column index level. For example, the key "abc" can be ("abc", "", "") if the current Frame has a multi-index for its column """ if is_name_like_tuple(labels): labels = [labels] elif is_name_like_value(labels): labels = [(labels,)] else: labels = [k if is_name_like_tuple(k) else (k,) for k in labels] if any(len(label) > level for label in labels): raise KeyError( "Key length ({}) exceeds index depth ({})".format( max(len(label) for label in labels), level ) ) return [tuple(list(label) + ([""] * (level - len(label)))) for label in labels] @staticmethod def _index_normalized_frame(level: int, psser_or_psdf: DataFrameOrSeries) -> "DataFrame": """ Returns a frame that is normalized against the current column index level. 
For example, the name in `pd.Series([...], name="abc")` can be can be ("abc", "", "") if the current DataFrame has a multi-index for its column """ from pyspark.pandas.series import Series if isinstance(psser_or_psdf, Series): psdf = psser_or_psdf.to_frame() else: assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf) psdf = psser_or_psdf.copy() psdf.columns = pd.MultiIndex.from_tuples( [ tuple([name_like_string(label)] + ([""] * (level - 1))) for label in psdf._internal.column_labels ], ) return psdf def __getattr__(self, key: str) -> Any: if key.startswith("__"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) else: return partial(property_or_func, self) try: return self.loc[:, key] except KeyError: raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, key) ) def __setattr__(self, key: str, value: Any) -> None: try: object.__getattribute__(self, key) return object.__setattr__(self, key, value) except AttributeError: pass if (key,) in self._internal.column_labels: self[key] = value else: msg = "pandas-on-Spark doesn't allow columns to be created via a new attribute name" if is_testing(): raise AssertionError(msg) else: warnings.warn(msg, UserWarning) def __len__(self) -> int: return self._internal.resolved_copy.spark_frame.count() def __dir__(self) -> Iterable[str]: fields = [ f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if " " not in f ] return list(super().__dir__()) + fields def __iter__(self) -> Iterator[Name]: return iter(self.columns) # NDArray Compat def __array_ufunc__( self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any ) -> "DataFrame": # TODO: is it possible to deduplicate it with '_map_series_op'? if all(isinstance(inp, DataFrame) for inp in inputs) and any( not same_anchor(inp, inputs[0]) for inp in inputs ): # binary only assert len(inputs) == 2 this = inputs[0] that = inputs[1] if this._internal.column_labels_level != that._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") # Different DataFrames def apply_op( psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label] ) -> Iterator[Tuple["Series", Label]]: for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( ufunc( psdf._psser_for(this_label), psdf._psser_for(that_label), **kwargs ).rename(this_label), this_label, ) return align_diff_frames(apply_op, this, that, fillna=True, how="full") else: # DataFrame and Series applied = [] this = inputs[0] assert all(inp is this for inp in inputs if isinstance(inp, DataFrame)) for label in this._internal.column_labels: arguments = [] for inp in inputs: arguments.append(inp[label] if isinstance(inp, DataFrame) else inp) # both binary and unary. applied.append(ufunc(*arguments, **kwargs).rename(label)) internal = this._internal.with_new_columns(applied) return DataFrame(internal) def __class_getitem__(cls, params: Any) -> object: # This is a workaround to support variadic generic in DataFrame in Python 3.7. # See https://github.com/python/typing/issues/193 # we always wraps the given type hints by a tuple to mimic the variadic generic. return create_tuple_for_frame_type(params) def _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any: """ Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions. 
""" assert isinstance(sdf, SparkDataFrame) sdf0 = sdf.agg(*aggs) lst = sdf0.limit(2).toPandas() assert len(lst) == 1, (sdf, lst) row = lst.iloc[0] lst2 = list(row) assert len(lst2) == len(aggs), (row, lst2) return lst2 class CachedDataFrame(DataFrame): """ Cached pandas-on-Spark DataFrame, which corresponds to pandas DataFrame logically, but internally it caches the corresponding Spark DataFrame. """ def __init__(self, internal: InternalFrame, storage_level: Optional[StorageLevel] = None): if storage_level is None: object.__setattr__(self, "_cached", internal.spark_frame.cache()) elif isinstance(storage_level, StorageLevel): object.__setattr__(self, "_cached", internal.spark_frame.persist(storage_level)) else: raise TypeError( "Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`" ) super().__init__(internal) def __enter__(self) -> "CachedDataFrame": return self def __exit__( self, exception_type: Optional[Type[BaseException]], exception_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> Optional[bool]: self.spark.unpersist() return None # create accessor for Spark related methods. spark = CachedAccessor("spark", CachedSparkFrameMethods) def _test() -> None: import os import doctest import shutil import sys import tempfile import uuid from pyspark.sql import SparkSession import pyspark.pandas.frame os.chdir(os.environ["SPARK_HOME"]) globs = pyspark.pandas.frame.__dict__.copy() globs["ps"] = pyspark.pandas spark = ( SparkSession.builder.master("local[4]").appName("pyspark.pandas.frame tests").getOrCreate() ) db_name = "db%s" % str(uuid.uuid4()).replace("-", "") spark.sql("CREATE DATABASE %s" % db_name) globs["db"] = db_name path = tempfile.mkdtemp() globs["path"] = path (failure_count, test_count) = doctest.testmod( pyspark.pandas.frame, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE, ) shutil.rmtree(path, ignore_errors=True) spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name) spark.stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
the-stack_106_30946
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Apr 21 23:47:11 2021 @author: bruzewskis """ from dataclasses import dataclass, field from astropy.coordinates import SkyCoord import astropy.units as u from typing import Union @dataclass class Resource: ''' This class theoretically describes a VLA resource. ''' name: str library: str = field(init=False) def __post_init__(self): default_resources = {'Lx32'} if self.name in default_resources: self.library = 'NRAO_Default' else: self.library = 'MyResources' @dataclass class Source: ''' This class theoretically describes a source to be pointed at ''' name: str coord: SkyCoord resource: Resource dur: u.Quantity = 30*u.s def __str__(self): return self.pretty() def __repr__(self): pretty_pos = self.coord.to_string() return f'<Scan: {self.name}, Pos: ({pretty_pos})>' def pretty(self, depth=1, indent=0): pretty_time = self.dur.to_string() return '\t'*indent + f'- {self.name} ({pretty_time})\n' def separation(self, other): return self.coord.separation(other.coord).deg @dataclass class Loop: ''' This class theoretically describes a loop which contains sources ''' name: str repeat: int scans: list[Source] def __str__(self): return self.pretty() def __repr__(self): pretty_scans = [ s.name for s in self.scans ] return f'<Loop: {self.name}, Scans: {pretty_scans}>' def pretty(self, depth=1, indent=0): out = '\t'*indent + f'o {self.name} [x{self.repeat}]\n' for item in self.scans: if depth>0: out += item.pretty(depth-1, indent+1) return out @dataclass class Block: ''' This class theoretically describes a block which contains loops or sources ''' name: str start_time: str scans: list[Union[Source, Loop]] def __str__(self): return self.pretty() def __repr__(self): pretty_scans = [ s.name for s in self.scans ] return f'<Block: {self.name}, Scans: {pretty_scans}>' def pretty(self, depth : int = 1, indent : int = 0) -> str: out = '\t'*indent + f'> {self.name}\n' for item in self.scans: if depth>0: out += item.pretty(depth-1, indent+1) return out @classmethod def from_file(cls, filename : str, stype : str = 'fits'): ''' This method should read a block from a file. It should be flexible enough to handle a csv or fits table with the right columns (Name,RA,DEC,Intent,etc...) or just a file right from the OPT. 
Can either have the user entry fits/csv/opt or we can guess it ''' return cls() @dataclass class Project: ''' This class theoretically describes a project which contains blocks ''' name: str = 'Default' blocks: list[Block] = field(default_factory=list) def __str__(self): return self.pretty() def __repr__(self): pretty_blocks = [ b.name for b in self.blocks ] return f'<Project: {self.name}, Blocks: {pretty_blocks}>' def pretty(self, depth=1, indent=0): out = '\t'*indent + self.name + '\n' for item in self.blocks: if depth>0: out += item.pretty(depth-1, indent+1) return out @classmethod def from_xml(cls, filename : str): ''' Not implemented yet, will eventually return a full constructed project which one can then edit as they like ''' return cls() def write(self, filename : str, style : str = 'xml', clobber : bool = False) -> bool: ''' Not implemented yet, will eventually write out the file either as XML or as all the relevant text files one would need ''' return True def simulate(self) -> float: ''' Simple implementation of timing, assuming no slew time, just adds up the time ''' time = 0 for block in self.blocks: for scan in block.scans: if isinstance(scan, Loop): loop_time = 0 for sub_scan in scan.scans: loop_time += sub_scan.dur time += scan.repeat * loop_time else: time += scan.dur return time def make_test_project(): Lx32 = Resource('Lx32') s1 = Source('Source1', SkyCoord('01h01m01s','01d01\'01"'), Lx32, 5*u.min) s2 = Source('Source2', SkyCoord('02h02m02s','02d02\'02"'), Lx32) s3 = Source('Source3', SkyCoord('03h03m03s','03d03\'03"'), Lx32) l1 = Loop('Loop1', 2, [s2,s3]) b1 = Block('Block1', '00:00:00.00', [s1,l1]) p1 = Project('Project1', [b1,b1]) print(p1.pretty(3)) print(p1) print(p1.simulate()) return p1 if __name__=='__main__': test_project = make_test_project()
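

# --- Illustrative usage sketch (not part of the original script) ---
# A minimal, hedged example of composing the dataclasses above into a
# one-block project and checking the angular separation between two Source
# scans. It reuses only names defined in this file plus astropy (already
# imported above); the coordinates and names are made up for illustration.
def _example_separation_check():
    res = Resource('Lx32')
    a = Source('CalA', SkyCoord('10h00m00s', '+10d00m00s'), res)
    b = Source('CalB', SkyCoord('10h04m00s', '+11d00m00s'), res)
    blk = Block('ExampleBlock', '00:00:00.00', [a, Loop('CalLoop', 3, [b])])
    proj = Project('ExampleProject', [blk])
    print(proj.pretty(3))
    print(f'{a.name} -> {b.name}: {a.separation(b):.3f} deg')
    return proj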
the-stack_106_30947
# coding: utf-8 from __future__ import division import unicodedata, math, re, sys, string, os, ntpath, numpy as np from time import gmtime, strftime from io import open, StringIO from imp import reload from difflib import SequenceMatcher try: from itertools import izip except ImportError: izip = zip WORD = re.compile(r'\w+') def getCP(ali, w = 6): l = len(ali) if l == 0: l = 1 result = 0.0 for ali_i in ali: s = sum(ali_i) pen = 1/ (1 + (abs(1 - s))**w) result += math.log(pen) return result / l def getEnt(ali): l = len(ali) if l == 0: l = 1 res = 0.0 for pd in ali: norm = sum(pd) if norm > 0: normPd = [p / norm for p in pd] entr = -sum([(p * math.log(p) if p else 0) for p in normPd]) res -= entr else: res = 0 return res / l def getRevEnt(ali, w = 0.1): return getEnt(list(zip(*ali))) def printHelp(): print ('process_alignments.py -i <input_file> [-o <output_type>] [-f <from_system>] [-s <source_sentence_file>] [-t <target_sentence_file>]') print ('input_file is the file with alignment weights (required)') print ('source_sentence_file and target_sentence_file are required only for NeuralMonkey') print ('output_type can be web (default), block, block2 or color') print ('from_system can be Nematus, Marian, Sockeye, OpenNMT or NeuralMonkey (default)') def printColor(value): colors = [ ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ] num = int(math.floor((value-0.01)*25)) if num<0: num = 0 sys.stdout.write(colors[num]) def printBlock2(value): blocks2 = ['██', '▉▉', '▊▊', '▋▋', '▌▌', '▍▍', '▎▎', '▏▏', ' ',] num = int(math.floor((value-0.01)*8)) if num<0: num = 0 sys.stdout.write(blocks2[num]) def printBlock(value): blocks = ['██', '▓▓', '▒▒', '░░', ' ',] num = int(math.floor((value-0.01)*4)) if num<0: num = 0 sys.stdout.write(blocks[num]) def readSnts(filename): with open(filename, 'r', encoding='utf-8') as fh: return [escape(line).strip().split() for line in fh] def deBPE(srcs, tgts, ali, sources, targets): slen = len(sources) for i in range(slen): if i > len(sources)-1: break; while len(sources[i]) > 2 and sources[i][-2:] == "@@": sources[i] = sources[i].replace("@@","") + sources[i+1] del sources[i+1] slen = len(sources) #Now sum the alignments newLength = ali.shape[1]-1 result = np.zeros((ali.shape[0],newLength)) for x in range(newLength): if x == i: result[:,x] = np.sum(ali[:,x:x+2],axis=1) ali = np.delete(ali, x+1, 1) else: result[:,x] = ali[:,x] ali = result srcs[-1] = sources tlen = len(targets) for i in range(tlen): if i > len(targets)-1: break; n = 0 while len(targets[i]) > 2 and targets[i][-2:] == "@@": n+=1 targets[i] = targets[i].replace("@@","") + targets[i+1] del targets[i+1] tlen = len(targets) if n>0: #Now average the alignments newLength = ali.shape[0]-n result = np.zeros((newLength, ali.shape[1])) for x in range(newLength): if x == i: result[x,:] = np.average(ali[x:x+n+1,:],axis=0) for c in range(x+n, x, -1): ali = np.delete(ali, c, 0) else: result[x,:] = ali[x,:] ali = result tgts[-1] = targets return srcs, tgts, ali def readNematus(filename, from_system = "Nematus", de_bpe = False): with open(filename, 'r', encoding='utf-8') as fh: alis = [] tgts = [] srcs = [] wasNew = True aliTXT = '' for line in fh: # Reads the first line that contains a translation and it's source sentence if wasNew: if len(aliTXT) > 0: c = StringIO(aliTXT) ali = np.loadtxt(c) # Now we probably have source and target tokens + attentions if de_bpe == True: # In case we want to combine subword units and the respective 
attentions (by summing columns and averaging rows) sources = escape(lineparts[3]).strip().split() targets = escape(lineparts[1]).strip().split() (srcs, tgts, ali) = deBPE(srcs, tgts, ali, sources, targets) if from_system == "Nematus" or from_system == "OpenNMT" or from_system == "Marian-Dev": ali = ali.transpose() alis.append(ali) aliTXT = '' lineparts = line.split(' ||| ') if from_system == "Nematus": lineparts[1] += ' <EOS>' lineparts[3] += ' <EOS>' tgts.append(escape(lineparts[1]).strip().split()) srcs.append(escape(lineparts[3]).strip().split()) wasNew = False continue # Reads the attention matrix into "aliTXT" if line != '\n' and line != '\r\n': aliTXT += line else: wasNew = True if len(aliTXT) > 0: c = StringIO(aliTXT) ali = np.loadtxt(c) if de_bpe == True: # In case we want to combine subword units and the respective attentions (by summing columns and averaging rows) sources = escape(lineparts[3]).strip().split() targets = escape(lineparts[1]).strip().split() (srcs, tgts, ali) = deBPE(srcs, tgts, ali, sources, targets) if from_system == "Nematus" or from_system == "Sockeye" or from_system == "Marian-Dev": ali = ali.transpose() alis.append(ali) aliTXT = '' return srcs, tgts, alis def escape(string): return string.replace('"','&quot;').replace("'","&apos;") def readAmu(in_file, src_file): with open(src_file, 'r', encoding='utf-8') as fi: with open(in_file, 'r', encoding='utf-8') as fh: alis = [] tgts = [] srcs = [] aliTXT = '' for src_line, out_line in izip(fi, fh): lineparts = out_line.split(' ||| ') src_line = src_line.strip() + ' <EOS>' tgts.append(escape(lineparts[0]).strip().split()) srcs.append(escape(src_line).split()) #alignment weights weightparts = lineparts[1].split(' ') for weightpart in weightparts: aliTXT += weightpart.replace(',',' ') + '\n' if len(aliTXT) > 0: c = StringIO(aliTXT) ali = np.loadtxt(c) ali = ali.transpose() alis.append(ali) aliTXT = '' return srcs, tgts, alis def compare(srcs1, srcs2): for i in range(0, len(srcs1)): if srcs1[i][len(srcs1[i])-1] != '<EOS>': srcs1[i].append('<EOS>') if srcs2[i][len(srcs2[i])-1] != '<EOS>': srcs2[i].append('<EOS>') return srcs1==srcs2 def synchData(data1,data2): addEOS1 = False addEOS2 = False for i in range(0, len(data1)): diff1 = len(data1[i][1]) - len(data2[i][1]) diff2 = len(data2[i][1]) - len(data1[i][1]) if(diff1 > 0): for j in range(0, diff1): data2[i][1].append(u'') if(diff2 > 0): for j in range(0, diff2): data1[i][1].append(u'') return data1, data2 def longestCommonSubstring(s1, s2): m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))] longest, x_longest = 0, 0 for x in range(1, 1 + len(s1)): for y in range(1, 1 + len(s2)): if s1[x - 1] == s2[y - 1]: m[x][y] = m[x - 1][y - 1] + 1 if m[x][y] > longest: longest = m[x][y] x_longest = x else: m[x][y] = 0 return s1[x_longest - longest: x_longest] def processAlignments(data, folder, inputfile, outputType, num, refs=False): with open(folder + "/" + ntpath.basename(inputfile) + '.ali.js', 'w', encoding='utf-8') as out_a_js: with open(folder + "/" + ntpath.basename(inputfile) + '.src.js', 'w', encoding='utf-8') as out_s_js: with open(folder + "/" + ntpath.basename(inputfile) + '.trg.js', 'w', encoding='utf-8') as out_t_js: with open(folder + "/" + ntpath.basename(inputfile) + '.con.js', 'w', encoding='utf-8') as out_c_js: with open(folder + "/" + ntpath.basename(inputfile) + '.sc.js', 'w', encoding='utf-8') as out_sc_js: out_a_js.write(u'var alignments = [\n') out_s_js.write(u'var sources = [\n') out_t_js.write(u'var targets = [\n') out_c_js.write(u'var confidences = [\n') 
out_sc_js.write(u'var sentence_confidences = [\n') num = int(num) - 1 if num > -1 and (num < len(data)): data = [data[num]] elif num >= len(data): print ('The selected sentence number is higher than the sentence count!\n') printHelp() sys.exit() for i in range(0, len(data)): (src, tgt, rawAli) = data[i] #In case the source string is empty if rawAli.ndim == 1: rawAli = np.array([rawAli]) ali = [l[:len(list(filter(None, tgt)))] for l in rawAli[:len(src)]] srcTotal = [] trgTotal = [] tali = np.array(ali).transpose() for a in range(0, len(ali)): srcTotal.append(str(math.pow(math.e, -0.05 * math.pow((getCP([ali[a]]) + getEnt([ali[a]]) + getRevEnt([ali[a]])), 2)))) for a in range(0, len(tali)): trgTotal.append(str(math.pow(math.e, -0.05 * math.pow((getCP([tali[a]]) + getEnt([tali[a]]) + getRevEnt([tali[a]])), 2)))) JoinedSource = " ".join(src) JoinedTarget = " ".join(tgt) StrippedSource = ''.join(c for c in JoinedSource if unicodedata.category(c).startswith('L')).replace('EOS','').replace('quot','').replace('apos','') StrippedTarget = ''.join(c for c in JoinedTarget if unicodedata.category(c).startswith('L')).replace('EOS','').replace('quot','').replace('apos','') #Get the confidence metrics CDP = round(getCP(ali), 10) APout = round(getEnt(ali), 10) APin = round(getRevEnt(ali), 10) Total = round(CDP + APout + APin, 10) #Can we calculate BLEU? bleuNumber = -1 if(refs): try: #NLTK requires Python versions 3.5, 3.6, 3.7, or 3.8 version = sys.version_info if version.major == 3 and version.minor > 4: from nltk.translate import bleu from nltk.translate.bleu_score import SmoothingFunction sm = SmoothingFunction() refNumber = i if num < 0 else num deBpeRef = " ".join(refs[refNumber]).replace('@@ ','') deBpeHyp = JoinedTarget.replace('@@ ','').replace('<EOS>','').strip() bleuNumber = round(bleu([deBpeRef.split()], deBpeHyp.split(), smoothing_function=sm.method3)*100, 2) bleuScore = u', ' + repr(bleuNumber) else: refs = False bleuScore = u'' except ImportError: sys.stdout.write('NLTK not found! 
BLEU will not be calculated\n') refs = False bleuScore = u'' else: bleuScore = u'' jls = JoinedSource.replace('@@ ','').replace('<EOS>','').replace('&quot;','"').replace("&apos;","'").replace("&amp;","&").replace("@-@","-").strip() jlt = JoinedTarget.replace('@@ ','').replace('<EOS>','').replace('&quot;','"').replace("&apos;","'").replace("&amp;","&").replace("@-@","-").strip() longest = longestCommonSubstring(jls, jlt).strip() similarity = len(longest)/len(jlt) #Penalize sentences with more than 4 tokens if (len(tgt) > 4) and (similarity > 0.3): #The more similar, the higher penalty #It's worse to have more words with a higher similarity #Let's make it between 0.7 and about 1.5 for veeeery long sentences multiplier = ((0.8+(len(tgt)*0.01)) * (3-((1-similarity)*5)) * (0.7 + similarity) * math.tan(similarity)) Total = round(CDP + APout + APin - multiplier, 10) # e^(-1(x^2)) CDP_pr = round(math.pow(math.e, -1 * math.pow(CDP, 2)) * 100, 2) # e^(-0.05(x^2)) APout_pr = round(math.pow(math.e, -0.05 * math.pow(APout, 2)) * 100, 2) APin_pr = round(math.pow(math.e, -0.05 * math.pow(APin, 2)) * 100, 2) Total_pr = round(math.pow(math.e, -0.05 * math.pow(Total, 2)) * 100, 2) # 1-e^(-0.0001(x^2)) Len = round((1-math.pow(math.e, -0.0001 * math.pow(len(JoinedSource), 2))) * 100, 2) out_s_js.write('["'+ JoinedSource.replace(' ','", "') +'"], \n') out_t_js.write('["'+ JoinedTarget.replace(' ','", "') +'"], \n') out_c_js.write(u'['+ repr(CDP_pr) + u', '+ repr(APout_pr) + u', '+ repr(APin_pr) + u', '+ repr(Total_pr) + u', '+ repr(Len) + u', '+ repr(len(JoinedSource)) + u', ' + repr(round(similarity*100, 2)) + bleuScore + u'], \n') out_sc_js.write(u'[[' + ", ".join(srcTotal) + u'], ' + u'[' + ", ".join(trgTotal) + u'], ' + u'], \n') word = 0 out_a_js.write(u'[') for ali_i in ali: linePartC=0 for ali_j in ali_i: # Maybe worth playing around with this for transformer (and convolutional) NMT output # if ali_j < 0.15: # ali_j = 0 out_a_js.write(u'['+repr(word)+u', ' + str(np.round(ali_j, 8)) + u', '+repr(linePartC)+u'], ') linePartC+=1 if outputType == 'color': printColor(ali_j) elif outputType == 'block': printBlock(ali_j) elif outputType == 'block2': printBlock2(ali_j) if outputType != 'web' and outputType != 'compare': sys.stdout.write(src[word].encode('utf-8', errors='replace').decode('utf-8')) word+=1 if outputType != 'web' and outputType != 'compare': sys.stdout.write('\n') # write target sentences #build 2d array occupied_to = [] outchars = [] outchars.append([]) tw = 0 for tword in tgt: columns = len(tgt) # Some characters use multiple symbols. Need to decode and then encode... twchars = list(tword) twlen = len(twchars) xpos = tw * 2 emptyline = 0 for el in range(0, len(occupied_to)): # if occupied, move to a new line! 
if occupied_to[el] < xpos: emptyline = el if len(outchars) < emptyline+1: # add a new row outchars.append([]) break if el == len(occupied_to)-1: emptyline=el+1 if len(outchars) < emptyline+1: outchars.append([]) for column in range(0, xpos): if len(outchars[emptyline]) <= column: outchars[emptyline].append(' ') for charindex in range(0, twlen): if xpos+charindex == len(outchars[emptyline]): outchars[emptyline].append(twchars[charindex]) else: outchars[emptyline][charindex] = twchars[charindex] if len(occupied_to) <= emptyline: occupied_to.append(xpos+twlen+1) else: occupied_to[emptyline]=xpos+twlen+1; tw+=1 #print 2d array if outputType != 'web' and outputType != 'compare': for liline in outchars: sys.stdout.write(''.join(liline).encode('utf-8', errors='replace').decode('utf-8') + '\n') # print scores sys.stdout.write('\nCoverage Deviation Penalty: \t\t' + repr(round(CDP, 8)) + ' (' + repr(CDP_pr) + '%)' + '\n') sys.stdout.write('Input Absentmindedness Penalty: \t' + repr(round(APin, 8)) + ' (' + repr(APin_pr) + '%)' + '\n') sys.stdout.write('Output Absentmindedness Penalty: \t' + repr(round(APout, 8)) + ' (' + repr(APout_pr) + '%)' + '\n') sys.stdout.write('Confidence: \t\t\t\t' + repr(round(Total, 8)) + ' (' + repr(Total_pr) + '%)' + '\n') sys.stdout.write('Similarity: \t\t\t\t' + repr(round(similarity*100, 2)) + '%' + '\n') if bleuNumber > -1: sys.stdout.write('BLEU: \t\t\t\t\t' + repr(bleuNumber) + '\n') # write target sentences word = 0 out_a_js.write(u'], \n') if outputType != 'web' and outputType != 'compare': sys.stdout.write('\n') out_a_js.write(u'\n]') out_s_js.write(u']') out_t_js.write(u']') out_c_js.write(u']') out_sc_js.write(u']')
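

# --- Illustrative usage sketch (not part of the original script) ---
# A minimal, hedged example of the confidence helpers defined above on a tiny
# soft-alignment matrix (rows/columns index the two token sequences; see
# processAlignments above for how the full matrix is sliced). When every row
# sums to exactly 1, getCP returns 0.0 by construction; sharper (lower
# entropy) rows and columns reduce the absentmindedness penalties.
def _example_confidence_scores():
    ali = [
        [0.9, 0.1],  # attends mostly to the first word
        [0.1, 0.9],  # attends mostly to the second word
    ]
    print('CDP  :', getCP(ali))      # 0.0 here, since each row sums to 1
    print('APout:', getEnt(ali))     # row-wise entropy term
    print('APin :', getRevEnt(ali))  # column-wise entropy term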
the-stack_106_30948
#!/usr/bin/env python3 # Copyright (c) 2016-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test various net timeouts. - Create three sparkd nodes: no_verack_node - we never send a verack in response to their version no_version_node - we never send a version (only a ping) no_send_node - we never send any P2P message. - Start all three nodes - Wait 1 second - Assert that we're connected - Send a ping to no_verack_node and no_version_node - Wait 30 seconds - Assert that we're still connected - Send a ping to no_verack_node and no_version_node - Wait 31 seconds - Assert that we're no longer connected (timeout to receive version/verack is 60 seconds) """ from time import sleep from test_framework.messages import msg_ping from test_framework.mininode import P2PInterface from test_framework.test_framework import SparkTestFramework class TestP2PConn(P2PInterface): def on_version(self, message): # Don't send a verack in response pass class TimeoutsTest(SparkTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): # Setup the p2p connections no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn()) no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False) no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False) sleep(1) assert no_verack_node.is_connected assert no_version_node.is_connected assert no_send_node.is_connected no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) sleep(30) assert "version" in no_verack_node.last_message assert no_verack_node.is_connected assert no_version_node.is_connected assert no_send_node.is_connected no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) sleep(31) assert not no_verack_node.is_connected assert not no_version_node.is_connected assert not no_send_node.is_connected if __name__ == '__main__': TimeoutsTest().main()
the-stack_106_30950
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module supports text processing for NLP. It includes two parts:
transforms and utils.
transforms is a high-performance NLP text-processing module built on icu4c and cppjieba.
utils provides some general methods for NLP text processing.
"""
import platform

from .transforms import Lookup, JiebaTokenizer, UnicodeCharTokenizer, Ngram, WordpieceTokenizer, \
    TruncateSequencePair, ToNumber
from .utils import to_str, to_bytes, JiebaMode, Vocab, NormalizeForm

__all__ = [
    "Lookup", "JiebaTokenizer", "UnicodeCharTokenizer", "Ngram",
    "to_str", "to_bytes", "Vocab", "WordpieceTokenizer",
    "TruncateSequencePair", "ToNumber", "PythonTokenizer"
]

if platform.system().lower() != 'windows':
    from .transforms import UnicodeScriptTokenizer, WhitespaceTokenizer, CaseFold, NormalizeUTF8, \
        RegexReplace, RegexTokenizer, BasicTokenizer, BertTokenizer, PythonTokenizer

    # extend (not append) so that __all__ remains a flat list of names
    __all__.extend(["UnicodeScriptTokenizer", "WhitespaceTokenizer", "CaseFold", "NormalizeUTF8",
                    "RegexReplace", "RegexTokenizer", "BasicTokenizer", "BertTokenizer"])
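

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the exported Vocab/Lookup pair. The exact
# keyword arguments of Vocab.from_list and Lookup can differ between MindSpore
# releases, so treat the calls below as an assumption-labelled sketch rather
# than authoritative API usage; typically the resulting Lookup op is passed to
# Dataset.map(operations=..., input_columns=["text"]).
def _example_vocab_lookup():
    vocab = Vocab.from_list(["hello", "world", "<unk>"])  # assumed signature
    lookup = Lookup(vocab, unknown_token="<unk>")         # assumed signature
    return vocab, lookup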
the-stack_106_30951
# -*- coding: utf-8 -*- """ Functions for converting RCNN-derived zircon segmentation masks to polygons viewable and editable in GUI and vice-versa. """ import numpy as np from skimage import draw import skimage.measure as measure __all__ = ['mask_to_poly', 'poly_to_mask', 'vertex_dict_to_list', 'poly_dicts_to_arrays'] # code for fxn below significantly modified from: \ # https://github.com/waspinator/pycococreator (covered by Apache-2.0 License) def mask_to_poly(mask_for_conversion, tolerance = 1, scale_factor = 1.0): """Convert a numpy mask array to polygon suitable for GUI display, editing. Parameters ---------- mask_for_conversion : np array A numpy binary array representing the central zircon mask for an image, as returned by (successfully) running mos_proc.get_central_mask(). tolerance : Int, optional Tolerance in microns for polygon converted from input mask; resulting polygon will approximate the mask within *tolerance* microns. The default is 1. scale_factor : float, optional Scale factor for the current mosaic image. Used to adjust polygon tolerance to microns. The default is 1.0. Returns ------- export_polygon An ordered list of dicts {x:, y:} representing vertices in a polygon. Point coordinates are x = x/image width, y = y/image height. Suitable for display/editing in manual adjustment/annotation GUI. """ #print('Input shape:', mask_for_conversion.shape) #closes contour def close_contour(contour): if not np.array_equal(contour[0], contour[-1]): contour = np.vstack((contour, contour[0])) return contour export_polygon = [] full_mask_h, full_mask_w = mask_for_conversion.shape #size of original mask #adjust tolerance to image size so that polygons are consistent during processing adj_tolerance = tolerance / scale_factor # padding of mask is apparently necessary for contour closure. / # This line also converts mask to binary. padded_mask = np.pad(mask_for_conversion.astype(int), pad_width = 1, mode='constant', constant_values = 0) mask_labels, labels_nnum = measure.label(padded_mask, return_num=True) main_region_label = 1 if labels_nnum > 1: #selects largest region in case central zircon mask has multiple disconnected regions regions = measure.regionprops(mask_labels) area_list = [props.area for props in regions] main_region_label = regions[area_list.index(max(area_list))].label #gets contours of mask mask_contours = measure.find_contours(mask_labels == main_region_label, 0.5)[0] mask_contours = np.subtract(mask_contours, 1) mask_contours = close_contour(mask_contours) poly_pts = measure.approximate_polygon(mask_contours, adj_tolerance) #converts contours to mask #flip ensures that polygons load properly (rather than mirrored) in GUI poly_pts = np.flip(poly_pts, axis=1) #converts to list of {x:, y:} dicts for JS annotation tool for each_pt in poly_pts: pt_dict = {'x': 0.0, 'y': 0.0} if each_pt[0] >= 0: pt_dict['x'] = round(each_pt[0]/full_mask_w, 3) if each_pt[1] >= 0: pt_dict['y'] = round(each_pt[1]/full_mask_h, 3) export_polygon.append(pt_dict) return export_polygon def poly_to_mask(poly_for_conversion, original_image): """Converts polygons exported by JS annotation tool to masks for automated measurement. Parameters ---------- poly_for_conversion : list of np 2d arrays An ordered list of arrays [x, y] representing vertices in a polygon. original_image : np array Numpy array representing the original image from which polygon was derived. Returns ------- success_bool : Boolean Boolean indicating whether the polygon was successfully converted. 
Will be False if input polygon didn't exist, had under three points, or had no area. mask_output : np array or list If conversion successful, a numpy binary array representing the input polygon. Otherwise, an empty list. """ success_bool = False if poly_for_conversion is None: return(success_bool, []) #polygon must have at least 3 points to have any area if np.shape(poly_for_conversion)[0] < 3: return(success_bool, []) poly_pts = np.clip(poly_for_conversion, 0, 1) original_image_shape = original_image.shape[:2] rescaled_poly = poly_pts * np.asarray(original_image_shape) mask_output = draw.polygon2mask(original_image_shape, rescaled_poly) #if polygon has no area, do not send it for measurements! if len(np.column_stack(np.where(mask_output > 0))) < 10: return(success_bool, []) success_bool = True return success_bool, mask_output def vertex_dict_to_list(input_poly): """Convert polygon vertices from {x:, y:} to [x, y]. Parameters ---------- input_poly : dict Dict with position of x, y polygon vertex {x:, y:}. Returns ------- Type: any X coordinate of vertex. Type: any Y coordinate of vertex. """ return (input_poly['y'], input_poly['x']) def poly_dicts_to_arrays(input_list): """Convert a list of lists of dicts {x:, y:} with polygon vertices to a list of arrays for same vertices. Parameters ---------- input_list : list of lists of dicts List of lists (1 per polygon, 1 polygon per image) of dicts containing polygon vertex locations. Returns ------- arr_list : list[arr] List of np arrays representing polygon vertices (1 per image). """ arr_list = [] for vertices_per_img in input_list: poly_as_array = [vertex_dict_to_list(vertex) for vertex in vertices_per_img] if poly_as_array: arr_list.append(np.stack(poly_as_array)) else: arr_list.append(None) return arr_list
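

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged round-trip example for the helpers above: build a simple
# rectangular binary mask, convert it to GUI-style polygon vertices with
# mask_to_poly(), then rasterize it back with poly_to_mask(). Exact vertex
# counts depend on the tolerance, so no specific output is asserted here.
def _example_roundtrip():
    fake_mask = np.zeros((120, 160), dtype=int)
    fake_mask[30:90, 40:120] = 1                   # a filled rectangle "grain"
    poly = mask_to_poly(fake_mask, tolerance=1, scale_factor=1.0)
    poly_arr = poly_dicts_to_arrays([poly])[0]     # dicts {x:, y:} -> (y, x) array
    ok, remade_mask = poly_to_mask(poly_arr, fake_mask)
    return ok, remade_mask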
the-stack_106_30952
#!/usr/bin/env python
import io
import re

from setuptools import setup, find_packages

import sys

if sys.version_info[:3] < (3, 4):
    raise SystemExit("Toga requires Python 3.4+.")

with io.open('src/core/toga/__init__.py', encoding='utf8') as version_file:
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file.read(), re.M)
    if version_match:
        version = version_match.group(1)
    else:
        raise RuntimeError("Unable to find version string.")

with io.open('src/android/README.rst', encoding='utf8') as readme:
    long_description = readme.read()

setup(
    name='toga-android',
    version=version,
    description='An Android backend for the Toga widget toolkit.',
    long_description=long_description,
    author='Russell Keith-Magee',
    author_email='[email protected]',
    url='http://pybee.org/toga',
    packages=find_packages('src/android'),
    package_dir={'': 'src/android'},
    install_requires=[
        'toga-core>=%s' % version,
    ],
    license='New BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Environment :: Handhelds/PDA\'s',
        'Operating System :: Android',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Software Development',
        'Topic :: Software Development :: User Interfaces',
        'Topic :: Software Development :: Widget Sets',
    ],
    test_suite='tests',
)
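

# --- Illustrative sketch (not part of the original setup script) ---
# A small, hedged demonstration of the single-source-version pattern used
# above: the same regex applied to a sample __init__.py snippet. The sample
# text and version string are made up for illustration only.
def _example_version_regex():
    sample = '__version__ = "0.3.0.dev9"\n__author__ = "someone"\n'
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
    return match.group(1) if match else None   # -> "0.3.0.dev9"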
the-stack_106_30953
from graphnet.data.extractors.i3extractor import I3Extractor from graphnet.data.extractors.utilities import ( frame_is_montecarlo, frame_is_noise, ) class I3RetroExtractor(I3Extractor): def __init__(self, name="retro"): super().__init__(name) def __call__(self, frame) -> dict: """Extracts RETRO reco. and associated quantities if available.""" output = {} if self._frame_contains_retro(frame): output.update( { "azimuth_retro": frame["L7_reconstructed_azimuth"].value, "time_retro": frame["L7_reconstructed_time"].value, "energy_retro": frame[ "L7_reconstructed_total_energy" ].value, "position_x_retro": frame[ "L7_reconstructed_vertex_x" ].value, "position_y_retro": frame[ "L7_reconstructed_vertex_y" ].value, "position_z_retro": frame[ "L7_reconstructed_vertex_z" ].value, "zenith_retro": frame["L7_reconstructed_zenith"].value, "azimuth_sigma": frame[ "L7_retro_crs_prefit__azimuth_sigma_tot" ].value, "position_x_sigma": frame[ "L7_retro_crs_prefit__x_sigma_tot" ].value, "position_y_sigma": frame[ "L7_retro_crs_prefit__y_sigma_tot" ].value, "position_z_sigma": frame[ "L7_retro_crs_prefit__z_sigma_tot" ].value, "time_sigma": frame[ "L7_retro_crs_prefit__time_sigma_tot" ].value, "zenith_sigma": frame[ "L7_retro_crs_prefit__zenith_sigma_tot" ].value, "energy_sigma": frame[ "L7_retro_crs_prefit__energy_sigma_tot" ].value, "cascade_energy_retro": frame[ "L7_reconstructed_cascade_energy" ].value, "track_energy_retro": frame[ "L7_reconstructed_track_energy" ].value, "track_length_retro": frame[ "L7_reconstructed_track_length" ].value, } ) if self._frame_contains_classifiers(frame): classifiers = [ "L7_MuonClassifier_FullSky_ProbNu", "L4_MuonClassifier_Data_ProbNu", "L4_NoiseClassifier_ProbNu", "L7_PIDClassifier_FullSky_ProbTrack", ] for classifier in classifiers: if classifier in frame: output.update({classifier: frame[classifier].value}) if frame_is_montecarlo(frame): if frame_is_noise(frame): output.update( { "osc_weight": frame["noise_weight"]["weight"], } ) else: output["osc_weight"] = self._try_get_key( frame["I3MCWeightDict"], "weight", default_value=-1 ) return output def _frame_contains_retro(self, frame): return "L7_reconstructed_zenith" in frame def _frame_contains_classifiers(self, frame): return "L4_MuonClassifier_Data_ProbNu" in frame def _try_get_key(self, frame, key, default_value=-1): """Return `key` in `frame` if it exists; otherwise return `default_value.""" try: return frame[key] except KeyError: return default_value
the-stack_106_30954
from __future__ import annotations import asyncio import functools import os import random import shutil import signal from typing import Any, Dict, List, Optional import backoff import colorama import devtools import httpx import pydantic import pyfiglet import typer import servo import servo.api import servo.configuration import servo.utilities.key_paths import servo.utilities.strings from servo.servo import _set_current_servo from servo.types import Adjustment, Control, Description, Duration, Measurement class ServoRunner(pydantic.BaseModel, servo.logging.Mixin, servo.api.Mixin): interactive: bool = False _servo: servo.Servo = pydantic.PrivateAttr(None) _connected: bool = pydantic.PrivateAttr(False) _running: bool = pydantic.PrivateAttr(False) _main_loop_task: Optional[asyncio.Task] = pydantic.PrivateAttr(None) class Config: arbitrary_types_allowed = True def __init__(self, servo_: servo, **kwargs) -> None: # noqa: D10 super().__init__(**kwargs) self._servo = servo_ # initialize default servo options if not configured if self.config.settings is None: self.config.settings = servo.CommonConfiguration() @property def servo(self) -> servo.Servo: return self._servo @property def running(self) -> bool: return self._running @property def connected(self) -> bool: return self._connected @property def optimizer(self) -> servo.Optimizer: return self.servo.optimizer @property def config(self) -> servo.BaseServoConfiguration: return self.servo.config @property def api_client_options(self) -> Dict[str, Any]: # Adopt the servo config for driving the API mixin return self.servo.api_client_options async def describe(self, control: Control) -> Description: self.logger.info("Describing...") aggregate_description = Description.construct() results: List[servo.EventResult] = await self.servo.dispatch_event(servo.Events.describe, control=control) for result in results: description = result.value aggregate_description.components.extend(description.components) aggregate_description.metrics.extend(description.metrics) return aggregate_description async def measure(self, param: servo.MeasureParams) -> Measurement: if isinstance(param, dict): # required parsing has failed in api.Mixin._post_event(), run parse_obj to surface the validation errors servo.api.MeasureParams.parse_obj(param) servo.logger.info(f"Measuring... [metrics={', '.join(param.metrics)}]") servo.logger.trace(devtools.pformat(param)) aggregate_measurement = Measurement.construct() results: List[servo.EventResult] = await self.servo.dispatch_event( servo.Events.measure, metrics=param.metrics, control=param.control ) for result in results: measurement = result.value aggregate_measurement.readings.extend(measurement.readings) aggregate_measurement.annotations.update(measurement.annotations) return aggregate_measurement async def adjust( self, adjustments: List[Adjustment], control: Control ) -> Description: summary = f"[{', '.join(list(map(str, adjustments)))}]" self.logger.info(f"Adjusting... 
{summary}") self.logger.trace(devtools.pformat(adjustments)) self.logger.trace(devtools.pformat(control)) aggregate_description = Description.construct() results = await self.servo.dispatch_event( servo.Events.adjust, adjustments=adjustments, control=control ) for result in results: description = result.value aggregate_description.components.extend(description.components) aggregate_description.metrics.extend(description.metrics) self.logger.success(f"Adjustment completed {summary}") return aggregate_description @backoff.on_exception( backoff.expo, (httpx.HTTPError, pydantic.ValidationError), max_time=lambda: servo.current_servo().config.settings.backoff.max_time(), max_tries=lambda: servo.current_servo().config.settings.backoff.max_tries(), ) async def exec_command(self) -> servo.api.Status: cmd_response = await self._post_event(servo.api.Events.whats_next, None) self.logger.info(f"What's Next? => {cmd_response.command}") self.logger.trace(devtools.pformat(cmd_response)) if cmd_response.command == servo.api.Commands.describe: description = await self.describe(Control(**cmd_response.param.get("control", {}))) self.logger.success( f"Described: {len(description.components)} components, {len(description.metrics)} metrics" ) self.logger.debug(devtools.pformat(description)) status = servo.api.Status.ok(descriptor=description.__opsani_repr__()) return await self._post_event(servo.api.Events.describe, status.dict()) elif cmd_response.command == servo.api.Commands.measure: try: measurement = await self.measure(cmd_response.param) self.logger.success( f"Measured: {len(measurement.readings)} readings, {len(measurement.annotations)} annotations" ) self.logger.trace(devtools.pformat(measurement)) param = measurement.__opsani_repr__() except servo.errors.EventError as error: self.logger.error(f"Measurement failed: {error}") param = servo.api.Status.from_error(error).dict() self.logger.error(f"Responding with {param}") self.logger.opt(exception=error).debug("Measure failure details") return await self._post_event(servo.api.Events.measure, param) elif cmd_response.command == servo.api.Commands.adjust: adjustments = servo.api.descriptor_to_adjustments(cmd_response.param["state"]) control = Control(**cmd_response.param.get("control", {})) try: description = await self.adjust(adjustments, control) status = servo.api.Status.ok(state=description.__opsani_repr__()) components_count = len(description.components) settings_count = sum( len(component.settings) for component in description.components ) self.logger.success( f"Adjusted: {components_count} components, {settings_count} settings" ) except servo.EventError as error: self.logger.error(f"Adjustment failed: {error}") status = servo.api.Status.from_error(error) self.logger.error(f"Responding with {status.dict()}") self.logger.opt(exception=error).debug("Adjust failure details") return await self._post_event(servo.api.Events.adjust, status.dict()) elif cmd_response.command == servo.api.Commands.sleep: # TODO: Model this duration = Duration(cmd_response.param.get("duration", 120)) status = servo.utilities.key_paths.value_for_key_path(cmd_response.param, "data.status", None) reason = servo.utilities.key_paths.value_for_key_path( cmd_response.param, "data.reason", "unknown reason" ) msg = f"{status}: {reason}" if status else f"{reason}" self.logger.info(f"Sleeping for {duration} ({msg}).") await asyncio.sleep(duration.total_seconds()) # Return a status so we have a simple API contract return servo.api.Status(status="ok", message=msg) else: raise 
ValueError(f"Unknown command '{cmd_response.command.value}'") # Main run loop for processing commands from the optimizer async def main_loop(self) -> None: # FIXME: We have seen exceptions from using `with self.servo.current()` crossing contexts _set_current_servo(self.servo) while self._running: try: if self.interactive: if not typer.confirm("Poll for next command?"): typer.echo("Sleeping for 1m") await asyncio.sleep(60) continue status = await self.exec_command() if status.status == servo.api.OptimizerStatuses.unexpected_event: self.logger.warning( f"server reported unexpected event: {status.reason}" ) except (httpx.TimeoutException, httpx.HTTPStatusError) as error: self.logger.warning(f"command execution failed HTTP client error: {error}") except pydantic.ValidationError as error: self.logger.warning(f"command execution failed with model validation error: {error}") self.logger.opt(exception=error).debug("Pydantic model failed validation") except Exception as error: self.logger.exception(f"failed with unrecoverable error: {error}") raise error def run_main_loop(self) -> None: if self._main_loop_task: self._main_loop_task.cancel() def _reraise_if_necessary(task: asyncio.Task) -> None: try: if not task.cancelled(): task.result() except Exception as error: # pylint: disable=broad-except self.logger.error(f"Exiting from servo main loop do to error: {error} (task={task})") self.logger.opt(exception=error).trace(f"Exception raised by task {task}") raise error # Ensure that we surface the error for handling self._main_loop_task = asyncio.create_task(self.main_loop(), name=f"main loop for servo {self.optimizer.id}") self._main_loop_task.add_done_callback(_reraise_if_necessary) async def run(self, *, poll: bool = True) -> None: self._running = True _set_current_servo(self.servo) await self.servo.startup() self.logger.info( f"Servo started with {len(self.servo.connectors)} active connectors [{self.optimizer.id} @ {self.optimizer.url or self.optimizer.base_url}]" ) async def giveup(details) -> None: loop = asyncio.get_event_loop() self.logger.critical("retries exhausted, giving up") asyncio.create_task(self.shutdown(loop)) try: @backoff.on_exception( backoff.expo, httpx.HTTPError, max_time=lambda: self.config.settings.backoff.max_time(), max_tries=lambda: self.config.settings.backoff.max_tries(), on_giveup=giveup, ) async def connect() -> None: self.logger.info("Saying HELLO.", end=" ") await self._post_event(servo.api.Events.hello, dict( agent=servo.api.user_agent(), telemetry=self.servo.telemetry.values )) self._connected = True self.logger.info(f"Connecting to Opsani Optimizer @ {self.optimizer.url}...") if self.interactive: typer.confirm("Connect to the optimizer?", abort=True) await connect() except typer.Abort: # Rescue abort and notify user servo.logger.warning("Operation aborted. 
Use Control-C to exit") except asyncio.CancelledError as error: self.logger.trace("task cancelled, aborting servo runner") raise error except: self.logger.exception("exception encountered during connect") if poll: self.run_main_loop() else: self.logger.warning(f"Servo runner initialized with polling disabled -- command loop is not running") async def shutdown(self, *, reason: Optional[str] = None) -> None: """Shutdown the running servo.""" try: self._running = False if self.connected: await self._post_event(servo.api.Events.goodbye, dict(reason=reason)) except Exception: self.logger.exception(f"Exception occurred during GOODBYE request") class AssemblyRunner(pydantic.BaseModel, servo.logging.Mixin): assembly: servo.Assembly runners: List[ServoRunner] = [] progress_handler: Optional[servo.logging.ProgressHandler] = None progress_handler_id: Optional[int] = None _running: bool = pydantic.PrivateAttr(False) class Config: arbitrary_types_allowed = True def __init__(self, assembly: servo.Assembly, **kwargs) -> None: super().__init__(assembly=assembly, **kwargs) def _runner_for_servo(self, servo: servo.Servo) -> ServoRunner: for runner in self.runners: if runner.servo == servo: return runner raise KeyError(f"no runner was found for the servo: \"{servo}\"") @property def running(self) -> bool: return self._running def run(self, *, poll: bool = True, interactive: bool = False) -> None: """Asynchronously run all servos active within the assembly. Running the assembly takes over the current event loop and schedules a `ServoRunner` instance for each servo active in the assembly. """ if self.running: raise RuntimeError("Cannot run an assembly that is already running") self._running = True loop = asyncio.get_event_loop() # Setup signal handling signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT, signal.SIGUSR1) for s in signals: loop.add_signal_handler( s, lambda s=s: asyncio.create_task(self._shutdown(loop, signal=s)) ) loop.set_exception_handler(self._handle_exception) # Setup logging async def _report_progress(**kwargs) -> None: # Forward to the active servo... 
if servo_ := servo.current_servo(): await servo_.report_progress(**kwargs) else: self.logger.warning( f"failed progress reporting -- no current servo context is established (kwargs={devtools.pformat(kwargs)})" ) async def handle_progress_exception(progress: Dict[str, Any], error: Exception) -> None: # FIXME: This needs to be made multi-servo aware # Restart the main event loop if we get out of sync with the server if isinstance(error, (servo.errors.UnexpectedEventError, servo.errors.EventCancelledError)): if isinstance(error, servo.errors.UnexpectedEventError): self.logger.error( "servo has lost synchronization with the optimizer: restarting" ) elif isinstance(error, servo.errors.EventCancelledError): self.logger.error( "optimizer has cancelled operation in progress: cancelling and restarting loop" ) # Post a status to resolve the operation operation = progress['operation'] status = servo.api.Status.from_error(error) self.logger.error(f"Responding with {status.dict()}") runner = self._runner_for_servo(servo.current_servo()) await runner._post_event(operation, status.dict()) tasks = [ t for t in asyncio.all_tasks() if t is not asyncio.current_task() ] self.logger.info(f"Cancelling {len(tasks)} outstanding tasks") [task.cancel() for task in tasks] await asyncio.gather(*tasks, return_exceptions=True) # Restart a fresh main loop if poll: runner = self._runner_for_servo(servo.current_servo()) runner.run_main_loop() else: self.logger.error( f"unrecognized exception passed to progress exception handler: {error}" ) self.progress_handler = servo.logging.ProgressHandler( _report_progress, self.logger.warning, handle_progress_exception ) self.progress_handler_id = self.logger.add(self.progress_handler.sink) self._display_banner() try: for servo_ in self.assembly.servos: servo_runner = ServoRunner(servo_, interactive=interactive) loop.create_task(servo_runner.run(poll=poll)) self.runners.append(servo_runner) loop.run_forever() finally: loop.close() def _display_banner(self) -> None: fonts = ['slant', 'banner3', 'bigchief', 'cosmic', 'speed', 'nancyj', 'fourtops', 'contessa', 'doom', 'broadway', 'acrobatic', 'trek', 'eftirobot', 'roman'] color_map = {'RED': colorama.Fore.RED, 'GREEN': colorama.Fore.GREEN, 'YELLOW': colorama.Fore.YELLOW, 'BLUE': colorama.Fore.BLUE, 'MAGENTA': colorama.Fore.MAGENTA, 'CYAN': colorama.Fore.CYAN, 'RAINBOW': colorama.Fore.MAGENTA} terminal_size = shutil.get_terminal_size() # Generate an awesome banner for this launch font = os.getenv('SERVO_BANNER_FONT', random.choice(fonts)) color_name = os.getenv('SERVO_BANNER_COLOR') # coinflip unless we have been directly configured from the env rainbow = ( bool(random.getrandbits(1)) if color_name is None else (color_name.upper() == 'RAINBOW') ) figlet = pyfiglet.Figlet(font=font, width=terminal_size.columns) banner = figlet.renderText('ServoX').rstrip() if rainbow: # Rainbow it colored_banner = [random.choice(list(color_map.values())) + char for char in banner] typer.echo(''.join(colored_banner), color=True) else: # Flat single color color = (color_map[color_name.upper()] if color_name else random.choice(list(color_map.values()))) typer.echo(f'{color}{banner}', color=True) secho = functools.partial(typer.secho, color=True) types = servo.Assembly.all_connector_types() types.remove(servo.Servo) names = [] for c in types: name = typer.style( servo.utilities.strings.commandify(c.__default_name__), fg=typer.colors.CYAN, bold=False ) version = typer.style(str(c.version), fg=typer.colors.WHITE, bold=True) names.append(f"{name}-{version}") 
version = typer.style(f"v{servo.__version__}", fg=typer.colors.WHITE, bold=True) codename = typer.style(servo.__cryptonym__, fg=typer.colors.MAGENTA, bold=False) initialized = typer.style( "initialized", fg=typer.colors.BRIGHT_GREEN, bold=True ) version = typer.style(f"v{servo.__version__}", fg=typer.colors.WHITE, bold=True) secho(f'{version} "{codename}" {initialized}') secho(reset=True) secho(f"connectors: {', '.join(sorted(names))}") secho( f"config file: {typer.style(str(self.assembly.config_file), bold=True, fg=typer.colors.YELLOW)}" ) if len(self.assembly.servos) == 1: servo_ = self.assembly.servos[0] optimizer = servo_.optimizer id = typer.style(optimizer.id, bold=True, fg=typer.colors.WHITE) secho(f"optimizer: {id}") if optimizer.base_url != "https://api.opsani.com/": base_url = typer.style( f"{optimizer.base_url}", bold=True, fg=typer.colors.RED ) secho(f"base url: {base_url}") if servo_.config.settings and servo_.config.settings.proxies: proxies = typer.style( f"{devtools.pformat(servo_.config.settings.proxies)}", bold=True, fg=typer.colors.CYAN, ) secho(f"proxies: {proxies}") else: servo_count = typer.style(str(len(self.assembly.servos)), bold=True, fg=typer.colors.WHITE) secho(f"servos: {servo_count}") secho(reset=True) async def _shutdown(self, loop, signal=None): if not self.running: raise RuntimeError("Cannot shutdown an assembly that is not running") if signal: self.logger.info(f"Received exit signal {signal.name}...") reason = signal.name if signal else "shutdown" # Shut down the servo runners, breaking active control loops if len(self.runners) == 1: self.logger.info(f"Shutting down servo...") else: self.logger.info(f"Shutting down {len(self.runners)} running servos...") for fut in asyncio.as_completed(list(map(lambda r: r.shutdown(reason=reason), self.runners)), timeout=30.0): try: await fut except Exception as error: self.logger.critical(f"Failed servo runner shutdown with error: {error}") # Shutdown the assembly and the servos it contains self.logger.debug("Dispatching shutdown event...") try: await self.assembly.shutdown() except Exception as error: self.logger.critical(f"Failed assembly shutdown with error: {error}") await asyncio.gather(self.progress_handler.shutdown(), return_exceptions=True) self.logger.remove(self.progress_handler_id) # Cancel any outstanding tasks -- under a clean, graceful shutdown this list will be empty # The shutdown of the assembly and the servo should clean up its tasks tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()] if len(tasks): [task.cancel() for task in tasks] self.logger.info(f"Cancelling {len(tasks)} outstanding tasks") self.logger.debug(f"Outstanding tasks: {devtools.pformat(tasks)}") await asyncio.gather(*tasks, return_exceptions=True) self.logger.info("Servo shutdown complete.") await asyncio.gather(self.logger.complete(), return_exceptions=True) self._running = False loop.stop() def _handle_exception(self, loop: asyncio.AbstractEventLoop, context: dict) -> None: self.logger.debug(f"asyncio exception handler triggered with context: {context}") exception = context.get("exception", None) logger = self.logger.opt(exception=exception) if isinstance(exception, asyncio.CancelledError): logger.warning(f"ignoring asyncio.CancelledError exception") pass elif loop.is_closed(): logger.critical( "Ignoring exception -- the event loop is closed." ) elif self.running: logger.critical( "Shutting down due to unhandled exception in asyncio event loop..." 
) loop.create_task(self._shutdown(loop)) else: logger.critical( "Ignoring exception -- the assembly is not running" )
the-stack_106_30955
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import AgroCoinTestFramework
from test_framework.util import (
    assert_equal,
    assert_raises_rpc_error,
)
from test_framework.blocktools import (
    TIME_GENESIS_BLOCK,
)


class CreateTxWalletTest(AgroCoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.log.info('Create some old blocks')
        self.nodes[0].setmocktime(TIME_GENESIS_BLOCK)
        self.nodes[0].generate(200)
        self.nodes[0].setmocktime(0)
        self.test_anti_fee_sniping()
        self.test_tx_size_too_large()

    def test_anti_fee_sniping(self):
        self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')
        assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
        assert_equal(tx['locktime'], 0)

        self.log.info('Check that anti-fee-sniping is enabled when we mine a recent block')
        self.nodes[0].generate(1)
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
        assert 0 < tx['locktime'] <= 201

    def test_tx_size_too_large(self):
        # More than 10kB of outputs, so that we hit -maxtxfee with a high feerate
        outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for _ in range(400)}
        raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs)

        for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']:
            self.log.info('Check maxtxfee in combination with {}'.format(fee_setting))
            self.restart_node(0, extra_args=[fee_setting])
            assert_raises_rpc_error(
                -6,
                "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
                lambda: self.nodes[0].sendmany(dummy="", amounts=outputs),
            )
            assert_raises_rpc_error(
                -4,
                "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
                lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),
            )

        self.log.info('Check maxtxfee in combination with settxfee')
        self.restart_node(0)
        self.nodes[0].settxfee(0.01)
        assert_raises_rpc_error(
            -6,
            "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
            lambda: self.nodes[0].sendmany(dummy="", amounts=outputs),
        )
        assert_raises_rpc_error(
            -4,
            "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
            lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),
        )
        self.nodes[0].settxfee(0)


if __name__ == '__main__':
    CreateTxWalletTest().main()
the-stack_106_30956
import copy

__author__ = 'rolandh'


class UserInfo(object):
    """ Read only interface to a user info store """

    def __init__(self, db=None):
        self.db = db

    def filter(self, userinfo, user_info_claims=None):
        """
        Return only those claims that are asked for.
        It's a best effort task; if essential claims are not present
        no error is flagged.

        :param userinfo: A dictionary containing the available user info.
        :param user_info_claims: A dictionary specifying the asked for claims
        :return: A dictionary of filtered claims.
        """
        if user_info_claims is None:
            return copy.copy(userinfo)
        else:
            result = {}
            missing = []
            optional = []
            for key, restr in user_info_claims.items():
                try:
                    result[key] = userinfo[key]
                except KeyError:
                    if restr == {"essential": True}:
                        missing.append(key)
                    else:
                        optional.append(key)
            return result

    def __call__(self, userid, client_id, user_info_claims=None, **kwargs):
        try:
            return self.filter(self.db[userid], user_info_claims)
        except KeyError:
            return {}
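

# A minimal, hypothetical usage sketch (not part of the original module): it
# assumes a plain dict acts as the backing claim store and shows that filter()
# silently drops claims that are absent. The user id and claim names below are
# illustrative only.
if __name__ == "__main__":
    db = {"user1": {"sub": "user1", "email": "user1@example.org", "nickname": "u1"}}
    userinfo = UserInfo(db)
    claims = {"email": {"essential": True}, "phone_number": None}
    # Prints {'email': 'user1@example.org'}; the missing optional
    # 'phone_number' claim is dropped without raising.
    print(userinfo("user1", client_id="client-1", user_info_claims=claims))
    # Unknown users fall back to an empty dict.
    print(userinfo("no-such-user", client_id="client-1"))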
the-stack_106_30958
"""Marks all fixed errors #34 on ruwiki's CheckWikipedia.""" import re import pywikibot from checkwiki import load_page_list, mark_error_done, log NUMBER = "34" REGEXP = r"{{{[^!]|#if:|#ifeq:|#switch:|#ifexist:|{{fullpagename}}|{{sitename}}|{{namespace}}|{{basepagename}}|{{pagename}}|{{subpagename}}|{{talkpagename}}|{{подст:|{{subst:" FLAGS = re.I def main(): """Main script function.""" site = pywikibot.Site() for line in load_page_list(NUMBER): page = pywikibot.Page(site, line) if re.search(REGEXP, page.text, flags=FLAGS) is None: mark_error_done(NUMBER, page.title()) log(line, success=True) else: log(line, success=False) if __name__ == "__main__": main()
the-stack_106_30959
from main.gui import Draw
from main.model import Service


def main():
    # Configuring microservice structure
    proxy = Service(5, 100, 'proxy')
    aggregate = Service(5, 100, 'aggregate')
    app = Service(5, 100, 'crud')
    another_app = Service(5, 100, 'another_crud')
    database = Service(5, 100, 'database')
    another_app_db = Service(5, 100, 'database')
    cache = Service(5, 100, 'cache')

    proxy.add_dependency(aggregate)
    aggregate.add_dependency(app)
    aggregate.add_dependency(another_app)
    app.add_dependency(database)
    app.add_dependency(cache)
    another_app.add_dependency(cache)
    another_app.add_dependency(another_app_db)

    # Simulating calls in cycles
    cycles = 100000
    for _ in range(cycles):
        proxy.call()

    # Drawing from root
    draw = Draw()
    draw.draw_tree(proxy)


if __name__ == '__main__':
    main()
the-stack_106_30960
import requests

from legitindicators import atrpips

BINANCE_URL = "https://api.binance.com/api/v3/klines"
SYMBOL = "BTCUSDT"
INTERVAL = "5m"
PARAMS = {"symbol": SYMBOL, "interval": INTERVAL}


def test_atrpips():
    response = requests.get(url=BINANCE_URL, params=PARAMS)
    data = response.json()

    open = [float(o[1]) for o in data]
    high = [float(h[2]) for h in data]
    low = [float(l[3]) for l in data]
    close = [float(c[4]) for c in data]

    input_data = []
    for i in range(0, len(data)):
        ohlc = [open[i], high[i], low[i], close[i]]
        input_data.append(ohlc)

    apips = atrpips(input_data, 14)
    print(apips)
    assert len(apips) == len(close)
the-stack_106_30961
""" Advent of code 2021 day 10 / 2 """ from os import path from collections import deque p = { "()": 1, "[]": 2, "kk": 3, "<>": 4, } m = { "(": "()", "[": "[]", "{": "kk", "<": "<>", ")": "()", "]": "[]", "}": "kk", ">": "<>", } opening = set(["(", "[", "{", "<"]) closing = set([")", "]", "}", ">"]) class Code(object): def __init__(self, lines): self.lines = lines def solve(self): # print(self.lines) scores = deque() for line in self.lines: s = 0 check_stack = deque() for c in line: if c in opening: check_stack.append(c) elif c in closing: shouldclose = check_stack.pop() if m[shouldclose] != m[c]: # ignore corrupted line break else: # calc incomplete line while len(check_stack) > 0: next_char = check_stack.pop() s *= 5 s += p[m[next_char]] scores.append(s) return sorted(scores)[len(scores)//2] def preprocess(raw_data): # pattern = re.compile(r'(\w+) (\d+)') processed_data = [] for line in raw_data.split("\n"): # match = re.match(pattern, line) # data = [match.group(1), match.group(2)] data = line processed_data.append(data) return processed_data def solution(data): """ Solution to the problem """ lines = preprocess(data) solver = Code(lines) return solver.solve() if __name__ == "__main__": with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file: print(solution(input_file.read()))
the-stack_106_30964
# coding:utf-8 # -------------------------------------------------------- # Pytorch multi-GPU Faster R-CNN # Licensed under The MIT License [see LICENSE for details] # Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick # -------------------------------------------------------- from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import pprint import pdb import time import _init_paths import torch from torch.autograd import Variable import torch.nn as nn from model.utils.config import cfg, cfg_from_file, cfg_from_list from model.utils.net_utils import ( adjust_learning_rate, save_checkpoint, get_dataloader, setup_seed, ) from model.ema.optim_weight_ema import WeightEMA from model.utils.parser_func import parse_args, set_dataset_args from model.rpn.bbox_transform import clip_boxes from model.nms.nms_wrapper import nms from model.rpn.bbox_transform import bbox_transform_inv from prettytimer import PrettyTimer def get_cfg(): args = parse_args() print("Called with args:") print(args) args = set_dataset_args(args) if args.cfg_file is not None: cfg_from_file(args.cfg_file) if args.set_cfgs is not None: cfg_from_list(args.set_cfgs) print("Using config:") pprint.pprint(cfg) # np.random.seed(cfg.RNG_SEED) setup_seed(cfg.RNG_SEED) return args if __name__ == "__main__": args = get_cfg() output_dir = f"{args.save_dir}/{args.net}/{args.dataset}" if not os.path.exists(output_dir): os.makedirs(output_dir) if args.dataset_t == "water": args.aug = False if args.dataset_t == "foggy_cityscape": # initilize the network here. from model.umt_faster_rcnn_truncate.umt_vgg16 import vgg16 from model.umt_faster_rcnn_truncate.umt_resnet import resnet else: from model.umt_faster_rcnn.umt_vgg16 import vgg16 from model.umt_faster_rcnn.umt_resnet import resnet student_save_name = os.path.join( output_dir, "conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_thresh_{}" "_lambda_{}_student_target_{}".format( args.conf, args.conf_gamma, args.source_like, args.aug, args.target_like, args.pretrained_epoch, args.pl, args.threshold, args.lam, args.dataset_t, ), ) print("Model will be saved to: ") print(student_save_name) # torch.backends.cudnn.benchmark = True if torch.cuda.is_available() and not args.cuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") # train set # -- Note: Use validation set and disable the flipped to enable faster loading. cfg.TRAIN.USE_FLIPPED = True cfg.USE_GPU_NMS = args.cuda # source train set s_imdb, s_train_size, s_dataloader = get_dataloader(args.imdb_name, args) # source-like/fake-source train set data loader if args.source_like: s_fake_imdb, s_fake_train_size, s_fake_dataloader = get_dataloader( args.imdb_name_fake_source, args, sequential=True, augment=args.aug ) else: s_fake_imdb, s_fake_train_size, s_fake_dataloader = get_dataloader( args.imdb_name_target, args, sequential=True, augment=args.aug ) # target train set t_imdb, t_train_size, t_dataloader = get_dataloader( args.imdb_name_target, args, sequential=True, augment=args.aug ) # target-like/fake-target train set t_fake_imdb, t_fake_train_size, t_fake_dataloader = get_dataloader( args.imdb_name_fake_target, args ) print("{:d} source roidb entries".format(s_train_size)) print("{:d} source like roidb entries".format(s_fake_train_size)) print("{:d} target roidb entries".format(t_train_size)) print("{:d} target like roidb entries".format(t_fake_train_size)) # initilize the tensor holder here. 
im_data = torch.FloatTensor(1) im_info = torch.FloatTensor(1) num_boxes = torch.LongTensor(1) gt_boxes = torch.FloatTensor(1) # ship to cuda if args.cuda: im_data = im_data.cuda() im_info = im_info.cuda() num_boxes = num_boxes.cuda() gt_boxes = gt_boxes.cuda() # make variable im_data = Variable(im_data) im_info = Variable(im_info) num_boxes = Variable(num_boxes) gt_boxes = Variable(gt_boxes) if args.cuda: cfg.CUDA = True imdb = s_imdb if args.net == "vgg16": student_fasterRCNN = vgg16( imdb.classes, pretrained=True, class_agnostic=args.class_agnostic, conf=args.conf, ) teacher_fasterRCNN = vgg16( imdb.classes, pretrained=True, class_agnostic=args.class_agnostic, conf=args.conf, ) elif args.net == "res101": student_fasterRCNN = resnet( imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic, conf=args.conf, ) teacher_fasterRCNN = resnet( imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic, conf=args.conf, ) elif args.net == "res50": student_fasterRCNN = resnet( imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic ) teacher_fasterRCNN = resnet( imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic ) else: print("network is not defined") pdb.set_trace() student_fasterRCNN.create_architecture() teacher_fasterRCNN.create_architecture() lr = cfg.TRAIN.LEARNING_RATE lr = args.lr student_detection_params = [] params = [] for key, value in dict(student_fasterRCNN.named_parameters()).items(): if value.requires_grad: if "bias" in key: params += [ { "params": [value], "lr": lr * (cfg.TRAIN.DOUBLE_BIAS + 1), "weight_decay": cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0, } ] else: params += [ { "params": [value], "lr": lr, "weight_decay": cfg.TRAIN.WEIGHT_DECAY, } ] student_detection_params += [value] teacher_detection_params = [] for key, value in dict(teacher_fasterRCNN.named_parameters()).items(): if value.requires_grad: teacher_detection_params += [value] value.requires_grad = False if args.optimizer == "adam": lr = lr * 0.1 student_optimizer = torch.optim.Adam(params) elif args.optimizer == "sgd": student_optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM) teacher_optimizer = WeightEMA( teacher_detection_params, student_detection_params, alpha=args.teacher_alpha ) if args.cuda: student_fasterRCNN.cuda() teacher_fasterRCNN.cuda() if args.resume: student_checkpoint = torch.load(args.student_load_name) args.session = student_checkpoint["session"] args.start_epoch = student_checkpoint["epoch"] student_fasterRCNN.load_state_dict(student_checkpoint["model"]) student_optimizer.load_state_dict(student_checkpoint["optimizer"]) lr = student_optimizer.param_groups[0]["lr"] if "pooling_mode" in student_checkpoint.keys(): cfg.POOLING_MODE = student_checkpoint["pooling_mode"] print("loaded checkpoint %s" % (args.student_load_name)) teacher_checkpoint = torch.load(args.teacher_load_name) teacher_fasterRCNN.load_state_dict(teacher_checkpoint["model"]) if "pooling_mode" in teacher_checkpoint.keys(): cfg.POOLING_MODE = teacher_checkpoint["pooling_mode"] print("loaded checkpoint %s" % (args.teacher_load_name)) if args.mGPUs: student_fasterRCNN = nn.DataParallel(student_fasterRCNN) teacher_fasterRCNN = nn.DataParallel(teacher_fasterRCNN) iters_per_epoch = int(10000 / args.batch_size) if args.use_tfboard: from tensorboardX import SummaryWriter logger = SummaryWriter("logs") count_iter = 0 conf_gamma = args.conf_gamma pretrained_epoch = args.pretrained_epoch timer = PrettyTimer() for epoch in range(args.start_epoch, args.max_epochs + 
1): # setting to train mode student_fasterRCNN.train() teacher_fasterRCNN.train() loss_temp = 0 start = time.time() epoch_start = time.time() if epoch % (args.lr_decay_step + 1) == 0: adjust_learning_rate(student_optimizer, args.lr_decay_gamma) lr *= args.lr_decay_gamma data_iter_s = iter(s_dataloader) data_iter_t = iter(t_dataloader) data_iter_s_fake = iter(s_fake_dataloader) data_iter_t_fake = iter(t_fake_dataloader) for step in range(1, iters_per_epoch + 1): timer.start("iter") try: data_s = next(data_iter_s) except: data_iter_s = iter(s_dataloader) data_s = next(data_iter_s) try: data_s_fake = next(data_iter_s_fake) except: data_iter_s_fake = iter(s_fake_dataloader) data_s_fake = next(data_iter_s_fake) try: data_t = next(data_iter_t) except: data_iter_t = iter(t_dataloader) data_t = next(data_iter_t) assert ( data_s_fake[0].size() == data_t[0].size() ), "The size should be same between source fake and target" assert ( data_s_fake[1] == data_t[1] ).all(), "The image info should be same between source fake and target" try: data_t_fake = next(data_iter_t_fake) except: data_iter_t_fake = iter(t_fake_dataloader) data_t_fake = next(data_iter_t_fake) # eta = 1.0 count_iter += 1 # put source data into variable im_data.data.resize_(data_s[0].size()).copy_(data_s[0]) im_info.data.resize_(data_s[1].size()).copy_(data_s[1]) gt_boxes.data.resize_(data_s[2].size()).copy_(data_s[2]) num_boxes.data.resize_(data_s[3].size()).copy_(data_s[3]) student_fasterRCNN.zero_grad() ( rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, RCNN_loss_cls, RCNN_loss_bbox, rois_label, out_d_pixel, out_d, confidence_loss, _, ) = student_fasterRCNN(im_data, im_info, gt_boxes, num_boxes, hints=True) loss = ( rpn_loss_cls.mean() + rpn_loss_box.mean() + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean() ) if args.conf: conf_loss = confidence_loss.mean() if args.target_like: # put fake target data into variable im_data.data.resize_(data_t_fake[0].size()).copy_(data_t_fake[0]) im_info.data.resize_(data_t_fake[1].size()).copy_(data_t_fake[1]) # gt is empty gt_boxes.data.resize_(data_t_fake[2].size()).copy_(data_t_fake[2]) num_boxes.data.resize_(data_t_fake[3].size()).copy_(data_t_fake[3]) ( rois, cls_prob, bbox_pred, rpn_loss_cls_t_fake, rpn_loss_box_t_fake, RCNN_loss_cls_t_fake, RCNN_loss_bbox_t_fake, rois_label_t_fake, out_d_pixel, out_d, _, _, ) = student_fasterRCNN( im_data, im_info, gt_boxes, num_boxes, hints=False ) # -------------------------------- loss += ( rpn_loss_cls_t_fake.mean() + rpn_loss_box_t_fake.mean() + RCNN_loss_cls_t_fake.mean() + RCNN_loss_bbox_t_fake.mean() ) if epoch > pretrained_epoch and args.pl: teacher_fasterRCNN.eval() im_data.data.resize_(data_s_fake[0].size()).copy_(data_s_fake[0]) im_info.data.resize_(data_s_fake[1].size()).copy_(data_s_fake[1]) # gt is emqpty gt_boxes.data.resize_(1, 1, 5).zero_() num_boxes.data.resize_(1).zero_() ( rois, cls_prob, bbox_pred, rpn_loss_cls_, rpn_loss_box_, RCNN_loss_cls_, RCNN_loss_bbox_, rois_label_, d_pred_, _, _, confidence_s_fake, ) = teacher_fasterRCNN(im_data, im_info, gt_boxes, num_boxes, test=True) scores = cls_prob.data boxes = rois.data[:, :, 1:5] if cfg.TEST.BBOX_REG: # Apply bounding-box regression deltas box_deltas = bbox_pred.data if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED: # Optionally normalize targets by a precomputed mean and stdev if args.class_agnostic: box_deltas = ( box_deltas.view(-1, 4) * torch.FloatTensor( cfg.TRAIN.BBOX_NORMALIZE_STDS ).cuda() + torch.FloatTensor( cfg.TRAIN.BBOX_NORMALIZE_MEANS ).cuda() ) box_deltas = 
box_deltas.view(1, -1, 4) else: box_deltas = ( box_deltas.view(-1, 4) * torch.FloatTensor( cfg.TRAIN.BBOX_NORMALIZE_STDS ).cuda() + torch.FloatTensor( cfg.TRAIN.BBOX_NORMALIZE_MEANS ).cuda() ) box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes)) pred_boxes = bbox_transform_inv(boxes, box_deltas, 1) pred_boxes = clip_boxes(pred_boxes, im_info.data, 1) else: # Simply repeat the boxes, once for each class pred_boxes = np.tile(boxes, (1, scores.shape[1])) scores = scores.squeeze() if args.conf: scores = torch.sqrt( scores * confidence_s_fake ) # using confidence score to adjust scores pred_boxes = pred_boxes.squeeze() gt_boxes_target = [] pre_thresh = 0.0 thresh = args.threshold empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0)) for j in range(1, len(imdb.classes)): inds = torch.nonzero(scores[:, j] > pre_thresh).view(-1) # if there is det if inds.numel() > 0: cls_scores = scores[:, j][inds] _, order = torch.sort(cls_scores, 0, True) if args.class_agnostic: cls_boxes = pred_boxes[inds, :] else: cls_boxes = pred_boxes[inds][:, j * 4 : (j + 1) * 4] cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1) # cls_dets = torch.cat((cls_boxes, cls_scores), 1) cls_dets = cls_dets[order] keep = nms(cls_dets, cfg.TEST.NMS) cls_dets = cls_dets[keep.view(-1).long()] # all_boxes[j][i] = cls_dets.cpu().numpy() cls_dets_numpy = cls_dets.cpu().numpy() for i in range(np.minimum(10, cls_dets_numpy.shape[0])): bbox = tuple( int(np.round(x)) for x in cls_dets_numpy[i, :4] ) score = cls_dets_numpy[i, -1] if score > thresh: gt_boxes_target.append(list(bbox[0:4]) + [j]) gt_boxes_padding = torch.FloatTensor(cfg.MAX_NUM_GT_BOXES, 5).zero_() if len(gt_boxes_target) != 0: gt_boxes_numpy = torch.FloatTensor(gt_boxes_target) num_boxes_cpu = torch.LongTensor( [min(gt_boxes_numpy.size(0), cfg.MAX_NUM_GT_BOXES)] ) gt_boxes_padding[:num_boxes_cpu, :] = gt_boxes_numpy[:num_boxes_cpu] else: num_boxes_cpu = torch.LongTensor([0]) # teacher_fasterRCNN.train() # put source-like data into variable im_data.data.resize_(data_t[0].size()).copy_(data_t[0]) im_info.data.resize_(data_t[1].size()).copy_(data_t[1]) gt_boxes_padding = torch.unsqueeze(gt_boxes_padding, 0) gt_boxes.data.resize_(gt_boxes_padding.size()).copy_(gt_boxes_padding) num_boxes.data.resize_(num_boxes_cpu.size()).copy_(num_boxes_cpu) ( rois, cls_prob, bbox_pred, rpn_loss_cls_s_fake, rpn_loss_box_s_fake, RCNN_loss_cls_s_fake, RCNN_loss_bbox_s_fake, rois_label_s_fake, out_d_pixel, out_d, _, _, ) = student_fasterRCNN(im_data, im_info, gt_boxes, num_boxes) loss += args.lam * ( rpn_loss_cls_s_fake.mean() + rpn_loss_box_s_fake.mean() + RCNN_loss_cls_s_fake.mean() + RCNN_loss_bbox_s_fake.mean() ) if args.conf: loss += conf_gamma * conf_loss loss_temp += loss.item() student_optimizer.zero_grad() loss.backward() student_optimizer.step() teacher_fasterRCNN.zero_grad() teacher_optimizer.step() timer.end("iter") estimate_time = timer.eta( "iter", count_iter, args.max_epochs * iters_per_epoch ) if step % args.disp_interval == 0: end = time.time() if step > 0: loss_temp /= args.disp_interval if args.mGPUs: loss_rpn_cls = rpn_loss_cls.mean().item() loss_rpn_box = rpn_loss_box.mean().item() loss_rcnn_cls = RCNN_loss_cls.mean().item() loss_rcnn_box = RCNN_loss_bbox.mean().item() fg_cnt = torch.sum(rois_label.data.ne(0)) bg_cnt = rois_label.data.numel() - fg_cnt if args.pl and epoch > pretrained_epoch: loss_rpn_cls_s_fake = rpn_loss_cls_s_fake.mean().item() loss_rpn_box_s_fake = rpn_loss_box_s_fake.mean().item() loss_rcnn_cls_s_fake = 
RCNN_loss_cls_s_fake.mean().item() loss_rcnn_box_s_fake = RCNN_loss_bbox_s_fake.mean().item() fg_cnt_s_fake = torch.sum(rois_label_s_fake.data.ne(0)) bg_cnt_s_fake = rois_label_s_fake.data.numel() - fg_cnt_s_fake if args.target_like: loss_rpn_cls_t_fake = rpn_loss_cls_t_fake.mean().item() loss_rpn_box_t_fake = rpn_loss_box_t_fake.mean().item() loss_rcnn_cls_t_fake = RCNN_loss_cls_t_fake.mean().item() loss_rcnn_box_t_fake = RCNN_loss_bbox_t_fake.mean().item() fg_cnt_t_fake = torch.sum(rois_label_t_fake.data.ne(0)) bg_cnt_t_fake = rois_label_t_fake.data.numel() - fg_cnt_t_fake # dloss_s_fake = dloss_s_fake.mean().item() # dloss_t_fake = dloss_t_fake.mean().item() # dloss_s_p_fake = dloss_s_p_fake.mean().item() # dloss_t_p_fake = dloss_t_p_fake.mean().item() else: loss_rpn_cls = rpn_loss_cls.item() loss_rpn_box = rpn_loss_box.item() loss_rcnn_cls = RCNN_loss_cls.item() loss_rcnn_box = RCNN_loss_bbox.item() fg_cnt = torch.sum(rois_label.data.ne(0)) bg_cnt = rois_label.data.numel() - fg_cnt if args.conf: loss_conf = conf_loss.item() if args.pl and epoch > pretrained_epoch: loss_rpn_cls_s_fake = rpn_loss_cls_s_fake.item() loss_rpn_box_s_fake = rpn_loss_box_s_fake.item() loss_rcnn_cls_s_fake = RCNN_loss_cls_s_fake.item() loss_rcnn_box_s_fake = RCNN_loss_bbox_s_fake.item() fg_cnt_s_fake = torch.sum(rois_label_s_fake.data.ne(0)) bg_cnt_s_fake = rois_label_s_fake.data.numel() - fg_cnt if args.target_like: loss_rpn_cls_t_fake = rpn_loss_cls_t_fake.item() loss_rpn_box_t_fake = rpn_loss_box_t_fake.item() loss_rcnn_cls_t_fake = RCNN_loss_cls_t_fake.item() loss_rcnn_box_t_fake = RCNN_loss_bbox_t_fake.item() fg_cnt_t_fake = torch.sum(rois_label_t_fake.data.ne(0)) bg_cnt_t_fake = rois_label_t_fake.data.numel() - fg_cnt_t_fake print( "[session %d][epoch %2d][iter %4d/%4d] lr: %.2e, loss: %.4f, eta: %s" % ( args.session, epoch, step, iters_per_epoch, lr, loss_temp, estimate_time, ) ) print( "\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start) ) print( "\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box) ) if args.pl and epoch > pretrained_epoch: print("\t\t\tfg/bg=(%d/%d)" % (fg_cnt_s_fake, bg_cnt_s_fake)) print( "\t\t\trpn_cls_s_fake: %.4f, rpn_box_s_fake: %.4f, rcnn_cls_s_fake: %.4f, rcnn_box_s_fake %.4f" % ( loss_rpn_cls_s_fake, loss_rpn_box_s_fake, loss_rcnn_cls_s_fake, loss_rcnn_box_s_fake, ) ) if args.target_like: print("\t\t\tfg/bg=(%d/%d)" % (fg_cnt_t_fake, bg_cnt_t_fake)) print( "\t\t\trpn_cls_t_fake: %.4f, rpn_box_t_fake: %.4f, rcnn_cls_t_fake: %.4f, rcnn_box_t_fake %.4f" % ( loss_rpn_cls_t_fake, loss_rpn_box_t_fake, loss_rcnn_cls_t_fake, loss_rcnn_box_t_fake, ) ) if args.conf is True: print(f"\t\t\tconf loss: {loss_conf:.4}") if args.use_tfboard: info = { "loss": loss_temp, "loss_rpn_cls": loss_rpn_cls, "loss_rpn_box": loss_rpn_box, "loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_box": loss_rcnn_box, "loss_rpn_cls_s_fake": loss_rpn_cls_s_fake, "loss_rpn_box_s_fake": loss_rpn_box_s_fake, "loss_rcnn_cls_s_fake": loss_rcnn_cls_s_fake, "loss_rcnn_box_s_fake": loss_rcnn_box_s_fake, "loss_rpn_cls_t_fake": loss_rpn_cls_t_fake if args.target_like is True else 0, "loss_rpn_box_t_fake": loss_rpn_box_t_fake if args.target_like is True else 0, "loss_rcnn_cls_t_fake": loss_rcnn_cls_t_fake if args.target_like is True else 0, "loss_rcnn_box_t_fake": loss_rcnn_box_t_fake if args.target_like is True else 0, "loss_conf": loss_conf if args.conf is True else 0, "conf_gamma": conf_gamma, } logger.add_scalars( 
"logs_s_{}/losses".format(args.session), info, (epoch - 1) * iters_per_epoch + step, ) loss_temp = 0 start = time.time() student_save_name = os.path.join( output_dir, "conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_" "thresh_{}_lambda_{}_lam2_{}_student_target_{}_session_{}_epoch_{}_step_{}.pth".format( args.conf, args.conf_gamma, args.source_like, args.aug, args.target_like, args.pretrained_epoch, args.pl, args.threshold, args.lam, args.lam2, args.dataset_t, args.session, epoch, step, ), ) save_checkpoint( { "session": args.session, "epoch": epoch + 1, "model": student_fasterRCNN.mumt_train.pyodule.state_dict() if args.mGPUs else student_fasterRCNN.state_dict(), "optimizer": student_optimizer.state_dict(), "pooling_mode": cfg.POOLING_MODE, "class_agnostic": args.class_agnostic, }, student_save_name, ) print("save student model: {}".format(student_save_name)) teacher_save_name = os.path.join( output_dir, "conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_" "thresh_{}_lambda_{}_lam2_{}_teacher_target_{}_session_{}_epoch_{}_step_{}.pth".format( args.conf, args.conf_gamma, args.source_like, args.aug, args.target_like, args.pretrained_epoch, args.pl, args.threshold, args.lam, args.lam2, args.dataset_t, args.session, epoch, step, ), ) save_checkpoint( { "session": args.session, "epoch": epoch + 1, "model": teacher_fasterRCNN.mumt_train.pyodule.state_dict() if args.mGPUs else teacher_fasterRCNN.state_dict(), "pooling_mode": cfg.POOLING_MODE, "class_agnostic": args.class_agnostic, }, teacher_save_name, ) print("save teacher model: {}".format(teacher_save_name)) epoch_end = time.time() print("epoch cost time: {} min".format((epoch_end - epoch_start) / 60.0)) # cmd = ( # f"python test_net_global_local.py --dataset {args.dataset_t} --net {args.net}" # f" --load_name {student_save_name}" # ) # print("cmd: ", cmd) # cmd = [i.strip() for i in cmd.split(" ") if len(i.strip()) > 0] # try: # proc = subprocess.Popen(cmd) # proc.wait() # except (KeyboardInterrupt, SystemExit): # pass # cmd = ( # f"python test_net_global_local.py --dataset {args.dataset_t} --net {args.net}" # f" --load_name {teacher_save_name}" # ) # print("cmd: ", cmd) # cmd = [i.strip() for i in cmd.split(" ") if len(i.strip()) > 0] # try: # proc = subprocess.Popen(cmd) # proc.wait() # except (KeyboardInterrupt, SystemExit): # pass if args.use_tfboard: logger.close()
the-stack_106_30966
import sys

done = []
totalrmed = 0
fname = sys.argv[1]
lines = open(fname).read().split("\n")
output = open(fname, "w")

for line in lines:
    if line.startswith("#") or line.startswith("!") or line == "":
        output.write("{}\n".format(line))
    else:
        if line in done:
            totalrmed += 1
            continue
        done.append(line)
        output.write("{}\n".format(line))

output.close()

print("----- All entries removed -----")
print("Scanned file {}".format(fname))
print("{} total non-redundant entries".format(len(done)))
print("{} entries removed".format(totalrmed))
input()
the-stack_106_30967
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import sys
import warnings
from itertools import chain

try:
    from setuptools import setup
    from setuptools.command.sdist import sdist as orig_sdist
except ImportError:
    from distutils.core import setup
    from distutils.command.sdist import sdist as orig_sdist


wa_dir = os.path.join(os.path.dirname(__file__), 'wa')
sys.path.insert(0, os.path.join(wa_dir, 'framework'))
from version import get_wa_version, get_wa_version_with_commit

# happens if falling back to distutils
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")

try:
    os.remove('MANIFEST')
except OSError:
    pass

packages = []
data_files = {'': [os.path.join(wa_dir, 'commands', 'postgres_schema.sql')]}
source_dir = os.path.dirname(__file__)
for root, dirs, files in os.walk(wa_dir):
    rel_dir = os.path.relpath(root, source_dir)
    data = []
    if '__init__.py' in files:
        for f in files:
            if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
                data.append(f)
        package_name = rel_dir.replace(os.sep, '.')
        package_dir = root
        packages.append(package_name)
        data_files[package_name] = data
    else:
        # use previous package name
        filepaths = [os.path.join(root, f) for f in files]
        data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])

scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]

params = dict(
    name='wlauto',
    description='A framework for automating workload execution and measurement collection on ARM devices.',
    version=get_wa_version_with_commit(),
    packages=packages,
    package_data=data_files,
    include_package_data=True,
    scripts=scripts,
    url='https://github.com/ARM-software/workload-automation',
    license='Apache v2',
    maintainer='ARM Architecture & Technology Device Lab',
    maintainer_email='[email protected]',
    setup_requires=[
        'numpy'
    ],
    install_requires=[
        'python-dateutil',  # converting between UTC and local time.
        'pexpect>=3.3',  # Send/receive to/from device
        'pyserial',  # Serial port interface
        'colorama',  # Printing with colors
        'pyYAML',  # YAML-formatted agenda parsing
        'requests',  # Fetch assets over HTTP
        'devlib>=1.1.dev1',  # Interacting with devices
        'louie-latest',  # callbacks dispatch
        'wrapt',  # better decorators
        'pandas>=0.23.0',  # Data analysis and manipulation
        'future',  # Python 2-3 compatibility
    ],
    dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-1.1.dev1'],
    extras_require={
        'other': ['jinja2'],
        'test': ['nose', 'mock'],
        'mongodb': ['pymongo'],
        'notify': ['notify2'],
        'doc': ['sphinx'],
    },
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
)

all_extras = list(chain(iter(params['extras_require'].values())))
params['extras_require']['everything'] = all_extras


class sdist(orig_sdist):

    user_options = orig_sdist.user_options + [
        ('strip-commit', 's', "Strip git commit hash from package version ")
    ]

    def initialize_options(self):
        orig_sdist.initialize_options(self)
        self.strip_commit = False

    def run(self):
        if self.strip_commit:
            self.distribution.get_version = get_wa_version
        orig_sdist.run(self)


params['cmdclass'] = {'sdist': sdist}

setup(**params)
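
# Example (illustrative, not from the original file): the custom sdist command
# above registers a --strip-commit/-s option, so a source distribution whose
# version string omits the git commit hash could be built with something like:
#
#     python setup.py sdist --strip-commit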
the-stack_106_30968
# -*- coding: utf-8 -*- # pylint: disable=E1101, C0330, C0103 # E1101: Module X has no Y member # C0330: Wrong continued indentation # C0103: Invalid attribute/variable/method name """ utils.py ========= This is a collection of utilities used by the :mod:`wx.lib.plot` package. """ __docformat__ = "restructuredtext en" # Standard Library import functools import inspect import itertools from warnings import warn as _warn # Third Party import numpy as np class DisplaySide(object): """ Generic class for describing which sides of a box are displayed. Used for fine-tuning the axis, ticks, and values of a graph. This class somewhat mimics a collections.namedtuple factory function in that it is an iterable and can have indiviual elements accessible by name. It differs from a namedtuple in a few ways: - it's mutable - it's not a factory function but a full-fledged class - it contains type checking, only allowing boolean values - it contains name checking, only allowing valid_names as attributes :param bottom: Display the bottom side :type bottom: bool :param left: Display the left side :type left: bool :param top: Display the top side :type top: bool :param right: Display the right side :type right: bool """ # TODO: Do I want to replace with __slots__? # Not much memory gain because this class is only called a small # number of times, but it would remove the need for part of # __setattr__... valid_names = ("bottom", "left", "right", "top") def __init__(self, bottom, left, top, right): if not all([isinstance(x, bool) for x in [bottom, left, top, right]]): raise TypeError("All args must be bools") self.bottom = bottom self.left = left self.top = top self.right = right def __str__(self): s = "{}(bottom={}, left={}, top={}, right={})" s = s.format(self.__class__.__name__, self.bottom, self.left, self.top, self.right, ) return s def __repr__(self): # for now, just return the str representation return self.__str__() def __setattr__(self, name, value): """ Override __setattr__ to implement some type checking and prevent other attributes from being created. """ if name not in self.valid_names: err_str = "attribute must be one of {}" raise NameError(err_str.format(self.valid_names)) if not isinstance(value, bool): raise TypeError("'{}' must be a boolean".format(name)) self.__dict__[name] = value def __len__(self): return 4 def __hash__(self): return hash(tuple(self)) def __getitem__(self, key): return (self.bottom, self.left, self.top, self.right)[key] def __setitem__(self, key, value): if key == 0: self.bottom = value elif key == 1: self.left = value elif key == 2: self.top = value elif key == 3: self.right = value else: raise IndexError("list index out of range") def __iter__(self): return iter([self.bottom, self.left, self.top, self.right]) # TODO: replace with wx.DCPenChanger/wx.DCBrushChanger, etc. # Alternatively, replace those with this function... class TempStyle(object): """ Decorator / Context Manager to revert pen or brush changes. Will revert pen, brush, or both to their previous values after a method call or block finish. :param which: The item to save and revert after execution. Can be one of ``{'both', 'pen', 'brush'}``. :type which: str :param dc: The DC to get brush/pen info from. :type dc: :class:`wx.DC` :: # Using as a method decorator: @TempStyle() # same as @TempStyle('both') def func(self, dc, a, b, c): # dc must be 1st arg (beside self) # edit pen and brush here # Or as a context manager: with TempStyle('both', dc): # do stuff .. 
Note:: As of 2016-06-15, this can only be used as a decorator for **class methods**, not standard functions. There is a plan to try and remove this restriction, but I don't know when that will happen... .. epigraph:: *Combination Decorator and Context Manager! Also makes Julienne fries! Will not break! Will not... It broke!* -- The Genie """ _valid_types = {'both', 'pen', 'brush'} _err_str = ( "No DC provided and unable to determine DC from context for function " "`{func_name}`. When `{cls_name}` is used as a decorator, the " "decorated function must have a wx.DC as a keyword arg 'dc=' or " "as the first arg." ) def __init__(self, which='both', dc=None): if which not in self._valid_types: raise ValueError( "`which` must be one of {}".format(self._valid_types) ) self.which = which self.dc = dc self.prevPen = None self.prevBrush = None def __call__(self, func): @functools.wraps(func) def wrapper(instance, dc, *args, **kwargs): # fake the 'with' block. This solves: # 1. plots only being shown on 2nd menu selection in demo # 2. self.dc compalaining about not having a super called when # trying to get or set the pen/brush values in __enter__ and # __exit__: # RuntimeError: super-class __init__() of type # BufferedDC was never called self._save_items(dc) func(instance, dc, *args, **kwargs) self._revert_items(dc) #import copy # copy solves issue #1 above, but #self.dc = copy.copy(dc) # possibly causes issue #2. #with self: # print('in with') # func(instance, dc, *args, **kwargs) return wrapper def __enter__(self): self._save_items(self.dc) return self def __exit__(self, *exc): self._revert_items(self.dc) return False # True means exceptions *are* suppressed. def _save_items(self, dc): if self.which == 'both': self._save_pen(dc) self._save_brush(dc) elif self.which == 'pen': self._save_pen(dc) elif self.which == 'brush': self._save_brush(dc) else: err_str = ("How did you even get here?? This class forces " "correct values for `which` at instancing..." ) raise ValueError(err_str) def _revert_items(self, dc): if self.which == 'both': self._revert_pen(dc) self._revert_brush(dc) elif self.which == 'pen': self._revert_pen(dc) elif self.which == 'brush': self._revert_brush(dc) else: err_str = ("How did you even get here?? This class forces " "correct values for `which` at instancing...") raise ValueError(err_str) def _save_pen(self, dc): self.prevPen = dc.GetPen() def _save_brush(self, dc): self.prevBrush = dc.GetBrush() def _revert_pen(self, dc): dc.SetPen(self.prevPen) def _revert_brush(self, dc): dc.SetBrush(self.prevBrush) def pendingDeprecation(new_func): """ Raise `PendingDeprecationWarning` and display a message. Uses inspect.stack() to determine the name of the item that this is called from. :param new_func: The name of the function that should be used instead. :type new_func: string. """ warn_txt = "`{}` is pending deprecation. Please use `{}` instead." _warn(warn_txt.format(inspect.stack()[1][3], new_func), PendingDeprecationWarning) def scale_and_shift_point(x, y, scale=1, shift=0): """ Creates a scaled and shifted 2x1 numpy array of [x, y] values. The shift value must be in the scaled units. :param float `x`: The x value of the unscaled, unshifted point :param float `y`: The y valye of the unscaled, unshifted point :param np.array `scale`: The scale factor to use ``[x_sacle, y_scale]`` :param np.array `shift`: The offset to apply ``[x_shift, y_shift]``. Must be in scaled units :returns: a numpy array of 2 elements :rtype: np.array .. 
note:: :math:`new = (scale * old) + shift` """ point = scale * np.array([x, y]) + shift return point def set_displayside(value): """ Wrapper around :class:`~wx.lib.plot._DisplaySide` that allows for "overloaded" calls. If ``value`` is a boolean: all 4 sides are set to ``value`` If ``value`` is a 2-tuple: the bottom and left sides are set to ``value`` and the other sides are set to False. If ``value`` is a 4-tuple, then each item is set individually: ``(bottom, left, top, right)`` :param value: Which sides to display. :type value: bool, 2-tuple of bool, or 4-tuple of bool :raises: `TypeError` if setting an invalid value. :raises: `ValueError` if the tuple has incorrect length. :rtype: :class:`~wx.lib.plot._DisplaySide` """ err_txt = ("value must be a bool or a 2- or 4-tuple of bool") # TODO: for 2-tuple, do not change other sides? rather than set to False. if isinstance(value, bool): # turns on or off all axes _value = (value, value, value, value) elif isinstance(value, tuple): if len(value) == 2: _value = (value[0], value[1], False, False) elif len(value) == 4: _value = value else: raise ValueError(err_txt) else: raise TypeError(err_txt) return DisplaySide(*_value) def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) next(b, None) return zip(a, b) if __name__ == "__main__": raise RuntimeError("This module is not intended to be run by itself.")
the-stack_106_30970
import rmgpy.quantity as quantity
import logging
from rmgpy.species import Species
from rmgpy.data.solvation import SolventData, SoluteData, SoluteGroups, SolvationDatabase
from rmgpy.reaction import Reaction


class DiffusionLimited():

    def __init__(self):
        # default is false, enabled if there is a solvent
        self.enabled = False

    def enable(self, solventData, solvationDatabase, comment=''):
        # diffusionLimiter is enabled if a solvent has been added to the RMG object.
        logging.info("Enabling diffusion-limited kinetics...")
        diffusionLimiter.enabled = True
        diffusionLimiter.database = solvationDatabase
        diffusionLimiter.solventData = solventData

    def getSolventViscosity(self, T):
        return self.solventData.getSolventViscosity(T)

    def getEffectiveRate(self, reaction, T):
        """
        Return the effective (diffusion-limited) rate coefficient k_eff for the
        reaction. It equals the intrinsic rate coefficient if diffusion has no effect.

        For 1<=>2 reactions, the reverse rate is limited.
        For 2<=>2 reactions, the faster direction is limited.
        For 2<=>1 or 2<=>3 reactions, the forward rate is limited.
        """
        intrinsicKinetics = reaction.kinetics
        reactants = len(reaction.reactants)
        products = len(reaction.products)
        k_forward = intrinsicKinetics.getRateCoefficient(T, P=100e5)
        Keq = reaction.getEquilibriumConstant(T)  # Kc
        k_reverse = k_forward / Keq
        k_eff = k_forward

        if reactants == 1:
            if products == 1:
                k_eff = k_forward
            else:
                # two products; reverse rate is limited
                k_diff = self.getDiffusionLimit(T, reaction, forward=False)
                k_eff_reverse = k_reverse * k_diff / (k_reverse + k_diff)
                k_eff = k_eff_reverse * Keq
        else:
            # 2 reactants
            if products == 1 or products == 3:
                k_diff = self.getDiffusionLimit(T, reaction, forward=True)
                k_eff = k_forward * k_diff / (k_forward + k_diff)
            else:
                # 2 products
                if Keq > 1.0:
                    # forward rate is faster and thus limited
                    k_diff = self.getDiffusionLimit(T, reaction, forward=True)
                    k_eff = k_forward * k_diff / (k_forward + k_diff)
                else:
                    # reverse rate is faster and thus limited
                    k_diff = self.getDiffusionLimit(T, reaction, forward=False)
                    k_eff_reverse = k_reverse * k_diff / (k_reverse + k_diff)
                    k_eff = k_eff_reverse * Keq
        return k_eff

    def getDiffusionLimit(self, T, reaction, forward=True):
        """
        Return the diffusive limit on the rate coefficient, k_diff.

        This is the upper limit on the rate, in the specified direction.
        (ie. forward direction if forward=True [default] or reverse if forward=False)
        """
        if forward:
            reacting = reaction.reactants
        else:
            reacting = reaction.products
        assert len(reacting) == 2, "Can only calculate diffusion limit in a bimolecular direction"
        radii = 0.0
        diffusivities = 0.0
        for spec in reacting:
            soluteData = self.database.getSoluteData(spec)
            # calculate radius with the McGowan volume and assuming sphere
            radius = ((75 * soluteData.V / 3.14159) ** (1 / 3)) / 100
            diff = soluteData.getStokesDiffusivity(T, self.getSolventViscosity(T))
            radii += radius
            diffusivities += diff
        N_a = 6.022e23  # Avogadro's Number
        k_diff = 4 * 3.14159 * radii * diffusivities * N_a
        return k_diff


# module level variable. There should only ever be one. It starts off disabled
diffusionLimiter = DiffusionLimited()
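

# Illustrative numeric sketch (not part of the original module): shows how the
# diffusion limit combines with an intrinsic rate via the harmonic-mean form
# k_eff = k_int * k_diff / (k_int + k_diff). The numbers below are made up
# purely to show the limiting behaviour; they are not RMG outputs.
if __name__ == "__main__":
    k_int = 1.0e12   # hypothetical intrinsic bimolecular rate coefficient
    k_diff = 1.0e9   # hypothetical Smoluchowski (diffusive) limit
    k_eff = k_int * k_diff / (k_int + k_diff)
    # When k_int >> k_diff the effective rate collapses to ~k_diff,
    # i.e. the reaction becomes diffusion-controlled.
    print(k_eff)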
the-stack_106_30971
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""schema_guided_dialogue dataset."""

import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text.schema_guided_dialogue import schema_guided_dialogue


class SchemaGuidedDialogueTest(tfds.testing.DatasetBuilderTestCase):
  """Tests for schema_guided_dialogue dataset."""
  DATASET_CLASS = schema_guided_dialogue.SchemaGuidedDialogue
  SPLITS = {
      'train': 3,  # Number of fake train examples
      'dev': 2,  # Number of fake dev examples
      'test': 1,  # Number of fake test examples
  }


if __name__ == '__main__':
  tfds.testing.test_main()
the-stack_106_30972
import torch

from ..utils import box_utils
from .data_preprocessing import PredictionTransform
from ..utils.misc import Timer


class Predictor:
    def __init__(self, net, size, mean=0.0, std=1.0, nms_method=None,
                 iou_threshold=0.45, filter_threshold=0.01, candidate_size=200,
                 sigma=0.5, device=None):
        self.net = net
        self.transform = PredictionTransform(size, mean, std)
        self.iou_threshold = iou_threshold
        self.filter_threshold = filter_threshold
        self.candidate_size = candidate_size
        self.nms_method = nms_method
        self.sigma = sigma
        if device:
            self.device = device
        else:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.net.to(self.device)
        self.net.eval()

        self.timer = Timer()

    def predict(self, image, top_k=-1, prob_threshold=None):
        cpu_device = torch.device("cpu")
        height, width, _ = image.shape
        image = self.transform(image)
        images = image.unsqueeze(0)
        images = images.to(self.device)
        with torch.no_grad():
            self.timer.start()
            scores, boxes = self.net.forward(images)
            print("Inference time: ", self.timer.end())
        boxes = boxes[0]
        scores = scores[0]
        if not prob_threshold:
            prob_threshold = self.filter_threshold
        # this version of nms is slower on GPU, so we move data to CPU.
        boxes = boxes.to(cpu_device)
        scores = scores.to(cpu_device)
        picked_box_probs = []
        picked_labels = []
        for class_index in range(1, scores.size(1)):
            probs = scores[:, class_index]
            mask = probs > prob_threshold
            probs = probs[mask]
            if probs.size(0) == 0:
                continue
            subset_boxes = boxes[mask, :]
            box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1)
            box_probs = box_utils.nms(box_probs, self.nms_method,
                                      score_threshold=prob_threshold,
                                      iou_threshold=self.iou_threshold,
                                      sigma=self.sigma,
                                      top_k=top_k,
                                      candidate_size=self.candidate_size)
            picked_box_probs.append(box_probs)
            picked_labels.extend([class_index] * box_probs.size(0))
        if not picked_box_probs:
            return torch.tensor([]), torch.tensor([]), torch.tensor([])
        picked_box_probs = torch.cat(picked_box_probs)
        picked_box_probs[:, 0] *= width
        picked_box_probs[:, 1] *= height
        picked_box_probs[:, 2] *= width
        picked_box_probs[:, 3] *= height
        return picked_box_probs[:, :4], torch.tensor(picked_labels), picked_box_probs[:, 4]
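

# Hypothetical usage sketch (not part of the original class): shows the
# expected call pattern. `create_net` and the image source are assumptions made
# for illustration, not verified APIs of this repository, so the sketch is kept
# as comments rather than executable code.
#
#     import cv2
#     net = create_net(num_classes)                          # hypothetical factory
#     predictor = Predictor(net, size=300, nms_method="hard")
#     image = cv2.imread("example.jpg")                      # HWC uint8 image
#     boxes, labels, probs = predictor.predict(image, top_k=10, prob_threshold=0.4)
#     # boxes come back in absolute pixel coordinates (x1, y1, x2, y2)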
the-stack_106_30973
from pub_data import publish
import psutil

cpu = psutil.cpu_percent()
print(cpu)
ram = psutil.virtual_memory().percent
print(ram)
disk = psutil.disk_usage('/').percent
print(disk)

data = {
    "cpu": cpu,
    "ram": ram,
    "disk": disk
}

publish("piSystemUsage", data)
the-stack_106_30974
from requests import post
import os

"""
Telegram message push module
"""

TG_TOKEN = os.getenv("TG_TOKEN")  # Token of the Telegram bot
CHAT_ID = os.getenv("CHAT_ID")    # Chat ID that receives the pushed messages


def post_tg(message):
    telegram_message = f"{message}"

    params = (
        ('chat_id', CHAT_ID),
        ('text', telegram_message),
        ('parse_mode', "Markdown"),  # Either Html or Markdown
        ('disable_web_page_preview', "yes")
    )
    telegram_url = "https://api.telegram.org/bot" + TG_TOKEN + "/sendMessage"
    telegram_req = post(telegram_url, params=params)
    telegram_status = telegram_req.status_code
    if telegram_status == 200:
        print(f"INFO: Telegram Message sent")
    else:
        print("Telegram Error")


if __name__ == "__main__":
    # Message text kept verbatim (roughly: "Zhejiang University daily health
    # check-in V1.0 / Check-in result: please check manually").
    post_tg('浙江大学每日健康打卡 V1.0 ' + " \n\n 签到结果: " + "请自行查看")
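
# Illustrative setup (not part of the original file): the script reads its
# credentials from the environment. The values and the file name below are
# placeholders, not real secrets or a confirmed module name.
#
#     export TG_TOKEN="123456:ABC-your-bot-token"
#     export CHAT_ID="123456789"
#     python tg_push.py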
the-stack_106_30976
import click
from count_all_mutations_helpers import count_mutations, file_merge_algorithm
from count_all_mutations_helpers import post_analyse
import os
import pandas as pd
import threading
from queue import Queue

print_lock = threading.Lock()
url_queue = Queue()


def process_queue():
    while True:
        input_tuple = url_queue.get()
        file_merge_algorithm(input_tuple)
        url_queue.task_done()


@click.command()
@click.argument('input_folder')
@click.argument('output_folder')
@click.option('--from_step', '-s')
@click.option('--rerun', '-r')
def main(input_folder, output_folder, from_step='', rerun=''):
    if not from_step:
        from_step = 0
    if not rerun:
        rerun = 0
    rerun = bool(rerun)
    from_step = int(from_step)
    print(from_step, rerun)
    click.echo("Analysis starting for:")
    click.echo(input_folder)

    if from_step <= 1:
        print("Initializing step 1")
        count_mutations(input_folder=input_folder, output_folder=output_folder, rerun=rerun)
        print("Finished step 1")
    else:
        click.echo("Skipping step 1")

    if from_step <= 2:
        print("Initializing step 2")
        files_temp = [[x[0] + '/' + y for y in x[2]
                       if ('_eval' not in y and 'results_count_all' in y)]
                      for x in os.walk(output_folder + '/patients')]
        files_temp = [file for sublist in files_temp for file in sublist]
        # print(files_temp)
        for file in files_temp:
            if not os.path.isfile(file.split('.')[0] + '_eval.csv'):
                post_analyse(file, file.split('.')[0] + '_eval.csv')
            if not os.path.isfile(file.split('.')[0] + '_evaluated.csv'):
                file_df = pd.read_csv(file.split('.')[0] + '_eval.csv')
                file_df = file_df[file_df['eval']]
                file_df.to_csv(file.split('.')[0] + '_evaluated.csv')
        print("Finished step 2")
    else:
        click.echo("Skipping step 2")

    if from_step <= 3:
        print("Initializing step 3")
        files_temp = [[x[0] + '/' + y for y in x[2]
                       if ('_evaluated.csv' in y and 'results_count_all' in y)]
                      for x in os.walk(output_folder + '/patients')]
        files_temp = [file for sublist in files_temp for file in sublist]

        results = pd.DataFrame()
        for i in range(10):
            t = threading.Thread(target=process_queue)
            t.daemon = True
            t.start()

        for file in files_temp:
            # print(file)
            url_queue.put(file)

        url_queue.join()

        files_temp = [[x[0] + '/' + y for y in x[2]
                       if ('_algorithms_merged.csv' in y and 'results_count_all' in y)]
                      for x in os.walk(output_folder + '/patients')]
        files_temp = [file for sublist in files_temp for file in sublist]

        for file in files_temp:
            temp_df = pd.read_csv(file)
            user = temp_df['indiv_name'].unique()[0]
            count = temp_df.shape[0]
            new_record = pd.DataFrame([[user, count]], columns=['patient_id', 'mutation_count'])
            results = pd.concat([results, new_record])
        results.to_csv(output_folder + '/patient_mutation_count.csv', index=False)
        print("Finished step 3")
    else:
        click.echo("Skipping step 3")

    click.echo("Analysis finished")


if __name__ == "__main__":
    main()
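
# Hypothetical invocation (for illustration only; the actual script file name
# is not given in this snapshot):
#
#     python count_all_mutations.py ./data/input ./data/output --from_step 2 --rerun 1
#
# Note that --rerun is converted with bool() after the empty-string check, so
# any non-empty value (even "0") enables rerun.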
the-stack_106_30977
# --- # jupyter: # jupytext: # formats: ipynb,py:hydrogen # text_representation: # extension: .py # format_name: hydrogen # format_version: '1.2' # jupytext_version: 1.1.7 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% import datetime # %% import math import os import time # %% from collections import deque from collections.abc import Iterable from pathlib import Path import matplotlib.pyplot as plt import numpy as np # %% import torch from torch import nn, optim from torch.autograd import Variable from torch.utils import data import yolact # %% from siim import bootstrap, io from siim.config import cfg from siim.resource import WEIGHTS_DIR_PATH, ScatterWrapper, TrainDataset from yolact.data import detection_collate from yolact.utils.augmentations import BaseTransform, SSDAugmentation from yolact.utils.functions import MovingAverage, SavePath # %% def prepare_data(datum, device=torch.device('cpu')): images, (targets, masks, num_crowds) = datum images = Variable(images.to(device), requires_grad=False) targets = [Variable(ann.to(device), requires_grad=False) for ann in targets] masks = [Variable(mask.to(device), requires_grad=False) for mask in masks] return images, targets, masks, num_crowds # %% def set_lr(optimizer, new_lr): for param_group in optimizer.param_groups: param_group['lr'] = new_lr # %% bootstrap() # %% dataset = TrainDataset(transform=SSDAugmentation(), empty_mask_is_negative=True) n_samples = len(dataset) # n_samples is 60000 train_size = int(n_samples * 0.8) # train_size is 48000 val_size = n_samples - train_size # val_size is 48000 train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size]) # %% img, (target, mask, no_crowd) = train_dataset[2900] plt.figure(figsize=(10, 10)) plt.subplot(1, 2, 1) plt.imshow(img[0], cmap="gray") plt.axis("off") # plt.subplot(1, 2, 2) plt.imshow(img[0] * mask, cmap="gray") plt.axis("off") # %% net = yolact.yolact.Yolact() net.train() net.init_weights(backbone_path=str(WEIGHTS_DIR_PATH / cfg.backbone.path)) # %% data_loader = torch.utils.data.DataLoader( train_dataset, batch_size=cfg.batch_size, num_workers=cfg.num_workers, shuffle=True, collate_fn=detection_collate, pin_memory=True, ) optimizer = optim.SGD( net.parameters(), lr=cfg.lr, momentum=cfg.momentum, weight_decay=cfg.decay ) criterion = yolact.layers.MultiBoxLoss( num_classes=cfg.num_classes, pos_threshold=cfg.positive_iou_threshold, neg_threshold=cfg.negative_iou_threshold, negpos_ratio=3, ) # %% save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=cfg.save_folder) time_avg = MovingAverage() loss_types = ["B", "C", "M", "P", "D", "E", "S"] loss_avgs = {k: MovingAverage(100) for k in loss_types} print("Begin training!") # %% # try-except so you can use ctrl+c to save early and stop training step_index = 0 iteration = 0 last_time = time.time() epoch_size = len(train_dataset) // cfg.batch_size num_epochs = math.ceil(cfg.max_iter / epoch_size) for epoch in range(num_epochs): for datum in data_loader: # Stop if we've reached an epoch if we're resuming from start_iter if iteration == (epoch + 1) * epoch_size: break # Stop at the configured number of iterations even if mid-epoch if iteration == cfg.max_iter: break # Change a config setting if we've reached the specified iteration changed = False for change in cfg.delayed_settings: if iteration >= change[0]: changed = True cfg.replace(change[1]) # Reset the loss averages because things might have changed for avg in loss_avgs: avg.reset() 
# If a config setting was changed, remove it from the list so we don't keep checking if changed: cfg.delayed_settings = [ x for x in cfg.delayed_settings if x[0] > iteration] # Warm up by linearly interpolating the learning rate from some smaller value if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until: set_lr(optimizer, (cfg.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init) # Adjust the learning rate at the given iterations, but also if we resume from past that iteration while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]: step_index += 1 set_lr(optimizer, args.lr * (args.gamma ** step_index)) # Load training data # Note, for training on multiple gpus this will use the custom replicate and gather I wrote up there images, targets, masks, num_crowds = prepare_data(datum) # Forward Pass out = net(images) # Compute Loss optimizer.zero_grad() wrapper = ScatterWrapper(targets, masks, num_crowds) losses = criterion(out, wrapper, wrapper.make_mask()) # Mean here because Dataparallel losses = {k: v.mean() for k, v in losses.items()} loss = sum([losses[k] for k in losses]) # Backprop loss.backward() # Do this to free up vram even if loss is not finite if torch.isfinite(loss).item(): optimizer.step() # Add the loss to the moving average for bookkeeping for k in losses: loss_avgs[k].add(losses[k].item()) cur_time = time.time() elapsed = cur_time - last_time last_time = cur_time # Exclude graph setup from the timing information if 0 < iteration: time_avg.add(elapsed) if iteration % 10 == 0: eta_str = str(datetime.timedelta( seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0] total = sum([loss_avgs[k].get_avg() for k in losses]) loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], []) print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f') % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True) iteration += 1 if iteration % cfg.save_interval == 0 and 0 < iteration: print('Saving state, iter:', iteration) net.save_weights(save_path(epoch, iteration)) #if cfg.keep_latest: # for p in SavePath.get_olds(cfg.save_folder, cfg.name): # print('Deleting old save...') # os.remove(str(p)) break break net.save_weights(save_path(epoch, iteration))
the-stack_106_30979
import re
import sys
from pathlib import Path

import setuptools

LABEXTENSIONS_DIR = Path('py_src') / 'jupyter_lsp' / 'labextensions'
LABEXTENSIONS_INSTALL_DIR = Path('share') / 'jupyter' / 'labextensions'


def get_data_files():
    extension_files = [
        (str(LABEXTENSIONS_INSTALL_DIR / file.relative_to(LABEXTENSIONS_DIR).parent), [str(file)])
        for file in LABEXTENSIONS_DIR.rglob("*.*")
    ]

    extension_files.append(
        ("etc/jupyter/jupyter_notebook_config.d",
         ["py_src/jupyter_lsp/etc/jupyter-lsp-serverextension.json"])
    )

    return extension_files


setuptools.setup(
    version=re.findall(
        r"""__version__ = "([^"]+)"$""",
        (Path(__file__).parent / "py_src" / "jupyter_lsp" / "_version.py").read_text(),
    )[0],
    setup_requires=["pytest-runner"] if "test" in sys.argv else [],
    data_files=get_data_files(),
)
the-stack_106_30980
# -*- coding:utf8 -*-
"""
The keys and types passed in here are defined outside db_api and handed over together
as a small conf; db_api decides on its own whether to accept them.
The db automatically creates two timestamp keys: create_time, last_write_time.
"""
from db.local_db import LocalDb as BaseDb


class PipeTaskInfo(BaseDb):
    def __init__(self):
        super(PipeTaskInfo, self).__init__()
        self.table_type = "pipetask_info"
        self.design_table_type.update({
            # the db automatically adds create_time: str and last_write_time: str
            "pipetask_id": str,
            "pipeline_id": str,
            "finish_node_list": list,
            "first_input_args": None,
            "first_input_kwargs": None,
            "pipetask_status": str,
            "flags": None
        })
        self.map_id = "pipetask_id"
        self._init_db_folder()


class PipeLineInfo(BaseDb):
    def __init__(self):
        super(PipeLineInfo, self).__init__()
        self.table_type = "pipeline_info"
        self.design_table_type.update({
            # the db automatically adds create_time: str and last_write_time: str
            "pipeline_id": str,
            "pipeline_name": str,
            "dag_dict": dict,
            "topo_order_list": list,
            "config": None,
            "node_id_dict": dict,
            "flags": None
        })
        self.map_id = "pipeline_id"
        self._init_db_folder()


class PipeNodeInfo(BaseDb):
    def __init__(self):
        super(PipeNodeInfo, self).__init__()
        self.table_type = "pipenode_info"
        self.design_table_type.update({
            # the db automatically adds create_time: str and last_write_time: str
            "pipenode_id": str,
            "pipenode_name": str,
            "func_des": list,
            "func_str": str,
            "type": str,
            "inputs": list,
            "outputs": list,
            "next_nodes": list,
            "prep_nodes": list,
            "outputs_r": dict,
            "flags": None
        })
        self.map_id = "pipenode_id"
        self._init_db_folder()
the-stack_106_30984
""" Lift Curve Widget ----------------- """ from collections import namedtuple import numpy as np import sklearn.metrics as skl_metrics from AnyQt import QtWidgets from AnyQt.QtGui import QColor, QPen, QPalette, QFont from AnyQt.QtCore import Qt import pyqtgraph as pg import Orange from Orange.widgets import widget, gui, settings from Orange.widgets.evaluate.utils import check_results_adequacy from Orange.widgets.utils import colorpalette, colorbrewer from Orange.widgets.evaluate.owrocanalysis import convex_hull from Orange.widgets.widget import Input from Orange.widgets import report CurvePoints = namedtuple("CurvePoints", ["cases", "tpr", "thresholds"]) CurvePoints.is_valid = property(lambda self: self.cases.size > 0) LiftCurve = namedtuple("LiftCurve", ["points", "hull"]) LiftCurve.is_valid = property(lambda self: self.points.is_valid) def liftCurve_from_results(results, clf_index, target): x, y, thresholds = lift_curve_from_results(results, target, clf_index) points = CurvePoints(x, y, thresholds) hull = CurvePoints(*convex_hull([(x, y, thresholds)])) return LiftCurve(points, hull) PlotCurve = namedtuple("PlotCurve", ["curve", "curve_item", "hull_item"]) class OWLiftCurve(widget.OWWidget): name = "Lift Curve" description = ( "Construct and display a lift curve " "from the evaluation of classifiers." ) icon = "icons/LiftCurve.svg" priority = 1020 class Inputs: evaluation_results = Input("Evaluation Results", Orange.evaluation.Results) target_index = settings.Setting(0) selected_classifiers = settings.Setting([]) display_convex_hull = settings.Setting(False) display_cost_func = settings.Setting(True) fp_cost = settings.Setting(500) fn_cost = settings.Setting(500) target_prior = settings.Setting(50.0) graph_name = "plot" def __init__(self): super().__init__() self.results = None self.classifier_names = [] self.colors = [] self._curve_data = {} box = gui.vBox(self.controlArea, "Plot") tbox = gui.vBox(box, "Target Class") tbox.setFlat(True) self.target_cb = gui.comboBox( tbox, self, "target_index", callback=self._on_target_changed, contentsLength=8, ) cbox = gui.vBox(box, "Classifiers") cbox.setFlat(True) self.classifiers_list_box = gui.listBox( cbox, self, "selected_classifiers", "classifier_names", selectionMode=QtWidgets.QListView.MultiSelection, callback=self._on_classifiers_changed, ) gui.checkBox( box, self, "display_convex_hull", "Show lift convex hull", callback=self._replot, ) self.plotview = pg.GraphicsView(background="w") self.plotview.setFrameStyle(QtWidgets.QFrame.StyledPanel) self.plot = pg.PlotItem(enableMenu=False) self.plot.setMouseEnabled(False, False) self.plot.hideButtons() pen = QPen(self.palette().color(QPalette.Text)) tickfont = QFont(self.font()) tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11)) axis = self.plot.getAxis("bottom") axis.setTickFont(tickfont) axis.setPen(pen) axis.setLabel("P Rate") axis = self.plot.getAxis("left") axis.setTickFont(tickfont) axis.setPen(pen) axis.setLabel("TP Rate") self.plot.showGrid(True, True, alpha=0.1) self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0), padding=0.05) self.plotview.setCentralItem(self.plot) self.mainArea.layout().addWidget(self.plotview) @Inputs.evaluation_results def set_results(self, results): """Set the input evaluation results.""" self.clear() self.results = check_results_adequacy(results, self.Error) if self.results is not None: self._initialize(results) self._setup_plot() def clear(self): """Clear the widget state.""" self.plot.clear() self.results = None self.target_cb.clear() 
self.target_index = 0 self.classifier_names = [] self.colors = [] self._curve_data = {} def _initialize(self, results): N = len(results.predicted) names = getattr(results, "learner_names", None) if names is None: names = ["#{}".format(i + 1) for i in range(N)] scheme = colorbrewer.colorSchemes["qualitative"]["Dark2"] if N > len(scheme): scheme = colorpalette.DefaultRGBColors self.colors = colorpalette.ColorPaletteGenerator(N, scheme) self.classifier_names = names self.selected_classifiers = list(range(N)) for i in range(N): item = self.classifiers_list_box.item(i) item.setIcon(colorpalette.ColorPixmap(self.colors[i])) self.target_cb.addItems(results.data.domain.class_var.values) def plot_curves(self, target, clf_idx): if (target, clf_idx) not in self._curve_data: curve = liftCurve_from_results(self.results, clf_idx, target) color = self.colors[clf_idx] pen = QPen(color, 1) pen.setCosmetic(True) shadow_pen = QPen(pen.color().lighter(160), 2.5) shadow_pen.setCosmetic(True) item = pg.PlotDataItem( curve.points[0], curve.points[1], pen=pen, shadowPen=shadow_pen, symbol="+", symbolSize=3, symbolPen=shadow_pen, antialias=True, ) hull_item = pg.PlotDataItem( curve.hull[0], curve.hull[1], pen=pen, antialias=True ) self._curve_data[target, clf_idx] = PlotCurve(curve, item, hull_item) return self._curve_data[target, clf_idx] def _setup_plot(self): target = self.target_index selected = self.selected_classifiers curves = [self.plot_curves(target, clf_idx) for clf_idx in selected] for curve in curves: self.plot.addItem(curve.curve_item) if self.display_convex_hull: hull = convex_hull([c.curve.hull for c in curves]) self.plot.plot(hull[0], hull[1], pen="y", antialias=True) pen = QPen(QColor(100, 100, 100, 100), 1, Qt.DashLine) pen.setCosmetic(True) self.plot.plot([0, 1], [0, 1], pen=pen, antialias=True) warning = "" if not all(c.curve.is_valid for c in curves): if any(c.curve.is_valid for c in curves): warning = "Some lift curves are undefined" else: warning = "All lift curves are undefined" self.warning(warning) def _replot(self): self.plot.clear() if self.results is not None: self._setup_plot() def _on_target_changed(self): self._replot() def _on_classifiers_changed(self): self._replot() def send_report(self): if self.results is None: return caption = report.list_legend( self.classifiers_list_box, self.selected_classifiers ) self.report_items((("Target class", self.target_cb.currentText()),)) self.report_plot() self.report_caption(caption) def lift_curve_from_results(results, target, clf_idx, subset=slice(0, -1)): actual = results.actual[subset] scores = results.probabilities[clf_idx][subset][:, target] yrate, tpr, thresholds = lift_curve(actual, scores, target) return yrate, tpr, thresholds def lift_curve(ytrue, ypred, target=1): P = np.sum(ytrue == target) N = ytrue.size - P if P == 0 or N == 0: # Undefined TP and FP rate return np.array([]), np.array([]), np.array([]) fpr, tpr, thresholds = skl_metrics.roc_curve(ytrue, ypred, target) rpp = fpr * (N / (P + N)) + tpr * (P / (P + N)) return rpp, tpr, thresholds def main(): import sip from AnyQt.QtWidgets import QApplication from Orange.classification import ( LogisticRegressionLearner, SVMLearner, NuSVMLearner, ) app = QApplication([]) w = OWLiftCurve() w.show() w.raise_() data = Orange.data.Table("ionosphere") results = Orange.evaluation.CrossValidation( data, [ LogisticRegressionLearner(penalty="l2"), LogisticRegressionLearner(penalty="l1"), SVMLearner(probability=True), NuSVMLearner(probability=True), ], store_data=True, ) results.learner_names = 
["LR l2", "LR l1", "SVM", "Nu SVM"] w.set_results(results) rval = app.exec_() sip.delete(w) del w app.processEvents() del app return rval if __name__ == "__main__": main()
the-stack_106_30986
# --------------------------------------------------------------------------- # # Diagnostics # # --------------------------------------------------------------------------- # """Diagnostic Plots for single gradient descent optimizations. """ import datetime from IPython.display import HTML import matplotlib as mpl import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') import numpy as np import pandas as pd import seaborn as sns import warnings warnings.filterwarnings("ignore", category=RuntimeWarning) from .basic_plots import BasicPlots from ..utils.filemanager import save_fig, save_csv, save_gif class Diagnostics(): def learning_rates(self, models, directory=None, filename=None, xlim=None, ylim=None,show=True): """Prints learning rates by epoch for one or more models""" results = [] for _, v in models.items(): result = pd.DataFrame({"Name": v.name, "Iteration": np.arange(1,len(v.blackbox.learning_rates)+1), "Learning Rate": v.blackbox.learning_rates}) results.append(result) results = pd.concat(results, axis=0) # Render Plot fig, ax = plt.subplots(figsize=(12,4)) plot = BasicPlots() title = "Learning Rate(s)" ax = plot.lineplot(x='Iteration', y='Learning Rate', z='Name', data=results, title=title, ax=ax) # Set x and y limits if xlim is not None: ax.set_xlim(left = xlim[0], right=xlim[1]) if ylim is not None: ax.set_xlim(bottom=ylim[0], top=ylim[1]) # Finalize, show and save fig.tight_layout() if show: plt.show() if directory is not None: if filename is None: filename = title + '.png' save_fig(fig, directory, filename) plt.close(fig) def validation_curve(self, model, directory=None, filename=None, xlim=None, ylim=None,show=True): """Renders validation curve e.g. training and validation error""" # Extract parameters and data params = model.get_params() d = {'Iteration': np.arange(1,model.epochs+1), 'Learning Rates': model.learning_rates, 'Training Set': model.train_scores, 'Validation Set': model.val_scores} df = pd.DataFrame(data=d) df = pd.melt(df, id_vars=['Iteration', 'Learning Rates'], var_name='Dataset', value_name='Scores') # Format title title = model.algorithm + "\n" + "Validation Curve" # Initialize plot and set aesthetics fig, ax = plt.subplots(figsize=(12,4)) sns.set(style="whitegrid", font_scale=1) ax.set_facecolor('w') ax.tick_params(colors='k') ax.xaxis.label.set_color('k') ax.yaxis.label.set_color('k') ax.set_xlabel('Iteration') ax.set_ylabel('Learning Rates') ax.set_title(title, color='k') # Plot Learning Rates ax = sns.lineplot(x='Iteration', y='Learning Rates', color='g', data=df, ax=ax) # Plot scores ax2 = ax.twinx() ax2 = sns.lineplot(x='Iteration', y='Scores', hue='Dataset', data=df, ax=ax2) ax2.set_ylabel('Scores') # Set x and y limits if xlim is not None: ax.set_xlim(left = xlim[0], right=xlim[1]) if ylim is not None: ax.set_xlim(bottom=ylim[0], top=ylim[1]) # Show plot fig.tight_layout() if show: plt.show() # Save plot if instructed to do so if directory is not None: if filename is None: filename = model.algorithm + ' Validation Curve.png ' save_fig(fig, directory, filename) return fig
the-stack_106_30987
##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################

from django.conf.urls import url

# import views
from frog import views

urlpatterns = [
    # -- Gallery
    url(r'^gallery$', views.gallery.index),
    url(r'^gallery/(?P<obj_id>\d+)$', views.gallery.index),
    url(r'^gallery/(?P<obj_id>\d+)/filter$', views.gallery.filterObjects),
    url(r'^gallery/(?P<obj_id>\d+)/subscribe$', views.gallery.subscribe),

    # -- Piece
    url(r'^like/(?P<guid>\w+)$', views.piece.like),
    url(r'^piece/group/$', views.piece.group),
    url(r'^piece/group/(?P<obj_id>\d+)/$', views.piece.group),
    url(r'^piece/(?P<guid>\w+)/$', views.piece.data),
    url(r'^p$', views.piece.getGuids),

    # -- Tag
    url(r'^tag/$', views.tag.index),
    url(r'^tag/(?P<obj_id>\d+)/$', views.tag.index),
    url(r'^tag/resolve/(?P<name>[\w\s\.\-\`\']+)$', views.tag.resolve),
    url(r'^tag/search$', views.tag.search),
    url(r'^tag/manage$', views.tag.manage),
    url(r'^tag/merge/(?P<obj_id>\d+)/$', views.tag.merge),

    # -- User prefs
    url(r'^pref/$', views.userpref.index),

    # -- Comments
    url(r'^comment/$', views.comment.commentList),
    url(r'^comment/(?P<obj_id>\d+)/$', views.comment.index),

    # -- Misc functions
    url(r'^download$', views.download),
    url(r'^switchartist$', views.switchArtist),
    url(r'^artistlookup$', views.artistLookup),
    url(r'^isunique/$', views.isUnique),
    url(r'^getuser$', views.getUser),
    url(r'^userlist', views.userList),
    url(r'^csrf$', views.csrf),
    url(r'^siteconfig/$', views.siteconfig.index),
    url(r'^clienterror/$', views.clientError),
    url(r'^releasenotes$', views.releaseNotes),
    url(r'^view/$', views.piece.recordView),

    # -- Authentication
    url(r'^login$', views.login_),
    url(r'^logout$', views.logout_),
    url(r'^access_denied', views.accessDenied),

    url(r'^$', views.index),
]
the-stack_106_30988
def to_type(o, new_type): ''' Helper funciton that receives an object or a dict and convert it to a new given type. :param object|dict o: The object to convert :param Type new_type: The type to convert to. ''' if new_type == type(o): return o else: return new_type(**o) class Position(object): def __init__(self, line, character): """ Constructs a new Position instance. :param int line: Line position in a document (zero-based). :param int character: Character offset on a line in a document (zero-based). """ self.line = line self.character = character class Range(object): def __init__(self, start, end): """ Constructs a new Range instance. :param Position start: The range's start position. :param Position end: The range's end position. """ self.start = to_type(start, Position) self.end = to_type(end, Position) class Location(object): """ Represents a location inside a resource, such as a line inside a text file. """ def __init__(self, uri, range): """ Constructs a new Range instance. :param str uri: Resource file. :param Range range: The range inside the file """ self.uri = uri self.range = to_type(range, Range) class Diagnostic(object): def __init__(self, range, severity, code, source, message, relatedInformation): """ Constructs a new Diagnostic instance. :param Range range: The range at which the message applies.Resource file. :param int severity: The diagnostic's severity. Can be omitted. If omitted it is up to the client to interpret diagnostics as error, warning, info or hint. :param str code: The diagnostic's code, which might appear in the user interface. :param str source: A human-readable string describing the source of this diagnostic, e.g. 'typescript' or 'super lint'. :param str message: The diagnostic's message. :param list relatedInformation: An array of related diagnostic information, e.g. when symbol-names within a scope collide all definitions can be marked via this property. """ self.range = range self.severity = severity self.code = code self.source = source self.message = message self.relatedInformation = relatedInformation class DiagnosticSeverity(object): Error = 1 Warning = 2 # TODO: warning is known in python Information = 3 Hint = 4 class DiagnosticRelatedInformation(object): def __init__(self, location, message): """ Constructs a new Diagnostic instance. :param Location location: The location of this related diagnostic information. :param str message: The message of this related diagnostic information. """ self.location = location self.message = message class Command(object): def __init__(self, title, command, arguments): """ Constructs a new Diagnostic instance. :param str title: Title of the command, like `save`. :param str command: The identifier of the actual command handler. :param list argusments: Arguments that the command handler should be invoked with. """ self.title = title self.command = command self.arguments = arguments class TextDocumentItem(object): """ An item to transfer a text document from the client to the server. """ def __init__(self, uri, languageId, version, text): """ Constructs a new Diagnostic instance. :param DocumentUri uri: Title of the command, like `save`. :param str languageId: The identifier of the actual command handler. :param int version: Arguments that the command handler should be invoked with. :param str text: Arguments that the command handler should be invoked with. 
""" self.uri = uri self.languageId = languageId self.version = version self.text = text class TextDocumentIdentifier(object): """ Text documents are identified using a URI. On the protocol level, URIs are passed as strings. """ def __init__(self, uri): """ Constructs a new TextDocumentIdentifier instance. :param DocumentUri uri: The text document's URI. """ self.uri = uri class TextDocumentPositionParams(object): """ A parameter literal used in requests to pass a text document and a position inside that document. """ def __init__(self, textDocument, position): """ Constructs a new TextDocumentPositionParams instance. :param TextDocumentIdentifier textDocument: The text document. :param Position position: The position inside the text document. """ self.textDocument = textDocument self.position = position class LANGUAGE_IDENTIFIER: BAT="bat" BIBTEX="bibtex" CLOJURE="clojure" COFFESCRIPT="coffeescript" C="c" CPP="cpp" CSHARP="csharp" CSS="css" DIFF="diff" DOCKERFILE="dockerfile" FSHARP="fsharp" GIT_COMMIT="git-commit" GIT_REBASE="git-rebase" GO="go" GROOVY="groovy" HANDLEBARS="handlebars" HTML="html" INI="ini" JAVA="java" JAVASCRIPT="javascript" JSON="json" LATEX="latex" LESS="less" LUA="lua" MAKEFILE="makefile" MARKDOWN="markdown" OBJECTIVE_C="objective-c" OBJECTIVE_CPP="objective-cpp" Perl="perl" PHP="php" POWERSHELL="powershell" PUG="jade" PYTHON="python" R="r" RAZOR="razor" RUBY="ruby" RUST="rust" SASS="sass" SCSS="scss" ShaderLab="shaderlab" SHELL_SCRIPT="shellscript" SQL="sql" SWIFT="swift" TYPE_SCRIPT="typescript" TEX="tex" VB="vb" XML="xml" XSL="xsl" YAML="yaml" class SymbolKind(object): File = 1 Module = 2 Namespace = 3 Package = 4 Class = 5 Method = 6 Property = 7 Field = 8 Constructor = 9 Enum = 10 Interface = 11 Function = 12 Variable = 13 Constant = 14 String = 15 Number = 16 Boolean = 17 Array = 18 Object = 19 Key = 20 Null = 21 EnumMember = 22 Struct = 23 Event = 24 Operator = 25 TypeParameter = 26 class SymbolInformation(object): """ Represents information about programming constructs like variables, classes, interfaces etc. """ def __init__(self, name, kind, location, containerName, deprecated=False): """ Constructs a new SymbolInformation instance. :param str name: The name of this symbol. :param int kind: The kind of this symbol. :param bool Location: The location of this symbol. The location's range is used by a tool to reveal the location in the editor. If the symbol is selected in the tool the range's start information is used to position the cursor. So the range usually spans more then the actual symbol's name and does normally include things like visibility modifiers. The range doesn't have to denote a node range in the sense of a abstract syntax tree. It can therefore not be used to re-construct a hierarchy of the symbols. :param str containerName: The name of the symbol containing this symbol. This information is for user interface purposes (e.g. to render a qualifier in the user interface if necessary). It can't be used to re-infer a hierarchy for the document symbols. :param bool deprecated: Indicates if this symbol is deprecated. """ self.name = name self.kind = kind self.deprecated = deprecated self.location = to_type(location, Location) self.containerName = containerName class ParameterInformation(object): """ Represents a parameter of a callable-signature. A parameter can have a label and a doc-comment. """ def __init__(self, label, documentation=""): """ Constructs a new ParameterInformation instance. :param str label: The label of this parameter. 
Will be shown in the UI. :param str documentation: The human-readable doc-comment of this parameter. Will be shown in the UI but can be omitted. """ self.label = label self.documentation = documentation class SignatureInformation(object): """ Represents the signature of something callable. A signature can have a label, like a function-name, a doc-comment, and a set of parameters. """ def __init__(self, label, documentation="", parameters=[]): """ Constructs a new SignatureInformation instance. :param str label: The label of this signature. Will be shown in the UI. :param str documentation: The human-readable doc-comment of this signature. Will be shown in the UI but can be omitted. :param ParameterInformation[] parameters: The parameters of this signature. """ self.label = label self.documentation = documentation self.parameters = [to_type(parameter, ParameterInformation) for parameter in parameters] class SignatureHelp(object): """ Signature help represents the signature of something callable. There can be multiple signature but only one active and only one active parameter. """ def __init__(self, signatures, activeSignature=0, activeParameter=0): """ Constructs a new SignatureHelp instance. :param SignatureInformation[] signatures: One or more signatures. :param int activeSignature: :param int activeParameter: """ self.signatures = [to_type(signature, SignatureInformation) for signature in signatures] self.activeSignature = activeSignature self.activeParameter = activeParameter class CompletionTriggerKind(object): Invoked = 1 TriggerCharacter = 2 TriggerForIncompleteCompletions = 3 class CompletionContext(object): """ Contains additional information about the context in which a completion request is triggered. """ def __init__(self, triggerKind, triggerCharacter=None): """ Constructs a new CompletionContext instance. :param CompletionTriggerKind triggerKind: How the completion was triggered. :param str triggerCharacter: The trigger character (a single character) that has trigger code complete. Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter` """ self.triggerKind = triggerKind if triggerCharacter: self.triggerCharacter = triggerCharacter class TextEdit(object): """ A textual edit applicable to a text document. """ def __init__(self, range, newText): """ :param Range range: The range of the text document to be manipulated. To insert text into a document create a range where start === end. :param str newText: The string to be inserted. For delete operations use an empty string. """ self.range = range self.newText = newText class InsertTextFormat(object): PlainText = 1 Snippet = 2 class CompletionItem(object): """ """ def __init__(self, label, kind=None, detail=None, documentation=None, deprecated=None, presented=None, sortText=None, filterText=None, insertText=None, insertTextFormat=None, textEdit=None, additionalTextEdits=None, commitCharacters=None, command=None, data=None): """ :param str label: The label of this completion item. By default also the text that is inserted when selecting this completion. :param int kind: The kind of this completion item. Based of the kind an icon is chosen by the editor. :param str detail: A human-readable string with additional information about this item, like type or symbol information. :param tr ocumentation: A human-readable string that represents a doc-comment. :param bool deprecated: Indicates if this item is deprecated. :param bool presented: Select this item when showing. 
Note: that only one completion item can be selected and that the tool / client decides which item that is. The rule is that the first item of those that match best is selected. :param str sortText: A string that should be used when comparing this item with other items. When `falsy` the label is used. :param str filterText: A string that should be used when filtering a set of completion items. When `falsy` the label is used. :param str insertText: A string that should be inserted into a document when selecting this completion. When `falsy` the label is used. The `insertText` is subject to interpretation by the client side. Some tools might not take the string literally. For example VS Code when code complete is requested in this example `con<cursor position>` and a completion item with an `insertText` of `console` is provided it will only insert `sole`. Therefore it is recommended to use `textEdit` instead since it avoids additional client side interpretation. @deprecated Use textEdit instead. :param InsertTextFormat isertTextFormat: The format of the insert text. The format applies to both the `insertText` property and the `newText` property of a provided `textEdit`. :param TextEdit textEdit: An edit which is applied to a document when selecting this completion. When an edit is provided the value of `insertText` is ignored. Note:* The range of the edit must be a single line range and it must contain the position at which completion has been requested. :param TextEdit additionalTextEdits: An optional array of additional text edits that are applied when selecting this completion. Edits must not overlap (including the same insert position) with the main edit nor with themselves. Additional text edits should be used to change text unrelated to the current cursor position (for example adding an import statement at the top of the file if the completion item will insert an unqualified type). :param str commitCharacters: An optional set of characters that when pressed while this completion is active will accept it first and then type that character. *Note* that all commit characters should have `length=1` and that superfluous characters will be ignored. :param Command command: An optional command that is executed *after* inserting this completion. Note: that additional modifications to the current document should be described with the additionalTextEdits-property. :param data: An data entry field that is preserved on a completion item between a completion and a completion resolve request. """ self.label = label self.kind = kind self.detail = detail self.documentation = documentation self.deprecated = deprecated self.presented = presented self.sortText = sortText self.filterText = filterText self.insertText = insertText self.insertTextFormat = insertTextFormat self.textEdit = textEdit self.additionalTextEdits = additionalTextEdits self.commitCharacters = commitCharacters self.command = command self.data = data class CompletionList(object): """ Represents a collection of [completion items](#CompletionItem) to be presented in the editor. """ def __init__(self, isIncomplete, items): """ Constructs a new CompletionContext instance. :param bool isIncomplete: This list it not complete. Further typing should result in recomputing this list. :param CompletionItem items: The completion items. 
""" self.isIncomplete = isIncomplete self.items = [to_type(i, CompletionItem) for i in items] class ErrorCodes(object): # Defined by JSON RPC ParseError = -32700 InvalidRequest = -32600 MethodNotFound = -32601 InvalidParams = -32602 InternalError = -32603 serverErrorStart = -32099 serverErrorEnd = -32000 ServerNotInitialized = -32002 UnknownErrorCode = -32001 # Defined by the protocol. RequestCancelled = -32800 ContentModified = -32801 class ResponseError(Exception): def __init__(self, code, message, data = None): self.code = code self.message = message if data: self.data = data
the-stack_106_30991
"""ResNets for Steering Prediction. Author: Yuhuang Hu Email : [email protected] """ from __future__ import print_function import os import cPickle as pickle from sacred import Experiment import numpy as np import h5py from keras.models import load_model import spiker from spiker.data import ddd17 exp = Experiment("ResNet - Steering - Experiment") exp.add_config({ "model_name": "", # the model name "data_name": "", # the data name "channel_id": 0, # which channel to chose, 0: dvs, 1: aps, 2: both "stages": 0, # number of stages "blocks": 0, # number of blocks of each stage "filter_list": [], # number of filters per stage "nb_epoch": 0, # number of training epochs "batch_size": 0, # batch size }) @exp.automain def resnet_exp(model_name, data_name, channel_id, stages, blocks, filter_list, nb_epoch, batch_size): """Perform ResNet experiment.""" model_path = os.path.join(spiker.HOME, "data", "exps", "ral-exps", model_name) model_file_base = os.path.join(model_path, model_name) # print model info print("[MESSAGE] Model Name: %s" % (model_name)) print("[MESSAGE] Number of epochs: %d" % (nb_epoch)) print("[MESSAGE] Batch Size: %d" % (batch_size)) print("[MESSAGE] Number of stages: %d" % (stages)) print("[MESSAGE] Number of blocks: %d" % (blocks)) # load data data_path = os.path.join(spiker.HOME, "data", "exps", "data", "ddd17", data_name) if not os.path.isfile(data_path): raise ValueError("This dataset does not exist at %s" % (data_path)) print("[MESSAGE] Dataset %s" % (data_path)) dataset = h5py.File(data_path, "r") if channel_id != 2: X_test = dataset["test_data"][ :, :, :, channel_id][()][..., np.newaxis].astype("float32")/255. else: X_test = dataset["test_data"][()].astype("float32")/255. Y_test = dataset["test_target"][()] dataset.close() print("[MESSAGE] Number of test samples %d" % (X_test.shape[0])) # Build model print ("[MESSAGE] Model is compiled.") model_file = model_file_base + "-best.hdf5" model = load_model(model_file) Y_predict = model.predict(X_test) with open(model_file_base+"-prediction.pkl", "wb") as f: pickle.dump([Y_test, Y_predict], f)
the-stack_106_30992
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed


def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=3, tqdm=tqdm):
    """
    This function was copied from here:
    http://danshiebler.com/2016-09-14-parallel-progress-bar/

    A parallel version of the map function with a progress bar.

    Args:
        array (array-like): An array to iterate over.
        function (function): A python function to apply to the elements of array
        n_jobs (int, default=16): The number of cores to use
        use_kwargs (boolean, default=False): Whether to consider the elements of array as
            dictionaries of keyword arguments to function
        front_num (int, default=3): The number of iterations to run serially before
            kicking off the parallel job. Useful for catching bugs
    Returns:
        [function(array[0]), function(array[1]), ...]
    """
    # We run the first few iterations serially to catch bugs
    # (front defaults to an empty list so front_num=0 also works)
    front = []
    if front_num > 0:
        front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
    # If we set n_jobs to 1, just run a list comprehension. This is useful for
    # benchmarking and debugging.
    if n_jobs == 1:
        return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
    # Assemble the workers
    with ProcessPoolExecutor(max_workers=n_jobs) as pool:
        # Pass the elements of array into function
        if use_kwargs:
            futures = [pool.submit(function, **a) for a in array[front_num:]]
        else:
            futures = [pool.submit(function, a) for a in array[front_num:]]
        kwargs = {
            'total': len(futures),
            'unit': 'it',
            'unit_scale': True,
            'leave': True
        }
        # Print out the progress as tasks complete
        for _ in tqdm(as_completed(futures), **kwargs):
            pass
    out = []
    # Get the results from the futures.
    for i, future in tqdm(enumerate(futures)):
        try:
            out.append(future.result())
        except Exception as e:
            out.append(e)
    return front + out
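A small usage sketch matching the docstring above; square is a throwaway example function, and the __main__ guard is there because ProcessPoolExecutor re-imports the calling module on platforms that spawn worker processes:

def square(x):
    return x * x


if __name__ == "__main__":
    # Plain elements, applied one per call
    squares = parallel_process(list(range(1000)), square, n_jobs=4)

    # Dicts of keyword arguments, unpacked into the function with use_kwargs=True
    pairs = [{"x": i} for i in range(1000)]
    squares_again = parallel_process(pairs, square, n_jobs=4, use_kwargs=True)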
the-stack_106_30993
cube = lambda x: pow(x, 3)  # complete the lambda function


def fibonacci(n):
    l = [0, 1]
    for i in range(2, n):
        temp = l[-1] + l[-2]
        l.append(temp)
    return l[0:n]


if __name__ == '__main__':
    n = int(input())
    print(list(map(cube, fibonacci(n))))
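A worked example of what the pipeline produces for n = 5: fibonacci(5) yields the first five Fibonacci numbers, and the map cubes each of them.

>>> fibonacci(5)
[0, 1, 1, 2, 3]
>>> list(map(cube, fibonacci(5)))
[0, 1, 1, 8, 27]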
the-stack_106_30994
from collections import OrderedDict from rest_framework import serializers from profiles.models import Project, Tag, Basemap, Spatialitedbs, Otherfiles, Profile, ProfileSet from profiles.models import UserProject from django.contrib.auth import get_user_model from rest_framework.fields import SkipField class ProjectSerializer(serializers.ModelSerializer): class Meta: model = Project fields = ('path', 'modifieddate', 'url', 'uploadurl', 'size' ) # this bit is for omitting empty fields (size) def to_representation(self, instance): result = super(ProjectSerializer, self).to_representation(instance) return OrderedDict([(key, result[key]) for key in result if result[key] is not None]) class UserProjectSerializer(serializers.HyperlinkedModelSerializer): owner = serializers.SlugRelatedField(read_only=True, slug_field='id') class Meta: model = UserProject fields = ('modifieddate', 'owner', 'document', 'description') class TagSerializer(serializers.ModelSerializer): owner = serializers.SlugRelatedField(read_only=True, slug_field='id') class Meta: model = Tag fields = ('path', 'modifieddate', 'url', 'size', 'owner') # this bit is for omitting empty fields (size) def to_representation(self, instance): result = super(TagSerializer, self).to_representation(instance) return OrderedDict([(key, result[key]) for key in result if result[key] is not None]) class BasemapSerializer(serializers.ModelSerializer): class Meta: model = Basemap fields = ('path', 'modifieddate', 'url', 'size' ) # this bit is for omitting empty fields (size) def to_representation(self, instance): result = super(BasemapSerializer, self).to_representation(instance) return OrderedDict([(key, result[key]) for key in result if result[key] is not None]) class SpatialitedbsSerializer(serializers.ModelSerializer): class Meta: model = Spatialitedbs fields = ('path', 'modifieddate', 'url', 'size', 'uploadurl', 'visible' ) # this bit is for omitting empty fields (size) def to_representation(self, instance): result = super(SpatialitedbsSerializer, self).to_representation(instance) return OrderedDict([(key, result[key]) for key in result if result[key] is not None]) class OtherfilesSerializer(serializers.ModelSerializer): class Meta: model = Otherfiles fields = ('path', 'modifieddate', 'url', 'size' ) # this bit is for omitting empty fields (size) def to_representation(self, instance): result = super(OtherfilesSerializer, self).to_representation(instance) return OrderedDict([(key, result[key]) for key in result if result[key] is not None]) class ProfileSerializer(serializers.ModelSerializer): project = ProjectSerializer(read_only=True) tags = TagSerializer(read_only=True) basemaps = BasemapSerializer(many=True, read_only=True) spatialitedbs = SpatialitedbsSerializer(many=True, read_only=True) otherfiles = OtherfilesSerializer(many=True, read_only=True) # this bit is for omitting empty fields (size) def to_representation(self, instance): result = super(ProfileSerializer, self).to_representation(instance) return OrderedDict([(key, result[key]) for key in result if result[key] is not None]) class Meta: model = Profile fields = ('name', 'description', 'creationdate', 'modifieddate', 'color', 'active', 'sdcardPath', 'mapView', 'project', 'tags', 'basemaps', 'spatialitedbs', 'otherfiles' ) class ProfileSetSerializer(serializers.ModelSerializer): profiles = ProfileSerializer(read_only=True, many=True) class Meta: model = ProfileSet fields = ('formatVersion', 'profiles')
the-stack_106_30996
"""Code for handling downloading of HPO files used by scout from CLI""" import logging import pathlib import click from scout.utils.scout_requests import fetch_mim_files LOG = logging.getLogger(__name__) def print_omim(out_dir, api_key): """Print HPO files to a directory Args: out_dir(Path) """ mim_files = fetch_mim_files(api_key, mim2genes=True, genemap2=True) file_name = "genemap2.txt" file_path = out_dir / file_name LOG.info("Print genemap genes to %s", file_path) with file_path.open("w", encoding="utf-8") as outfile: for line in mim_files["genemap2"]: outfile.write(line + "\n") file_name = "mim2genes.txt" file_path = out_dir / file_name LOG.info("Print mim2gene info to %s", file_path) with file_path.open("w", encoding="utf-8") as outfile: for line in mim_files["mim2genes"]: outfile.write(line + "\n") @click.command("omim", help="Download a files with OMIM info") @click.option("--api-key", help="Specify the api key", required=True) @click.option("-o", "--out-dir", default="./", show_default=True) def omim(out_dir, api_key): """Download the OMIM genes""" out_dir = pathlib.Path(out_dir) out_dir.mkdir(parents=True, exist_ok=True) LOG.info("Download OMIM resources to %s", out_dir) print_omim(out_dir, api_key)
the-stack_106_30997
from slyd.orm.exceptions import ImproperlyConfigured

__all__ = [
    'get_serializer',
]

serializers = {}


def get_serializer(schema_type):
    try:
        return serializers[schema_type]
    except KeyError:
        raise ImproperlyConfigured(
            u"No schema for type '{}' exists".format(schema_type))
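The registry is just the module-level serializers dict, so registration and lookup reduce to the sketch below; ItemSchema is a hypothetical stand-in for a real serializer class, and how entries get registered in the actual project is not shown here:

class ItemSchema:  # hypothetical serializer class
    pass


serializers['item'] = ItemSchema

assert get_serializer('item') is ItemSchema
get_serializer('unknown')  # raises ImproperlyConfigured: "No schema for type 'unknown' exists"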
the-stack_106_31000
# -*- coding: utf-8 -*- """ TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import pytest from bkuser_core.categories.constants import CategoryStatus from bkuser_core.categories.views import CategoryViewSet from bkuser_core.profiles.constants import ProfileStatus from bkuser_core.tests.utils import get_one_object, make_simple_category, make_simple_department, make_simple_profile from bkuser_core.user_settings.models import Setting pytestmark = pytest.mark.django_db class TestActionApis: @pytest.fixture(scope="class") def view(self): return CategoryViewSet.as_view( {"get": "retrieve", "put": "update", "patch": "partial_update", "delete": "destroy", "post": "restoration"} ) def test_update_category(self, view): pass @pytest.mark.parametrize( "enabled,status", [(False, CategoryStatus.NORMAL.value), (False, CategoryStatus.INACTIVE.value)] ) def test_category_restoration(self, factory, view, enabled, status): cc = make_simple_category("xoodomain", "Domain", force_create_params={"enabled": enabled, "status": status}) setting_id = [] for setting in cc.make_default_settings(): setting.enabled = 0 setting.save(update_fields=["enabled"]) setting_id.append(setting.id) d = make_simple_department("dep", parent_id=1, force_create_params={"category_id": cc.id, "enabled": enabled}) p = make_simple_profile("profile", force_create_params={"category_id": cc.id, "enabled": enabled}) request = factory.post(f"/api/v2/categories/{cc.id}/restoration/?include_disabled=1") setattr(request, "operator", "faker") response = view(request=request, lookup_value=f"{cc.id}") assert response.status_code == 200 cc = get_one_object("profilecategory", id=cc.id, domain=cc.domain) assert cc.enabled and cc.status == CategoryStatus.NORMAL.value assert get_one_object("department", id=d.id, name=d.name).enabled p = get_one_object("profile", id=p.id, username=p.username) assert p.enabled and p.status == ProfileStatus.NORMAL.value assert {x.id for x in Setting.objects.filter(id__in=setting_id, enabled=True)} == set(setting_id) class TestListCreateApis: @pytest.fixture(scope="class") def view(self): return CategoryViewSet.as_view({"get": "list", "post": "create"}) @pytest.mark.parametrize( "all_count,fields,result_count,include_disabled,expected_fields", [ (10, "id,display_name,domain,enabled", 5, "false", "id,display_name,domain,enabled"), (10, "id,display_name,domain", 10, "true", "id,display_name,domain,enabled"), (10, "id,display_name,domain,enabled", 10, "true", "id,display_name,domain,enabled"), ], ) def test_category_include_enabled_fields( self, factory, view, all_count, fields, result_count, include_disabled, expected_fields ): """测试目录软删除显式拉取和字段选择""" for i in range(1, all_count): make_simple_category(f"domain{i}", f"Display{i}", force_create_params={"enabled": i % 2 == 0}) response = view( request=factory.get(f"/api/v2/categories/?fields={fields}&include_disabled={include_disabled}") ) assert 
response.data["count"] == result_count assert set(response.data["results"][0].keys()) == set(expected_fields.split(","))
the-stack_106_31003
import logging
import os

logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] %(levelname)-12s|process:%(process)-5s|thread:"
           "%(thread)-5s|funcName:%(funcName)s|message:%(message)s",
    handlers=[
        # logging.FileHandler('fileName.log'),
        logging.StreamHandler()
    ])

user_sessions = {}
days = ["понедельник", "вторник", "среда", "четверг", "пятница", "суббота", "воскресенье"]

bot_token = None
debug_mode = None
try:
    bot_token = os.environ['TELEGRAM_TOKEN']
    debug_mode = 'true' == os.environ['DEBUG_MODE']
except KeyError as e:
    logging.error(e)
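Both variables are read once at import time, so callers would set the environment first. A minimal sketch, assuming the file above is saved as bot_config.py and using placeholder values:

import os
os.environ.setdefault("TELEGRAM_TOKEN", "123456:placeholder-token")  # placeholder
os.environ.setdefault("DEBUG_MODE", "true")

import bot_config  # hypothetical module name for the file above

print(bot_config.debug_mode)  # True
print(bot_config.days[0])     # "понедельник" (Monday)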
the-stack_106_31009
# Copyright 2016 VMware Inc # All Rights Reserved # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.network import base from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import decorators from tempest import test from vmware_nsx_tempest.services import nsxv3_client CONF = config.CONF class NSXv3RoutersTest(base.BaseAdminNetworkTest): """Test L3 Router and realization on NSX backend When test L3 Router feature, we need to test both REST API call from neutron and realization state on backend. Two tests have been added in this class: - Test create and update router - Test delete router """ @classmethod def skip_checks(cls): super(NSXv3RoutersTest, cls).skip_checks() if not test.is_extension_enabled('router', 'network'): msg = "router extension not enabled." raise cls.skipException(msg) @classmethod def resource_setup(cls): super(NSXv3RoutersTest, cls).resource_setup() cls.nsx = nsxv3_client.NSXV3Client(CONF.nsxv3.nsx_manager, CONF.nsxv3.nsx_user, CONF.nsxv3.nsx_password) @test.attr(type='nsxv3') @decorators.idempotent_id('0e9938bc-d2a3-4a9a-a4f9-7a93ee8bb344') def test_create_update_nsx_router(self): # Create a router router_name = data_utils.rand_name('router-') router = self.create_router(router_name, admin_state_up=True) self.addCleanup(self._delete_router, router['id']) nsx_router = self.nsx.get_logical_router(router['name'], router['id']) self.assertEqual(router['name'], router_name) self.assertEqual(router['admin_state_up'], True) self.assertIsNotNone(nsx_router) # Update the name of router and verify if it is updated on both # neutron and nsx backend updated_name = 'updated ' + router_name update_body = self.routers_client.update_router(router['id'], name=updated_name) updated_router = update_body['router'] nsx_router = self.nsx.get_logical_router(updated_router['name'], updated_router['id']) self.assertEqual(updated_router['name'], updated_name) self.assertIsNotNone(nsx_router) @test.attr(type='nsxv3') @decorators.idempotent_id('6f49b69c-0800-4c83-b1f8-595ae5bfeea7') def test_delete_nsx_router(self): # Create a router router_name = data_utils.rand_name('router-') router = self.create_router(router_name, admin_state_up=True) nsx_router = self.nsx.get_logical_router(router['name'], router['id']) self.assertEqual(router['name'], router_name) self.assertIsNotNone(nsx_router) # Delete the router and verify it is deleted on nsx backend self.routers_client.delete_router(router['id']) nsx_router = self.nsx.get_logical_router(router['name'], router['id']) self.assertIsNone(nsx_router) def _delete_router(self, router_id): # Delete the router in case the test exits with any exception list_body = self.routers_client.list_routers() for router in list_body.get('router', []): if router['id'] == router_id: self.routers_client.delete_router(router_id)
the-stack_106_31010
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from google.cloud import storage
from linebot import LineBotApi
from linebot.models import TextSendMessage, ImageSendMessage, QuickReply, QuickReplyButton, MessageAction
from linebot.exceptions import LineBotApiError
import time
import os
from models.task import Task
import style_transfer
import matplotlib.pyplot as plt

'''
Configs
'''
# Use a service account
cred_path = os.environ["CRED_PATH"]
bucket_name = os.environ["USER_INFO_GS_BUCKET_NAME"]
styles = {
    1: "01_01_starry_night.jpeg",
    2: "01_02_sunflowers.jpeg",
    3: "01_03_the_yellow_house.jpeg",
    4: "02_04_water_fall.jpeg",
    5: "02_05_ascending_and_descending.jpeg",
    6: "02_06_ralativity.jpg",
    7: "03_07_unknown.jpeg",
    8: "03_08_flower_and_bird.jpeg",
    9: "03_09_flowers_and_birds_seasons.jpeg",
    10: "04_10_first_impression.jpeg",
    11: "04_11_unknown.jpeg",
    12: "04_12_unknown.jpeg",
    13: "05_13_frida_kahlo.jpeg",
    14: "05_14_frida_kahlo.jpeg",
    15: "05_15_frida_kahlo.jpeg"
}
iteration_times = int(os.environ["ITERATION_TIMES"])
line_channel_access_token = os.environ["LINE_CHANNEL_ACCESS_TOKEN"]
'''
'''

# Initialize CloudStorage
storage_client = storage.Client.from_service_account_json(cred_path)
bucket = storage_client.bucket(bucket_name)

# Initialize Line Bot Api
line_bot_api = LineBotApi(line_channel_access_token)

cred = credentials.Certificate(cred_path)
firebase_admin.initialize_app(cred)

db = firestore.client()

# Create a callback on_snapshot function to capture changes
tasks_ref = db.collection(u'tasks')


def on_snapshot(doc_snapshot, changes, read_time):
    for change in changes:
        print(change.document.id, "added")
        # wait DELTA seconds before picking the task up (env values are strings)
        time.sleep(float(os.environ["DELTA"]))
        task_ref = tasks_ref.document(change.document.id)
        task_doc = task_ref.get()
        if task_doc.exists:
            task = Task.from_dict(task_doc)
            if task.status == 0:
                task.status = 1
                task_ref.set(document_data=task.to_dict(), merge=True)
                print(change.document.id, "started")

                bucket.blob(task.content_pic_url).download_to_filename(
                    os.path.basename(task.content_pic_url))
                best_img, best_loss = style_transfer.run_style_transfer(
                    os.path.basename(task.content_pic_url),
                    'styles/' + styles[task.style_id],
                    num_iterations=iteration_times)
                plt.imsave('done.jpg', best_img)

                # remove original image
                os.remove(os.path.basename(task.content_pic_url))

                # upload to CloudStorage
                destination_blob_name = f'{change.document.id}/image/done_{change.document.id}_{str(time.time())}.jpg'
                bucket.blob(destination_blob_name).upload_from_filename('done.jpg')
                # make it publicly readable
                bucket.blob(destination_blob_name).make_public()

                # delete task in FireStore
                task_ref.delete()

                # push message to user
                try:
                    line_bot_api.push_message(change.document.id, [
                        TextSendMessage(text="啾啾幫你畫好啦~"),
                        ImageSendMessage(
                            original_content_url=f'https://storage.googleapis.com/{bucket_name}/{destination_blob_name}',
                            preview_image_url=f'https://storage.googleapis.com/{bucket_name}/{destination_blob_name}'
                        ),
                        TextSendMessage(text="好不好看呢?\n歡迎多多抖內作者ㄛ~"),
                        TextSendMessage(text="如果想再請啾啾畫一張畫,請輸入「再來一張」",
                                        quick_reply=QuickReply(items=[
                                            QuickReplyButton(action=MessageAction(
                                                label="再來一張", text="再來一張"))
                                        ]))
                    ])
                except LineBotApiError as e:
                    print(e.status_code)
                    print(e.request_id)
                    print(e.error.message)
                    print(e.error.details)


# Watch the document
doc_watch = tasks_ref.on_snapshot(on_snapshot)

while True:
    time.sleep(1)
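The models.task module is not part of this file; the sketch below is a guess at the Task shape the worker relies on, with field names inferred from the attribute accesses above and everything else an assumption:

# models/task.py - hypothetical reconstruction
class Task:
    def __init__(self, status, style_id, content_pic_url):
        self.status = status                    # 0: queued, 1: picked up (as used above)
        self.style_id = style_id                # key into the styles dict
        self.content_pic_url = content_pic_url  # blob path of the uploaded content image

    @staticmethod
    def from_dict(doc):
        d = doc.to_dict()  # doc is a Firestore DocumentSnapshot
        return Task(d["status"], d["style_id"], d["content_pic_url"])

    def to_dict(self):
        return {
            "status": self.status,
            "style_id": self.style_id,
            "content_pic_url": self.content_pic_url,
        }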
the-stack_106_31013
import json import threading import time import os import stat from decimal import Decimal from typing import Union from copy import deepcopy from . import util from .util import (user_dir, print_error, PrintError, make_dir, NoDynamicFeeEstimates, format_fee_satoshis, quantize_feerate) from .i18n import _ FEE_ETA_TARGETS = [25, 10, 5, 2] FEE_DEPTH_TARGETS = [10000000, 5000000, 2000000, 1000000, 500000, 200000, 100000] # satoshi per kbyte FEERATE_MAX_DYNAMIC = 1500000 FEERATE_WARNING_HIGH_FEE = 600000 FEERATE_FALLBACK_STATIC_FEE = 150000 FEERATE_DEFAULT_RELAY = 1000 FEERATE_STATIC_VALUES = [5000, 10000, 20000, 30000, 50000, 70000, 100000, 150000, 200000, 300000] config = None def get_config(): global config return config def set_config(c): global config config = c FINAL_CONFIG_VERSION = 3 class SimpleConfig(PrintError): """ The SimpleConfig class is responsible for handling operations involving configuration files. There are two different sources of possible configuration values: 1. Command line options. 2. User configuration (in the user's config directory) They are taken in order (1. overrides config options set in 2.) """ def __init__(self, options=None, read_user_config_function=None, read_user_dir_function=None): if options is None: options = {} # This lock needs to be acquired for updating and reading the config in # a thread-safe way. self.lock = threading.RLock() self.mempool_fees = {} self.fee_estimates = {} self.fee_estimates_last_updated = {} self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees # The following two functions are there for dependency injection when # testing. if read_user_config_function is None: read_user_config_function = read_user_config if read_user_dir_function is None: self.user_dir = user_dir else: self.user_dir = read_user_dir_function # The command line options self.cmdline_options = deepcopy(options) # don't allow to be set on CLI: self.cmdline_options.pop('config_version', None) # Set self.path and read the user config self.user_config = {} # for self.get in electrum_path() self.path = self.electrum_path() self.user_config = read_user_config_function(self.path) if not self.user_config: # avoid new config getting upgraded self.user_config = {'config_version': FINAL_CONFIG_VERSION} # config "upgrade" - CLI options self.rename_config_keys( self.cmdline_options, {'auto_cycle': 'auto_connect'}, True) # config upgrade - user config if self.requires_upgrade(): self.upgrade() # Make a singleton instance of 'self' set_config(self) def electrum_path(self): # Read electrum_path from command line # Otherwise use the user's default data directory. path = self.get('electrum_path') if path is None: path = self.user_dir() make_dir(path, allow_symlink=False) if self.get('testnet'): path = os.path.join(path, 'testnet') make_dir(path, allow_symlink=False) elif self.get('regtest'): path = os.path.join(path, 'regtest') make_dir(path, allow_symlink=False) elif self.get('simnet'): path = os.path.join(path, 'simnet') make_dir(path, allow_symlink=False) self.print_error("electrum directory", path) return path def rename_config_keys(self, config, keypairs, deprecation_warning=False): """Migrate old key names to new ones""" updated = False for old_key, new_key in keypairs.items(): if old_key in config: if new_key not in config: config[new_key] = config[old_key] if deprecation_warning: self.print_stderr('Note that the {} variable has been deprecated. 
' 'You should use {} instead.'.format(old_key, new_key)) del config[old_key] updated = True return updated def set_key(self, key, value, save=True): if not self.is_modifiable(key): self.print_stderr("Warning: not changing config key '%s' set on the command line" % key) return self._set_key_in_user_config(key, value, save) def _set_key_in_user_config(self, key, value, save=True): with self.lock: if value is not None: self.user_config[key] = value else: self.user_config.pop(key, None) if save: self.save_user_config() def get(self, key, default=None): with self.lock: out = self.cmdline_options.get(key) if out is None: out = self.user_config.get(key, default) return out def requires_upgrade(self): return self.get_config_version() < FINAL_CONFIG_VERSION def upgrade(self): with self.lock: self.print_error('upgrading config') self.convert_version_2() self.convert_version_3() self.set_key('config_version', FINAL_CONFIG_VERSION, save=True) def convert_version_2(self): if not self._is_upgrade_method_needed(1, 1): return self.rename_config_keys(self.user_config, {'auto_cycle': 'auto_connect'}) try: # change server string FROM host:port:proto TO host:port:s server_str = self.user_config.get('server') host, port, protocol = str(server_str).rsplit(':', 2) assert protocol in ('s', 't') int(port) # Throw if cannot be converted to int server_str = '{}:{}:s'.format(host, port) self._set_key_in_user_config('server', server_str) except BaseException: self._set_key_in_user_config('server', None) self.set_key('config_version', 2) def convert_version_3(self): if not self._is_upgrade_method_needed(2, 2): return base_unit = self.user_config.get('base_unit') if isinstance(base_unit, str): self._set_key_in_user_config('base_unit', None) map_ = {'vtc':8, 'mvtc':5, 'uvtc':2, 'bits':2, 'sat':0} decimal_point = map_.get(base_unit.lower()) self._set_key_in_user_config('decimal_point', decimal_point) self.set_key('config_version', 3) def _is_upgrade_method_needed(self, min_version, max_version): cur_version = self.get_config_version() if cur_version > max_version: return False elif cur_version < min_version: raise Exception( ('config upgrade: unexpected version %d (should be %d-%d)' % (cur_version, min_version, max_version))) else: return True def get_config_version(self): config_version = self.get('config_version', 1) if config_version > FINAL_CONFIG_VERSION: self.print_stderr('WARNING: config version ({}) is higher than ours ({})' .format(config_version, FINAL_CONFIG_VERSION)) return config_version def is_modifiable(self, key): return key not in self.cmdline_options def save_user_config(self): if not self.path: return path = os.path.join(self.path, "config") s = json.dumps(self.user_config, indent=4, sort_keys=True) try: with open(path, "w", encoding='utf-8') as f: f.write(s) os.chmod(path, stat.S_IREAD | stat.S_IWRITE) except FileNotFoundError: # datadir probably deleted while running... if os.path.exists(self.path): # or maybe not? 
raise def get_wallet_path(self): """Set the path of the wallet.""" # command line -w option if self.get('wallet_path'): return os.path.join(self.get('cwd'), self.get('wallet_path')) # path in config file path = self.get('default_wallet_path') if path and os.path.exists(path): return path # default path util.assert_datadir_available(self.path) dirpath = os.path.join(self.path, "wallets") make_dir(dirpath, allow_symlink=False) new_path = os.path.join(self.path, "wallets", "default_wallet") # default path in pre 1.9 versions old_path = os.path.join(self.path, "electrum.dat") if os.path.exists(old_path) and not os.path.exists(new_path): os.rename(old_path, new_path) return new_path def remove_from_recently_open(self, filename): recent = self.get('recently_open', []) if filename in recent: recent.remove(filename) self.set_key('recently_open', recent) def set_session_timeout(self, seconds): self.print_error("session timeout -> %d seconds" % seconds) self.set_key('session_timeout', seconds) def get_session_timeout(self): return self.get('session_timeout', 300) def open_last_wallet(self): if self.get('wallet_path') is None: last_wallet = self.get('gui_last_wallet') if last_wallet is not None and os.path.exists(last_wallet): self.cmdline_options['default_wallet_path'] = last_wallet def save_last_wallet(self, wallet): if self.get('wallet_path') is None: path = wallet.storage.path self.set_key('gui_last_wallet', path) def impose_hard_limits_on_fee(func): def get_fee_within_limits(self, *args, **kwargs): fee = func(self, *args, **kwargs) if fee is None: return fee fee = min(FEERATE_MAX_DYNAMIC, fee) fee = max(FEERATE_DEFAULT_RELAY, fee) return fee return get_fee_within_limits @impose_hard_limits_on_fee def eta_to_fee(self, slider_pos) -> Union[int, None]: """Returns fee in sat/kbyte.""" slider_pos = max(slider_pos, 0) slider_pos = min(slider_pos, len(FEE_ETA_TARGETS)) if slider_pos < len(FEE_ETA_TARGETS): target_blocks = FEE_ETA_TARGETS[slider_pos] fee = self.fee_estimates.get(target_blocks) else: fee = self.fee_estimates.get(2) if fee is not None: fee += fee/2 fee = int(fee) return fee def fee_to_depth(self, target_fee): depth = 0 for fee, s in self.mempool_fees: depth += s if fee <= target_fee: break else: return 0 return depth @impose_hard_limits_on_fee def depth_to_fee(self, slider_pos) -> int: """Returns fee in sat/kbyte.""" target = self.depth_target(slider_pos) depth = 0 for fee, s in self.mempool_fees: depth += s if depth > target: break else: return 0 return fee * 1000 def depth_target(self, slider_pos): slider_pos = max(slider_pos, 0) slider_pos = min(slider_pos, len(FEE_DEPTH_TARGETS)-1) return FEE_DEPTH_TARGETS[slider_pos] def eta_target(self, i): if i == len(FEE_ETA_TARGETS): return 1 return FEE_ETA_TARGETS[i] def fee_to_eta(self, fee_per_kb): import operator l = list(self.fee_estimates.items()) + [(1, self.eta_to_fee(4))] dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l) min_target, min_value = min(dist, key=operator.itemgetter(1)) if fee_per_kb < self.fee_estimates.get(25)/2: min_target = -1 return min_target def depth_tooltip(self, depth): return "%.1f MB from tip"%(depth/1000000) def eta_tooltip(self, x): if x < 0: return _('Low fee') elif x == 1: return _('In the next block') else: return _('Within {} blocks').format(x) def get_fee_status(self): dyn = self.is_dynfee() mempool = self.use_mempool_fees() pos = self.get_depth_level() if mempool else self.get_fee_level() fee_rate = self.fee_per_kb() target, tooltip = self.get_fee_text(pos, dyn, mempool, fee_rate) return tooltip + ' 
[%s]'%target if dyn else target + ' [Static]' def get_fee_text(self, pos, dyn, mempool, fee_rate): """Returns (text, tooltip) where text is what we target: static fee / num blocks to confirm in / mempool depth tooltip is the corresponding estimate (e.g. num blocks for a static fee) """ if fee_rate is None: rate_str = 'unknown' else: rate_str = format_fee_satoshis(fee_rate/1000) + ' sat/byte' if dyn: if mempool: depth = self.depth_target(pos) text = self.depth_tooltip(depth) else: eta = self.eta_target(pos) text = self.eta_tooltip(eta) tooltip = rate_str else: text = rate_str if mempool and self.has_fee_mempool(): depth = self.fee_to_depth(fee_rate) tooltip = self.depth_tooltip(depth) elif not mempool and self.has_fee_etas(): eta = self.fee_to_eta(fee_rate) tooltip = self.eta_tooltip(eta) else: tooltip = '' return text, tooltip def get_depth_level(self): maxp = len(FEE_DEPTH_TARGETS) - 1 return min(maxp, self.get('depth_level', 2)) def get_fee_level(self): maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block" return min(maxp, self.get('fee_level', 2)) def get_fee_slider(self, dyn, mempool): if dyn: if mempool: pos = self.get_depth_level() maxp = len(FEE_DEPTH_TARGETS) - 1 fee_rate = self.depth_to_fee(pos) else: pos = self.get_fee_level() maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block" fee_rate = self.eta_to_fee(pos) else: fee_rate = self.fee_per_kb(dyn=False) pos = self.static_fee_index(fee_rate) maxp = 9 return maxp, pos, fee_rate def static_fee(self, i): return FEERATE_STATIC_VALUES[i] def static_fee_index(self, value): if value is None: raise TypeError('static fee cannot be None') dist = list(map(lambda x: abs(x - value), FEERATE_STATIC_VALUES)) return min(range(len(dist)), key=dist.__getitem__) def has_fee_etas(self): return len(self.fee_estimates) == 4 def has_fee_mempool(self): return bool(self.mempool_fees) def has_dynamic_fees_ready(self): if self.use_mempool_fees(): return self.has_fee_mempool() else: return self.has_fee_etas() def is_dynfee(self): return bool(self.get('dynamic_fees', True)) def use_mempool_fees(self): return bool(self.get('mempool_fees', False)) def _feerate_from_fractional_slider_position(self, fee_level: float, dyn: bool, mempool: bool) -> Union[int, None]: fee_level = max(fee_level, 0) fee_level = min(fee_level, 1) if dyn: max_pos = (len(FEE_DEPTH_TARGETS) - 1) if mempool else len(FEE_ETA_TARGETS) slider_pos = round(fee_level * max_pos) fee_rate = self.depth_to_fee(slider_pos) if mempool else self.eta_to_fee(slider_pos) else: max_pos = len(FEERATE_STATIC_VALUES) - 1 slider_pos = round(fee_level * max_pos) fee_rate = FEERATE_STATIC_VALUES[slider_pos] return fee_rate def fee_per_kb(self, dyn: bool=None, mempool: bool=None, fee_level: float=None) -> Union[int, None]: """Returns sat/kvB fee to pay for a txn. Note: might return None. fee_level: float between 0.0 and 1.0, representing fee slider position """ if dyn is None: dyn = self.is_dynfee() if mempool is None: mempool = self.use_mempool_fees() if fee_level is not None: return self._feerate_from_fractional_slider_position(fee_level, dyn, mempool) # there is no fee_level specified; will use config. 
# note: 'depth_level' and 'fee_level' in config are integer slider positions, # unlike fee_level here, which (when given) is a float in [0.0, 1.0] if dyn: if mempool: fee_rate = self.depth_to_fee(self.get_depth_level()) else: fee_rate = self.eta_to_fee(self.get_fee_level()) else: fee_rate = self.get('fee_per_kb', FEERATE_FALLBACK_STATIC_FEE) return fee_rate def fee_per_byte(self): """Returns sat/vB fee to pay for a txn. Note: might return None. """ fee_per_kb = self.fee_per_kb() return fee_per_kb / 1000 if fee_per_kb is not None else None def estimate_fee(self, size): fee_per_kb = self.fee_per_kb() if fee_per_kb is None: raise NoDynamicFeeEstimates() return self.estimate_fee_for_feerate(fee_per_kb, size) @classmethod def estimate_fee_for_feerate(cls, fee_per_kb, size): fee_per_kb = Decimal(fee_per_kb) fee_per_byte = fee_per_kb / 1000 # to be consistent with what is displayed in the GUI, # the calculation needs to use the same precision: fee_per_byte = quantize_feerate(fee_per_byte) return round(fee_per_byte * size) def update_fee_estimates(self, key, value): self.fee_estimates[key] = value self.fee_estimates_last_updated[key] = time.time() def is_fee_estimates_update_required(self): """Checks time since last requested and updated fee estimates. Returns True if an update should be requested. """ now = time.time() return now - self.last_time_fee_estimates_requested > 60 def requested_fee_estimates(self): self.last_time_fee_estimates_requested = time.time() def get_video_device(self): device = self.get("video_device", "default") if device == 'default': device = '' return device def read_user_config(path): """Parse and store the user config settings in electrum.conf into user_config[].""" if not path: return {} config_path = os.path.join(path, "config") if not os.path.exists(config_path): return {} try: with open(config_path, "r", encoding='utf-8') as f: data = f.read() result = json.loads(data) except: print_error("Warning: Cannot read config file.", config_path) return {} if not type(result) is dict: return {} return result
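# --- Usage sketch (not part of the original file) ----------------------------
# A minimal exercise of SimpleConfig's precedence and fee helpers, assuming the
# surrounding package is importable; the data directory path is arbitrary.
config = SimpleConfig({'electrum_path': '/tmp/electrum-test'})  # hypothetical data dir

config.set_key('dynamic_fees', False)      # persist a user-config key
print(config.get('dynamic_fees'))          # -> False (read from user config)
print(config.fee_per_kb())                 # -> FEERATE_FALLBACK_STATIC_FEE until changed
print(config.estimate_fee(250))            # fee for a 250-byte transaction at that rate

# Keys supplied as constructor options behave like command-line options and
# are therefore not modifiable via set_key():
cli_config = SimpleConfig({'fee_per_kb': 20000})
cli_config.set_key('fee_per_kb', 5000)     # warns and keeps the "command line" value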
the-stack_106_31014
from pandac.PandaModules import *
from direct.showbase.DirectObject import *
from direct.interval.IntervalGlobal import *
from pirates.piratesbase import PiratesGlobals
from direct.distributed import DistributedObject
from pirates.effects.DustCloud import DustCloud
from pirates.effects.SmallSplash import SmallSplash
import random
from PooledEffect import PooledEffect

DebrisDict = {
    '0': 'models/props/rock_1_floor',
    '1': 'models/props/rock_2_floor',
    '2': 'models/props/rock_3_floor',
    '3': 'models/props/rock_4_floor'}


class RockDebris(PooledEffect):
    BaseEndPlaneZ = -10

    def __init__(self):
        PooledEffect.__init__(self)
        self.collSphereRadius = 2.0
        self.startPos = Vec3(0, 0, 0)
        self.endPlaneZ = self.BaseEndPlaneZ
        self.transNode = self.attachNewNode('trans')
        filePrefix = DebrisDict.get(str(random.randint(0, 3)))
        self.debris = loader.loadModel(filePrefix)
        self.debris.reparentTo(self.transNode)
        self.debris.setScale(0.5)
        self.debris.setColorScale(0.8, 0.8, 0.8, 1.0)
        self.weaponHitEvent = 'weaponHit' + str(id(self))
        self.accept(self.weaponHitEvent, self.weaponHitObject)
        self.collSphere = CollisionSphere(0, 0, 0, self.collSphereRadius)
        self.cnode = CollisionNode('collSphere')
        self.cnode.addSolid(self.collSphere)
        self.collision = self.transNode.attachNewNode(self.cnode)
        self.cnode.setFromCollideMask(PiratesGlobals.TargetBitmask)
        self.cnode.setIntoCollideMask(BitMask32.allOff())
        self.collHandler = CollisionHandlerEvent()
        self.collHandler.addInPattern(self.weaponHitEvent)
        self.radiusDist = 25
        self.minHeight = 30
        self.maxHeight = 100
        self.track = None
        return

    def createTrack(self, rate=1):
        self.startVel = Vec3(random.uniform(-self.radiusDist, self.radiusDist),
                             random.uniform(-self.radiusDist, self.radiusDist),
                             random.uniform(self.minHeight, self.maxHeight))
        try:
            playProjectile = ProjectileInterval(self.transNode, startPos=self.startPos,
                                                startVel=self.startVel, endZ=self.endPlaneZ,
                                                gravityMult=4.0)
            self.playProjectile = playProjectile
        except StandardError:
            playProjectile = Wait(0.2)
            self.playProjectile = None

        randomNumX = random.uniform(360, 2880)
        randomNumY = random.uniform(360, 2880)
        randomNumZ = random.uniform(360, 2880)
        self.playRotate = self.debris.hprInterval(6, Point3(randomNumX, randomNumY, randomNumZ))
        enableColl = Sequence(Wait(0.2), Func(self.cnode.setFromCollideMask, PiratesGlobals.TargetBitmask))
        playDebris = Parallel(playProjectile, enableColl)
        self.track = Sequence(Func(self.transNode.reparentTo, self), playDebris, Func(self.cleanUpEffect))
        return

    def play(self, rate=1):
        self.createTrack()
        if self.startPos[2] > self.endPlaneZ:
            base.cTrav.addCollider(self.collision, self.collHandler)
            self.track.start()
            self.playRotate.loop()
        else:
            self.finish()

    def stop(self):
        if self.track:
            self.track.finish()
        if self.playRotate:
            self.playRotate.finish()

    def finish(self):
        self.stop()
        self.cleanUpEffect()

    def cleanUpEffect(self):
        self.detachNode()
        self.checkInEffect(self)

    def destroy(self):
        self.stop()
        del self.track
        del self.playProjectile
        self.removeNode()
        self.ignore(self.weaponHitEvent)
        PooledEffect.destroy(self)

    def weaponHitObject(self, entry):
        if not entry.hasSurfacePoint() or not entry.hasInto():
            return
        if not entry.getInto().isTangible():
            return
        hitObject = entry.getIntoNodePath()
        objType = hitObject.getNetTag('objType')
        if not objType:
            return
        objType = int(objType)
        if objType == PiratesGlobals.COLL_SEA and base.cr.wantSpecialEffects:
            pos = entry.getSurfacePoint(render)
            if base.cr.activeWorld.getWater():
                entryWaterHeight = base.cr.activeWorld.getWater().calcHeight(pos[0], pos[1]) + 7.0
            else:
                entryWaterHeight = pos[2]
            splashEffect = SmallSplash.getEffect()
            if splashEffect:
                splashEffect.reparentTo(render)
                splashEffect.setPos(pos[0], pos[1], entryWaterHeight)
                splashEffect.play()
            self.cnode.setFromCollideMask(PiratesGlobals.TargetBitmask.allOff())
        elif objType == PiratesGlobals.COLL_LAND and base.cr.wantSpecialEffects:
            pos = entry.getSurfacePoint(render)
            dustCloudEffect = DustCloud.getEffect()
            if dustCloudEffect:
                dustCloudEffect.wrtReparentTo(render)
                dustCloudEffect.setPos(pos)
                dustCloudEffect.play()
            self.cnode.setFromCollideMask(PiratesGlobals.TargetBitmask.allOff())

    def offsetEndPlaneZFrom(self, zHeight):
        self.endPlaneZ = self.BaseEndPlaneZ + zHeight

    def testTrajectory(self):
        self.createTrack()
        return bool(self.playProjectile and self.playProjectile.testTrajectory())
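# --- Usage sketch (not part of the original file) ----------------------------
# RockDebris is pooled like the SmallSplash and DustCloud effects it spawns.
# Assuming the same PooledEffect.getEffect() factory used for those classes
# above, a caller would check an effect out, position it, and play it.
# The position and ground-height variables below are hypothetical.
debris = RockDebris.getEffect()
if debris:
    debris.reparentTo(render)
    debris.setPos(eruptionPos)           # hypothetical spawn position
    debris.offsetEndPlaneZFrom(groundZ)  # hypothetical ground height
    debris.play()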
the-stack_106_31015
''' Data loader for annotated text datasets. ''' import os import re import enum import glob import array import random import shutil import struct import tempfile from collections import Counter from contextlib import ExitStack import torch from torch import nn import metrics from data import preprocess from data.text import TextDataset from data.utils import maybe_download from utils.file import Open, extract_all from utils.tree import ParseTree MASKED = '<MASKED>' class TextAnnotation(enum.Enum): ''' An enumeration of text annotation types ''' NONE = ('', 'bpe.32000.bin', 'bpe.32000') CONSTITUENCY_PARSE = ('parsed', '{lang}.parse', 'parse.fully.upto.span{span}') PARSE_SPANS = ('spans', '{lang}.parse', 'bpe.32000') def __init__(self, identifier, ext, vocab_ext): ''' Initialize the text annotation ''' self.ext = ext self.vocab_ext = vocab_ext self.identifier = identifier def data_path(self, split, directory, **kwargs): ''' Return the data path ''' data_ext = self.ext.format(**kwargs) return os.path.join(directory, f'{split}.{data_ext}') def vocab_path(self, directory, **kwargs): ''' Return the vocab path ''' vocab_ext = self.vocab_ext.format(**kwargs) return os.path.join(directory, f'vocab.{vocab_ext}') class AnnotatedTextDataset(TextDataset): ''' Class that encapsulates an annotated text dataset ''' NAME = '' LANGUAGE_PAIR = ('en', 'en') URLS = [] RAW_SPLITS = {} SPLITS = { 'train': 'train.tok', 'valid': 'valid.tok', 'dev': 'valid.tok', 'test': 'test.tok' } IGNORE_REGEX_LIST = [] SEGMENT_REGEX = re.compile(r'<\s*seg\s+id\s*=\s*"\d+"\s*>\s*(.+)\s*<\s*/\s*seg\s*>') def __init__(self, config, split='train', swap=False, annotation=TextAnnotation.NONE): ''' Initialize the annotated text dataset ''' super(AnnotatedTextDataset, self).__init__(config, split=split) self.swap = swap self.segmenters = [] self.annotation = annotation @classmethod def name(cls, swap=False, annotation=TextAnnotation.NONE): ''' Return a name for the dataset given the passed in configuration ''' config = [cls.NAME] + list(reversed(cls.LANGUAGE_PAIR) if swap else cls.LANGUAGE_PAIR) if annotation.identifier: config += [annotation.identifier] return '_'.join(config) @property def source_language(self): ''' Return the source language ''' return type(self).LANGUAGE_PAIR[1 if self.swap else 0] @property def target_language(self): ''' Return the target language ''' return type(self).LANGUAGE_PAIR[0 if self.swap else 1] @property def mask_idx(self): ''' Return the start of summary value ''' return self.token2id[MASKED] def span_idx(self, span): ''' Return the span index value ''' return self.token2id[f'<SPAN{span}>'] @property def base_data_path(self): ''' Get the path of the processed data file ''' return TextAnnotation.NONE.data_path( type(self).SPLITS[self.split], self.preprocess_directory ) @property def source_annotation_data_path(self): ''' Get the path of the processed data file ''' return self.annotation.data_path( type(self).SPLITS[self.split], self.preprocess_directory, lang=self.source_language ) @property def target_annotation_data_path(self): ''' Get the path of the processed data file ''' return self.annotation.data_path( type(self).SPLITS[self.split], self.preprocess_directory, lang=self.target_language ) @property def data_paths(self): ''' Get the list of data files ''' return set([ self.base_data_path, self.source_annotation_data_path, self.target_annotation_data_path ]) @property def base_vocab_path(self): ''' Get the path of the vocab file ''' return TextAnnotation.NONE.vocab_path( self.preprocess_directory, 
span=self.config.span ) @property def annotation_vocab_path(self): ''' Get the path of the annotation specific vocab file ''' return self.annotation.vocab_path( self.preprocess_directory, span=self.config.span ) @property def constituent_vocab_path(self): ''' Get the path of the constituent vocab file ''' return TextAnnotation.CONSTITUENCY_PARSE.vocab_path( self.preprocess_directory, span=self.config.span ) @property def vocab_paths(self): ''' Get the list of vocab files ''' return set([self.base_vocab_path, self.annotation_vocab_path]) @property def preprocess_directory(self): ''' Get the preprocess directory ''' return self.config.preprocess_directory @property def preprocess_buffer_size(self): ''' Get the preprocess buffer size ''' return self.config.preprocess_buffer_size @property def stats(self): ''' Return the dataset stats ''' metric_store = super(AnnotatedTextDataset, self).stats if self.annotation is TextAnnotation.NONE or self.split == 'train': return metric_store spans = metrics.Metric('Constituent Spans', metrics.format_float, 'l(max)') for datum in self.data: _, target_spans = self.segmenters[-1](datum['target_annotation']) if target_spans: spans.updates(target_spans) metric_store.add(spans) return metric_store def collate_field(self, batch, field_name, values): ''' Collate a specific field ''' if 'annotation' in field_name: batch[field_name + 's'] = nn.utils.rnn.pad_sequence( values, batch_first=True, padding_value=self.padding_idx - self.reserved_range) batch[field_name + '_lens'] = torch.LongTensor([len(sequence) for sequence in values]) else: super(AnnotatedTextDataset, self).collate_field(batch, field_name, values) def annotation_spans(self, annotation): ''' Calculate the spans from the annotation ''' spans = [] for constituent_id in annotation: constituent = self.id2token[int(constituent_id)] match = ParseTree.CONSTITUENT_REGEX.match(constituent) spans.append(int(match[2]) if match else 1) return spans def annotated_sequence(self, target, annotation, spans): ''' Create the masked target from the annotation and spans ''' annotation_target = [] original_target = list(target) for span_idx, span in enumerate(spans): annotation_target.append(annotation[span_idx]) annotation_target.extend(original_target[:span]) original_target = original_target[span:] assert not original_target return annotation_target def masked_target(self, annotation, spans): ''' Create the masked target from the annotation and spans ''' return self.annotated_sequence([self.mask_idx] * int(sum(spans)), annotation, spans) def tensorize(self, index): ''' Tensorize the specified example index ''' if self.annotation is TextAnnotation.NONE: return super(AnnotatedTextDataset, self).tensorize(index) datum = self.data[index] segmenter = ( self.segmenters[random.randrange(self.config.span)] if self.config.randomize_chunks else self.segmenters[-1] ) target_annotation, target_spans = segmenter(datum['target_annotation']) target_annotation = ( [self.token2id[annotation] for annotation in target_annotation] if self.annotation is TextAnnotation.CONSTITUENCY_PARSE else [self.span_idx(span) for span in target_spans] ) masked_target = self.masked_target(target_annotation, target_spans) annotated_target = self.annotated_sequence( datum['target'], target_annotation, target_spans ) example = {} example['input'] = torch.LongTensor(datum['input']) example['target'] = torch.LongTensor(annotated_target) example['masked_target'] = torch.LongTensor(masked_target) example['target_annotation'] = torch.LongTensor( [self.sos_idx] + 
list(target_annotation) + [self.eos_idx] ) - self.reserved_range return example def preprocess_raw_line(self, line, xml=False): ''' Preprocess the raw text ''' line = line.strip() if self.config.max_line_length and len(line) > self.config.max_line_length: return if any(ignore.match(line) for ignore in type(self).IGNORE_REGEX_LIST): return if xml: match = type(self).SEGMENT_REGEX.match(line) if not match: return return match[1] return line def download_and_extract(self): ''' Download and extract the dataset ''' for filename, url in type(self).URLS: filepath = os.path.join(self.config.data_directory, filename) maybe_download(filepath, url) extract_all(filepath, self.preprocess_directory) def preprocess_raw(self): ''' Tokenize/bpe encode the raw text ''' def is_xml(filename): ''' Determine if a file is XML formatted ''' return filename.endswith('.sgm') or filename.endswith('.xml') def filter_lines(in_file, basename): ''' Scan the file for any filtered lines ''' filtered = set() xml = is_xml(basename) for i, line in enumerate(in_file): if not self.preprocess_raw_line(line, xml=xml): filtered.add(i) return filtered def merge(basename, in_file, out_file, filtered=None): ''' Tokenize the passed in file and write it to the designated file ''' filtered = filtered or set() xml = is_xml(basename) for i, line in enumerate(in_file): if i in filtered: continue processed_line = self.preprocess_raw_line(line, xml=xml) out_file.write(processed_line + '\n') # First, clean-up any incomplete preprocessing files for path in glob.glob(os.path.join(self.preprocess_directory, '*.incomplete')): os.remove(os.path.join(self.preprocess_directory, path)) bpe_code_path = os.path.join(self.preprocess_directory, 'bpe.32000') if not os.path.exists(bpe_code_path): for split, file_pairs in type(self).RAW_SPLITS.items(): for pair in file_pairs: # First determine which lines must be skipped in both files, since the files are # a parallel corpora. 
filtered = set() for filename, lang in zip(pair, type(self).LANGUAGE_PAIR): in_path = os.path.join(self.preprocess_directory, filename) with ExitStack() as stack: in_file = stack.enter_context(Open(in_path, 'rt')) filtered.update(filter_lines(in_file, os.path.basename(filename))) for filename, lang in zip(pair, type(self).LANGUAGE_PAIR): basename = os.path.basename(filename) in_path = os.path.join(self.preprocess_directory, filename) split_path = os.path.join(self.preprocess_directory, f'{split}.{lang}') if os.path.exists(split_path): continue with ExitStack() as stack: out_path = f'{split_path}.incomplete' in_file = stack.enter_context(Open(in_path, 'rt')) out_file = stack.enter_context(Open(out_path, 'at')) merge(basename, in_file, out_file, filtered) word_counts = Counter() for split in type(self).RAW_SPLITS: for lang in type(self).LANGUAGE_PAIR: try: split_path = os.path.join(self.preprocess_directory, f'{split}.{lang}') os.rename(f'{split_path}.incomplete', split_path) except FileNotFoundError: # This can happen if the preprocessing is interrupted pass tokenized_path = os.path.join(self.preprocess_directory, f'{split}.tok.{lang}') word_counts.update(preprocess.tokenize( split_path, tokenized_path, self.preprocess_buffer_size )) print('Learning BPE') preprocess.learn_bpe(bpe_code_path, word_counts.items()) vocab_path = os.path.join(self.preprocess_directory, 'vocab.bpe.32000') if not os.path.exists(vocab_path): vocab = set() for split in type(self).RAW_SPLITS: for lang in type(self).LANGUAGE_PAIR: in_path = os.path.join( self.preprocess_directory, f'{split}.tok.{lang}' ) bpe_path = os.path.join( self.preprocess_directory, f'{split}.tok.bpe.32000.{lang}' ) vocab.update(preprocess.apply_bpe( bpe_code_path, in_path, bpe_path, self.preprocess_buffer_size )) vocab_path = os.path.join(self.preprocess_directory, 'vocab.bpe.32000') incomplete_vocab_path = f'{vocab_path}.incomplete' with Open(incomplete_vocab_path, 'wt') as vocab_file: vocab_file.writelines('\n'.join([word for word in sorted(vocab)])) os.rename(incomplete_vocab_path, vocab_path) def preprocess(self): ''' Do any data preprocessing if needed ''' if ( all(os.path.exists(p) for p in self.data_paths) and all(os.path.exists(p) for p in self.vocab_paths) ): return if not os.path.exists(self.preprocess_directory): os.makedirs(self.preprocess_directory) self.download_and_extract() self.preprocess_raw() # Make sure we have loaded the vocab self.load_vocab(preprocessing=True) split_filename = type(self).SPLITS[self.split] self.preprocess_bpe(split_filename) if self.annotation in ( TextAnnotation.PARSE_SPANS, TextAnnotation.CONSTITUENCY_PARSE ): base_annotation_id = len(self.id2token) for filename in type(self).SPLITS.values(): self.preprocess_parse(filename) if not os.path.exists(self.constituent_vocab_path): with Open(self.constituent_vocab_path, 'wt') as file: file.write('\n'.join([ self.id2token[annotation_id] for annotation_id in range(base_annotation_id, len(self.id2token)) ])) def preprocess_parse(self, filename): ''' Preprocess the parse data ''' base_path = os.path.join(self.preprocess_directory, f'{filename}') tokenized_bpe_path = f'{base_path}.bpe.32000' source_path = f'{base_path}.{self.source_language}.parse' if not os.path.exists(source_path): preprocess.parse( f'{tokenized_bpe_path}.{self.source_language}', source_path, self.preprocess_buffer_size ) target_path = f'{base_path}.{self.target_language}.parse' if not os.path.exists(target_path): preprocess.parse( f'{tokenized_bpe_path}.{self.target_language}', target_path, 
self.preprocess_buffer_size ) if os.path.exists(self.constituent_vocab_path): return bpe_path = os.path.join(self.preprocess_directory, 'bpe.32000') self.segmenters = [ preprocess.ParseSegmenter( bpe_path, span, self.config.max_span, self.config.randomize_chunks ) for span in range(1, self.config.span + 1) ] vocab = preprocess.get_parse_vocab( f'{base_path}.{self.source_language}.parse', self.segmenters, self.preprocess_buffer_size ) vocab.update(preprocess.get_parse_vocab( f'{base_path}.{self.target_language}.parse', self.segmenters, self.preprocess_buffer_size )) for token in vocab: if token not in self.token2id: self.token2id[token] = len(self.id2token) self.id2token.append(token) def preprocess_bpe(self, filename): ''' Preprocess the BPE data ''' tokenized_bpe_path = os.path.join(self.preprocess_directory, f'{filename}.bpe.32000') target_path = f'{tokenized_bpe_path}.{self.target_language}' source_path = f'{tokenized_bpe_path}.{self.source_language}' processed_path = f'{tokenized_bpe_path}.bin' if os.path.exists(processed_path): return with ExitStack() as stack: source_file = stack.enter_context(Open(source_path, 'rt')) target_file = stack.enter_context(Open(target_path, 'rt')) def encode_sentence(line): ''' Helper function that encodes a sentence ''' sentence = array.array('H') sentence.extend(( self.token2id[token] for token in line.split() )) byte_rep = sentence.tostring() byte_len = len(byte_rep) return struct.pack('Q{}s'.format(byte_len), byte_len, byte_rep) out_file = stack.enter_context(tempfile.NamedTemporaryFile()) for source_line, target_line in zip(source_file, target_file): source_sentence = encode_sentence(source_line) target_sentence = encode_sentence(target_line) out_file.write(source_sentence) out_file.write(target_sentence) out_file.flush() shutil.copy(out_file.name, f'{processed_path}.incomplete') os.rename(f'{processed_path}.incomplete', processed_path) def load_vocab(self, preprocessing=False): ''' Return the data loader for the dataset ''' if not os.path.exists(self.base_vocab_path): print('Cannot find the vocab file!') exit(1) with Open(self.base_vocab_path, 'rt') as vocab_file: self.token2id = {} self.id2token = [] for token in vocab_file.read().split('\n'): self.token2id[token] = len(self.id2token) self.id2token.append(token) super(AnnotatedTextDataset, self).load_vocab(preprocessing) if preprocessing or self.annotation is TextAnnotation.NONE: return if self.annotation is TextAnnotation.CONSTITUENCY_PARSE: if not os.path.exists(self.annotation_vocab_path): print('Cannot find the annotation vocab file!') exit(1) with Open(self.annotation_vocab_path, 'rt') as vocab_file: for token in vocab_file.read().split('\n'): self.token2id[token] = len(self.id2token) self.id2token.append(token) elif self.annotation is TextAnnotation.PARSE_SPANS: for i in range(self.config.span): token = f'<SPAN{i + 1}>' self.token2id[token] = len(self.id2token) self.id2token.append(token) self.token2id[MASKED] = len(self.id2token) self.id2token.append(MASKED) # Need to cache off the segmenters as the BPE loading is slow. We do # not want that overhead for each subprocess we create in the dataloaders. 
bpe_path = os.path.join(self.preprocess_directory, 'bpe.32000') self.segmenters = [ preprocess.ParseSegmenter( bpe_path, span, self.config.max_span, self.config.randomize_chunks ) for span in range(1, self.config.span + 1) ] def load_text(self): ''' Load the translations ''' if not all(os.path.exists(p) for p in self.data_paths): print('Cannot find the processed translations!') exit(1) with ExitStack() as stack: base_data_file = stack.enter_context(Open(self.base_data_path, 'rb')) if self.annotation is not TextAnnotation.NONE: source_annotation_data_file = stack.enter_context( Open(self.source_annotation_data_path, 'rt') ) target_annotation_data_file = stack.enter_context( Open(self.target_annotation_data_path, 'rt') ) while True: if self.swap: source_key = 'target' target_key = 'input' else: source_key = 'input' target_key = 'target' example = {} example['input'] = array.array('H') example['target'] = array.array('H') # prepend the start of sentence token to the target if self.annotation is TextAnnotation.NONE: example['target'].append(self.sos_idx) source_sentence_len = base_data_file.read(8) if not source_sentence_len: break source_sentence_len, = struct.unpack('Q', source_sentence_len) example[source_key].fromstring(base_data_file.read(source_sentence_len)) target_sentence_len = base_data_file.read(8) if not target_sentence_len: print('Unexpected end of file while trying to read a de sentence!') exit(1) target_sentence_len, = struct.unpack('Q', target_sentence_len) example[target_key].frombytes(base_data_file.read(target_sentence_len)) # append the end of sentence token to the target if self.annotation is TextAnnotation.NONE: example['target'].append(self.eos_idx) if self.annotation in ( TextAnnotation.PARSE_SPANS, TextAnnotation.CONSTITUENCY_PARSE ): example['source_annotation'] = source_annotation_data_file.readline() example['target_annotation'] = target_annotation_data_file.readline() self.add_datum(example)
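# --- Illustrative sketch (not part of the original file) ---------------------
# Concrete corpora are expected to subclass AnnotatedTextDataset and fill in
# the class attributes consumed above (NAME, LANGUAGE_PAIR, URLS, RAW_SPLITS,
# SPLITS). The subclass below only shows the expected shape; the dataset name,
# URLs, and file names are placeholders, not a real corpus definition.
class ExampleTranslationDataset(AnnotatedTextDataset):
    NAME = 'example'
    LANGUAGE_PAIR = ('en', 'de')
    URLS = [
        # (local filename, download URL) pairs, as iterated in download_and_extract()
        ('example-corpus.tgz', 'https://example.com/example-corpus.tgz'),
    ]
    RAW_SPLITS = {
        # split -> list of parallel file pairs, ordered to match LANGUAGE_PAIR
        'train': [('corpus/train.en', 'corpus/train.de')],
        'valid': [('corpus/valid.en', 'corpus/valid.de')],
        'test': [('corpus/test.en', 'corpus/test.de')],
    }
    SPLITS = {
        'train': 'train.tok',
        'valid': 'valid.tok',
        'dev': 'valid.tok',
        'test': 'test.tok',
    }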
the-stack_106_31016
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Author: chenzhen
@Date: 2020-04-10 17:04:46
@LastEditTime: 2020-04-24 15:45:41
@LastEditors: chenzhen
@Description:
'''
import sys
sys.path.append('../../')

import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import OneHotEncoder

import matrixslow as ms
from matrixslow.trainer import SimpleTrainer

# Input image size
img_shape = (28, 28)

# Load the MNIST dataset, take a subset of samples and normalize them
test_data, test_label = fetch_openml(
    'mnist_784', version=1, return_X_y=True, cache=True)
test_data, test_label = test_data[1000:2000] / \
    255, test_label.astype(np.int)[1000:2000]
test_data = np.reshape(np.array(test_data), (1000, *img_shape))

saver = ms.trainer.Saver('./epoches10')
saver.load(model_file_name='my_model.json', weights_file_name='my_weights.npz')

# Look up the input and output nodes in the computation graph by the names
# defined at training time. If no names were given then, the node names were
# auto-generated and have to be identified from the model file by hand.
x = ms.get_node_from_graph('img_input')
pred = ms.get_node_from_graph('softmax_output')

for index in range(len(test_data)):
    # Assign the test sample to the input node
    x.set_value(np.mat(test_data[index]).T)

    # Run the forward pass to compute the output node's value,
    # i.e. the model's predicted class probabilities
    pred.forward()
    gt = test_label.values[index]
    print('model predict {} and ground truth: {}'.format(
        np.argmax(pred.value), gt))
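# --- Usage sketch (not part of the original file) ----------------------------
# The same loop can accumulate an aggregate accuracy figure instead of printing
# each sample; this reuses only the nodes and arrays already defined above.
correct = 0
for index in range(len(test_data)):
    x.set_value(np.mat(test_data[index]).T)
    pred.forward()
    if np.argmax(pred.value) == int(test_label.values[index]):
        correct += 1

print('accuracy: {:.2%}'.format(correct / len(test_data)))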
the-stack_106_31020
import setuptools

with open("README.md", "r") as fh:
    complete_readme = fh.read()

long_description = complete_readme.split("**System image")[0]
long_description += "\n\n**Made by Help-a-Sloth org. Check us on GitHub.**"

setuptools.setup(
    name="mischief-managed",
    packages=setuptools.find_packages(),
    version="1.0.0",
    author="Hemant Singh",
    # Note: the original list had a missing comma, which silently merged
    # "Cleanup" and "Files" into one keyword.
    keywords=["Quick Work", "Productivity", "Automation", "Cleanup",
              "Files", "Management", "Tidy", "Folder Manage"],
    description=("Files outside any folder are made tidy/managed by putting "
                 "them inside folders based on their extension or date"),
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Help-a-Sloth/mischief-managed",
    maintainer="amifunny",
    entry_points={
        'console_scripts': [
            'mischief-managed=mischief_managed.__main__:main'
        ]
    },
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
    ]
)
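# --- Illustrative note (not part of the original file) -----------------------
# Once the package is installed, setuptools generates a `mischief-managed`
# executable from the console_scripts entry point above. That wrapper is
# roughly equivalent to calling the declared function yourself:
from mischief_managed.__main__ import main  # module/function as declared above

if __name__ == "__main__":
    main()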
the-stack_106_31021
# coding: utf-8

# In[ ]:

from __future__ import division
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import math
import multivarlinreg
import rmse


# In[ ]:

# Linear regression
red_train = np.loadtxt('redwine_training.txt')
red_test = np.loadtxt('redwine_testing.txt')

red_train_data = red_train[:, :11]
red_train_score = red_train[:, 11]
red_test_data = red_test[:, :11]
red_test_score = red_test[:, 11]
# red_train.shape


# In[ ]:

"""
def multivarlinreg(data, ground_truth):
    # data = full_data[:, :-1]
    X = np.hstack((data, np.repeat(1, data.shape[0]).reshape(-1, 1)))
    X_T_X = np.dot(X.T, X)
    # if full-rank matrix or positive definite matrix:
    # check if it is invertible
    if np.linalg.det(X_T_X) != 0:
        inverse = np.linalg.inv(X_T_X)
        w = np.dot(np.dot(inverse, X.T), ground_truth)  # w0 at the last column
        # print w
        return w
    else:
        print "use other method"
"""


# In[ ]:

# Only use the first feature (fixed acidity)
train_fixed_acidity = red_train_data[:, 0].reshape(-1, 1)
train_w_acidity = multivarlinreg.multivarlinreg(train_fixed_acidity, red_train_score)
train_w_acidity
# The weight on acidity is small while the bias term is large,
# so fixed acidity alone does not predict wine quality very well.
# array([0.05035934, 5.2057261 ])


# In[ ]:

# All physiochemical features
w_all = multivarlinreg.multivarlinreg(red_train_data, red_train_score)
w_all.shape
np.set_printoptions(suppress=True)
w_all
# Some features are positively related and some negatively related to quality;
# the weight on the first feature (acidity) changes once the other features
# are included.


# In[ ]:

"""
# Exercise 3 (Evaluating Linear Regression).
def rmse(predicted_value, ground_truth):
    diff = ground_truth - predicted_value
    diff_square = np.dot(diff, diff)
    # rmse = np.sqrt(np.divide(diff_square, ground_truth.shape[0]))
    rmse = np.sqrt(diff_square / ground_truth.shape[0])
    return rmse
"""


# In[ ]:

# 1-dimensional input variable: evaluate on the test set
# using only the first feature (fixed acidity)
test_fixed_acidity = red_test_data[:, 0].reshape(-1, 1)
test_X_acidity = np.hstack((test_fixed_acidity, np.repeat(1, test_fixed_acidity.shape[0]).reshape(-1, 1)))
predicted_score_acidity = np.dot(test_X_acidity, train_w_acidity.T)
# predicted_score_acidity = predicted_value(train_fixed_acidity, test_fixed_acidity, red_test_score)
rmse.rmse(predicted_score_acidity, red_test_score)
# 0.7860892754162216


# In[ ]:

# Full 11-dimensional input variables
test_X = np.hstack((red_test_data, np.repeat(1, red_test_data.shape[0]).reshape(-1, 1)))
predicted_score = np.dot(test_X, w_all.T)
rmse.rmse(predicted_score, red_test_score)
# 0.644717277241364
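# --- Illustrative sketch (not part of the original file) ---------------------
# The commented-out normal-equation solver above inverts X^T X directly. A more
# numerically robust variant, keeping the same bias-in-last-column convention,
# can use a least-squares solver instead. This helper is hypothetical and is
# not part of the original multivarlinreg module.
def multivarlinreg_lstsq(data, ground_truth):
    """Least-squares fit with the bias weight in the last position."""
    X = np.hstack((data, np.ones((data.shape[0], 1))))
    # lstsq handles rank-deficient X, unlike an explicit inverse of X^T X.
    w = np.linalg.lstsq(X, ground_truth, rcond=None)[0]
    return w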
the-stack_106_31022
#!/usr/bin/python from __future__ import division import numpy as np import scipy as sp from scipy.stats import gaussian_kde from scipy.interpolate import interp1d from scipy.integrate import quad from scipy.special import gamma, gammaln, polygamma from scipy.optimize import minimize_scalar from math import pi TINY_FLOAT64 = sp.finfo(sp.float64).tiny """ Gaussian Kernel Density Estimation """ # Gaussian kernel density estimation with cross validation and bootstrap sampling def gkde(data0, xs, num_samples=0, num_h=100, massage_J=True, tolerance=1E-3, ERROR_switch=False): data = data0.copy() N = len(data) G = len(xs) dx = xs[1] - xs[0] # Sort data data.sort() # Set h_min to minimal data spacing. Shift data if there are ties h_min = np.diff(data).min() if h_min == 0.: # This ensures the shifted data will round to the correct value (to 1st decimal for WHO data) data_shifted = np.zeros(N) # Do not change data directly. Use data_shifted! for i in range(N): if data[i] == xs.min(): data_shifted[i] = data[i] + 0.05 * np.random.rand() if xs.min() < data[i] < xs.max(): data_shifted[i] = data[i] + 0.10 * (np.random.rand() - 0.5) if data[i] == xs.max(): data_shifted[i] = data[i] - 0.05 * np.random.rand() data = data_shifted data.sort() h_min = np.diff(data).min() # If there are still ties, give up if h_min == 0.: Q_star, Q_samples, ERROR_switch = None, None, True return Q_star, Q_samples, ERROR_switch # Set h_max to maximal data spacing x 10 h_max = (data.max()-data.min()) * 10 # Form hs hs = np.geomspace(h_min, h_max, num_h) # For each h, compute the risk function Js = np.zeros(num_h) for k in range(num_h): h = hs[k] sum_over_i = 0. for i in range(N): data_i = list(data.copy()) data_i.pop(i) Q_i = gaussian_kde(data_i, bw_method=h)(xs) Q_i /= (sp.sum(Q_i*dx) + TINY_FLOAT64) # Set negative interpolated values (occurring when h is very small) to 0 value = max(float(interp1d(xs, Q_i, kind='cubic', fill_value="extrapolate")(data[i])), 0.) 
sum_over_i += np.log(value + TINY_FLOAT64) J = - sum_over_i # Terminate if got an nan from gaussian_kde if np.isnan(J): Q_star, Q_samples, ERROR_switch = None, None, True return Q_star, Q_samples, ERROR_switch Js[k] = J # Massage Js so that the risk function is better-behaved if massage_J: Js = Js - Js.min() + 1.0 Js = np.log(Js) # Interpolate the risk function J_func = interp1d(hs, Js, kind='cubic') # Compute 1st derivative of the risk function dJdhs = np.gradient(Js) # Solve for all hs that correspond to local extrema of the risk function hs_solved, Js_solved = [], [] for k in range(num_h-1): if dJdhs[k] * dJdhs[k+1] < 0: h_k = h_solver(hs[k], hs[k+1], hs, dJdhs, tolerance) J_k = float(J_func(h_k)) hs_solved.append(h_k) Js_solved.append(J_k) # Pick up h_star that corresponds to the global minimum of the risk function if len(hs_solved) > 0: h_star = hs_solved[sp.array(Js_solved).argmin()] # If this minimum is actually local, set h_star to either h_max or h_min if (min(Js_solved) > Js[0]) or (min(Js_solved) > Js[-1]): if Js[0] > Js[-1]: h_star = h_max elif Js[0] < Js[-1]: h_star = h_min # If no h were solved, set h_star to either h_max or h_min else: if Js[0] > Js[-1]: h_star = h_max elif Js[0] < Js[-1]: h_star = h_min # Estimate the optimal density with h_star Q_star = gaussian_kde(data, bw_method=h_star)(xs) Q_star /= sp.sum(Q_star*dx) # Use bootstrap to estimate uncertainty (h is fixed at h_star) Q_samples = np.zeros([G,num_samples]) for k in range(num_samples): bootstrapped_data = np.random.choice(data, size=N, replace=True) Q_k = gaussian_kde(bootstrapped_data, bw_method=h_star)(xs) Q_k /= sp.sum(Q_k*dx) Q_samples[:,k] = Q_k # Return return Q_star, Q_samples, ERROR_switch # Solve h at which dJdh = 0 using bisection def h_solver(h_lb, h_ub, hs, dJdhs, tolerance): h1, h2 = h_lb, h_ub hm_old = np.inf while True: hm = (h1+h2)/2 if abs(hm-hm_old) < tolerance: break hm_old = hm f1 = dJdh_func(h1, hs, dJdhs) f2 = dJdh_func(h2, hs, dJdhs) fm = dJdh_func(hm, hs, dJdhs) if f1*fm < 0: h1, h2 = h1, hm elif fm*f2 < 0: h1, h2 = hm, h2 return hm # 1st derivative of the risk function def dJdh_func(h, hs, dJdhs): return interp1d(hs, dJdhs, kind='cubic')(h) """ Dirichlet Process Mixture Modeling """ # Dirichlet process mixture modeling with Gibbs sampling def dpmm(data, xs, num_samples=100, num_thermalization=100, H=10, M=1, ERROR_switch=False): N = len(data) G = len(xs) # Initialize kappa = 1 mu0 = sp.mean(data) alpha0 = 1 beta0 = sp.std(data)**2 m_array = np.zeros([H,2]) m_array[:,1] = invgamma_sampler(alpha=alpha0, beta=beta0, size=H) for h in range(H): m_array[h,0] = np.random.normal(loc=mu0, scale=sp.sqrt(kappa*m_array[h,1]), size=1) w_array = np.ones(H) / H # Gibbs sampling Q_samples = np.zeros([G,num_samples]) for k in range(num_thermalization+num_samples): # Update clustering r_array = np.zeros(N) for i in range(N): wf = np.zeros(H) for h in range(H): wf[h] = w_array[h] * normal(x=data[i], mu=m_array[h,0], sigma=sp.sqrt(m_array[h,1])) wf /= sp.sum(wf) r_array[i] = np.random.choice(range(H), size=1, p=wf) r_list = [int(r_array[i]) for i in range(N)] # Update locations m_array = np.zeros([H,2]) for h in range(H): i_list = [] for i in range(N): if r_list[i] == h: i_list.append(i) n_h = len(i_list) if n_h > 0: data_h = data[i_list] data_mean_h = sp.mean(data_h) kappa_h = 1 / (1/kappa + n_h) mu_h = kappa_h * (mu0/kappa + n_h*data_mean_h) alpha_h = alpha0 + n_h / 2 beta_h = beta0 + (sp.sum((data_h-data_mean_h)**2) + n_h/(1+kappa*n_h)*(data_mean_h-mu0)**2) / 2 m_array[h,1] = 
invgamma_sampler(alpha=alpha_h, beta=beta_h, size=1) m_array[h,0] = np.random.normal(loc=mu_h, scale=sp.sqrt(kappa_h*m_array[h,1]), size=1) else: m_array[h,1] = invgamma_sampler(alpha=alpha0, beta=beta0, size=1) m_array[h,0] = np.random.normal(loc=mu0, scale=sp.sqrt(kappa*m_array[h,1]), size=1) # Update weights (stick-breaking algorithm) A_array = np.zeros(H) for h in range(H): A_array[h] = r_list.count(h) B_array = np.zeros(H) for h in range(H): B_array[h] = sp.sum(A_array[h+1:]) v_array = np.zeros(H) for h in range(H): v_array[h] = np.random.beta(a=A_array[h]+1, b=B_array[h]+M, size=1) u_array = np.ones(H) - v_array w_array = np.zeros(H) w_array[0] = v_array[0] for h in range(1, H-1): w_array[h] = v_array[h] * np.cumprod(u_array[:h])[-1] w_array[-1] = abs(1-sp.sum(w_array)) # Save samples after thermalization if k > num_thermalization-1: Q_samples[:,k-num_thermalization] = combine_normals(xs, w_array, m_array) # Compute mean of the samples as the optimal density Q_star = Q_samples.mean(axis=1) # Return return Q_star, Q_samples, ERROR_switch # Inverse-gamma distribution def invgamma(x, alpha, beta): return beta**alpha * sp.exp(-beta/x) / gamma(alpha) / x**(alpha+1) # Draw random numbers from inverse-gamma distribution def invgamma_sampler(alpha, beta, size, invgamma_min=1E-3): x_start = beta/(alpha+1) # mode (most likely value) of invgamma x_lb = x_start while invgamma(x_lb, alpha, beta) > invgamma_min: x_lb /= 10.0 x_ub = x_start while invgamma(x_ub, alpha, beta) > invgamma_min: x_ub *= 10.0 xs = np.linspace(x_lb, x_ub, 10001) dx = xs[1] - xs[0] xs = np.linspace(x_lb+dx/2, x_ub-dx/2, 10000) prob = invgamma(xs, alpha, beta) / sp.sum(invgamma(xs, alpha, beta)) samples = np.random.choice(xs, size=size, replace=True, p=prob) jitter = dx * (np.random.rand(size)-0.5) samples += jitter return samples # Normal distribution def normal(x, mu, sigma): return sp.exp(-(x-mu)**2/(2*sigma**2)) / sp.sqrt(2*pi*sigma**2) # Combine normal distributions def combine_normals(xs, w_array, m_array): H = len(w_array) G = len(xs) dx = xs[1] - xs[0] wf = np.zeros([H,G]) for h in range(H): wf[h,:] = w_array[h] * normal(xs, mu=m_array[h,0], sigma=sp.sqrt(m_array[h,1])) Q = wf.sum(axis=0) Q /= sp.sum(Q*dx) return Q """ Some utility functions """ # Compute log-likelihood per datum def likelihood(xs, Q, data): Q_func = interp1d(xs, Q, kind='cubic', fill_value="extrapolate") L_data = 1/len(data) * sp.sum(sp.log(Q_func(data) + TINY_FLOAT64)) return L_data # Compute Kullback-Leibler divergence, D_KL(P||Q) def KL_divergence(P, Q, dx): D_KL = sp.sum(dx * P * sp.log((P+TINY_FLOAT64)/(Q+TINY_FLOAT64))) return D_KL # Given a set of data, compute p-value of an arbitrary data point def p_value_cal(data, point): count = 0 for i in range(len(data)): if data[i] <= point: count += 1 p_value = count/len(data) return p_value """ Entropy Estimators """ # Naive estimator. Ref: Justin's dissertation def naive_estimator(data, N, G, bbox): # Make a histogram of the data and get the count in each bin bin_edges = np.linspace(bbox[0], bbox[1], G+1) counts, bin_edges = np.histogram(a=data, bins=bin_edges) # Turn counts into frequencies freqs = counts/N # Compute entropy, Eqn.(3.15) H = -sp.sum(freqs * sp.log(freqs+TINY_FLOAT64)) # Correct entropy by adding log(L/G) L = bbox[1] - bbox[0] H += sp.log(L/G) # Convert from nats to bits H *= sp.log2(sp.exp(1)) # Return return H # kNN estimator. Ref: A. Kraskov et al, Phys. Rev. 
E 69, 066138 (2004) def kNN_estimator(data, N, k): # Compute pair-distances between the data points pair_dists = abs(sp.array(sp.mat(data).T * sp.mat(np.ones(N)) - sp.mat(np.ones(N)).T * sp.mat(data))) # Sort pair-distances, from small to large, for each row pair_dists.sort(axis=1) # Choose the kNN pair-distances kNN_pair_dist = pair_dists[:,k] # Compute entropy, Eqn.(20) H = polygamma(0,N) - polygamma(0,k) + 1/N * sp.sum(sp.log(2*kNN_pair_dist+TINY_FLOAT64)) # Convert from nats to bits H *= sp.log2(sp.exp(1)) # Return return H # NSB estimator. Ref: Justin's dissertation def NSB_estimator(data, N, G, bbox): # Make a histogram of the data and get the count in each bin bin_edges = np.linspace(bbox[0], bbox[1], G+1) counts, bin_edges = np.histogram(a=data, bins=bin_edges) # Determine the maximum of the log probability beta_star = minimize_scalar(neg_log_prob, method='golden', bounds=(0, np.inf), args=(G, N, counts)).x log_prob_beta_star = log_prob(beta_star, G, N, counts) # Compute entropy and its variance, Eqn.(3.29) and Eqn.(3.33) denom = quad(integrand_p, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0] numer_H = quad(integrand_pH, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0] numer_Hsq = quad(integrand_pHsq, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0] numer_varH = quad(integrand_pvarH, 0, np.inf, args=(G, N, counts, log_prob_beta_star))[0] H_mean = numer_H/denom H_sq_mean = numer_Hsq/denom H_var = numer_varH/denom + H_sq_mean - H_mean**2 # Correct H mean by adding log(L/G) L = bbox[1] - bbox[0] H_mean += sp.log(L/G) # Convert from nats to bits H_mean *= sp.log2(sp.exp(1)) H_error = np.sqrt(H_var) * sp.log2(sp.exp(1)) # Return return H_mean, H_error # log of Eqn.(3.32) def log_prob(beta, G, N, counts): if beta <= 0: return -np.inf else: return gammaln(beta*G) - G*gammaln(beta) + sp.sum(gammaln(counts+beta)) - gammaln(N+beta*G) + sp.log(G*polygamma(1,beta*G+1) - polygamma(1,beta+1)) # Negative of log_prob def neg_log_prob(beta, G, N, counts): return -log_prob(beta, G, N, counts) # Eqn.(3.22) def H(beta, G, N, counts): A = counts + beta + 1 B = N + beta*G + 1 return polygamma(0,B) - sp.sum((A-1)/(B-1)*polygamma(0,A)) # Eqn.(3.24) def var_H(beta, G, N, counts): A = counts + beta + 1 B = N + beta*G + 1 return sp.sum(A/B*(A-1)/(B-1)*polygamma(1,A)) - polygamma(1,B) + sp.sum(1/B*(A-1)/(B-1)*polygamma(0,A)**2) - 1/B*sp.sum((A-1)/(B-1)*polygamma(0,A))**2 def integrand_p(beta, G, N, counts, log_prob_beta_star): return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star) def integrand_pH(beta, G, N, counts, log_prob_beta_star): return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star) * H(beta, G, N, counts) def integrand_pHsq(beta, G, N, counts, log_prob_beta_star): return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star) * H(beta, G, N, counts)**2 def integrand_pvarH(beta, G, N, counts, log_prob_beta_star): return np.exp(log_prob(beta, G, N, counts)-log_prob_beta_star) * var_H(beta, G, N, counts)
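# --- Usage sketch (not part of the original file) ----------------------------
# A small driver for the estimators defined above, assuming they are in scope
# in this module; the synthetic data, grid bounds, and sample counts are
# arbitrary choices for illustration.
np.random.seed(0)
data = np.concatenate([np.random.normal(-2, 0.5, 100),
                       np.random.normal(3, 1.0, 100)])
xs = np.linspace(-6, 8, 201)

# Cross-validated KDE with a handful of bootstrap samples.
Q_star, Q_samples, error = gkde(data, xs, num_samples=20)
if not error:
    print('log-likelihood per datum:', likelihood(xs, Q_star, data))

# Entropy estimates (in bits) from the same sample.
H_knn = kNN_estimator(data, N=len(data), k=5)
H_nsb, H_err = NSB_estimator(data, N=len(data), G=50, bbox=(-6, 8))
print(H_knn, H_nsb, H_err)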
the-stack_106_31023
from time import *
from picamera import *
import numpy as np
from drawTheTableauLib import *


"""
Takes a picture from the camera and saves it in the current directory
in a jpg format

prereq :
    resX > 0, resY > 0
    resX <= 2592, resY <= 1944

param :
    String filename  The name of the file
    Int resX         The X resolution of the picture
    Int resY         The Y resolution of the picture
"""
def takePic(filename='image', resX=1024, resY=768):
    camera = PiCamera()
    camera.resolution = (resX, resY)
    camera.start_preview()
    camera.capture(filename + '.jpg')
    camera.close()


"""
Takes a video from the camera and saves it in the current directory
in a h264 format

prereq :
    resX > 0, resY > 0
    resX <= 1360, resY <= 720

param :
    String filename  The name of the file
    Int resX         The X resolution of the picture
    Int resY         The Y resolution of the picture
"""
def takeVid(filename='video', time=60, resX=1024, resY=768):
    # The original used picamera.PiCamera(), but only the star import above is
    # available, so PiCamera is referenced directly.
    camera = PiCamera()
    camera.resolution = (resX, resY)
    camera.start_recording(filename + '.h264')
    camera.wait_recording(time)
    camera.stop_recording()


"""
Takes a picture from the camera and returns it in a numpy array

prereq :
    resX > 0, resY > 0
    resX <= 2592, resY <= 1944

param :
    Int resX  The X resolution of the picture
    Int resY  The Y resolution of the picture
"""
def takePicToNumpy(resX=1024, resY=768):
    camera = PiCamera()
    camera.resolution = (resX, resY)
    camera.framerate = 24
    xRounded = roundToNearestMultiple(resX, 16)
    yRounded = roundToNearestMultiple(resY, 32)
    output = np.empty((xRounded * yRounded * 3,), dtype=np.uint8)
    camera.capture(output, 'rgb')
    output = output.reshape((xRounded, yRounded, 3))
    output = output[:resX, :resY, :]
    return output
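# --- Usage sketch (not part of the original file) ----------------------------
# File names and resolution values below are arbitrary examples.

# Save a still image to ./snapshot.jpg at the default resolution.
takePic('snapshot')

# Record 10 seconds of H.264 video at 1280x720.
takeVid('clip', time=10, resX=1280, resY=720)

# Grab a frame as a numpy array and inspect it.
frame = takePicToNumpy(1024, 768)
print(frame.shape, frame.dtype)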
the-stack_106_31024
import re import nidigital import nitsm.codemoduleapi from nitsm.codemoduleapi import SemiconductorModuleContext OPTIONS = {"Simulate": True, "driver_setup": {"Model": "6570"}} @nitsm.codemoduleapi.code_module def open_sessions(tsm_context: SemiconductorModuleContext): instrument_names = tsm_context.get_all_nidigital_instrument_names() for instrument_name in instrument_names: session = nidigital.Session(instrument_name, options=OPTIONS) session.load_pin_map(tsm_context.pin_map_file_path) session.load_specifications_levels_and_timing( tsm_context.nidigital_project_specifications_file_paths, tsm_context.nidigital_project_levels_file_paths, tsm_context.nidigital_project_timing_file_paths, ) session.apply_levels_and_timing("nidigital", "nidigital") for pattern_file_path in tsm_context.nidigital_project_pattern_file_paths: session.load_pattern(pattern_file_path) tsm_context.set_nidigital_session(instrument_name, session) @nitsm.codemoduleapi.code_module def measure_ppmu( tsm_context: SemiconductorModuleContext, pins, expected_instrument_names, expected_pin_set_strings, ): pin_query, sessions, pin_set_strings = tsm_context.pins_to_nidigital_sessions_for_ppmu(pins) expected_instrument_pin_sets = set(zip(expected_instrument_names, expected_pin_set_strings)) valid_pin_sets = [] for session, pin_set_string in zip(sessions, pin_set_strings): # call some methods on the session to ensure no errors session.pins[pin_set_string].ppmu_aperture_time = 4e-6 session.pins[ pin_set_string ].ppmu_aperture_time_units = nidigital.PPMUApertureTimeUnits.SECONDS session.pins[pin_set_string].ppmu_output_function = nidigital.PPMUOutputFunction.CURRENT session.pins[pin_set_string].ppmu_current_level_range = 2e-6 session.pins[pin_set_string].ppmu_current_level = 2e-6 session.pins[pin_set_string].ppmu_voltage_limit_high = 3.3 session.pins[pin_set_string].ppmu_voltage_limit_low = 0 session.pins[pin_set_string].ppmu_source() session.pins[pin_set_string].ppmu_measure(nidigital.PPMUMeasurementType.CURRENT) session.abort() # check instrument pin set we received is in the set of instrument pin sets we expected actual_instrument_pin_set = (session.io_resource_descriptor, pin_set_string) num_pins_for_session = len(pin_set_string.split(",")) valid_pin_sets.extend( [actual_instrument_pin_set in expected_instrument_pin_sets] * num_pins_for_session ) expected_instrument_pin_sets -= {actual_instrument_pin_set} pin_query.publish(valid_pin_sets, "ValidPinSetStrings") num_missing_pin_sets = [len(expected_instrument_pin_sets)] * len(valid_pin_sets) pin_query.publish(num_missing_pin_sets, "NumMissingPinSetStrings") @nitsm.codemoduleapi.code_module def measure_pattern( tsm_context: SemiconductorModuleContext, pins, expected_instrument_names, expected_site_lists ): pin_query, sessions, site_lists = tsm_context.pins_to_nidigital_sessions_for_pattern(pins) expected_instrument_site_lists = set(zip(expected_instrument_names, expected_site_lists)) valid_site_lists = [] re_pattern = re.compile(r"\s*site(\d+)") for session, site_list in zip(sessions, site_lists): # call some methods on the session to ensure no errors session.sites[site_list].burst_pattern("start_label") # check instrument site we received is in the set of instrument sites we expected actual_instrument_site_list = (session.io_resource_descriptor, site_list) actual_in_expected = actual_instrument_site_list in expected_instrument_site_lists site_numbers = (int(re_pattern.match(site)[1]) for site in site_list.split(",")) valid_site_lists.append({site: actual_in_expected for site in 
site_numbers}) expected_instrument_site_lists -= {actual_instrument_site_list} pin_query.publish_pattern_results(valid_site_lists, "ValidSiteLists") num_missing_site_lists = [len(expected_instrument_site_lists)] * len(tsm_context.site_numbers) tsm_context.publish_per_site(num_missing_site_lists, "NumMissingSiteLists") @nitsm.codemoduleapi.code_module def check_project_paths( tsm_context: SemiconductorModuleContext, specifications_paths, levels_paths, timing_paths, pattern_paths, source_waveform_paths, capture_waveform_paths, ): site_count = len(tsm_context.site_numbers) valid_project_paths = [ tsm_context.nidigital_project_specifications_file_paths == tuple(specifications_paths) ] * site_count valid_levels_paths = [ tsm_context.nidigital_project_levels_file_paths == tuple(levels_paths) ] * site_count valid_timing_paths = [ tsm_context.nidigital_project_timing_file_paths == tuple(timing_paths) ] * site_count valid_pattern_paths = [ tsm_context.nidigital_project_pattern_file_paths == tuple(pattern_paths) ] * site_count valid_source_waveform_paths = [ tsm_context.nidigital_project_source_waveform_file_paths == tuple(source_waveform_paths) ] * site_count valid_capture_waveform_paths = [ tsm_context.nidigital_project_capture_waveform_file_paths == tuple(capture_waveform_paths) ] * site_count tsm_context.publish_per_site(valid_project_paths, "ValidSpecificationsPaths") tsm_context.publish_per_site(valid_levels_paths, "ValidLevelsPaths") tsm_context.publish_per_site(valid_timing_paths, "ValidTimingPaths") tsm_context.publish_per_site(valid_pattern_paths, "ValidPatternPaths") tsm_context.publish_per_site(valid_source_waveform_paths, "ValidSourceWaveformPaths") tsm_context.publish_per_site(valid_capture_waveform_paths, "ValidCaptureWaveformPaths") @nitsm.codemoduleapi.code_module def close_sessions(tsm_context: SemiconductorModuleContext): sessions = tsm_context.get_all_nidigital_sessions() for session in sessions: session.close()
the-stack_106_31026
from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import copy import io import logging import os import pickle from six import string_types import shutil import tempfile import time import uuid import ray from ray.tune.logger import UnifiedLogger from ray.tune.result import (DEFAULT_RESULTS_DIR, TIME_THIS_ITER_S, TIMESTEPS_THIS_ITER, DONE, TIMESTEPS_TOTAL, EPISODES_THIS_ITER, EPISODES_TOTAL, TRAINING_ITERATION, RESULT_DUPLICATE) from ray.tune.util import UtilMonitor logger = logging.getLogger(__name__) SETUP_TIME_THRESHOLD = 10 class Trainable(object): """Abstract class for trainable models, functions, etc. A call to ``train()`` on a trainable will execute one logical iteration of training. As a rule of thumb, the execution time of one train call should be large enough to avoid overheads (i.e. more than a few seconds), but short enough to report progress periodically (i.e. at most a few minutes). Calling ``save()`` should save the training state of a trainable to disk, and ``restore(path)`` should restore a trainable to the given state. Generally you only need to implement ``_train``, ``_save``, and ``_restore`` here when subclassing Trainable. Note that, if you don't require checkpoint/restore functionality, then instead of implementing this class you can also get away with supplying just a ``my_train(config, reporter)`` function to the config. The function will be automatically converted to this interface (sans checkpoint functionality). When using Tune, Tune will convert this class into a Ray actor, which runs on a separate process. Tune will also change the current working directory of this process to `self.logdir`. """ def __init__(self, config=None, logger_creator=None): """Initialize an Trainable. Sets up logging and points ``self.logdir`` to a directory in which training outputs should be placed. Subclasses should prefer defining ``_setup()`` instead of overriding ``__init__()`` directly. Args: config (dict): Trainable-specific configuration data. By default will be saved as ``self.config``. logger_creator (func): Function that creates a ray.tune.Logger object. If unspecified, a default logger is created. """ self._experiment_id = uuid.uuid4().hex self.config = config or {} log_sys_usage = self.config.get("log_sys_usage", False) if logger_creator: self._result_logger = logger_creator(self.config) self._logdir = self._result_logger.logdir else: logdir_prefix = datetime.today().strftime("%Y-%m-%d_%H-%M-%S") if not os.path.exists(DEFAULT_RESULTS_DIR): os.makedirs(DEFAULT_RESULTS_DIR) self._logdir = tempfile.mkdtemp( prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR) self._result_logger = UnifiedLogger(self.config, self._logdir, None) self._iteration = 0 self._time_total = 0.0 self._timesteps_total = None self._episodes_total = None self._time_since_restore = 0.0 self._timesteps_since_restore = 0 self._iterations_since_restore = 0 self._restored = False start_time = time.time() self._setup(copy.deepcopy(self.config)) setup_time = time.time() - start_time if setup_time > SETUP_TIME_THRESHOLD: logger.info("_setup took {:.3f} seconds. If your trainable is " "slow to initialize, consider setting " "reuse_actors=True to reduce actor creation " "overheads.".format(setup_time)) self._local_ip = ray.services.get_node_ip_address() self._monitor = UtilMonitor(start=log_sys_usage) @classmethod def default_resource_request(cls, config): """Returns the resource requirement for the given configuration. 
This can be overriden by sub-classes to set the correct trial resource allocation, so the user does not need to. """ return None @classmethod def resource_help(cls, config): """Returns a help string for configuring this trainable's resources.""" return "" def current_ip(self): logger.warning("Getting current IP.") self._local_ip = ray.services.get_node_ip_address() return self._local_ip def train(self): """Runs one logical iteration of training. Subclasses should override ``_train()`` instead to return results. This class automatically fills the following fields in the result: `done` (bool): training is terminated. Filled only if not provided. `time_this_iter_s` (float): Time in seconds this iteration took to run. This may be overriden in order to override the system-computed time difference. `time_total_s` (float): Accumulated time in seconds for this entire experiment. `experiment_id` (str): Unique string identifier for this experiment. This id is preserved across checkpoint / restore calls. `training_iteration` (int): The index of this training iteration, e.g. call to train(). This is incremented after `_train()` is called. `pid` (str): The pid of the training process. `date` (str): A formatted date of when the result was processed. `timestamp` (str): A UNIX timestamp of when the result was processed. `hostname` (str): Hostname of the machine hosting the training process. `node_ip` (str): Node ip of the machine hosting the training process. Returns: A dict that describes training progress. """ start = time.time() result = self._train() assert isinstance(result, dict), "_train() needs to return a dict." # We do not modify internal state nor update this result if duplicate. if RESULT_DUPLICATE in result: return result result = result.copy() self._iteration += 1 self._iterations_since_restore += 1 if result.get(TIME_THIS_ITER_S) is not None: time_this_iter = result[TIME_THIS_ITER_S] else: time_this_iter = time.time() - start self._time_total += time_this_iter self._time_since_restore += time_this_iter result.setdefault(DONE, False) # self._timesteps_total should only be tracked if increments provided if result.get(TIMESTEPS_THIS_ITER) is not None: if self._timesteps_total is None: self._timesteps_total = 0 self._timesteps_total += result[TIMESTEPS_THIS_ITER] self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER] # self._episodes_total should only be tracked if increments provided if result.get(EPISODES_THIS_ITER) is not None: if self._episodes_total is None: self._episodes_total = 0 self._episodes_total += result[EPISODES_THIS_ITER] # self._timesteps_total should not override user-provided total result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total) result.setdefault(EPISODES_TOTAL, self._episodes_total) result.setdefault(TRAINING_ITERATION, self._iteration) # Provides auto-filled neg_mean_loss for avoiding regressions if result.get("mean_loss"): result.setdefault("neg_mean_loss", -result["mean_loss"]) now = datetime.today() result.update( experiment_id=self._experiment_id, date=now.strftime("%Y-%m-%d_%H-%M-%S"), timestamp=int(time.mktime(now.timetuple())), time_this_iter_s=time_this_iter, time_total_s=self._time_total, pid=os.getpid(), hostname=os.uname()[1], node_ip=self._local_ip, config=self.config, time_since_restore=self._time_since_restore, timesteps_since_restore=self._timesteps_since_restore, iterations_since_restore=self._iterations_since_restore) monitor_data = self._monitor.get_data() if monitor_data: result.update(monitor_data) self._log_result(result) return result 
def delete_checkpoint(self, checkpoint_dir): """Removes subdirectory within checkpoint_folder Args: checkpoint_dir : path to checkpoint """ if os.path.isfile(checkpoint_dir): shutil.rmtree(os.path.dirname(checkpoint_dir)) else: shutil.rmtree(checkpoint_dir) def save(self, checkpoint_dir=None): """Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path or prefix that may be passed to restore(). """ checkpoint_dir = os.path.join(checkpoint_dir or self.logdir, "checkpoint_{}".format(self._iteration)) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) checkpoint = self._save(checkpoint_dir) saved_as_dict = False if isinstance(checkpoint, string_types): if not checkpoint.startswith(checkpoint_dir): raise ValueError( "The returned checkpoint path must be within the " "given checkpoint dir {}: {}".format( checkpoint_dir, checkpoint)) checkpoint_path = checkpoint elif isinstance(checkpoint, dict): saved_as_dict = True checkpoint_path = os.path.join(checkpoint_dir, "checkpoint") with open(checkpoint_path, "wb") as f: pickle.dump(checkpoint, f) else: raise ValueError("Returned unexpected type {}. " "Expected str or dict.".format(type(checkpoint))) with open(checkpoint_path + ".tune_metadata", "wb") as f: pickle.dump({ "experiment_id": self._experiment_id, "iteration": self._iteration, "timesteps_total": self._timesteps_total, "time_total": self._time_total, "episodes_total": self._episodes_total, "saved_as_dict": saved_as_dict }, f) return checkpoint_path def save_to_object(self): """Saves the current model state to a Python object. It also saves to disk but does not return the checkpoint path. Returns: Object holding checkpoint data. """ tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir) checkpoint_path = self.save(tmpdir) # Save all files in subtree. data = {} for basedir, _, file_names in os.walk(tmpdir): for file_name in file_names: path = os.path.join(basedir, file_name) with open(path, "rb") as f: data[os.path.relpath(path, tmpdir)] = f.read() out = io.BytesIO() data_dict = pickle.dumps({ "checkpoint_name": os.path.relpath(checkpoint_path, tmpdir), "data": data, }) if len(data_dict) > 10e6: # getting pretty large logger.info("Checkpoint size is {} bytes".format(len(data_dict))) out.write(data_dict) shutil.rmtree(tmpdir) return out.getvalue() def restore(self, checkpoint_path): """Restores training state from a given model checkpoint. These checkpoints are returned from calls to save(). Subclasses should override ``_restore()`` instead to restore state. This method restores additional metadata saved with the checkpoint. 
""" with open(checkpoint_path + ".tune_metadata", "rb") as f: metadata = pickle.load(f) self._experiment_id = metadata["experiment_id"] self._iteration = metadata["iteration"] self._timesteps_total = metadata["timesteps_total"] self._time_total = metadata["time_total"] self._episodes_total = metadata["episodes_total"] saved_as_dict = metadata["saved_as_dict"] if saved_as_dict: with open(checkpoint_path, "rb") as loaded_state: checkpoint_dict = pickle.load(loaded_state) checkpoint_dict.update(tune_checkpoint_path=checkpoint_path) self._restore(checkpoint_dict) else: self._restore(checkpoint_path) self._time_since_restore = 0.0 self._timesteps_since_restore = 0 self._iterations_since_restore = 0 self._restored = True def restore_from_object(self, obj): """Restores training state from a checkpoint object. These checkpoints are returned from calls to save_to_object(). """ info = pickle.loads(obj) data = info["data"] tmpdir = tempfile.mkdtemp("restore_from_object", dir=self.logdir) checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"]) for relpath_name, file_contents in data.items(): path = os.path.join(tmpdir, relpath_name) # This may be a subdirectory, hence not just using tmpdir if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) with open(path, "wb") as f: f.write(file_contents) self.restore(checkpoint_path) shutil.rmtree(tmpdir) def export_model(self, export_formats, export_dir=None): """Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models. """ export_dir = export_dir or self.logdir return self._export_model(export_formats, export_dir) def reset_config(self, new_config): """Resets configuration without restarting the trial. This method is optional, but can be implemented to speed up algorithms such as PBT, and to allow performance optimizations such as running experiments with reuse_actors=True. Args: new_config (dir): Updated hyperparameter configuration for the trainable. Returns: True if reset was successful else False. """ return False def stop(self): """Releases all resources used by this trainable.""" self._result_logger.close() self._stop() @property def logdir(self): """Directory of the results and checkpoints for this Trainable. Tune will automatically sync this folder with the driver if execution is distributed. Note that the current working directory will also be changed to this. """ return self._logdir @property def iteration(self): """Current training iteration. This value is automatically incremented every time `train()` is called and is automatically inserted into the training result dict. """ return self._iteration def get_config(self): """Returns configuration passed in by Tune.""" return self.config def _train(self): """Subclasses should override this to implement train(). The return value will be automatically passed to the loggers. Users can also return `tune.result.DONE` or `tune.result.SHOULD_CHECKPOINT` to manually trigger termination of this trial or checkpointing of this trial. Note that manual checkpointing only works when subclassing Trainables. Returns: A dict that describes training progress. """ raise NotImplementedError def _save(self, checkpoint_dir): """Subclasses should override this to implement save(). 
Args: checkpoint_dir (str): The directory where the checkpoint file must be stored. Returns: checkpoint (str | dict): If string, the return value is expected to be the checkpoint path or prefix to be passed to `_restore()`. If dict, the return value will be automatically serialized by Tune and passed to `_restore()`. Examples: >>> print(trainable1._save("/tmp/checkpoint_1")) "/tmp/checkpoint_1/my_checkpoint_file" >>> print(trainable2._save("/tmp/checkpoint_2")) {"some": "data"} """ raise NotImplementedError def _restore(self, checkpoint): """Subclasses should override this to implement restore(). Args: checkpoint (str | dict): Value as returned by `_save`. If a string, then it is the checkpoint path. """ raise NotImplementedError def _setup(self, config): """Subclasses should override this for custom initialization. Args: config (dict): Hyperparameters and other configs given. Copy of `self.config`. """ pass def _log_result(self, result): """Subclasses can optionally override this to customize logging. Args: result (dict): Training result returned by _train(). """ self._result_logger.on_result(result) def _stop(self): """Subclasses should override this for any cleanup on stop.""" pass def _export_model(self, export_formats, export_dir): """Subclasses should override this to export model. Args: export_formats (list): List of formats that should be exported. export_dir (str): Directory to place exported models. Return: A dict that maps ExportFormats to successfully exported models. """ return {}
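
# ---------------------------------------------------------------------------
# Editor's note: added illustration, not part of the original file. It is a
# minimal Trainable subclass sketch following the docstrings above: _train()
# returns a result dict, _save() returns a dict checkpoint, and _restore()
# consumes whatever _save() returned. The class and config keys are invented
# for demonstration only.
# ---------------------------------------------------------------------------
class _CounterTrainable(Trainable):
    """Toy trainable that just counts iterations."""

    def _setup(self, config):
        self._count = 0
        self._step_size = config.get("step_size", 1)

    def _train(self):
        self._count += self._step_size
        # Returning "mean_loss" triggers the auto-filled "neg_mean_loss"
        # described in train() above.
        return {"mean_loss": 1.0 / (1 + self._count)}

    def _save(self, checkpoint_dir):
        # Returning a dict lets Tune serialize the checkpoint automatically.
        return {"count": self._count}

    def _restore(self, checkpoint):
        self._count = checkpoint["count"]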
the-stack_106_31027
from thingset.cansocket import CANsocket

sock = CANsocket('can0')  # or other interface

while True:
    frame = sock.receive()

    if isinstance(frame.cbor, float):
        print("device: 0x%x data id: 0x%x value: %.2f" %
              (frame.source, frame.dataobjectID, frame.cbor))
    else:
        print("device:", hex(frame.source), " data id:", hex(frame.dataobjectID),
              " value:", frame.cbor)
the-stack_106_31030
from __future__ import division  # Use floating point for math calculations

import math

from flask import Blueprint

from CTFd.models import (
    ChallengeFiles,
    Challenges,
    Fails,
    Flags,
    Hints,
    Solves,
    Tags,
    db,
)
from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.migrations import upgrade
from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge
from CTFd.plugins.flags import get_flag_class
from CTFd.utils.modes import get_model
from CTFd.utils.uploads import delete_file
from CTFd.utils.user import get_ip


class DynamicValueChallenge(BaseChallenge):
    id = "dynamic"  # Unique identifier used to register challenges
    name = "dynamic"  # Name of a challenge type
    templates = {  # Handlebars templates used for each aspect of challenge editing & viewing
        "create": "/plugins/dynamic_challenges/assets/create.html",
        "update": "/plugins/dynamic_challenges/assets/update.html",
        "view": "/plugins/dynamic_challenges/assets/view.html",
    }
    scripts = {  # Scripts that are loaded when a template is loaded
        "create": "/plugins/dynamic_challenges/assets/create.js",
        "update": "/plugins/dynamic_challenges/assets/update.js",
        "view": "/plugins/dynamic_challenges/assets/view.js",
    }
    # Route at which files are accessible. This must be registered using register_plugin_assets_directory()
    route = "/plugins/dynamic_challenges/assets/"
    # Blueprint used to access the static_folder directory.
    blueprint = Blueprint(
        "dynamic_challenges",
        __name__,
        template_folder="templates",
        static_folder="assets",
    )

    @classmethod
    def calculate_value(cls, challenge):
        Model = get_model()

        solve_count = (
            Solves.query.join(Model, Solves.account_id == Model.id)
            .filter(
                Solves.challenge_id == challenge.id,
                Model.hidden == False,
                Model.banned == False,
            )
            .count()
        )

        # If the solve count is 0 we shouldn't manipulate the solve count to
        # let the math update back to normal
        if solve_count != 0:
            # We subtract 1 to allow the first solver to get max point value
            solve_count -= 1

        # It is important that this calculation takes into account floats.
        # Hence this file uses from __future__ import division
        value = (
            ((challenge.minimum - challenge.initial) / (challenge.decay ** 2))
            * (solve_count ** 2)
        ) + challenge.initial

        value = math.ceil(value)

        if value < challenge.minimum:
            value = challenge.minimum

        challenge.value = value
        db.session.commit()
        return challenge

    @staticmethod
    def create(request):
        """
        This method is used to process the challenge creation request.

        :param request:
        :return:
        """
        data = request.form or request.get_json()

        challenge = DynamicChallenge(**data)

        db.session.add(challenge)
        db.session.commit()

        return challenge

    @staticmethod
    def read(challenge):
        """
        This method is used to access the data of a challenge in a format
        processable by the front end.

        :param challenge:
        :return: Challenge object, data dictionary to be returned to the user
        """
        challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()
        data = {
            "id": challenge.id,
            "name": challenge.name,
            "value": challenge.value,
            "initial": challenge.initial,
            "decay": challenge.decay,
            "minimum": challenge.minimum,
            "description": challenge.description,
            "category": challenge.category,
            "state": challenge.state,
            "max_attempts": challenge.max_attempts,
            "type": challenge.type,
            "type_data": {
                "id": DynamicValueChallenge.id,
                "name": DynamicValueChallenge.name,
                "templates": DynamicValueChallenge.templates,
                "scripts": DynamicValueChallenge.scripts,
            },
        }
        return data

    @staticmethod
    def update(challenge, request):
        """
        This method is used to update the information associated with a challenge.
        This should be kept strictly to the Challenges table and any child tables.

        :param challenge:
        :param request:
        :return:
        """
        data = request.form or request.get_json()

        for attr, value in data.items():
            # We need to set these to floats so that the next operations don't operate on strings
            if attr in ("initial", "minimum", "decay"):
                value = float(value)
            setattr(challenge, attr, value)

        return DynamicValueChallenge.calculate_value(challenge)

    @staticmethod
    def delete(challenge):
        """
        This method is used to delete the resources used by a challenge.

        :param challenge:
        :return:
        """
        Fails.query.filter_by(challenge_id=challenge.id).delete()
        Solves.query.filter_by(challenge_id=challenge.id).delete()
        Flags.query.filter_by(challenge_id=challenge.id).delete()
        files = ChallengeFiles.query.filter_by(challenge_id=challenge.id).all()
        for f in files:
            delete_file(f.id)
        ChallengeFiles.query.filter_by(challenge_id=challenge.id).delete()
        Tags.query.filter_by(challenge_id=challenge.id).delete()
        Hints.query.filter_by(challenge_id=challenge.id).delete()
        DynamicChallenge.query.filter_by(id=challenge.id).delete()
        Challenges.query.filter_by(id=challenge.id).delete()
        db.session.commit()

    @staticmethod
    def attempt(challenge, request):
        """
        This method is used to check whether a given input is right or wrong. It does not
        make any changes and should return a boolean for correctness and a string to be
        shown to the user. It is also in charge of parsing the user's input from the
        request itself.

        :param challenge: The Challenge object from the database
        :param request: The request the user submitted
        :return: (boolean, string)
        """
        data = request.form or request.get_json()
        submission = data["submission"].strip()
        flags = Flags.query.filter_by(challenge_id=challenge.id).all()
        for flag in flags:
            if get_flag_class(flag.type).compare(flag, submission):
                return True, "Correct"
        return False, "Incorrect"

    @staticmethod
    def solve(user, team, challenge, request):
        """
        This method is used to insert Solves into the database in order to mark a challenge as solved.

        :param team: The Team object from the database
        :param challenge: The Challenge object from the database
        :param request: The request the user submitted
        :return:
        """
        challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()
        data = request.form or request.get_json()
        submission = data["submission"].strip()

        solve = Solves(
            user_id=user.id,
            team_id=team.id if team else None,
            challenge_id=challenge.id,
            ip=get_ip(req=request),
            provided=submission,
        )
        db.session.add(solve)
        db.session.commit()

        DynamicValueChallenge.calculate_value(challenge)

    @staticmethod
    def fail(user, team, challenge, request):
        """
        This method is used to insert Fails into the database in order to mark an answer incorrect.

        :param team: The Team object from the database
        :param challenge: The Challenge object from the database
        :param request: The request the user submitted
        :return:
        """
        data = request.form or request.get_json()
        submission = data["submission"].strip()

        wrong = Fails(
            user_id=user.id,
            team_id=team.id if team else None,
            challenge_id=challenge.id,
            ip=get_ip(request),
            provided=submission,
        )
        db.session.add(wrong)
        db.session.commit()
        db.session.close()


class DynamicChallenge(Challenges):
    __mapper_args__ = {"polymorphic_identity": "dynamic"}
    id = db.Column(
        db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True
    )
    initial = db.Column(db.Integer, default=0)
    minimum = db.Column(db.Integer, default=0)
    decay = db.Column(db.Integer, default=0)

    def __init__(self, *args, **kwargs):
        super(DynamicChallenge, self).__init__(**kwargs)
        self.initial = kwargs["value"]


def load(app):
    upgrade()
    CHALLENGE_CLASSES["dynamic"] = DynamicValueChallenge
    register_plugin_assets_directory(
        app, base_path="/plugins/dynamic_challenges/assets/"
    )
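
# ---------------------------------------------------------------------------
# Editor's note: added illustration, not part of the original plugin. It
# restates the scoring curve from DynamicValueChallenge.calculate_value() as a
# pure function so the decay behaviour can be checked without a database; the
# function name and example parameters are invented for demonstration.
# ---------------------------------------------------------------------------
def _dynamic_value(initial, minimum, decay, solve_count):
    """Mirror of the quadratic decay used above."""
    if solve_count != 0:
        solve_count -= 1  # the first solver still earns the full value
    value = ((minimum - initial) / (decay ** 2)) * (solve_count ** 2) + initial
    return max(math.ceil(value), minimum)


# e.g. initial=500, minimum=100, decay=10: 500 points after one solve,
# 496 after two solves, and never below 100 regardless of solve count.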
the-stack_106_31034
import RPi.GPIO as GPIO
from lib_nrf24 import NRF24
import time
import spidev
from threading import Timer

# Define Board GPIOs
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

# needed GPIO PINS
PINS = [2, 3, 4, 5, 6, 7]


# set all pins off
def allPinsOff():
    for i in PINS:
        GPIO.setup(i, GPIO.OUT, initial=GPIO.HIGH)  # initial off


allPinsOff()

pipes = [[0xe7, 0xe7, 0xe7, 0xe7, 0xe7], [0xc2, 0xc2, 0xc2, 0xc2, 0xc2]]

radio = NRF24(GPIO, spidev.SpiDev())
radio.begin(0, 17)
radio.setRetries(15, 15)
radio.setChannel(100)
radio.setDataRate(NRF24.BR_250KBPS)
radio.setPALevel(NRF24.PA_MAX)
radio.setPayloadSize(7)
radio.setAutoAck(True)
radio.enableAckPayload()
radio.enableDynamicPayloads()

radio.openWritingPipe(pipes[1])
radio.openReadingPipe(1, pipes[0])
radio.stopListening()
radio.startListening()

millis = lambda: int(round(time.time() * 1000))
last_call = millis()

# timer status
isTimerActive = False


def pinOff(pin):
    global isTimerActive
    GPIO.output(pin, GPIO.HIGH)
    isTimerActive = False


while True:
    while not radio.available():
        time.sleep(1 / 100)

    recv = []
    radio.read(recv, radio.getDynamicPayloadSize())
    radio.stopListening()
    radio.write(recv)
    radio.startListening()

    try:
        # translate message
        arr = []
        for n in recv:
            # Decode into standard unicode set
            if (n >= 32 and n <= 126):
                arr.append(chr(n))

        # validation
        if len(arr) > 0 and int(arr[0]) in PINS:
            # validation complete --> check last call to prevent bubbles
            delta = millis() - last_call
            if delta > 30:
                # on
                if arr[1] == '1':
                    # reset all active pins
                    for pin in PINS:
                        if not GPIO.input(pin):
                            GPIO.output(pin, GPIO.HIGH)
                    # check if time is relevant
                    secs = int("".join(arr)[2:])
                    if secs > 0:
                        # activate
                        GPIO.output(int(arr[0]), GPIO.LOW)
                        # timer
                        if isTimerActive:
                            t.cancel()
                            isTimerActive = False
                        t = Timer(secs, pinOff, [int(arr[0])])
                        t.start()
                        isTimerActive = True
                else:
                    # off
                    GPIO.output(int(arr[0]), GPIO.HIGH)
                # update last reply
                last_call = millis()
    except Exception as e:
        # secure off all
        for pin in PINS:
            if not GPIO.input(pin):
                GPIO.output(pin, GPIO.HIGH)
        pass
the-stack_106_31035
#!/bin/env python
# -*- coding: utf8 -*-


def shellSort(A):
    def getCols(n):
        cols = [1]
        val = 1
        while val < n:
            val = int(val * 2.2)
            cols.insert(0, val)
        return cols

    for h in getCols(len(A)):
        for i in range(h, len(A)):
            cur = A[i]
            j = i
            while j >= h and A[j - h] > cur:
                A[j] = A[j - h]
                j -= h
            A[j] = cur
    return A
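
# ---------------------------------------------------------------------------
# Editor's note: added usage sketch, not part of the original file. shellSort
# sorts the list in place (using the ~2.2x gap sequence from getCols) and
# returns the same list, so a quick self-check looks like this:
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    data = [9, 1, 8, 2, 7, 3, 6, 4, 5, 0]
    print(shellSort(data))  # -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]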
the-stack_106_31036
# vim: set fileencoding=utf-8 : # Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # NOTE: The fileencoding comment on the first line of the file is # important; without it, Python will choke while trying to parse the file, # since it includes non-ASCII characters. import os import stat import sys import tempfile import unittest from webkitpy.common.system.filesystem import FileSystem class GenericFileSystemTests(object): """Tests that should pass on either a real or mock filesystem.""" # pylint gets confused about this being a mixin: pylint: disable=E1101 def setup_generic_test_dir(self): fs = self.fs self.generic_test_dir = str(self.fs.mkdtemp()) self.orig_cwd = fs.getcwd() fs.chdir(self.generic_test_dir) fs.write_text_file('foo.txt', 'foo') fs.write_text_file('foobar', 'foobar') fs.maybe_make_directory('foodir') fs.write_text_file(fs.join('foodir', 'baz'), 'baz') fs.chdir(self.orig_cwd) def teardown_generic_test_dir(self): self.fs.rmtree(self.generic_test_dir) self.fs.chdir(self.orig_cwd) self.generic_test_dir = None def test_glob__trailing_asterisk(self): self.fs.chdir(self.generic_test_dir) self.assertEqual(set(self.fs.glob('fo*')), set(['foo.txt', 'foobar', 'foodir'])) def test_glob__leading_asterisk(self): self.fs.chdir(self.generic_test_dir) self.assertEqual(set(self.fs.glob('*xt')), set(['foo.txt'])) def test_glob__middle_asterisk(self): self.fs.chdir(self.generic_test_dir) self.assertEqual(set(self.fs.glob('f*r')), set(['foobar', 'foodir'])) def test_glob__period_is_escaped(self): self.fs.chdir(self.generic_test_dir) self.assertEqual(set(self.fs.glob('foo.*')), set(['foo.txt'])) def test_relpath_unix(self): if sys.platform == 'win32': return self.assertEqual(self.fs.relpath('aaa/bbb'), 'aaa/bbb') self.assertEqual(self.fs.relpath('aaa/bbb/'), 'aaa/bbb') self.assertEqual(self.fs.relpath('aaa/bbb/.'), 'aaa/bbb') self.assertEqual(self.fs.relpath('aaa/./bbb'), 'aaa/bbb') self.assertEqual(self.fs.relpath('aaa/../bbb/'), 'bbb') self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/bbb'), '.') self.assertEqual(self.fs.relpath('aaa/bbb/ccc', 'aaa/bbb'), 
'ccc') self.assertEqual(self.fs.relpath('aaa/./ccc', 'aaa/bbb'), '../ccc') self.assertEqual(self.fs.relpath('aaa/../ccc', 'aaa/bbb'), '../../ccc') self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/ccc'), '../bbb') self.assertEqual(self.fs.relpath('aaa/bbb', 'ccc/ddd'), '../../aaa/bbb') self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/b'), '../bbb') self.assertEqual(self.fs.relpath('aaa/bbb', 'a/bbb'), '../../aaa/bbb') def test_relpath_win32(self): if sys.platform != 'win32': return self.assertEqual(self.fs.relpath('aaa\\bbb'), 'aaa\\bbb') self.assertEqual(self.fs.relpath('aaa\\bbb\\'), 'aaa\\bbb') self.assertEqual(self.fs.relpath('aaa\\bbb\\.'), 'aaa\\bbb') self.assertEqual(self.fs.relpath('aaa\\.\\bbb'), 'aaa\\bbb') self.assertEqual(self.fs.relpath('aaa\\..\\bbb\\'), 'bbb') self.assertEqual(self.fs.relpath('aaa\\bbb', 'aaa\\bbb'), '.') self.assertEqual(self.fs.relpath('aaa\\bbb\\ccc', 'aaa\\bbb'), 'ccc') self.assertEqual(self.fs.relpath('aaa\\.\\ccc', 'aaa\\bbb'), '..\\ccc') self.assertEqual(self.fs.relpath('aaa\\..\\ccc', 'aaa\\bbb'), '..\\..\\ccc') self.assertEqual(self.fs.relpath('aaa\\bbb', 'aaa\\ccc'), '..\\bbb') self.assertEqual(self.fs.relpath('aaa\\bbb', 'ccc\\ddd'), '..\\..\\aaa\\bbb') self.assertEqual(self.fs.relpath('aaa\\bbb', 'aaa\\b'), '..\\bbb') self.assertEqual(self.fs.relpath('aaa\\bbb', 'a\\bbb'), '..\\..\\aaa\\bbb') def test_rmtree(self): self.fs.chdir(self.generic_test_dir) self.fs.rmtree('foo') self.assertTrue(self.fs.exists('foodir')) self.assertTrue(self.fs.exists(self.fs.join('foodir', 'baz'))) self.fs.rmtree('foodir') self.assertFalse(self.fs.exists('foodir')) self.assertFalse(self.fs.exists(self.fs.join('foodir', 'baz'))) def test_copytree(self): self.fs.chdir(self.generic_test_dir) self.fs.copytree('foodir/', 'bardir/') self.assertTrue(self.fs.exists('bardir')) self.assertTrue(self.fs.exists(self.fs.join('bardir', 'baz'))) def test_move(self): self.fs.chdir(self.generic_test_dir) self.fs.move('foo.txt', 'bar.txt') self.assertFalse(self.fs.exists('foo.txt')) self.assertTrue(self.fs.exists('bar.txt')) self.fs.move('foodir', 'bardir') self.assertFalse(self.fs.exists('foodir')) self.assertFalse(self.fs.exists(self.fs.join('foodir', 'baz'))) self.assertTrue(self.fs.exists('bardir')) self.assertTrue(self.fs.exists(self.fs.join('bardir', 'baz'))) class RealFileSystemTest(unittest.TestCase, GenericFileSystemTests): def setUp(self): self.fs = FileSystem() self.setup_generic_test_dir() self._this_dir = os.path.dirname(os.path.abspath(__file__)) self._missing_file = os.path.join(self._this_dir, 'missing_file.py') self._this_file = os.path.join(self._this_dir, 'filesystem_unittest.py') def tearDown(self): self.teardown_generic_test_dir() self.fs = None def test_chdir(self): fs = FileSystem() cwd = fs.getcwd() newdir = '/' if sys.platform == 'win32': newdir = 'c:\\' fs.chdir(newdir) self.assertEqual(fs.getcwd(), newdir) fs.chdir(cwd) def test_chdir__notexists(self): fs = FileSystem() newdir = '/dirdoesnotexist' if sys.platform == 'win32': newdir = 'c:\\dirdoesnotexist' self.assertRaises(OSError, fs.chdir, newdir) def test_exists__true(self): fs = FileSystem() self.assertTrue(fs.exists(self._this_file)) def test_exists__false(self): fs = FileSystem() self.assertFalse(fs.exists(self._missing_file)) def test_getcwd(self): fs = FileSystem() self.assertTrue(fs.exists(fs.getcwd())) def test_isdir__true(self): fs = FileSystem() self.assertTrue(fs.isdir(self._this_dir)) def test_isdir__false(self): fs = FileSystem() self.assertFalse(fs.isdir(self._this_file)) def test_join(self): fs 
= FileSystem() self.assertEqual(fs.join('foo', 'bar'), os.path.join('foo', 'bar')) def test_listdir(self): fs = FileSystem() with fs.mkdtemp(prefix='filesystem_unittest_') as d: self.assertEqual(fs.listdir(d), []) new_file = os.path.join(d, 'foo') fs.write_text_file(new_file, u'foo') self.assertEqual(fs.listdir(d), ['foo']) os.remove(new_file) def test_walk(self): fs = FileSystem() with fs.mkdtemp(prefix='filesystem_unittest_') as d: self.assertEqual(list(fs.walk(d)), [(d, [], [])]) new_file = os.path.join(d, 'foo') fs.write_text_file(new_file, u'foo') self.assertEqual(list(fs.walk(d)), [(d, [], ['foo'])]) os.remove(new_file) def test_maybe_make_directory__success(self): fs = FileSystem() with fs.mkdtemp(prefix='filesystem_unittest_') as base_path: sub_path = os.path.join(base_path, "newdir") self.assertFalse(os.path.exists(sub_path)) self.assertFalse(fs.isdir(sub_path)) fs.maybe_make_directory(sub_path) self.assertTrue(os.path.exists(sub_path)) self.assertTrue(fs.isdir(sub_path)) # Make sure we can re-create it. fs.maybe_make_directory(sub_path) self.assertTrue(os.path.exists(sub_path)) self.assertTrue(fs.isdir(sub_path)) # Clean up. os.rmdir(sub_path) self.assertFalse(os.path.exists(base_path)) self.assertFalse(fs.isdir(base_path)) def test_maybe_make_directory__failure(self): # FIXME: os.chmod() doesn't work on Windows to set directories # as readonly, so we skip this test for now. if sys.platform in ('win32', 'cygwin'): return fs = FileSystem() with fs.mkdtemp(prefix='filesystem_unittest_') as d: # Remove write permissions on the parent directory. os.chmod(d, stat.S_IRUSR) # Now try to create a sub directory - should fail. sub_dir = fs.join(d, 'subdir') self.assertRaises(OSError, fs.maybe_make_directory, sub_dir) # Clean up in case the test failed and we did create the # directory. 
if os.path.exists(sub_dir): os.rmdir(sub_dir) def test_read_and_write_text_file(self): fs = FileSystem() text_path = None unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D' hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD' try: text_path = tempfile.mktemp(prefix='tree_unittest_') file = fs.open_text_file_for_writing(text_path) file.write(unicode_text_string) file.close() file = fs.open_text_file_for_reading(text_path) read_text = file.read() file.close() self.assertEqual(read_text, unicode_text_string) finally: if text_path and fs.isfile(text_path): os.remove(text_path) def test_read_and_write_file(self): fs = FileSystem() text_path = None binary_path = None unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D' hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD' try: text_path = tempfile.mktemp(prefix='tree_unittest_') binary_path = tempfile.mktemp(prefix='tree_unittest_') fs.write_text_file(text_path, unicode_text_string) contents = fs.read_binary_file(text_path) self.assertEqual(contents, hex_equivalent) fs.write_binary_file(binary_path, hex_equivalent) text_contents = fs.read_text_file(binary_path) self.assertEqual(text_contents, unicode_text_string) finally: if text_path and fs.isfile(text_path): os.remove(text_path) if binary_path and fs.isfile(binary_path): os.remove(binary_path) def test_read_binary_file__missing(self): fs = FileSystem() self.assertRaises(IOError, fs.read_binary_file, self._missing_file) def test_read_text_file__missing(self): fs = FileSystem() self.assertRaises(IOError, fs.read_text_file, self._missing_file) def test_remove_file_with_retry(self): RealFileSystemTest._remove_failures = 2 def remove_with_exception(filename): RealFileSystemTest._remove_failures -= 1 if RealFileSystemTest._remove_failures >= 0: try: raise WindowsError except NameError: raise FileSystem._WindowsError fs = FileSystem() self.assertTrue(fs.remove('filename', remove_with_exception)) self.assertEqual(-1, RealFileSystemTest._remove_failures) def test_sep(self): fs = FileSystem() self.assertEqual(fs.sep, os.sep) self.assertEqual(fs.join("foo", "bar"), os.path.join("foo", "bar"))
the-stack_106_31037
try: from collections.abc import Sized except ImportError: from collections import Sized from collections import defaultdict from functools import partial import numpy as np from scipy.stats import rankdata import sklearn from sklearn.base import is_classifier, clone from joblib import Parallel, delayed from sklearn.model_selection._search import BaseSearchCV from sklearn.utils import check_random_state from sklearn.utils.fixes import MaskedArray from sklearn.utils.validation import indexable, check_is_fitted try: from sklearn.metrics import check_scoring except ImportError: from sklearn.metrics.scorer import check_scoring from . import Optimizer from .utils import point_asdict, dimensions_aslist, eval_callbacks from .space import check_dimension from .callbacks import check_callback class BayesSearchCV(BaseSearchCV): """Bayesian optimization over hyper parameters. BayesSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. Parameters are presented as a list of skopt.space.Dimension objects. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each search point. This object is assumed to implement the scikit-learn estimator api. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. search_spaces : dict, list of dict or list of tuple containing (dict, int). One of these cases: 1. dictionary, where keys are parameter names (strings) and values are skopt.space.Dimension instances (Real, Integer or Categorical) or any other valid value that defines skopt dimension (see skopt.Optimizer docs). Represents search space over parameters of the provided estimator. 2. list of dictionaries: a list of dictionaries, where every dictionary fits the description given in case 1 above. If a list of dictionary objects is given, then the search is performed sequentially for every parameter space with maximum number of evaluations set to self.n_iter. 3. list of (dict, int > 0): an extension of case 2 above, where first element of every tuple is a dictionary representing some search subspace, similarly as in case 2, and second element is a number of iterations that will be spent optimizing over this subspace. n_iter : int, default=50 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. Consider increasing ``n_points`` if you want to try more parameter settings in parallel. optimizer_kwargs : dict, optional Dict of arguments passed to :class:`Optimizer`. For example, ``{'base_estimator': 'RF'}`` would use a Random Forest surrogate instead of the default Gaussian Process. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. 
At maximum there are ``n_points`` times ``cv`` jobs available during each iteration. n_points : int, default=1 Number of parameter settings to sample in parallel. If this does not align with ``n_iter``, the last iteration will sample less points. See also :func:`~Optimizer.ask` pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. return_train_score : boolean, default=False If ``'True'``, the ``cv_results_`` attribute will include training scores. Examples -------- >>> from skopt import BayesSearchCV >>> # parameter ranges are specified by one of below >>> from skopt.space import Real, Categorical, Integer >>> >>> from sklearn.datasets import load_iris >>> from sklearn.svm import SVC >>> from sklearn.model_selection import train_test_split >>> >>> X, y = load_iris(True) >>> X_train, X_test, y_train, y_test = train_test_split(X, y, ... train_size=0.75, ... random_state=0) >>> >>> # log-uniform: understand as search over p = exp(x) by varying x >>> opt = BayesSearchCV( ... SVC(), ... { ... 'C': Real(1e-6, 1e+6, prior='log-uniform'), ... 'gamma': Real(1e-6, 1e+1, prior='log-uniform'), ... 'degree': Integer(1,8), ... 'kernel': Categorical(['linear', 'poly', 'rbf']), ... }, ... n_iter=32, ... random_state=0 ... ) >>> >>> # executes bayesian optimization >>> _ = opt.fit(X_train, y_train) >>> >>> # model can be saved, used for predictions or scoring >>> print(opt.score(X_test, y_test)) 0.973... 
Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. For instance the below given table +--------------+-------------+-------------------+---+---------------+ | param_kernel | param_gamma | split0_test_score |...|rank_test_score| +==============+=============+===================+===+===============+ | 'rbf' | 0.1 | 0.8 |...| 2 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.2 | 0.9 |...| 1 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.3 | 0.7 |...| 1 | +--------------+-------------+-------------------+---+---------------+ will be represented by a ``cv_results_`` dict of:: { 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], mask = False), 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), 'split0_test_score' : [0.8, 0.9, 0.7], 'split1_test_score' : [0.82, 0.5, 0.7], 'mean_test_score' : [0.81, 0.7, 0.7], 'std_test_score' : [0.02, 0.2, 0.], 'rank_test_score' : [3, 1, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. optimizer_results_ : list of `OptimizeResult` Contains a `OptimizeResult` for each search space. The search space parameter are sorted by its name. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. 
""" def __init__(self, estimator, search_spaces, optimizer_kwargs=None, n_iter=50, scoring=None, fit_params=None, n_jobs=1, n_points=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise', return_train_score=False): self.search_spaces = search_spaces self.n_iter = n_iter self.n_points = n_points self.random_state = random_state self.optimizer_kwargs = optimizer_kwargs self._check_search_space(self.search_spaces) # Temporary fix for compatibility with sklearn 0.20 and 0.21 # See scikit-optimize#762 # To be consistent with sklearn 0.21+, fit_params should be deprecated # in the constructor and be passed in ``fit``. self.fit_params = fit_params super(BayesSearchCV, self).__init__( estimator=estimator, scoring=scoring, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score, return_train_score=return_train_score) def _check_search_space(self, search_space): """Checks whether the search space argument is correct""" if len(search_space) == 0: raise ValueError( "The search_spaces parameter should contain at least one" "non-empty search space, got %s" % search_space ) # check if space is a single dict, convert to list if so if isinstance(search_space, dict): search_space = [search_space] # check if the structure of the space is proper if isinstance(search_space, list): # convert to just a list of dicts dicts_only = [] # 1. check the case when a tuple of space, n_iter is provided for elem in search_space: if isinstance(elem, tuple): if len(elem) != 2: raise ValueError( "All tuples in list of search spaces should have" "length 2, and contain (dict, int), got %s" % elem ) subspace, n_iter = elem if (not isinstance(n_iter, int)) or n_iter < 0: raise ValueError( "Number of iterations in search space should be" "positive integer, got %s in tuple %s " % (n_iter, elem) ) # save subspaces here for further checking dicts_only.append(subspace) elif isinstance(elem, dict): dicts_only.append(elem) else: raise TypeError( "A search space should be provided as a dict or" "tuple (dict, int), got %s" % elem) # 2. check all the dicts for correctness of contents for subspace in dicts_only: for k, v in subspace.items(): check_dimension(v) else: raise TypeError( "Search space should be provided as a dict or list of dict," "got %s" % search_space) # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV @property def best_score_(self): check_is_fitted(self, 'cv_results_') return self.cv_results_['mean_test_score'][self.best_index_] # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV @property def best_params_(self): check_is_fitted(self, 'cv_results_') return self.cv_results_['params'][self.best_index_] @property def optimizer_results_(self): check_is_fitted(self, '_optim_results') return self._optim_results # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV def _fit(self, X, y, groups, parameter_iterable): """ Actual fitting, performing the search over parameters. 
Taken from https://github.com/scikit-learn/scikit-learn/blob/0.18.X .../sklearn/model_selection/_search.py """ estimator = self.estimator cv = sklearn.model_selection._validation.check_cv( self.cv, y, classifier=is_classifier(estimator)) self.scorer_ = check_scoring( self.estimator, scoring=self.scoring) X, y, groups = indexable(X, y, groups) n_splits = cv.get_n_splits(X, y, groups) if self.verbose > 0 and isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(n_splits, n_candidates, n_candidates * n_splits)) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch cv_iter = list(cv.split(X, y, groups)) out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )(delayed(sklearn.model_selection._validation._fit_and_score)( clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, fit_params=self.fit_params, return_train_score=self.return_train_score, return_n_test_samples=True, return_times=True, return_parameters=True, error_score=self.error_score ) for parameters in parameter_iterable for train, test in cv_iter) # if one choose to see train score, "out" will contain train score info if self.return_train_score: (train_scores, test_scores, test_sample_counts, fit_time, score_time, parameters) = zip(*out) else: (test_scores, test_sample_counts, fit_time, score_time, parameters) = zip(*out) candidate_params = parameters[::n_splits] n_candidates = len(candidate_params) results = dict() def _store(key_name, array, weights=None, splits=False, rank=False): """A small helper to store the scores/times to the cv_results_""" array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits) if splits: for split_i in range(n_splits): results["split%d_%s" % (split_i, key_name)] = array[:, split_i] array_means = np.average(array, axis=1, weights=weights) results['mean_%s' % key_name] = array_means # Weighted std is not directly available in numpy array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)) results['std_%s' % key_name] = array_stds if rank: results["rank_%s" % key_name] = np.asarray( rankdata(-array_means, method='min'), dtype=np.int32) # Computed the (weighted) mean and std for test scores alone # NOTE test_sample counts (weights) remain the same for all candidates test_sample_counts = np.array(test_sample_counts[:n_splits], dtype=np.int) _store('test_score', test_scores, splits=True, rank=True, weights=test_sample_counts if self.iid else None) if self.return_train_score: _store('train_score', train_scores, splits=True) _store('fit_time', fit_time) _store('score_time', score_time) best_index = np.flatnonzero(results["rank_test_score"] == 1)[0] best_parameters = candidate_params[best_index] # Use one MaskedArray and mask all the places where the param is not # applicable for that candidate. Use defaultdict as each candidate may # not contain all the params param_results = defaultdict(partial( MaskedArray, np.empty(n_candidates,), mask=True, dtype=object)) for cand_i, params in enumerate(candidate_params): for name, value in params.items(): # An all masked empty array gets created for the key # `"param_%s" % name` at the first occurence of `name`. 
# Setting the value at an index also unmasks that index param_results["param_%s" % name][cand_i] = value results.update(param_results) # Store a list of param dicts at the key 'params' results['params'] = candidate_params self.cv_results_ = results self.best_index_ = best_index self.n_splits_ = n_splits if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best_parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self def _fit_best_model(self, X, y): """Fit the estimator copy with best parameters found to the provided data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], Target relative to X for classification or regression. Returns ------- self """ self.best_estimator_ = clone(self.estimator) self.best_estimator_.set_params(**self.best_params_) self.best_estimator_.fit(X, y, **(self.fit_params or {})) return self def _make_optimizer(self, params_space): """Instantiate skopt Optimizer class. Parameters ---------- params_space : dict Represents parameter search space. The keys are parameter names (strings) and values are skopt.space.Dimension instances, one of Real, Integer or Categorical. Returns ------- optimizer: Instance of the `Optimizer` class used for for search in some parameter space. """ kwargs = self.optimizer_kwargs_.copy() kwargs['dimensions'] = dimensions_aslist(params_space) optimizer = Optimizer(**kwargs) return optimizer def _step(self, X, y, search_space, optimizer, groups=None, n_points=1): """Generate n_jobs parameters and evaluate them in parallel. """ # get parameter values to evaluate params = optimizer.ask(n_points=n_points) # convert parameters to python native types params = [[np.array(v).item() for v in p] for p in params] # make lists into dictionaries params_dict = [point_asdict(search_space, p) for p in params] # HACK: self.cv_results_ is reset at every call to _fit, keep current all_cv_results = self.cv_results_ # HACK: this adds compatibility with different versions of sklearn refit = self.refit self.refit = False self._fit(X, y, groups, params_dict) self.refit = refit # merge existing and new cv_results_ for k in self.cv_results_: all_cv_results[k].extend(self.cv_results_[k]) all_cv_results["rank_test_score"] = list(np.asarray( rankdata(-np.array(all_cv_results['mean_test_score']), method='min'), dtype=np.int32)) if self.return_train_score: all_cv_results["rank_train_score"] = list(np.asarray( rankdata(-np.array(all_cv_results['mean_train_score']), method='min'), dtype=np.int32)) self.cv_results_ = all_cv_results self.best_index_ = np.argmax(self.cv_results_['mean_test_score']) # feed the point and objective back into optimizer local_results = self.cv_results_['mean_test_score'][-len(params):] # optimizer minimizes objective, hence provide negative score return optimizer.tell(params, [-score for score in local_results]) @property def total_iterations(self): """ Count total iterations that will be taken to explore all subspaces with `fit` method. 
Returns ------- max_iter: int, total number of iterations to explore """ total_iter = 0 for elem in self.search_spaces: if isinstance(elem, tuple): space, n_iter = elem else: n_iter = self.n_iter total_iter += n_iter return total_iter def _run_search(self, x): pass def fit(self, X, y=None, groups=None, callback=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. y : array-like, shape = [n_samples] or [n_samples, n_output] Target relative to X for classification or regression (class labels should be integers or strings). groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. callback: [callable, list of callables, optional] If callable then `callback(res)` is called after each parameter combination tested. If list of callables, then each callable in the list is called. """ # check if space is a single dict, convert to list if so search_spaces = self.search_spaces if isinstance(search_spaces, dict): search_spaces = [search_spaces] callbacks = check_callback(callback) if self.optimizer_kwargs is None: self.optimizer_kwargs_ = {} else: self.optimizer_kwargs_ = dict(self.optimizer_kwargs) random_state = check_random_state(self.random_state) self.optimizer_kwargs_['random_state'] = random_state # Instantiate optimizers for all the search spaces. optimizers = [] for search_space in search_spaces: if isinstance(search_space, tuple): search_space = search_space[0] optimizers.append(self._make_optimizer(search_space)) self.optimizers_ = optimizers # will save the states of the optimizers self.cv_results_ = defaultdict(list) self.best_index_ = None self.multimetric_ = False self._optim_results = [] n_points = self.n_points for search_space, optimizer in zip(search_spaces, optimizers): # if not provided with search subspace, n_iter is taken as # self.n_iter if isinstance(search_space, tuple): search_space, n_iter = search_space else: n_iter = self.n_iter # do the optimization for particular search space while n_iter > 0: # when n_iter < n_points points left for evaluation n_points_adjusted = min(n_iter, n_points) optim_result = self._step( X, y, search_space, optimizer, groups=groups, n_points=n_points_adjusted ) n_iter -= n_points if eval_callbacks(callbacks, optim_result): break self._optim_results.append(optim_result) # Refit the best model on the the whole dataset if self.refit: self._fit_best_model(X, y) return self
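
# ---------------------------------------------------------------------------
# Editor's note: added illustration, not part of the original module. It
# sketches the "list of (dict, n_iter)" form of ``search_spaces`` described in
# the class docstring above, where each subspace gets its own iteration
# budget. The dataset, estimator, and iteration counts are examples only, and
# actually running it assumes compatible scikit-learn/scikit-optimize
# versions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    from skopt.space import Categorical, Real

    X, y = load_iris(return_X_y=True)

    opt = BayesSearchCV(
        SVC(),
        [
            # 8 iterations spent on a linear-kernel subspace ...
            ({"C": Real(1e-6, 1e6, prior="log-uniform"),
              "kernel": Categorical(["linear"])}, 8),
            # ... then 16 iterations on an rbf-kernel subspace.
            ({"C": Real(1e-6, 1e6, prior="log-uniform"),
              "gamma": Real(1e-6, 1e1, prior="log-uniform"),
              "kernel": Categorical(["rbf"])}, 16),
        ],
        cv=3,
        random_state=0,
    )
    opt.fit(X, y)
    print(opt.best_params_, opt.best_score_)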
the-stack_106_31042
# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)
#
# SPDX-License-Identifier: MIT

import argparse
import os
import struct
import sys

sys.path.insert(0, "bitmap_font")
sys.path.insert(0, "../../tools/bitmap_font")

from adafruit_bitmap_font import bitmap_font

parser = argparse.ArgumentParser(description="Generate USB descriptors.")
parser.add_argument("--font", type=str, help="Font path", required=True)
parser.add_argument("--extra_characters", type=str, help="Unicode string of extra characters")
parser.add_argument(
    "--sample_file",
    type=argparse.FileType("r", encoding="utf-8"),
    help="Text file that includes strings to support.",
)
parser.add_argument("--output_c_file", type=argparse.FileType("w"), required=True)

args = parser.parse_args()


class BitmapStub:
    def __init__(self, width, height, color_depth):
        self.width = width
        self.rows = [b""] * height

    def _load_row(self, y, row):
        self.rows[y] = bytes(row)


f = bitmap_font.load_font(args.font, BitmapStub)

# Load extra characters from the sample file.
sample_characters = set()
if args.sample_file:
    for line in args.sample_file:
        # Skip comments because we add additional characters in our huffman comments.
        if line.startswith("//"):
            continue
        for c in line.strip():
            sample_characters.add(c)

# Merge visible ascii, sample characters and extra characters.
visible_ascii = bytes(range(0x20, 0x7F)).decode("utf-8")
all_characters = visible_ascii
for c in sample_characters:
    if c not in all_characters:
        all_characters += c
if args.extra_characters:
    # strings have no extend(); concatenate instead
    all_characters += args.extra_characters
all_characters = "".join(sorted(set(all_characters)))
filtered_characters = all_characters

# Try to pre-load all of the glyphs. Misses will still be slow later.
f.load_glyphs(set(ord(c) for c in all_characters))

missing = 0
# Get each glyph.
for c in set(all_characters):
    if ord(c) not in f._glyphs:
        missing += 1
        filtered_characters = filtered_characters.replace(c, "")
        continue
    g = f.get_glyph(ord(c))
    if g["shift"][1] != 0:
        raise RuntimeError("y shift")

if missing > 0:
    print("Font missing", missing, "characters", file=sys.stderr)

x, y, dx, dy = f.get_bounding_box()
tile_x, tile_y = x - dx, y - dy
total_bits = tile_x * len(all_characters)
total_bits += 32 - total_bits % 32
bytes_per_row = total_bits // 8
b = bytearray(bytes_per_row * tile_y)

for x, c in enumerate(filtered_characters):
    g = f.get_glyph(ord(c))
    start_bit = x * tile_x + g["bounds"][2]
    start_y = (tile_y - 2) - (g["bounds"][1] + g["bounds"][3])
    for y, row in enumerate(g["bitmap"].rows):
        for i in range(g["bounds"][0]):
            byte = i // 8
            bit = i % 8
            if row[byte] & (1 << (7 - bit)) != 0:
                overall_bit = start_bit + (start_y + y) * bytes_per_row * 8 + i
                b[overall_bit // 8] |= 1 << (7 - (overall_bit % 8))

extra_characters = ""
for c in filtered_characters:
    if c not in visible_ascii:
        extra_characters += c

c_file = args.output_c_file

c_file.write(
    """\
#include "shared-bindings/displayio/Palette.h"
#include "supervisor/shared/display.h"

"""
)

c_file.write(
    """\
_displayio_color_t terminal_colors[2] = {
    { .rgb888 = 0x000000, .rgb565 = 0x0000, .luma = 0x00, .chroma = 0 },
    { .rgb888 = 0xffffff, .rgb565 = 0xffff, .luma = 0xff, .chroma = 0 },
};

displayio_palette_t supervisor_terminal_color = {
    .base = {.type = &displayio_palette_type },
    .colors = terminal_colors,
    .color_count = 2,
    .needs_refresh = false
};
"""
)

c_file.write(
    """\
displayio_tilegrid_t supervisor_terminal_text_grid = {{
    .base = {{ .type = &displayio_tilegrid_type }},
    .bitmap = (displayio_bitmap_t*) &supervisor_terminal_font_bitmap,
    .pixel_shader = &supervisor_terminal_color,
    .x = 16,
    .y = 0,
    .pixel_width = {1},
    .pixel_height = {2},
    .bitmap_width_in_tiles = {0},
    .tiles_in_bitmap = {0},
    .width_in_tiles = 1,
    .height_in_tiles = 1,
    .tile_width = {1},
    .tile_height = {2},
    .tiles = NULL,
    .partial_change = false,
    .full_change = false,
    .hidden = false,
    .hidden_by_parent = false,
    .moved = false,
    .inline_tiles = false,
    .in_group = true
}};
""".format(
        len(all_characters), tile_x, tile_y
    )
)

c_file.write(
    """\
const uint32_t font_bitmap_data[{}] = {{
""".format(
        bytes_per_row * tile_y // 4
    )
)

for i, word in enumerate(struct.iter_unpack(">I", b)):
    c_file.write("0x{:08x}, ".format(word[0]))
    if (i + 1) % (bytes_per_row // 4) == 0:
        c_file.write("\n")

c_file.write(
    """\
};
"""
)

c_file.write(
    """\
displayio_bitmap_t supervisor_terminal_font_bitmap = {{
    .base = {{.type = &displayio_bitmap_type }},
    .width = {},
    .height = {},
    .data = (size_t*) font_bitmap_data,
    .stride = {},
    .bits_per_value = 1,
    .x_shift = 5,
    .x_mask = 0x1f,
    .bitmask = 0x1,
    .read_only = true
}};
""".format(
        len(all_characters) * tile_x, tile_y, bytes_per_row // 4
    )
)

c_file.write(
    """\
const fontio_builtinfont_t supervisor_terminal_font = {{
    .base = {{.type = &fontio_builtinfont_type }},
    .bitmap = &supervisor_terminal_font_bitmap,
    .width = {},
    .height = {},
    .unicode_characters = (const uint8_t*) "{}",
    .unicode_characters_len = {}
}};
""".format(
        tile_x, tile_y, extra_characters, len(extra_characters.encode("utf-8"))
    )
)

c_file.write(
    """\
terminalio_terminal_obj_t supervisor_terminal = {
    .base = { .type = &terminalio_terminal_type },
    .font = &supervisor_terminal_font,
    .cursor_x = 0,
    .cursor_y = 0,
    .tilegrid = &supervisor_terminal_text_grid
};
"""
)
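
# --- Invocation sketch (illustration only, not part of the original file) ---
# The script is driven entirely by argparse, so a typical invocation looks
# roughly like the following; the script and file names are placeholders,
# and only --font and --output_c_file are required:
#
#   python gen_display_resources.py \
#       --font ../../tools/fonts/terminal.bdf \
#       --sample_file sample_strings.txt \
#       --output_c_file autogen_display_resources.c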
the-stack_106_31043
""" Author: Matheus Felinto Description: A simple electronic circuit simulator """ import sys import numpy as np from lib.netlist import NetList from lib import components if __name__ == "__main__": netlist = NetList(sys.argv[1]) netlist.read_netlist() nodes_number, auxiliary_equations_number = netlist.define_matrix_range() N = nodes_number + auxiliary_equations_number + 1 admittance_matrix = np.zeros((N, N), dtype=complex) current_vector = np.zeros(N, dtype=complex) frequency = 0 if netlist.lines[-1].split()[0].upper() == ".SIN": frequency = float(netlist.lines[-1].split()[1]) auxiliary_elements = components.create_component_stamps(netlist.lines, admittance_matrix, current_vector, nodes_number, frequency) admittance_matrix = np.delete(np.delete(admittance_matrix, 0, 0), 0, 1) current_vector = np.delete(current_vector, 0, 0) nodes_voltage = np.linalg.solve(admittance_matrix, current_vector) print(""" _____ _ _ _ _ _ |_ _| |__ ___ ___(_)_ __ ___ _ _(_) |_ __ ____ _| |_ _ ___ ___ | | | '_ \ / _ \ / __| | '__/ __| | | | | __| \ \ / / _` | | | | |/ _ \/ __| | | | | | | __/ | (__| | | | (__| |_| | | |_ \ V / (_| | | |_| | __/\__ \\ |_| |_| |_|\___| \___|_|_| \___|\__,_|_|\__| \_/ \__,_|_|\__,_|\___||___/ __ _ _ __ ___ _ / _` | '__/ _ (_) | (_| | | | __/_ \__,_|_| \___(_) """) if netlist.lines[-1].split()[0].upper() == ".DC": for index in range(1, len(nodes_voltage) + 1): print(f"{f'node({index})' if index <= nodes_number else f'current({auxiliary_elements[index - nodes_number - 1]})'} = {nodes_voltage[index - 1].real:.3f}") elif netlist.lines[-1].split()[0].upper() == ".SIN": for index in range(1, len(nodes_voltage) + 1): print(f"{f'node({index})' if index <= nodes_number else f'current({auxiliary_elements[index - nodes_number - 1]})'} = {nodes_voltage[index - 1].real:.3f} Cos({frequency}t) + {-nodes_voltage[index - 1].imag:.3f} Sin({frequency}t)")
the-stack_106_31044
import urllib2
import hashlib
import tarfile
import random
import string
import sys
import os
import logging
import json
import socket
import shutil
import errno
import datetime as dt

import retry


INFRASTRUCTURE_ERROR = 12


def make_user_agent():
    return 'fetch_from: {host}'.format(host=socket.gethostname())


def add_common_arguments(parser):
    parser.add_argument('--copy-to')  # used by jbuild in fetch_resource
    parser.add_argument('--rename-to')  # used by test_node in inject_mds_resource_to_graph
    parser.add_argument('--copy-to-dir')
    parser.add_argument('--untar-to')
    parser.add_argument('--rename', action='append', default=[], metavar='FILE',
                        help='rename FILE to the corresponding output')
    parser.add_argument('--executable', action='store_true', help='make outputs executable')
    parser.add_argument('--log-path')
    parser.add_argument('outputs', nargs='*')


def ensure_dir(path):
    if not (path == '' or os.path.isdir(path)):
        os.makedirs(path)


def hardlink_or_copy(src, dst):
    ensure_dir(os.path.dirname(dst))

    if os.name == 'nt':
        shutil.copy(src, dst)
    else:
        try:
            os.link(src, dst)
        except OSError as e:
            if e.errno == errno.EEXIST:
                return
            elif e.errno == errno.EXDEV:
                sys.stderr.write("Can't make cross-device hardlink - fallback to copy: {} -> {}\n".format(src, dst))
                shutil.copy(src, dst)
            else:
                raise


def rename_or_copy_and_remove(src, dst):
    ensure_dir(os.path.dirname(dst))

    try:
        os.rename(src, dst)
    except OSError:
        shutil.copy(src, dst)
        os.remove(src)


class BadChecksumFetchError(Exception):
    pass


class IncompleteFetchError(Exception):
    pass


class ResourceUnpackingError(Exception):
    pass


class ResourceIsDirectoryError(Exception):
    pass


class OutputIsDirectoryError(Exception):
    pass


class OutputNotExistError(Exception):
    pass


def setup_logging(args, base_name):
    def makedirs(path):
        try:
            os.makedirs(path)
        except OSError:
            pass

    if args.log_path:
        log_file_name = args.log_path
    else:
        log_file_name = base_name + ".log"

    args.abs_log_path = os.path.abspath(log_file_name)
    makedirs(os.path.dirname(args.abs_log_path))
    logging.basicConfig(filename=args.abs_log_path, level=logging.DEBUG)


def is_temporary(e):
    return isinstance(e, (BadChecksumFetchError, IncompleteFetchError,
                          urllib2.URLError, socket.timeout, socket.error))


def uniq_string_generator(size=6, chars=string.ascii_lowercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))


def report_to_snowden(value):
    def inner():
        body = {
            'namespace': 'ygg',
            'key': 'fetch-from-sandbox',
            'value': json.dumps(value),
        }

        urllib2.urlopen(
            'https://back-snowden.qloud.yandex-team.ru/report/add',
            json.dumps([body, ]),
            timeout=5,
        )

    try:
        inner()
    except Exception as e:
        logging.error(e)


def copy_stream(read, *writers, **kwargs):
    chunk_size = kwargs.get('size', 1024 * 1024)
    while True:
        data = read(chunk_size)
        if not data:
            break
        for write in writers:
            write(data)


def md5file(fname):
    res = hashlib.md5()
    with open(fname, 'rb') as f:
        copy_stream(f.read, res.update)
    return res.hexdigest()


def git_like_hash_with_size(filepath):
    """Calculate git like hash for path."""
    sha = hashlib.sha1()
    file_size = 0

    with open(filepath, 'rb') as f:
        while True:
            block = f.read(2 ** 16)
            if not block:
                break
            file_size += len(block)
            sha.update(block)

    sha.update('\0')
    sha.update(str(file_size))

    return sha.hexdigest(), file_size


def size_printer(display_name, size):
    sz = [0]
    last_stamp = [dt.datetime.now()]

    def printer(chunk):
        sz[0] += len(chunk)
        now = dt.datetime.now()
        if last_stamp[0] + dt.timedelta(seconds=10) < now:
            if size:
                print >>sys.stderr, "##status##{} - [[imp]]{:.1f}%[[rst]]".format(display_name, 100.0 * sz[0] / size)
            last_stamp[0] = now

    return printer


def fetch_url(url, unpack, resource_file_name, expected_md5=None, expected_sha1=None, tries=10):
    logging.info('Downloading from url %s name %s and expected md5 %s', url, resource_file_name, expected_md5)
    tmp_file_name = uniq_string_generator()

    request = urllib2.Request(url, headers={'User-Agent': make_user_agent()})
    req = retry.retry_func(lambda: urllib2.urlopen(request, timeout=30), tries=tries, delay=5, backoff=1.57079)
    logging.debug('Headers: %s', req.headers.headers)
    expected_file_size = int(req.headers['Content-Length'])
    real_md5 = hashlib.md5()
    real_sha1 = hashlib.sha1()

    with open(tmp_file_name, 'wb') as fp:
        copy_stream(req.read, fp.write, real_md5.update, real_sha1.update,
                    size_printer(resource_file_name, expected_file_size))

    real_md5 = real_md5.hexdigest()
    real_file_size = os.path.getsize(tmp_file_name)
    real_sha1.update('\0')
    real_sha1.update(str(real_file_size))
    real_sha1 = real_sha1.hexdigest()

    if unpack:
        tmp_dir = tmp_file_name + '.dir'
        os.makedirs(tmp_dir)
        with tarfile.open(tmp_file_name, mode="r|gz") as tar:
            tar.extractall(tmp_dir)
        tmp_file_name = os.path.join(tmp_dir, resource_file_name)
        real_md5 = md5file(tmp_file_name)

    logging.info('File size %s (expected %s)', real_file_size, expected_file_size)
    logging.info('File md5 %s (expected %s)', real_md5, expected_md5)
    logging.info('File sha1 %s (expected %s)', real_sha1, expected_sha1)

    if expected_md5 and real_md5 != expected_md5:
        report_to_snowden({'headers': req.headers.headers, 'expected_md5': expected_md5, 'real_md5': real_md5})

        raise BadChecksumFetchError(
            'Downloaded {}, but expected {} for {}'.format(
                real_md5,
                expected_md5,
                url,
            )
        )

    if expected_sha1 and real_sha1 != expected_sha1:
        report_to_snowden({'headers': req.headers.headers, 'expected_sha1': expected_sha1, 'real_sha1': real_sha1})

        raise BadChecksumFetchError(
            'Downloaded {}, but expected {} for {}'.format(
                real_sha1,
                expected_sha1,
                url,
            )
        )

    if expected_file_size != real_file_size:
        report_to_snowden({'headers': req.headers.headers, 'file_size': real_file_size})

        raise IncompleteFetchError(
            'Downloaded {}, but expected {} for {}'.format(
                real_file_size,
                expected_file_size,
                url,
            )
        )

    return tmp_file_name


def process(fetched_file, file_name, args, remove=True):
    assert len(args.rename) <= len(args.outputs), (
        'too few outputs to rename', args.rename, 'into', args.outputs)

    if not os.path.isfile(fetched_file):
        raise ResourceIsDirectoryError('Resource must be a file, not a directory: %s' % fetched_file)

    if args.copy_to:
        hardlink_or_copy(fetched_file, args.copy_to)
        if not args.outputs:
            args.outputs = [args.copy_to]

    if args.rename_to:
        args.rename.append(fetched_file)
        if not args.outputs:
            args.outputs = [args.rename_to]

    if args.copy_to_dir:
        hardlink_or_copy(fetched_file, os.path.join(args.copy_to_dir, file_name))

    if args.untar_to:
        ensure_dir(args.untar_to)
        try:
            with tarfile.open(fetched_file, mode='r:*') as tar:
                tar.extractall(args.untar_to)
        except tarfile.ReadError as e:
            logging.exception(e)
            raise ResourceUnpackingError('File {} cannot be untared'.format(fetched_file))

    for src, dst in zip(args.rename, args.outputs):
        if src == 'RESOURCE':
            src = fetched_file
        if os.path.abspath(src) == os.path.abspath(fetched_file):
            logging.info('Copying %s to %s', src, dst)
            hardlink_or_copy(src, dst)
        else:
            logging.info('Renaming %s to %s', src, dst)
            if remove:
                rename_or_copy_and_remove(src, dst)
            else:
                shutil.copy(src, dst)

    for path in args.outputs:
        if not os.path.exists(path):
            raise OutputNotExistError('Output does not exist: %s' % os.path.abspath(path))

        if not os.path.isfile(path):
            raise OutputIsDirectoryError('Output must be a file, not a directory: %s' % os.path.abspath(path))

        if args.executable:
            os.chmod(path, os.stat(path).st_mode | 0o111)

        if os.path.abspath(path) == os.path.abspath(fetched_file):
            remove = False

    if remove:
        os.remove(fetched_file)
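
# --- Usage sketch (illustration only, not part of the original file) ---
# A typical caller in the same toolchain wires these helpers together
# roughly like this; the URL, checksum and file names are placeholders,
# and argument parsing is reduced to the common options defined above:
#
#   import argparse
#   import fetch_from
#
#   parser = argparse.ArgumentParser()
#   fetch_from.add_common_arguments(parser)
#   args = parser.parse_args()
#   fetch_from.setup_logging(args, "fetch_example")
#
#   fetched = fetch_from.fetch_url(
#       "https://example.com/resource.tar.gz", False, "resource.tar.gz",
#       expected_md5="d41d8cd98f00b204e9800998ecf8427e")
#   fetch_from.process(fetched, "resource.tar.gz", args)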
the-stack_106_31045
import os
import json

from flask import render_template, g, session, redirect, url_for, request
# noinspection PyPackageRequirements
from bson.objectid import ObjectId

from app import app, app_mongo, cdn_theme_url, app_redis
from views.navigation import Navigation
from views.auth import auth
from views.jump_freighter import jf
from views.admin import admin
from views.account import account
from views.corp import corp
from views.fittings import fittings
from views.buyback import buyback
from views.ordering import ordering
from views.security import security
from views.recruitment import recruitment
from views.auth import requires_sso, auth_check
# noinspection PyUnresolvedReferences
from views import api  # Attaches API module

app.register_blueprint(auth, url_prefix="/auth")
app.register_blueprint(jf, url_prefix="/jf")
app.register_blueprint(admin, url_prefix="/admin")
app.register_blueprint(account, url_prefix="/account")
app.register_blueprint(corp, url_prefix="/corp")
app.register_blueprint(fittings, url_prefix="/fittings")
app.register_blueprint(buyback, url_prefix="/buyback")
app.register_blueprint(ordering, url_prefix="/ordering")
app.register_blueprint(security, url_prefix="/security")
app.register_blueprint(recruitment, url_prefix="/recruitment")

Navigation(app)


@app.before_first_request
def app_init():
    # Check if stations are loaded
    db_check_stations = app_mongo.db.stations.find_one({"_id": 60003760})  # Use Jita as check
    if not db_check_stations:
        # Load statics into memory
        with open("resources/staStations.json", "r") as staStations_file:
            stations_list = json.load(staStations_file)
        app_mongo.db.stations.insert([{"_id": int(key), "name": value} for key, value in stations_list.items()])

    # Refresh Items
    app_mongo.db.items.drop()
    with open("resources/invTypes.json", "r") as invTypes_file:
        items_list = json.load(invTypes_file)
    # Volumes by market group
    with open("resources/invPackaged.json", "r") as invPackaged_file:
        package_list = json.load(invPackaged_file)
    # Fallback packed volumes of ships
    with open("resources/invVolumes.json", "r") as invVolumes_file:
        volumes_list = json.load(invVolumes_file)
    # Open refine amounts
    with open("resources/invTypeMaterials.json", "r") as invTypesMaterials_file:
        materials_list = json.load(invTypesMaterials_file)

    adjusted_items_list = []
    for key, value in items_list.items():
        if package_list.get(str(value["ship_group_id"])):
            # Adjust for strategic cruisers
            if value["name"] in ["Legion", "Tengu", "Proteus", "Loki"]:
                corrected_volume = 5000
            else:
                corrected_volume = package_list.get(str(value["ship_group_id"]))
        else:
            corrected_volume = volumes_list[key] if volumes_list.get(key) else value["volume"]
        adjusted_items_list.append({"_id": int(key), "name": value["name"],
                                    "volume": corrected_volume, "meta": value["meta"],
                                    "materials": materials_list.get(key, []),
                                    "market_group_id": value["market_group_id"],
                                    "skill_id": value["skill_id"],
                                    "batch": value["batch"],
                                    "ship_group_id": value["ship_group_id"]})
    app_mongo.db.items.insert(adjusted_items_list)

    # Check if roles are loaded
    app_mongo.db.eve_auth.update({"_id": "super_admin"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "jf_admin"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "jf_pilot"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "user_admin"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "fittings_admin"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "buyback_admin"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "ordering_admin"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "ordering_marketeer"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "security_officer"}, {"$setOnInsert": {"users": []}}, upsert=True)
    app_mongo.db.eve_auth.update({"_id": "recruiter"}, {"$setOnInsert": {"users": []}}, upsert=True)


@app.before_request
def db_init():
    g.mongo = app_mongo
    g.redis = app_redis
    if request.path not in ["/settings"] and not any([
        request.path.endswith(".js"),
        request.path.endswith(".css"),
        request.path.endswith(".ico"),
        request.path.endswith(".png"),
    ]):
        session["prev_path"] = request.path

    # Check css
    if session.get("default_css", True):
        app.extensions['bootstrap']['cdns']["theme"].baseurl = cdn_theme_url
    else:
        cdn_theme_alt_url = "https://maxcdn.bootstrapcdn.com/bootswatch/3.3.5/sandstone/"
        app.extensions['bootstrap']['cdns']["theme"].baseurl = cdn_theme_alt_url

    if os.environ.get("maintenance") == "True":
        return render_template("maintenance.html")


@app.teardown_request
def cleanup(exception=None):
    if exception:
        print("Error: ", exception)


@app.route('/')
def home():
    with open("configs/base.json", "r") as base_config_file:
        base_config = json.load(base_config_file)
    return render_template("index.html", forum_url=base_config["forum_url"])


@app.route("/settings")
def settings():
    session.setdefault("default_css", True)
    session["default_css"] = False if session.get("default_css") else True
    if session.get("CharacterOwnerHash"):
        return redirect(session.get("prev_path", url_for("account.home")))
    else:
        return redirect(session.get("prev_path", url_for("home")))


@requires_sso(None)
@app.route("/issues", methods=["GET", "POST"])
def issues():
    editor = auth_check("user_admin")
    if request.form.get("action") == "submit":
        g.mongo.db.issues.insert({
            "submitter": session["CharacterName"],
            "issue": request.form.get("issue").strip()
        })
    elif request.form.get("action") == "delete":
        if editor:
            g.mongo.db.issues.remove({"_id": ObjectId(request.form.get("id"))})
        else:
            g.mongo.db.issues.remove({"_id": ObjectId(request.form.get("id")),
                                      "submitter": session["CharacterName"]})

    issue_list = []
    for db_issue in g.mongo.db.issues.find():
        timestamp = ObjectId(db_issue["_id"]).generation_time.strftime("%Y-%m-%d %H:%M:%S")
        can_delete = True if editor or session["CharacterName"] == db_issue["submitter"] else False
        issue_list.append([timestamp, db_issue["issue"], db_issue["submitter"], can_delete, db_issue["_id"]])

    return render_template("issues.html", issue_list=issue_list)


# noinspection PyUnusedLocal
@app.errorhandler(404)
def error_missing(exception):
    error_message = "This page cannot be found."
    return render_template("error.html", error_code=404, error_message=error_message), 404


# noinspection PyUnusedLocal
@app.errorhandler(403)
def error_unauthorized(exception):
    error_message = "You are not authorized to view this page. Ensure you have the correct permissions."
    return render_template("error.html", error_code=403, error_message=error_message), 403


# noinspection PyUnusedLocal
@app.errorhandler(500)
def error_crash(exception):
    error_message = "This page has crashed due to an exception. Contact Kazuki Ishikawa and submit a bug report."
    return render_template("error.html", error_code=500, error_message=error_message), 500


if not os.environ.get("EXTERNAL") and __name__ == "__main__":
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'
    os.environ["maintenance"] = 'False'

    @app.route('/test')
    def test():
        g.redis.publish('titdev-test', 'Look at this. Very " \'cool # message;. ')
        g.redis.publish('titdev-marketeer', 'This is a test of the emergency annoyance system.')
        return render_template("base.html")

    profile = False
    # Profiling
    if profile:
        from werkzeug.contrib.profiler import ProfilerMiddleware
        app.config["PROFILE"] = True
        app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])

    app.debug = True
    app.run(host="0.0.0.0")
the-stack_106_31046
#!/usr/bin/env python
"""This plugin renders the client search page."""

import urllib

from grr.gui import renderers
from grr.gui.plugins import forms
from grr.gui.plugins import semantic
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.aff4_objects import users as aff4_users


class NotificationCount(renderers.TemplateRenderer):
  """Display the number of outstanding notifications."""

  def RenderAjax(self, request, response):
    """Return the count on unseen notifications."""
    response = super(NotificationCount, self).RenderAjax(request, response)
    number = 0

    try:
      user_fd = aff4.FACTORY.Open(
          aff4.ROOT_URN.Add("users").Add(request.user), token=request.token)
      notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)
      if notifications:
        number = len(notifications)
    except IOError:
      pass

    return renderers.JsonResponse(dict(number=number))


class NotificationBar(renderers.TemplateRenderer):
  """Render a notification bar for the user."""

  layout_template = renderers.Template("""
<div id="notification_dialog" class="modal wide-modal" tabindex="-1"
  role="dialog" aria-hidden="true">
  <div class="modal-dialog">
    <div class="modal-content">
      <div class="modal-header">
        <button type="button" class="close" data-dismiss="modal"
          aria-hidden="true">x</button>
        <h3>Notifications for {{this.user|escape}}</h3>
      </div>
      <div class="modal-body" id="notification_dialog_body">
      </div>
      <div class="modal-footer">
        <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">
          Close
        </button>
      </div>
    </div>
  </div>
</div>

<div id="user_settings_dialog" class="modal" tabindex="-1"
  role="dialog" aria-hidden="true">
</div>

<ul class="nav pull-left">
  <li><p class="navbar-text">User: {{this.user|escape}}</p></li>
</ul>

<div id="notifications_and_settings" class="pull-right navbar-form">
  <button id="notification_button" class="btn btn-info"
    data-toggle="modal" data-target="#notification_dialog"
    style="margin-right: 10px" />
</div>
""")

  def Layout(self, request, response):
    """Show the number of notifications outstanding for the user."""
    self.user = request.user
    response = super(NotificationBar, self).Layout(request, response)
    return self.CallJavascript(response, "Layout")


class UpdateSettingsFlow(flow.GRRFlow):
  """Update the User's GUI settings."""

  # This flow can run without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  args_type = aff4_users.GUISettings

  @flow.StateHandler()
  def Start(self):
    with aff4.FACTORY.Create(
        aff4.ROOT_URN.Add("users").Add(self.token.username),
        aff4_type="GRRUser", mode="w",
        token=self.token) as user_fd:
      user_fd.Set(user_fd.Schema.GUI_SETTINGS(self.args))


class UserSettingsDialog(renderers.ConfirmationDialogRenderer):
  """Dialog that allows user to change his settings."""

  header = "Settings"
  proceed_button_title = "Apply"

  content_template = renderers.Template("""
{{this.user_settings_form|safe}}
""")

  ajax_template = renderers.Template("""
Settings were successfully updated. Reloading...
""")

  def GetUserSettings(self, request):
    try:
      user_record = aff4.FACTORY.Open(
          aff4.ROOT_URN.Add("users").Add(request.user), "GRRUser",
          token=request.token)

      return user_record.Get(user_record.Schema.GUI_SETTINGS)
    except IOError:
      return aff4.GRRUser.SchemaCls.GUI_SETTINGS()

  def Layout(self, request, response):
    user_settings = self.GetUserSettings(request)
    self.user_settings_form = forms.SemanticProtoFormRenderer(
        proto_obj=user_settings, prefix="settings").RawHTML(request)

    return super(UserSettingsDialog, self).Layout(request, response)

  def RenderAjax(self, request, response):
    """Ajax handler for this renderer."""
    settings = forms.SemanticProtoFormRenderer(
        proto_obj=aff4_users.GUISettings(),
        prefix="settings").ParseArgs(request)

    flow.GRRFlow.StartFlow(flow_name="UpdateSettingsFlow",
                           args=settings, token=request.token)

    response = self.RenderFromTemplate(self.ajax_template, response,
                                       unique=self.unique)
    return self.CallJavascript(response, "RenderAjax")


class ResetUserNotifications(flow.GRRFlow):
  """A flow to reset user's notifications."""

  # This flow can run without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  @flow.StateHandler()
  def Start(self):
    user_fd = aff4.FACTORY.Open(
        aff4.ROOT_URN.Add("users").Add(self.token.username),
        aff4_type="GRRUser", mode="rw", token=self.token)
    user_fd.ShowNotifications(reset=True)


class ViewNotifications(renderers.TableRenderer):
  """Render the notifications for the user."""

  target_template = renderers.Template("""
<a href="/#{{hash|escape}}" target_hash="{{hash|escape}}"
   notification_type="{{notification_type|escape}}">{{target}}</span>""")

  layout_template = renderers.TableRenderer.layout_template

  def __init__(self, **kwargs):
    renderers.TableRenderer.__init__(self, **kwargs)

    self.AddColumn(semantic.RDFValueColumn("Timestamp"))
    self.AddColumn(semantic.RDFValueColumn("Message", width="100%"))
    self.AddColumn(semantic.RDFValueColumn("Target"))

  def Layout(self, request, response):
    response = super(ViewNotifications, self).Layout(request, response)
    return self.CallJavascript(response, "ViewNotifications.Layout")

  def BuildTable(self, start_row, end_row, request):
    """Add all the notifications to this table."""
    row_index = 0
    search_term = request.REQ.get("sSearch")

    # We modify this object by changing the notification from pending to
    # shown.
    try:
      user_fd = aff4.FACTORY.Open(
          aff4.ROOT_URN.Add("users").Add(request.user),
          aff4_type="GRRUser", token=request.token)
    except IOError:
      return

    # Hack for sorting. Requires retrieval of all notifications.
    notifications = list(user_fd.ShowNotifications(reset=False))
    for notification in sorted(notifications, key=lambda x: x.timestamp,
                               reverse=True):
      if row_index < start_row:
        continue
      if row_index > end_row:
        break

      if (search_term and
          search_term.lower() not in notification.message.lower()):
        continue

      row = {"Message": notification.message,
             "Target": self.FormatFromTemplate(
                 self.target_template,
                 hash=self.BuildHashFromNotification(notification),
                 notification_type=notification.type,
                 target=notification.subject),
             "Timestamp": rdfvalue.RDFDatetime(notification.timestamp)}
      self.AddRow(row, row_index)
      row_index += 1

    flow.GRRFlow.StartFlow(flow_name="ResetUserNotifications",
                           token=request.token)

  @staticmethod
  def BuildHashFromNotification(notification):
    """Navigate to the most appropriate location for this navigation."""
    h = {}

    # Still display if subject doesn't get set, this will appear in the GUI
    # with a target of "None"
    urn = "/"
    if notification.subject is not None:
      urn = notification.subject

    # General Host information
    if notification.type == "Discovery":
      path = rdfvalue.RDFURN(urn)
      components = path.Path().split("/")[1:]
      h["c"] = components[0]
      h["main"] = "HostInformation"

    elif notification.type == "DownloadFile":
      h["aff4_path"] = notification.subject
      h["main"] = "DownloadFile"

    elif notification.type == "ViewObject":
      path = rdfvalue.RDFURN(urn)
      components = path.Path().split("/")[1:]
      if len(components) == 2 and components[0] == "hunts":
        h["hunt_id"] = notification.subject
        h["main"] = "ManageHunts"
      elif len(components) == 2 and components[0] == "cron":
        h["cron_job_urn"] = notification.subject
        h["main"] = "ManageCron"
      elif len(components) == 3 and components[1] == "flows":
        h["flow"] = notification.subject
        h["c"] = components[0]
        h["main"] = "ManageFlows"
      else:
        h["c"] = components[0]
        h["aff4_path"] = notification.subject
        h["t"] = renderers.DeriveIDFromPath("/".join(components[1:-1]))
        h["main"] = "VirtualFileSystemView"

    # Error with a flow
    elif notification.type == "FlowStatus":
      path = rdfvalue.RDFURN(urn)
      components = path.Path().split("/")[1:]
      h["flow"] = notification.source
      h["c"] = components[0]
      h["main"] = "ManageFlows"

    elif notification.type == "GrantAccess":
      h["main"] = "GrantAccess"
      h["acl"] = notification.subject

    return urllib.urlencode(
        dict([(x, utils.SmartStr(y)) for x, y in h.items()]))
the-stack_106_31047
""" Web Steps Steps file for web interactions with Selenium For information on Waiting until elements are present in the HTML see: https://selenium-python.readthedocs.io/waits.html """ import logging from behave import when, then from compare import expect, ensure from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.ui import Select from selenium.webdriver.support import expected_conditions ID_PREFIX = 'customer_' @when('I visit the "home page"') def step_impl(context): """ Make a call to the base URL """ context.driver.get(context.base_url) # Uncomment next line to take a screenshot of the web page #context.driver.save_screenshot('home_page.png') @when('I visit the "Address Page"') def step_impl(context): """ Make a call to the address sub-url""" context.driver.get(context.base_url+'/address') @then('I should see "{message}" in the title') def step_impl(context, message): """ Check the document title for a message """ expect(context.driver.title).to_contain(message) @then('I should not see "{message}"') def step_impl(context, message): error_msg = "I should not see '%s' in '%s'" % (message, context.resp.text) ensure(message in context.resp.text, False, error_msg) @when('I set the "{element_name}" to "{text_string}"') def step_impl(context, element_name, text_string): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') element = context.driver.find_element_by_id(element_id) element.clear() element.send_keys(text_string) @when('I select "{text}" in the "{element_name}" dropdown') def step_impl(context, text, element_name): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') element = Select(context.driver.find_element_by_id(element_id)) element.select_by_visible_text(text) @then('I should see "{text}" in the "{element_name}" dropdown') def step_impl(context, text, element_name): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') element = Select(context.driver.find_element_by_id(element_id)) expect(element.first_selected_option.text).to_equal(text) @then('the "{element_name}" field should be empty') def step_impl(context, element_name): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') element = context.driver.find_element_by_id(element_id) expect(element.get_attribute('value')).to_be(u'') ################################################################## # These two function simulate copy and paste ################################################################## @when('I copy the "{element_name}" field') def step_impl(context, element_name): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') element = WebDriverWait(context.driver, context.WAIT_SECONDS).until( expected_conditions.presence_of_element_located((By.ID, element_id)) ) context.clipboard = element.get_attribute('value') logging.info('Clipboard contains: %s', context.clipboard) @when('I copy the "ID" field from the results') def step_impl(context): value = context.driver.find_element_by_xpath("/html/body/div[@class='container']/div[@id='search_results']/table[@class='table table-striped']/tbody/tr[@id='row_0']/td[1]").text context.clipboard = value logging.info("Clipboard contains: %s", context.clipboard) @when('I copy the "Customer ID" field from the results') def step_impl(context): value = context.driver.find_element_by_xpath("/html/body/div[@class='container']/div[@id='search_results']/table[@class='table table-striped']/tbody/tr[@id='row_0']/td[2]").text context.clipboard = value 
logging.info("Clipboard contains: %s", context.clipboard) @when('I paste the "{element_name}" field') def step_impl(context, element_name): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') element = WebDriverWait(context.driver, context.WAIT_SECONDS).until( expected_conditions.presence_of_element_located((By.ID, element_id)) ) element.clear() element.send_keys(context.clipboard) ################################################################## # This code works because of the following naming convention: # The buttons have an id in the html hat is the button text # in lowercase followed by '-btn' so the Clean button has an id of # id='clear-btn'. That allows us to lowercase the name and add '-btn' # to get the element id of any button ################################################################## @when('I press the "{button}" button') def step_impl(context, button): button_id = button.lower() + '-btn' context.driver.find_element_by_id(button_id).click() @then('I should see "{name}" in the results') def step_impl(context, name): found = WebDriverWait(context.driver, context.WAIT_SECONDS).until( expected_conditions.text_to_be_present_in_element( (By.ID, 'search_results'), name ) ) expect(found).to_be(True) @then('I should not see "{name}" in the results') def step_impl(context, name): element = context.driver.find_element_by_id('search_results') error_msg = "I should not see '%s' in '%s'" % (name, element.text) ensure(name in element.text, False, error_msg) @then('I should see the message "{message}"') def step_impl(context, message): found = WebDriverWait(context.driver, context.WAIT_SECONDS).until( expected_conditions.text_to_be_present_in_element( (By.ID, 'flash_message'), message ) ) expect(found).to_be(True) ################################################################## # This code works because of the following naming convention: # The id field for text input in the html is the element name # prefixed by ID_PREFIX so the Name field has an id='customer_name' # We can then lowercase the name and prefix with customer_ to get the id ################################################################## @then('I should see "{text_string}" in the "{element_name}" field') def step_impl(context, text_string, element_name): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') found = WebDriverWait(context.driver, context.WAIT_SECONDS).until( expected_conditions.text_to_be_present_in_element_value( (By.ID, element_id), text_string ) ) expect(found).to_be(True) @when('I change "{element_name}" to "{text_string}"') def step_impl(context, element_name, text_string): element_id = ID_PREFIX + element_name.lower().replace(' ', '_') element = WebDriverWait(context.driver, context.WAIT_SECONDS).until( expected_conditions.presence_of_element_located((By.ID, element_id)) ) element.clear() element.send_keys(text_string)
the-stack_106_31049
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information

import datetime
from dictionary_object import DictionaryObject


class LonghaulConfig(DictionaryObject):
    def __init__(self):
        super(LonghaulConfig, self).__init__()
        self.timeout_interval = datetime.timedelta(minutes=5)

        self.longhaul_total_duration = datetime.timedelta()
        self.longhaul_property_update_interval = datetime.timedelta(seconds=10)
        self.longhaul_telemetry_interval = datetime.timedelta(seconds=10)

        self.longhaul_d2c_enabled = False
        self.longhaul_d2c_interval_length = 1
        self.longhaul_d2c_ops_per_interval = 10
        self.longhaul_d2c_count_failures_allowed = 0

        self.lock_attributes()


LonghaulConfig._defaults = LonghaulConfig()
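
# --- Usage sketch (illustration only, not part of the original file) ---
# LonghaulConfig is a locked DictionaryObject, so a test harness would
# typically take the defaults and override a few existing fields before a
# run; the values below are arbitrary examples:
#
#   config = LonghaulConfig()
#   config.longhaul_total_duration = datetime.timedelta(hours=8)
#   config.longhaul_d2c_enabled = True
#   config.longhaul_d2c_ops_per_interval = 20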