Why does it not look in the Project1 folder for the Login.aspx?\n\nA:\n\nYou need to change to this (add slash at the beginning of url):\n\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29562,"cells":{"text":{"kind":"string","value":"Q:\n\nWill using UUID to as list keys cause unnecessary re-renders in React?\n\nI have a list of items that doesn't contain enough data to generate a unique key. If I use the uuid library to generate an ID, will a single item change also causes the other items to re-render since their key will change each time?\nconst people = [\n  {\n    gender: 'male',\n    firstName: 'david',\n  },\n  {\n    gender: 'male',\n    firstName: 'david',\n  },\n  {\n    gender: 'male',\n    firstName: 'joe',\n  },\n]\n\nconst renderPeople = () => {\n  return people.map(person => {\n    return (\n
      <div key={uuid.v4()}>
        <div>
          {person.gender}
        </div>
        <div>
          {person.firstName}
        </div>
      </div>
\n )\n })\n}\n\nsome time later... one of the davids changed\nconst people = [\n {\n gender: 'male',\n firstName: 'david',\n },\n {\n gender: 'male',\n firstName: 'davidzz',\n },\n {\n gender: 'male',\n firstName: 'joe',\n },\n]\n\nA:\n\n
<div key={uuid.v4()}> assigns new key for each <div>
every time, so it is useless.\nIf the array stays same, UUID should be generated on array creation.\nIf the array changes, e.g. received from HTTP request, UUIDs for elements with same content will be generated again.\nIn order to avoid that, key should be specific to person entity. It's always preferable to use internal identifiers (database IDs) where available for unambiguity.\nIf identifiers are not available, key may depend on element contents:\nreturn (\n
  <div key={JSON.stringify(person)}>
    <div>
      {person.gender}
    </div>
    <div>
      {person.firstName}
    </div>
  </div>
\n)\n\nIt's more efficient to hash elements once at the time when an array is created, e.g. with uuid:\nimport uuidv3 from 'uuid/v3';\n...\n\nfor (const person of people) {\n person.key = uuidv3(JSON.stringify(person), uuidv3.URL);\n}\n\nOr use dedicated hashing function like object-hash.\nNotice that hashing may result in key collisions if there are elements with same contents.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29563,"cells":{"text":{"kind":"string","value":"Q:\n\nRecord Count functoid returns aggregate count for non-flattend target message\n\nI tried to use the Record Count functoid to map the number of sub-records of an record that itself occurs 0 to unbounded to a message with each record containing a field holding the number of sub-records:\nroot+ +root\n | |\n +foo+ +foo+\n | |\n +bar+ -RecordCount- barcount\n |\n +xyz\n\nHowever my current map aggregates the count of all bar records and returns it in every foo\\barcount.\nSample source message\n\n \n 1\n \n \n \n \n \n \n \n \n 2\n \n \n \n \n \n \n \n\n\n... and the result is\n\n \n 1\n 4\n \n \n 2\n 4\n \n\n\n... whereas I expected\n\n \n 1\n 2\n \n \n 2\n 2\n \n\n\nA:\n\nI solved this issue by replacing the Record Count functoid with a Call XSLT Template Scripting functoid.\nThe XSLT template looks like this:\n\n \n \n \n \n\n\nand the input to the scripting functoid is the Id field from foo.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29564,"cells":{"text":{"kind":"string","value":"Q:\n\nDetect faulty drive in RAID 10 array\n\nI've been told that I can only verify my HW RAID array is working perfectly with KVM. However, I want to be automatically notified when there is a problem by my server.\nIs there a way via SSH (that will be called via system() in php) that can detect that a drive is having problems? I don't need to identify which drive.\nI have thought of one theory but I don't know if it will work in practice. If I were to run a PHP script to fopen('/dev/[filesystem]', 'r') and seeked every xGB for 1 byte and it seeks a position of the filesystem that's having problems, it should return an error. Am I correct in thinking this idea?\nI use XFS filesystem, I have heard of xfs_check but that says it needs to be ran in read-only mode which is inconvenient.\nI use 3ware RAID controller.\n\nA:\n\nInstall the 3Ware tools (tw_cli) on your machine. \nAfter you have installed them, get the id # of the controller (I've never understood the system behind it, for all I know it might be random):\n$ tw_cli show\n\nCtl Model (V)Ports Drives Units NotOpt RRate VRate BBU\n------------------------------------------------------------------------\nc0 9550SXU-4LP 4 2 1 0 1 1 -\n\nYou can then query the array status with \n$ tw_cli /c0 show\n\nUnit UnitType Status %RCmpl %V/I/M Stripe Size(GB) Cache AVrfy\n------------------------------------------------------------------------------\nu0 RAID-1 OK - - - 74.4951 ON OFF\n\nPort Status Unit Size Blocks Serial\n---------------------------------------------------------------\np0 NOT-PRESENT - - - -\np1 NOT-PRESENT - - - -\np2 OK u0 74.53 GB 156301488 9QZ07NP2\np3 OK u0 74.53 GB 156301488 9QZ08DS2\n\nObviously, this will look different on your machine. These example where lifted from here.\nTo actively verify (scrub) your drives, use \n$ tw_cli /c0/u0 start verify\n\nFor automatic notifications, you should setup a monitoring system, e.g. 
Nagios or Icinga and use a plugin that checks the health of the array with the help of tw_cli. These plugins work nicely without Nagios/Icinga as well and could be easily used in a minimal monitoring system in form of a cron job that sends a mail of the plugin doesn't return 0.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29565,"cells":{"text":{"kind":"string","value":"Q:\n\nMasterPage objects returning as null\n\nI've got an ASP.net application that is used to display information queried from our ERP system onto various web pages. The main object is called an EpicorUser, and it basically encapsulates all current information about an employee. \nThis object is used to fill in a bunch of various fields on the master page such as Full Name, current activity, clock in/out times, etc.\nI am trying to pass this object from the MasterPage into the content pages to avoid needlessly querying the WebService that serves this information. The problem is, when I access the object from a ContentPage, it is always null. I know it has been populated because my MasterPage content is all filled in correctly. \nI am trying to access the MasterPage 'CurrentUser' object from my ContentPage like this:\n**MasterPage Codebehind:**\npublic EpicorUser CurrentUser; //This object is populated once user has authenticated\n\n///This is in my ContentPage ASPX file so I can reference the MasterPage from codebehind\n<%@ MasterType VirtualPath=\"~/Pages/MasterPage/ShopConnect.Master\" %>\n\n**ContentPage CodeBehind:**\nstring FullName = Master.CurrentUser.UserFileData.FullName; //CurrentUser is null(but it shouldn't be)\n\nStrange thing is, I had another content page where this system worked fine. It has also stopped working, and I don't think I have changed anything on the masterpage that could cause this. I had set the CurrentUser as a public property so I could access \nI went as far a creating a method to re-populate the object from the master page, and calling it from the code-behind on the contentpage:\n**ContentPage code-behind:**\nEpicorUser CurrentUser = Master.GetCurrentUserObject();\n\n**MasterPage Method being invoked:**\npublic EpicorUser GetCurrentUserObject()\n{\n using (PrincipalContext context = new PrincipalContext(ContextType.Domain, \"OFFICE\"))\n {\n UserPrincipal principal = UserPrincipal.FindByIdentity(context, HttpContext.Current.User.Identity.Name);\n EpicorUser CurrentEmployee = RetrieveUserInfoByWindowsID(principal.SamAccountName); \n return CurrentUser; //Object is NOT null before the return\n }\n}\n\n**ContentPage code-behind return:**\nEpicorUser CurrentUser = Master.GetCurrentUserObject(); //But object is now null once we return \n\nStepping through the code shows me that the CurrentUser object is populated correctly in the MasterPage code behind, but once it is returned to the ContentPage code behind, it is now null! \nAnyone know where the disconnect is? \n\nA:\n\nContent Page is loaded first and then Master page will be loaded. So, your property could be blank when it is accessed in the content page. You can try creating a public method(to return UserObject) on the master page and then call the method from content page. \nAnother option is \ncreating a base page class(inherit all content pages) and create a property to return the user object. 
So, all pages can access the value \nEDIT:\npublic class BasePageClass : System.Web.UI.Page\n{\n public List LookupValues\n {\n get\n {\n if (ViewState[\"LookupValues\"] == null)\n {\n /*\n * create default instance here or retrieve values from Database for one time purpose\n */\n ViewState[\"LookupValues\"] = new List();\n }\n return ViewState[\"LookupValues\"] as List;\n }\n }\n}\npublic partial class WebForm6 : BasePageClass\n{\n protected void Page_Load(object sender, EventArgs e)\n { \n }\n protected void MyButton_Click(object sender, EventArgs e)\n {\n //access lookup properties\n List myValues = LookupValues;\n }\n}\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29566,"cells":{"text":{"kind":"string","value":"Q:\n\nHow to detect if sprite is being touched?\n\nI'm trying to figure out how to know if the user is touching an sprite.\nI simplified my code to the bare bones, on init I create a single sprite called \"button\" and then I try to know when the user is touching it/stops touching. \nThis is what I'm trying now:\n-(id)initWithSize:(CGSize)size { \n if (self = [super initWithSize:size]) {\n self.backgroundColor = [SKColor colorWithRed:0 green:0 blue:0 alpha:1.0];\n\n // Add button\n SKSpriteNode *sprite = [SKSpriteNode spriteNodeWithImageNamed:@\"button\"];\n sprite.name = @\"button\";\n sprite.position = CGPointMake(CGRectGetMidX(self.frame), CGRectGetMidY(self.frame));\n [self addChild:sprite];\n }\n return self;\n}\n\n-(void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {\n UITouch *touch = [touches anyObject];\n CGPoint location = [touch locationInNode:self];\n SKNode *node = [self nodeAtPoint:location];\n if ([node.name isEqualToString:@\"button\"]) {\n NSLog(@\"Started touch on sprite\");\n }\n}\n\n-(void)update:(CFTimeInterval)currentTime {\n/* Called before each frame is rendered */\n}\n\nHowever though I can tell if a touch starts on the button, I cannot tell if the user ends the touch or moves out of the sprite (or vicebersa). 
How can I do this?\n\nA:\n\nAdd to properties: (allows you to access them in various methods)\n@property (nonatomic) BOOL touchInSprite;\n@property (nonatomic) SKSpriteNode * sprite;\n@end\n\nAdd methods:\n- (void) touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event\n{\n UITouch * touch = [touches anyObject];\n CGPoint location = [touch locationInNode:self];\n\n if ([self.sprite containsPoint: location]) \n self.touchInSprite = true; \n else\n self.touchInSprite = false;\n}\n\n - (void) touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event\n{\n UITouch * touch = [touches anyObject];\n CGPoint location = [touch locationInNode:self];\n\n if ([self.sprite containsPoint: location]) \n { \n self.touchInSprite = true; \n } \n else\n {\n self.touchInSprite = false;\n //user stop touches it\n }\n}\n\n - (void) touchesEnded:(NSSet *)touches withEvent:(UIEvent *)event\n{\n UITouch * touch = [touches anyObject];\n CGPoint location = [touch locationInNode:self];\n\n if (self.startTouchValid == true) \n {\n //Perform action\n } \n}\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29567,"cells":{"text":{"kind":"string","value":"Q:\n\nPython socket error resilience / workaround\n\nI have a script running that is testing a series of urls for availability.\nThis is one of the functions.\ndef checkUrl(url): # Only downloads headers, returns status code.\n p = urlparse(url)\n conn = httplib.HTTPConnection(p.netloc)\n conn.request('HEAD', p.path)\n resp = conn.getresponse()\n return resp.status\n\nOccasionally, the VPS will lose connectivity, the entire script crashes when that occurs.\nFile \"/usr/lib/python2.6/httplib.py\", line 914, in request\n self._send_request(method, url, body, headers)\nFile \"/usr/lib/python2.6/httplib.py\", line 951, in _send_request\n self.endheaders()\nFile \"/usr/lib/python2.6/httplib.py\", line 908, in endheaders\n self._send_output()\nFile \"/usr/lib/python2.6/httplib.py\", line 780, in _send_output\n self.send(msg)\nFile \"/usr/lib/python2.6/httplib.py\", line 739, in send\n self.connect()\nFile \"/usr/lib/python2.6/httplib.py\", line 720, in connect\n self.timeout)\nFile \"/usr/lib/python2.6/socket.py\", line 561, in create_connection\n raise error, msg\nsocket.error: [Errno 101] Network is unreachable\n\nI'm not at all familiar with handling errors like this in python.\nWhat is the appropriate way to keep the script from crashing when network connectivity is temporarily lost?\n\nEdit:\nI ended up with this - feedback?\ndef checkUrl(url): # Only downloads headers, returns status code.\n try:\n p = urlparse(url)\n conn = httplib.HTTPConnection(p.netloc)\n conn.request('HEAD', p.path)\n resp = conn.getresponse()\n return resp.status\n except IOError, e:\n if e.errno == 101:\n print \"Network Error\"\n time.sleep(1)\n checkUrl(url)\n else:\n raise\n\nI'm not sure I fully understand what raise does though..\n\nA:\n\nProblem with your solution as it stands is you're going to run out of stack space if there are too many errors on a single URL (> 1000 by default) due to the recursion. Also, the extra stack frames could make tracebacks hard to read (500 calls to checkURL). 
I'd rewrite it to be iterative, like so:\ndef checkUrl(url): # Only downloads headers, returns status code.\n while True:\n try:\n p = urlparse(url)\n conn = httplib.HTTPConnection(p.netloc)\n conn.request('HEAD', p.path)\n resp = conn.getresponse()\n return resp.status\n except IOError as e:\n if e.errno == 101:\n print \"Network Error\"\n time.sleep(1)\n except:\n raise\n\nAlso, you want the last clause in your try to be a bare except not an else. Your else only gets executed if control falls through the try suite, which can never happen, since the last statement of the try suite is return.\nThis is very easy to change to allow a limited number of retries. Just change the while True: line to for _ in xrange(5) or however many retries you wish to accept. The function will then return None if it can't connect to the site after 5 attempts. You can have it return something else or raise an exception by adding return or raise SomeException at the very end of the function (indented the same as the for or while line).\n\nA:\n\nIf you just want to handle this Network is unreachable 101, and let other exceptions throw an error, you can do following for example.\nfrom errno import ENETUNREACH\n\ntry:\n # tricky code goes here\n\nexcept IOError as e:\n # an IOError exception occurred (socket.error is a subclass)\n if e.errno == ENETUNREACH:\n # now we had the error code 101, network unreachable\n do_some_recovery\n else:\n # other exceptions we reraise again\n raise\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29568,"cells":{"text":{"kind":"string","value":"Q:\n\nGet all users, with all their images\n\nI have 2 mysql tables :\n\nusers table \ntable that contains gallery images for each user \n\nMy users table looks like :\nid | name\n---------\n1 Ryan\n2 James\n3 Dave\n\nMy user_gallery_images tables looks like :\nid | user_id | image\n--------------------\n1 1 image.jpg\n2 1 image2.jpg\n3 2 image3.jpg\n4 2 image4.jpg\n\nI was wondering if there was a query that would retrieve all users, and get all the images for that user.\nThe expected result should look like :\nid | name | images\n-------------------\n1 Ryan image.jpg,image2.jpg\n2 James image3.jpg,image4.jpg\n3 Dave \n\nThank you\n\nA:\n\nYou will have to use a LEFT JOIN rather than an INNER JOIN because you want to retrieve David who doesn't have images. And you will need to use GROUP_CONCAT\nSELECT u.id, u.name, GROUP_CONCAT(image) from users u \nLEFT JOIN user_gallery_images g \nON u.id = g.user_id GROUP by u.id\n\nNote this query will work on mysql 5.7 only if you have a PRIMARY KEY on users.id. Will work on mysql < 5.7 regardless of the primary key\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29569,"cells":{"text":{"kind":"string","value":"Q:\n\nR - How to fill a matrix by columns\n\nI am trying to fill the columns of a matrix with the subsets of an built-in data frame. The resulting matrix should have dimensions 16 by 11 and each subset is 16 integers long.\nI have written the following for loop:\nA <- unique(DNase$Run)\nz <- matrix(data =NA, ncol=11, nrow =16)\nfor (i in A) {\n z[,i] <- subset(DNase$density, DNase$Run==i)\n}\n\nand I obtain the following error:\nError in [<-(*tmp*, , i, value = c(0.017, 0.018, 0.121, 0.124, 0.206, : \n no 'dimnames' attribute for array\nCould anyone kindly explain where the confusion comes from? 
\nMany thanks in advance!\n\nA:\n\nCheap Solution\nSince the DNase data.frame is already ordered by the Run factor, we can actually form the desired output matrix with a simple call to matrix():\nmatrix(DNase$density,16);\n## [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11]\n## [1,] 0.017 0.045 0.070 0.011 0.035 0.086 0.094 0.054 0.032 0.052 0.047\n## [2,] 0.018 0.050 0.068 0.016 0.035 0.103 0.092 0.054 0.043 0.094 0.057\n## [3,] 0.121 0.137 0.173 0.118 0.132 0.191 0.182 0.152 0.142 0.164 0.159\n## [4,] 0.124 0.123 0.165 0.108 0.135 0.189 0.182 0.148 0.155 0.166 0.155\n## [5,] 0.206 0.225 0.277 0.200 0.224 0.272 0.282 0.226 0.239 0.259 0.246\n## [6,] 0.215 0.207 0.248 0.206 0.220 0.277 0.273 0.222 0.242 0.256 0.252\n## [7,] 0.377 0.401 0.434 0.364 0.385 0.440 0.444 0.392 0.420 0.439 0.427\n## [8,] 0.374 0.383 0.426 0.360 0.390 0.426 0.439 0.383 0.395 0.439 0.411\n## [9,] 0.614 0.672 0.703 0.620 0.658 0.686 0.686 0.658 0.624 0.690 0.704\n## [10,] 0.609 0.681 0.689 0.640 0.647 0.676 0.668 0.644 0.705 0.701 0.684\n## [11,] 1.019 1.116 1.067 0.979 1.060 1.062 1.052 1.043 1.046 1.042 0.994\n## [12,] 1.001 1.078 1.077 0.973 1.031 1.072 1.035 1.002 1.026 1.075 0.980\n## [13,] 1.334 1.554 1.629 1.424 1.425 1.424 1.409 1.466 1.398 1.340 1.421\n## [14,] 1.364 1.526 1.479 1.399 1.409 1.459 1.392 1.381 1.405 1.406 1.385\n## [15,] 1.730 1.932 2.003 1.740 1.750 1.768 1.759 1.743 1.693 1.699 1.715\n## [16,] 1.710 1.914 1.884 1.732 1.738 1.806 1.739 1.724 1.729 1.708 1.721\n\nThis of course depends on the aforementioned ordering, which can be verified with a call to rle():\ndo.call(data.frame,rle(levels(DNase$Run)[DNase$Run]));\n## lengths values\n## 1 16 1\n## 2 16 2\n## 3 16 3\n## 4 16 4\n## 5 16 5\n## 6 16 6\n## 7 16 7\n## 8 16 8\n## 9 16 9\n## 10 16 10\n## 11 16 11\n\nRobust Solution\nIf we don't want to depend on that ordering, we can use reshape() as follows, and we get a nice bonus of column names, if you want that:\nreshape(cbind(DNase[c('Run','density')],id=ave(c(DNase$Run),DNase$Run,FUN=seq_along)),dir='w',timevar='Run')[-1];\n## density.1 density.2 density.3 density.4 density.5 density.6 density.7 density.8 density.9 density.10 density.11\n## 1 0.017 0.045 0.070 0.011 0.035 0.086 0.094 0.054 0.032 0.052 0.047\n## 2 0.018 0.050 0.068 0.016 0.035 0.103 0.092 0.054 0.043 0.094 0.057\n## 3 0.121 0.137 0.173 0.118 0.132 0.191 0.182 0.152 0.142 0.164 0.159\n## 4 0.124 0.123 0.165 0.108 0.135 0.189 0.182 0.148 0.155 0.166 0.155\n## 5 0.206 0.225 0.277 0.200 0.224 0.272 0.282 0.226 0.239 0.259 0.246\n## 6 0.215 0.207 0.248 0.206 0.220 0.277 0.273 0.222 0.242 0.256 0.252\n## 7 0.377 0.401 0.434 0.364 0.385 0.440 0.444 0.392 0.420 0.439 0.427\n## 8 0.374 0.383 0.426 0.360 0.390 0.426 0.439 0.383 0.395 0.439 0.411\n## 9 0.614 0.672 0.703 0.620 0.658 0.686 0.686 0.658 0.624 0.690 0.704\n## 10 0.609 0.681 0.689 0.640 0.647 0.676 0.668 0.644 0.705 0.701 0.684\n## 11 1.019 1.116 1.067 0.979 1.060 1.062 1.052 1.043 1.046 1.042 0.994\n## 12 1.001 1.078 1.077 0.973 1.031 1.072 1.035 1.002 1.026 1.075 0.980\n## 13 1.334 1.554 1.629 1.424 1.425 1.424 1.409 1.466 1.398 1.340 1.421\n## 14 1.364 1.526 1.479 1.399 1.409 1.459 1.392 1.381 1.405 1.406 1.385\n## 15 1.730 1.932 2.003 1.740 1.750 1.768 1.759 1.743 1.693 1.699 1.715\n## 16 1.710 1.914 1.884 1.732 1.738 1.806 1.739 1.724 1.729 1.708 1.721\n\nNote that technically the above object is a data.frame, but you can easily coerce to matrix with as.matrix().\nExplanation of Your Error\nThe reason why your code is failing is as follows. 
First, notice that the DNase$Run vector is actually an ordered factor:\nclass(DNase$Run);\n## [1] \"ordered\" \"factor\"\n\nYour A variable will therefore also be an ordered factor, just with the unique values from DNase$Run.\nNow, when you use a for-loop to iterate over a factor (ordered or otherwise), it uses the levels (character strings) as the iteration value (as opposed to the integer enumeration values that are stored internally). Demo:\nfor (i in factor(letters[1:5])) print(i);\n## [1] \"a\"\n## [1] \"b\"\n## [1] \"c\"\n## [1] \"d\"\n## [1] \"e\"\n\nThus, your i loop variable is being assigned to the levels character strings of DNase$Run. And, since your z matrix has no dimnames, trying to index its columns with a character string is failing with the error message \"no 'dimnames' attribute for array\".\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29570,"cells":{"text":{"kind":"string","value":"Q:\n\nUnder my constructor for an array, I have a toString method that prints out the contents. But it's telling me it isn't resolved to a variable\n\nSo my constructor creates an array, and I want my toString method to display the contents. However, I'm getting an error telling me that table[i] can not be resolved to a variable, even though it was created in the constructor. Please help!\npublic int size = 38;\npublic int first = 0;\npublic int last = 2;\npublic int count = 1;\n\npublic Table()\n{\n int[] table = new int[size]; \n table[0] = first;\n table [size-1] = last;\n for(int i = 1; i < size-1; i++){\n if(count == first | count == last)\n count++;\n table[i] = count;\n count++;\n }\n}\n\npublic String toString(){\n String string = \"Wheel: 0\";\n for(int i = 1; i < size; i++)\n string = string + \"-\" + table[i] ; //table[i] CAN NOT BE RESOLVED TO A VARIABLE\n return string;\n\n}\n\nA:\n\nYour table is defined locally in your constructor.\nint[] table = new int[size]; \n\nYou have to declare it outside the constructor:\nint[] table;\npublic Table()\n{\n table = new int[size]; \n...\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29571,"cells":{"text":{"kind":"string","value":"Q:\n\nOptimizing array transposing function\n\nI'm working on a homework assignment, and I've been stuck for hours on my solution. The problem we've been given is to optimize the following code, so that it runs faster, regardless of how messy it becomes. We're supposed to use stuff like exploiting cache blocks and loop unrolling.\nProblem:\n//transpose a dim x dim matrix into dist by swapping all i,j with j,i\nvoid transpose(int *dst, int *src, int dim) {\n int i, j;\n\n for(i = 0; i < dim; i++) {\n for(j = 0; j < dim; j++) {\n dst[j*dim + i] = src[i*dim + j];\n }\n }\n}\n\nWhat I have so far:\n//attempt 1\nvoid transpose(int *dst, int *src, int dim) {\n int i, j, id, jd;\n\n id = 0;\n for(i = 0; i < dim; i++, id+=dim) {\n jd = 0;\n for(j = 0; j < dim; j++, jd+=dim) {\n dst[jd + i] = src[id + j];\n }\n }\n}\n\n//attempt 2\nvoid transpose(int *dst, int *src, int dim) {\n int i, j, id;\n int *pd, *ps;\n id = 0;\n for(i = 0; i < dim; i++, id+=dim) {\n pd = dst + i;\n ps = src + id;\n for(j = 0; j < dim; j++) {\n *pd = *ps++;\n pd += dim;\n }\n }\n}\n\nSome ideas, please correct me if I'm wrong:\nI have thought about loop unrolling but I dont think that would help, because we don't know if the NxN matrix has prime dimensions or not. 
If I checked for that, it would include excess calculations which would just slow down the function.\nCache blocks wouldn't be very useful, because no matter what, we will be accessing one array linearly (1,2,3,4) while the other we will be accessing in jumps of N. While we can get the function to abuse the cache and access the src block faster, it will still take a long time to place those into the dst matrix.\nI have also tried using pointers instead of array accessors, but I don't think that actually speeds up the program in any way.\nAny help would be greatly appreciated.\nThanks\n\nA:\n\nCache blocking can be useful. For an example, lets say we have a cache line size of 64 bytes (which is what x86 uses these days). So for a large enough matrix such that it's larger than the cache size, then if we transpose a 16x16 block (since sizeof(int) == 4, thus 16 ints fit in a cache line, assuming the matrix is aligned on a cacheline bounday) we need to load 32 (16 from the source matrix, 16 from the destination matrix before we can dirty them) cache lines from memory and store another 16 lines (even though the stores are not sequential). In contrast, without cache blocking transposing the equivalent 16*16 elements requires us to load 16 cache lines from the source matrix, but 16*16=256 cache lines to be loaded and then stored for the destination matrix.\n\nA:\n\nUnrolling is useful for large matrixes.\nYou'll need some code to deal with excess elements if the matrix size isn't a multiple of the times you unroll. But this will be outside the most critical loop, so for a large matrix it's worth it.\nRegarding the direction of accesses - it may be better to read linearly and write in jumps of N, rather than vice versa. This is because read operations block the CPU, while write operations don't (up to a limit).\nOther suggestions:\n1. Can you use parallelization? OpenMP can help (though if you're expected to deliver single CPU performance, it's no good).\n2. Disassemble the function and read it, focusing on the innermost loop. You may find things you wouldn't notice in C code.\n3. Using decreasing counters (stopping at 0) might be slightly more efficient that increasing counters.\n4. The compiler must assume that src and dst may alias (point to the same or overlapping memory), which limits its optimization options. If you could somehow tell the compiler that they can't overlap, it may be great help. 
However, I'm not sure how to do that (maybe use the restrict qualifier).\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29572,"cells":{"text":{"kind":"string","value":"Q:\n\nthe method addHeader (String, String) is undefined for the type HttpGet\n\nI have this program:\nimport org.apache.http.client.HttpClient;\nimport org.apache.http.client.methods.HttpGet;\nimport org.apache.http.impl.client.HttpClientBuilder;\n\npublic class ApplicationRESTFul {\n\n public static void main(String[] args) {\n\n String url = \"http://www.google.com/search?q=httpClient\";\n\n HttpClient client = HttpClientBuilder.create().build();\n HttpGet request = new HttpGet(url);\n\n request.addHeader(\"Accept\", \"application/json\");\n\n }\n\n}\n\nBut I got this message from eclipse\nthe method addHeader (String, String) is undefined for the type HttpGet\n\nI am using this library and as I see in the documentation , the method should exist (org.apache.httpcomponents.httpclient_4.5)\nhttp://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/client/methods/HttpGet.html\n\nA:\n\nI solved it by adding httpcore JAR to class path. Adding the dependency from maven adds the httpcore JAR too and not just the httpclient JAR, and that's why it works too.\n\nA:\n\nimporting the depency from maven instead to add the lib in the classpath solved the problem\n \n org.apache.httpcomponents\n httpclient\n 4.3.6\n \n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29573,"cells":{"text":{"kind":"string","value":"Q:\n\npdb is not working in django doctests\n\nSo I created the following file (testlib.py) to automatically load all doctests (throughout my nested project directories) into the __tests__ dictionary of tests.py:\n# ./testlib.py\nimport os, imp, re, inspect\nfrom django.contrib.admin import site\n\ndef get_module_list(start):\n all_files = os.walk(start)\n file_list = [(i[0], (i[1], i[2])) for i in all_files]\n file_dict = dict(file_list)\n\n curr = start\n modules = []\n pathlist = []\n pathstack = [[start]]\n\n while pathstack is not None:\n\n current_level = pathstack[len(pathstack)-1]\n if len(current_level) == 0:\n pathstack.pop()\n\n if len(pathlist) == 0:\n break\n pathlist.pop()\n continue\n pathlist.append(current_level.pop())\n curr = os.sep.join(pathlist)\n\n local_files = []\n for f in file_dict[curr][1]:\n if f.endswith(\".py\") and os.path.basename(f) not in ('tests.py', 'models.py'):\n local_file = re.sub('\\.py$', '', f)\n local_files.append(local_file)\n\n for f in local_files:\n # This is necessary because some of the imports are repopulating the registry, causing errors to be raised\n site._registry.clear()\n module = imp.load_module(f, *imp.find_module(f, [curr]))\n modules.append(module)\n\n pathstack.append([sub_dir for sub_dir in file_dict[curr][0] if sub_dir[0] != '.'])\n\n return modules\n\ndef get_doc_objs(module):\n ret_val = []\n for obj_name in dir(module):\n obj = getattr(module, obj_name)\n if callable(obj):\n ret_val.append(obj_name)\n if inspect.isclass(obj):\n ret_val.append(obj_name)\n\n return ret_val\n\ndef has_doctest(docstring):\n return \">>>\" in docstring\n\ndef get_test_dict(package, locals):\n test_dict = {}\n for module in get_module_list(os.path.dirname(package.__file__)):\n for method in get_doc_objs(module):\n docstring = str(getattr(module, method).__doc__)\n if has_doctest(docstring):\n\n print \"Found doctests(s) \" + module.__name__ + '.' 
+ method\n\n # import the method itself, so doctest can find it\n _temp = __import__(module.__name__, globals(), locals, [method])\n locals[method] = getattr(_temp, method)\n\n # Django looks in __test__ for doctests to run. Some extra information is\n # added to the dictionary key, because otherwise the info would be hidden.\n test_dict[method + \"@\" + module.__file__] = getattr(module, method)\n\n return test_dict\n\nTo give credit where credit is due, much of this came from here\nIn my tests.py file, I have the following code:\n# ./project/tests.py\nimport testlib, project\n__test__ = testlib.get_test_dict(project, locals())\n\nAll of this works quite well to load my doctests from all of my files and subdirectories. The problem is that when I import and invoke pdb.set_trace() anywhere, this is all I see:\n(Pdb) l\n(Pdb) args\n(Pdb) n\n(Pdb) n\n(Pdb) l\n(Pdb) cont\n\ndoctest is apparently capturing and mediating the output itself, and is using the output in assessing the tests. So, when the test run completes, I see everything that should have printed out when I was in the pdb shell within doctest's failure report. This happens regardless of whether I invoke pdb.set_trace() inside a doctest line or inside the function or method being tested.\nObviously, this is a big drag. Doctests are great, but without an interactive pdb, I cannot debug any of the failures that they are detecting in order to fix them.\nMy thought process is to possibly redirect pdb's output stream to something that circumvents doctest's capture of the output, but I need some help figuring out the low-level io stuff that would be required to do that. Also, I don't even know if it would be possible, and am too unfamiliar with doctest's internals to know where to start. Anyone out there have any suggestions, or better, some code that could get this done?\n\nA:\n\nI was able to get pdb by tweaking it. I just put the following code at the bottom of my testlib.py file:\nimport sys, pdb\nclass TestPdb(pdb.Pdb):\n def __init__(self, *args, **kwargs):\n self.__stdout_old = sys.stdout\n sys.stdout = sys.__stdout__\n pdb.Pdb.__init__(self, *args, **kwargs)\n\n def cmdloop(self, *args, **kwargs):\n sys.stdout = sys.__stdout__\n retval = pdb.Pdb.cmdloop(self, *args, **kwargs)\n sys.stdout = self.__stdout_old\n\ndef pdb_trace():\n debugger = TestPdb()\n debugger.set_trace(sys._getframe().f_back)\n\nIn order to use the debugger I just import testlib and call testlib.pdb_trace() and am dropped into a fully functional debugger.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29574,"cells":{"text":{"kind":"string","value":"Q:\n\nWhat are the most popular galaxies for which we have images?\n\nThe only galaxies I can think of (not being an astronomer) are Andromeda and Milky Way. There are 51 near galaxies, but they all pretty much say \"satellite of Milky way\" or \"satellite of Andromeda\". There are 100k+ galaxies in the local supercluster, and that page seems to have a better list:\n\nCorvus \nComa Berenices\nUrsa Major \nVirgo \nSculptor \netc.\n\nIf you had to rank them in order of prominence in the scientific community or in popular science, wondering what the top 10 or 20 galaxies would be (for which we have photos).\nI am trying to come up with a list of images for educational purposes that are potentially somewhat familiar to laymen audiences, or which would be useful to introduce to laymen audiences.\n\nA:\n\nAny such list is going to be terribly subjective. 
Since I'm an astronomer who studies galaxies, I'll go ahead and throw out a subjective list of the more famous, photogenic, and/or scientifically well-studied galaxies. The first six are in the Local Group (LMC and SMC are satellites of the Milky Way, M32 is a satellite of Andromeda).\n\nMilky Way\nAndromeda (M31)\nLarge Magellanic Cloud\nSmall Magellanic Cloud\nTriangulum (M33)\nM32\nSombrero (M104)\nPinwheel (M101)\nWhirlpool (M51a)\nM64 (Black Eye)\nM74 (NGC 628)\nM81\nM82 (Cigar)\nM87\nM100\nNGC 891\nNGC 1068 (M77)\nNGC 1300\nNGC 1365\nCentaurus A\nCygnus A\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29575,"cells":{"text":{"kind":"string","value":"Q:\n\nOnly one button in a panel with multiple togglebuttons changes color - wxPython\n\nI want to set the color of a toggle button of my choice in the panel that I have created. The problem is that in the numerous toggle buttons that I have displayed on my panel when I want to change the color of each one only the color of the last button changes. Here's my code:\nimport wx\n\nclass Frame(wx.Frame):\n\ndef __init__(self):\n wx.Frame.__init__(self,None)\n self.panel = wx.Panel(self,wx.ID_ANY)\n\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.flags_panel = wx.Panel(self, wx.ID_ANY, style = wx.SUNKEN_BORDER)\n\n self.sizer.Add(self.flags_panel)\n self.SetSizer(self.sizer,wx.EXPAND | wx.ALL)\n self.flags = Flags(self.flags_panel, [8,12])\n self.flags.Show()\n\nclass Flags (wx.Panel):\ndef __init__(self,panel, num_flags = []):#,rows = 0,columns = 0,radius = 0, hspace = 0, vspace = 0,x_start = 0, y_start = 0\n wx.Panel.__init__(self,panel,-1, size = (350,700))\n\n num_rows = num_flags[0]\n num_columns = num_flags[1]\n x_pos_start = 10\n y_pos_start = 10\n\n i = x_pos_start\n j = y_pos_start\n buttons = []\n for i in range (num_columns):\n buttons.append('toggle button')\n self.ButtonValue = False\n for button in buttons:\n index = 0\n while index != 15: \n self.Button = wx.ToggleButton(self,-1,size = (10,10), pos = (i,j))\n self.Bind(wx.EVT_TOGGLEBUTTON,self.OnFlagCreation, self.Button)\n self.Button.Show()\n i += 15\n index += 1\n j += 15\n i = 10\n\n self.Show()\n\ndef OnFlagCreation(self,event):\n if not self.ButtonValue:\n self.Button.SetBackgroundColour('#fe1919')\n self.ButtonValue = True\n else:\n self.Button.SetBackgroundColour('#14e807')\n self.ButtonValue = False\n\nif __name__ == '__main__':\n app = wx.App(False)\n frame = Frame()\n frame.Show()\n app.MainLoop()\n\nA:\n\nYour problem is quite simple. The last button is always changed because it's the last button defined:\nself.Button = wx.ToggleButton(self,-1,size = (10,10), pos = (i,j))\n\nEach time through the for loop, you reassign the self.Button attribute to a different button. What you want to do is extract the button from your event object and change its background color. So change your function to look like this:\ndef OnFlagCreation(self,event):\n btn = event.GetEventObject()\n if not self.ButtonValue:\n btn.SetBackgroundColour('#fe1919')\n self.ButtonValue = True\n else:\n btn.SetBackgroundColour('#14e807')\n self.ButtonValue = False\n\nSee also:\n\nhttp://www.blog.pythonlibrary.org/2011/09/20/wxpython-binding-multiple-widgets-to-the-same-handler/\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29576,"cells":{"text":{"kind":"string","value":"Q:\n\nInternet Explorer 11 issue\n\nI am working on selenium automation through IE web browser. 
Sometimes while invoking browser the actions are done very slowly.\nFor example, If I comment a user id(abcd), IE typing like a(taking a minute)b(taking minute),c(taking a minute)..... I checked the internet speed and clear the cache cookies and all. Sometimes it's happening.\nPlease suggest any solutions.\n\nA:\n\nMost likely it is due to 64 bit IEWebDriver. Switch to 32 bit IEWebDriver and check if it fixes your issue.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29577,"cells":{"text":{"kind":"string","value":"Q:\n\nPerl - undefined subroutine\n\nI have the following Perl code:\nuse Email::Sender::Simple;\nuse IO::Socket::SSL;\n\nIO::Socket::SSL::set_defaults(SSL_verify_mode => SSL_VERIFY_NONE);\n\nEmail::Sender::Simple::sendmail($email, { transport => $transport });\n\nWhen I run it I get this error:\nUndefined subroutine &Email::Sender::Simple::sendmail called at script.pl line 73.\n\nIf I change the code to have the following, then it works:\nuse Email::Sender::Simple qw(sendmail);\n\nsendmail($email, { transport => $transport });\n\nCan someone explain why I had to change the code for sendmail, while I did NOT have to change the code for set_defaults to look like:\nuse IO::Socket::SSL qw(set_defaults);\n\nset_defaults(SSL_verify_mode => SSL_VERIFY_NONE);\n\nA:\n\nTake a look at the code Email/Sendmail/Simple.pm. There is no sendmail subroutine in that program. Instead, if you look at the header, you'll see:\nuse Sub::Exporter -setup => {\n exports => {\n sendmail => Sub::Exporter::Util::curry_class('send'),\n try_to_sendmail => Sub::Exporter::Util::curry_class('try_to_send'),\n },\n};\n\nI'm not familiar with Sub::Exporter, but I did notice this description.\n\nThe biggest benefit of Sub::Exporter over existing exporters (including the ubiquitous Exporter.pm) is its ability to build new coderefs for export, rather than to simply export code identical to that found in the exporting package.\n\nOh...\nSo, the purpose of using Sub::Exporter is to export subroutine names that aren't subroutines in your package.\nIf you're interested, you can read the tutorial of Sub::Exporter, but it appears it has the ability to export subroutines under different names.\nThus, Email::Sender::Simple::sendmail isn't a subroutine, but that sendmail can still be exported.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29578,"cells":{"text":{"kind":"string","value":"Q:\n\nChange password page\n\nI'm having trouble with this PHP code, it seems logical to me but as I'm new to PHP and MySQL, I am obviously wrong.\nI'm trying to set up a change password page for an assignment, and I can't see where I have gone wrong, the code is as follows:\nsession_start();\nif(isset($_SESSION['uname'])){\n echo \"Welcome \" . $_SESSION['uname'];\n}\nrequire_once 'PHP/Constants.php';\n$conn = new MySQLi(DB_SERVER, DB_USER, DB_PASSWORD, DB_NAME) or die ('There was a problem connecting to the database');\n$query = \"SELECT * FROM user\";\n$result = mysqli_query($conn, $query);\nwhile ($pwdReq = mysqli_fetch_array($result)){\n if ($pwdReq['Password'] == $_POST['oldPwd']) {\n if ($_POST['confPwd'] == $_POST['newPwd']){\n $change = \"INSERT INTO user(Password) VALUES ('newPwd')\";\n $pwdChange = mysqli_query($conn, $change);\n } else return \"The new passwords do not match!\";\n } else return \"Please enter a correct password!\";\n} \n\nThe body Of my page is as follows:\n
<form action="change_password.php" method="post">
  <h2>Change Password</h2>
  <p>
    <input type="password" name="oldPwd" />
  </p>
  <p>
    <input type="password" name="newPwd" />
  </p>
  <p>
    <input type="password" name="confPwd" />
  </p>
  <p>
    <input type="submit" value="Change" />
  </p>
</form>
\n\nWhen the page runs all I get is as follows:\n\nNotice: Undefined index: oldPwd in C:\\Program Files\n (x86)\\xampp\\htdocs\\Assignment\\change_password.php on line 11\n\nThank you in advance for any help I receive - Nick\n\nA:\n\nAlways check your POST values before you do anything\nYou should select the single record that matches uname\nprepare your queries to avoid SQL injection\n\nHere's an improved version of your script:\n \n\nif oldPwd, confPwd, or confPwd is not set you will need to figure why. This is not PHP fault anymore. You will need to look in your html and make sure the script is receiving these values\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29579,"cells":{"text":{"kind":"string","value":"Q:\n\nWhy is the order of white/grey matter different in the brain and spinal cord?\n\nIn the brain proper, grey matter forms the outer layer of the brain, and white matter forms the inner layer. In the spine, this is reversed: white matter forms the outer layer of the spine, and grey matter the inner layer. Is there a developmental or functional reason for this?\n\nA:\n\nI'll tackle this question from a functional point of view. \nGray matter are cell bodies, white matter are myelinated fiber tracts.\nIn the brain, the gray matter is basically the cortex, the white matter lies mainly underneath it. The Cortex is the place where all the higher mental processing takes place (Fig. 1).\n\nFig. 1. Cortical functions. Source: Penn Medicine\nThe white matter in the brain connects the various parts of the cortex so that information can be transported for further processing and integrated.\n\nFig. 2. Central white matter. Source: NIH Medline\nSince the cortex is the 'processor', it makes sense to connect the parts subcortically (more efficient as it leads to shorter connections). However, the cortex has been expanding very late in evolution, so this 'endpoint reasoning' can be contested, because from an evolutionary perspective, more cortex was needed and hence it was expanded right where it happened to be, namely in the outer part of the brain.\nIn the spinal chord things are pretty much reversed; grey matter within, white matter around it (Fig. 3). \n\nFig. 3. Section through spinal chord with central grey matter and surrounding white matter. Source: University of Michigan\nThe white matter, again, is formed by various tracts (Fig. 4) and the grey matter with parts that are processing information (fig. 5). \n\nFig. 4. Section through spinal chord showing the spinal tracts forming the white matter. Source: Biology.SE\n\nFig. 5. Section through spinal chord showing the spinal reflex arches forming the gray matter. Source: APSU Biology\nThe white matter in the spinal chord constitutes the various sensory and motor pathways to and from the brain, respectively. The gray matter constitutes basic processing nuclei that form the reflex arches in the spinal chord. These reflex arches process incoming sensory information (e.g. pain) and govern motor output (e.g., pulling the hand away from the fire). \nAgain, the structure makes sense in terms of efficiency, as the reflex arches combine the sensory and motor tracts to govern reflexes, and therefore processing them from within saves space. 
\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29580,"cells":{"text":{"kind":"string","value":"Q:\n\nManually set the properties of new objects created from datagridview\n\npremise: I have a class \"User\" which has a property of type list to represent a list of \"Office\" associated with the user.\nI have a BindingSource associated to the list of \"Office\" in which the grid is hooked to insert new elements.\nEverything seems to work correctly, when I click on the new line and write a value in the list is added to the new object \"Office\"\nThe problem is that in addition to the data entered by the user I want to insert some default value automatically (example: a guid), I know I could do this with a hidden column in my grid, but I do not like too much as a solution and I would like to work directly on the object .\nI tried with the event\nDataBindingComplete(object sender, DataGridViewBindingCompleteEventArgs e)\n\nhttps://msdn.microsoft.com/en-us/library/system.windows.forms.datagridview.databindingcomplete%28v=vs.110%29.aspx\nThis is called after the new object \"office\" was added to the list but I can not recover it, because I do not know which row is inserted but only a generic word \"ItemAdded\"\nI also tried using the method\nDefaultValuesNeeded(object sender, System.Windows.Forms.DataGridViewRowEventArgs e)\n\nhttps://msdn.microsoft.com/en-us/library/system.windows.forms.datagridview.defaultvaluesneeded%28v=vs.110%29.aspx?cs-save-lang=1&cs-lang=csharp#code-snippet-1\nbut this to me is called before my object \"office\" I is added to the list and I have no way to have it in order to set the default.\nLong story short: how do I manually set the values on the properties of an object added to a list automatically from gridview without using hidden columns?\n\nA:\n\nIf the Property \"guid\" is generated from the Entity Framework, then you can add a partial class such as:\npublic partial class Office\n{\n public Office()\n {\n guid=Guid.newGuid();\n }\n}\n\nand add a constructor there which populates the guid property whenever a new office is created.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29581,"cells":{"text":{"kind":"string","value":"Q:\n\nFont Boosting Issue in Android Lollipop\n\nWe recently noticed font boosting issue in Android Lollipop OS. If user modifies font size in settings Menu, application is reloading if it is already running and all fonts are modified based on selected font size in device settings. \nIf we are setting below metatag viewport, issue is not exist in iOS and android OS < 5.0\n\n> maximum-scale=1, user-scalable=no\">\n\nIs there any option to resolve this issue?????\nNote :\n1. This kind of issue is not available in iOS and Android < 5.0. \n2. There is no issue for thin application for Android >= 5.0 as well. (Only webpage is reloading whenever there is a change in device font settings)\n\nA:\n\nThe issue is getting resolved by setting below value in webview. \n webView.getSettings().setTextZoom(100);\nRoot cause: \n For Android OS < 5.0 TextZoom is having default value as 100. But in Lollipop OS, value is taken from font settings in device if it is not overwritten in application. If we are setting textZoom as 100, font settings changes is not affecting application. 
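A minimal sketch of where that call can sit, assuming an Activity whose layout contains a WebView (the layout name, view id and URL below are placeholders, not taken from the original post):

import android.app.Activity;
import android.os.Bundle;
import android.webkit.WebView;

public class BrowserActivity extends Activity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_browser);               // placeholder layout
        WebView webView = (WebView) findViewById(R.id.webview);  // placeholder id
        // Pin text zoom to 100% so the device font-size setting no longer
        // rescales web content on Android 5.0+.
        webView.getSettings().setTextZoom(100);
        webView.loadUrl("file:///android_asset/index.html");     // placeholder URL
    }
}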
\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29582,"cells":{"text":{"kind":"string","value":"Q:\n\nStack smashing detected and no source for getenv\n\nI'm having the weirdest problem ever when programming in C. My function sometimes runs and other times it doesn't, and even though I tried searching for these errors (stack smashing detected and no source for getenv) I can't seem to find the answer to why it fails sometimes. I tried debugging it and it only has a problem in the last character (a \"}\"), so it runs all those other functions (and they have been tested a lot separately) but sometimes it just falls apart and doesn't run the last function (it works, that I can guarantee, because some other times it even runs inside this function). Also the few last times I ran the function it gave Segmentation Fault even though sometimes it ran all the way to the end. Is there any way I can debug this problem? Here goes my code:\nvoid main(int argc, char * argv[]) {\nFILE * fin=fopen(argv[1],\"r\");\nchar v[1024];\nint col;\nmatrix m=emptyS();\nwhile(fscanf(fin, \"%s\",v)!=EOF) {\n int i=0;\n int * w = (int*) malloc(sizeof (int));\n int str=strlen(v);\n int size=0;\n while(i= 4*/\n }\n i++;\n size++;\n w=realloc(w,size*sizeof(int));\n w[size-1]=atoi(a);\n }\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29583,"cells":{"text":{"kind":"string","value":"Q:\n\nShow that $|h(x)-h(y)|≤|x-y|$\n\nLet $$h(x)=x/(1+|x|)$$ \nShow that $$|h(x)-h(y)|≤|x-y|$$ \nI have no idea to start. But I can see that $h(x)≤x$. Logically, the required inequality will follow from this. But I cannot apply it to $y$.\n\nA:\n\nSuppose that $0 < x < y$. You have $$\\vert h(x)-h(y) \\vert = \\left \\vert\\frac{x}{1+x}-\\frac{y}{1+y}\\right\\vert=\\frac{\\vert x-y \\vert}{(1+x)(1+y)} \\le \\vert x -y \\vert.$$\nThen study the case $x < y <0$. And finally the last one $x < 0 < y$\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29584,"cells":{"text":{"kind":"string","value":"Q:\n\nWhat is the purpose of the colon after the flag in some shell command options?\n\nSee this example, taken from the O'Reilly book Classic Shell Scripting:\nsort -t: -k1,1 /etc/passwd\n\nWhy is there a : after the t? It doesn't seem to be needed, nor documented in man, but I keep seeing it in examples.\n\nA:\n\nIt's just the argument to the -t option, specifying that fields are separated by colons in the input file.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29585,"cells":{"text":{"kind":"string","value":"Q:\n\nWhen should I answer/close \"Why doesn't my code work?\" questions?\n\nI am a little confused about how to deal with \"Why doesn't my code work?\" questions. On one hand, they appear to be acceptable under the MVCe rules. On the other hand, I fail to see how they could possibly be useful to anyone, even the OP, after the OP learns the answer. They can't ever be closed as a duplicate, because each person's code is a 'snowflake'. 
And half the time, the bug(s) are really trivial (slight logic error or tool misuse).\nSo if these are actually bad questions, what should I be doing about them?\nIf they aren't bad questions, how do they contribute to the value of SO?\n\nA:\n\nIf the question is just a code dump and a \"why doesn't this work\", then there's a close reason specifically for that:\n\nQuestions seeking debugging help (\"why isn't this code working?\") must include the desired behavior, a specific problem or error and the shortest code necessary to reproduce it in the question itself. Questions without a clear problem statement are not useful to other readers. See: How to create a Minimal, Complete, and Verifiable example.\n\nIf the question contains a code snippet, an explanation of what it should do, an explanation of why it's not doing what it should be doing, and the code snippet is capable of reproducing that erroneous behavior, then it's a good question. It could be closed as a duplicate of any other question with the same problem, and others could be closed as a duplicate of it. Such a question could also have other problems (the description of the problem or desired behavior may not be clear, the scope of the problem could be Too Broad, it could be just off topic, etc.) and if the post has any other problems, feel free to address them as appropriate.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29586,"cells":{"text":{"kind":"string","value":"Q:\n\nxpath - if else expression in Python\n\nI have the following table structure:\nIn the gender column when a value exists, the gender is displayed between the tag, but when the tag does not exist it is not displayed and the value is a special character &nbsp;\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
NAME    AGE    GENDER
MARIA   25     F
JOHN    22     &nbsp;
PAUL    36     &nbsp;
DEREK   16     M
\n\nI'm doing the following:\nfor table in result.xpath('//table[@class=\"first\"]'): \n for i, tr in enumerate(table.xpath('//tr')):\n for j, td in enumerate(tr.xpath('td/div/|td')):\n if td.text == '&nbsp;':\n print '---'\n else:\n print td.text\n\nHow to print '---' if the character &nbsp exists in td.text?\n\nA:\n\n&nbsp; is an entity reference to the no-break space character (Unicode code point: U+00A0). To test if the text content of an element is equal to that character, you can use this:\nif td.text == u'\\u00A0':\n\nComplete demonstration:\nfrom lxml import html\n\ntable = html.parse(\"table.html\")\n\nfor tr in table.xpath('//tr'):\n for td in tr.xpath('td/div|td'):\n if td.text == u'\\u00A0':\n print 'BLANK VALUE'\n else:\n print td.text\n\nOutput:\nNone\nNone\nNone\nNone\nNone\nNone\nNAME\nNone\nAGE\nNone\nGENDER\nNone\nNone\nMARIA\nNone\n25\nNone\nF\nNone\nNone\nJOHN\nNone\n22\nBLANK VALUE\nNone\nNone\nPAUL\nNone\n36\nBLANK VALUE\nNone\nNone\nDEREK\nNone\n16\nNone\nM\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29587,"cells":{"text":{"kind":"string","value":"Q:\n\n\"I have X in my blood\" arguments\n\nLately, I have been hearing the argument \"Oh, well I have Indian(Native American) in my blood, and 'redskin' does not offend me so why change the name?\" coming from people who claim their great-great-great grandmother was from one of the hundreds of tribes from the area. \nI know it's been an argument made before, and I've always found it to be silly. There are some obvious arguments to be made from that statement: They haven't experienced life the same way as someone who is 100% native american, or that one person from a large group is not a representative of said group. \nBut now that I am hearing this claim being made more and more, I was wondering what fallacy this would fall under? It seems to be similar to this argument, \"I have X friends so I can't be racist against X people.\" But I feel it is a bit more than that? \n\nA:\n\nThis can be a case of the Cherry Picking fallacy:\n\nCherry picking, suppressing evidence, or the fallacy of incomplete evidence is the act of pointing to individual cases or data that seem to confirm a particular position, while ignoring a significant portion of related cases or data that may contradict that position. \n\nThe person making the argument picks a case which supports his point, ignoring the cases that don't support his point.\n\nA:\n\nIn my experience, this is not a logical error, so much as an unarticulated moral premise.\nThe speaker holds an underlying assumption here that if it is possible not to be bothered by this kind of thing, one is morally obligated not be bothered by it, or at least to do one's best to not act upon being bothered.\nUsing himself as evidence this is possible, he is ashamed of the offended members of his group, who are not trying hard enough to get along with the rest of the world.\nHe is presuming the ultimate value of some version of democratic social harmony: non-interference in the majority's autonomy, 'Give the benefit of the doubt' or 'Go along, get along'. 
And this is in the foreground to a degree that he denies consideration to more important moral concerns.\nYou can tell the difference between this and suppression of evidence or 'cherry picking' because, if you render the argument statistical, it does not matter unless the numbers are hugely predominant, and often even then.\nHe is not unaware he is an exception, he embraces the exceptionality and considers it morally superior.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29588,"cells":{"text":{"kind":"string","value":"Q:\n\nHow to calculate difference of time between two records using Scala?\n\nI want to calculate time difference between events of a session using Scala.\n-- GIVEN\nSource is a csv file as shown below:\nHEADER \n\"session\",\"events\",\"timestamp\",\"Records\"\nDATA\n\"session_1\",\"event_1\",\"2015-01-01 10:10:00\",100\n\"session_1\",\"event_2\",\"2015-01-01 11:00:00\",500\n\"session_1\",\"event_3\",\"2015-01-01 11:30:00\",300\n\"session_1\",\"event_4\",\"2015-01-01 11:45:00\",300\n\"session_2\",\"event_1\",\"2015-01-01 10:10:00\",100\n\"session_2\",\"event_2\",\"2015-01-01 11:00:00\",500\n\nREQUIRED OUTPUT\nHEADER \n\"session\",\"events\",\"time_spent_in_minutes\",\"total_records\"\nDATA\n\"session_1\",\"event_1\",\"50\",100\n\"session_1\",\"event_2\",\"30\",600\n\"session_1\",\"event_3\",\"15\",900\n\"session_1\",\"event_4\",\"0\",1200\n\"session_2\",\"event_1\",\"50\",100\n\"session_2\",\"event_2\",\"0\",600\n\nWhere time_spend_in_minutes is difference between current_event and next event for a given session.\nHeader is not required in target but good to have.\nI am new to Scala so here what i have so far:\n$ cat test.csv\n\"session_1\",\"event_1\",\"2015-01-01 10:10:00\",100\n\"session_1\",\"event_2\",\"2015-01-01 11:00:00\",500\n\"session_1\",\"event_3\",\"2015-01-01 11:30:00\",300\n\"session_1\",\"event_4\",\"2015-01-01 11:45:00\",300\n\"session_2\",\"event_1\",\"2015-01-01 10:10:00\",100\n\"session_2\",\"event_2\",\"2015-01-01 11:00:00\",500\n\nscala> val sessionFile = sc.textFile(\"test.csv\").\nmap(_.split(',')).\nmap(e => (e(1).trim, Sessions(e(0).trim,e(1).trim,e(2).trim,e(3).trim.toInt))).\nforeach(println)\n\n(\"event_1\",Sessions(\"session_2\",\"event_1\",\"2015-01-01 10:10:00\",100))\n(\"event_1\",Sessions(\"session_1\",\"event_1\",\"2015-01-01 10:10:00\",100))\n(\"event_2\",Sessions(\"session_2\",\"event_2\",\"2015-01-01 11:00:00\",500))\n(\"event_2\",Sessions(\"session_1\",\"event_2\",\"2015-01-01 11:00:00\",500))\n(\"event_3\",Sessions(\"session_1\",\"event_3\",\"2015-01-01 11:30:00\",300))\n(\"event_4\",Sessions(\"session_1\",\"event_4\",\"2015-01-01 11:45:00\",300))\nsessionFile: Unit = ()\n\nscala>\n\nA:\n\nHere is a solution that uses joda time library.\nval input = \n\"\"\"\"session_1\",\"event_1\",\"2015-01-01 10:10:00\",100\n \"session_1\",\"event_2\",\"2015-01-01 11:00:00\",500\n \"session_1\",\"event_3\",\"2015-01-01 11:30:00\",300\n \"session_1\",\"event_4\",\"2015-01-01 11:45:00\",300\n \"session_2\",\"event_1\",\"2015-01-01 10:10:00\",100\n \"session_2\",\"event_2\",\"2015-01-01 11:00:00\",500\"\"\"\n\nCreate RDD from text input, can be read from file using sc.textFile\nimport org.joda.time.format._\nimport org.joda.time._\n\ndef strToTime(s: String):Long = { \n DateTimeFormat.forPattern(\"\"\"\"yyyy-MM-dd HH:mm:ss\"\"\"\")\n .parseDateTime(s).getMillis()/1000 \n}\n\nval r1 = sc.parallelize(input.split(\"\\n\"))\n .map(_.split(\",\"))\n .map(x => (x(0), (x(1), x(2), x(3))))\n .groupBy(_._1)\n 
.map(_._2.map{ case(s, (e, timestr, r)) => \n (s, (e, strToTime(timestr), r))}\n .toArray\n .sortBy( z => z match { \n case (session, (event, time, records)) => time}))\n\nConverted time from \"2015-01-01 10:10:00\" to seconds from epoch, and sorted by time.\nval r2 = r1.map(x => x :+ { val y = x.last; \n y match { \n case (session, (event, time, records)) => \n (session, (event, time, \"0\")) }})\n\nAdded an extra event in each session, with all params same as last event of session except record count. \nThis allows time-duration calculation to provide \"0\" in last event.\nUse sliding to get pairs of events.\nval r3 = r2.map(x => x.sliding(2).toArray)\n\nval r4 = r3.map(x => x.map{ \n case Array((s1, (e1, t1, c1)), (s2, (e2, t2, c2))) => \n (s1, (e1, (t2 - t1)/60, c1)) } )\n\nUse scan to add records-count in incremental way.\nval r5 = r4.map(x => x.zip(x.map{ case (s, (e, t, r)) => r.toInt}\n .scan(0)(_+_)\n .drop(1)))\n\nval r6 = r5.map(x => x.map{ case ((s, (e, t, r)), recordstillnow) =>\n s\"${s},${e},${t},${recordstillnow}\" })\n\nval r7 = r6.flatMap(x => x)\n\nr7.collect.mkString(\"\\n\")\n//\"session_2\",\"event_1\",50,100\n//\"session_2\",\"event_2\",0,600\n//\"session_1\",\"event_1\",50,100\n//\"session_1\",\"event_2\",30,600\n//\"session_1\",\"event_3\",15,900\n//\"session_1\",\"event_4\",0,1200\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29589,"cells":{"text":{"kind":"string","value":"Q:\n\nOrder By in Ascending Order\n\ni have used the following query for POPUP LOV in Apex \nselect VEN_INVOICE_REFNO as display_value, VEN_INVOICE_REFNO as return_value \n from VENDORINVOICE\n order by 1 asc;\n\ni want to show me the values as shown in diagram in numeric order i-e in ascending order.i have tried all the possible ways which i know but it don't works.\n\nA:\n\nit seem that your field datatype is string, convert it into numeric then apply the order by\nselect CAST(VEN_INVOICE_REFNO AS INTEGER) as display_value, VEN_INVOICE_REFNO as return_value \n from VENDORINVOICE\n order by 1 asc;\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29590,"cells":{"text":{"kind":"string","value":"Q:\n\nHow to make a cobweb diagram\n\nI am struggling making a cobweb diagram for the function $$x_{t+1}=8x_t/{1+2x_t}$$\nSo I understand when making the cobweb diagram, that I have to draw the line $y=x$\nBut where I have trouble understanding is how to draw the function in the graph. I am given the point $x_0 =0.5$ So I plug this into the function and get $2 = x_1$ and then I keep plugging in points. Do I graph points like $.5,2$ or $0,.5$? \n\nA:\n\nLet the expression on the right be $g(x)$. The method converges (sometimes) to the solution of the equation $x=g(x)$.\nThe cobweb diagram illustrates the movement from first guess $x_0$ to second guess $x_1$ etc.\nDraw a line from $(x_0,0)$ up to $(x_0,x1)$.\nThen across to $(x_1,x_1)$.\nThen up or down to $(x_1,x_2)$.\nThen across to $(x_2,x_2)$.\nEtc\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29591,"cells":{"text":{"kind":"string","value":"Q:\n\nWhy size of classes is larger in case of virtual inheritance?\n\nVirtual base class is a way of preventing multiple instances of a given class appearing in an inheritance hierarchy when using multiple inheritance . 
Then for the following classes\nclass level0 {\n int a;\n public :\n level0();\n};\n\nclass level10:virtual public level0 {\n int b;\n public :\n level10();\n};\n\nclass level11 :virtual public level0 {\n int c;\n public :\n level11();\n};\n\nclass level2 :public level10,public level11 { \n int d;\n public:\n level2();\n};\n\nI got following sizes of the classes\n\nsize of level0 4\nsize of level10 12\nsize of level11 12\nsize of level2 24\n\nbut when I removed virtual from inheritance of level10 and level11 I got following output \n\nsizeof level0 4\nsizeof level10 8\nsizeof level11 8\nsizeof level2 20\n\nIf virtual inheritance prevents multiple instances of a base class, then why size of classes is greater in case of virtual inheritance?\n\nA:\n\nBecause when using virtual inheritence, the compiler will create* a vtable to point to the correct offsets for the various classes, and a pointer to that vtable is stored along with the class.\n\n\"Will create\" -- vtables are not dictated by the Standard, but the behaviors implied by virtual inheritence is. Most compilers use vtables to implement the functionality dictated by the Standard.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29592,"cells":{"text":{"kind":"string","value":"Q:\n\nCombining information from multiple studies to estimate the mean and variance of normally distributed data - Bayesian vs meta-analytic approaches\n\nI have reviewed a set of papers, each reporting the observed mean and SD of a measurement of $X$ in its respective sample of known size, $n$. I want to make the best possible guess about the likely distribution of the same measure in a new study that I am designing, and how much uncertainty is in that guess. I am happy to assume $X \\sim N(\\mu, \\sigma^2$). \nMy first thought was meta-analysis, but the models typically employed focus on point estimates and corresponding confidence intervals. However, I want to say something about the full distribution of $X$, which in this case would also including making a guess about the variance, $\\sigma^2$. \nI have been reading about possible Bayeisan approaches to estimating the complete set of parameters of a given distribution in light of prior knowledge. This generally makes more sense to me, but I have zero experience with Bayesian analysis. This also seems like a straightforward, relatively simple problem to cut my teeth on. \n1) Given my problem, which approach makes the most sense and why? Meta-analysis or a Bayesian approach?\n2) If you think the Bayesian approach is best, can you point me to a way to implement this (preferably in R)? \nRelated question\nEDITS:\nI have been trying to work this out in what I think is a 'simple' Bayesian manner. \nAs I stated above, I am not just interested in the estimated mean, $\\mu$, but also the variance,$\\sigma^2$, in light of prior information, i.e. $P(\\mu, \\sigma^2|Y)$\nAgain, I know nothing about Bayeianism in practice, but it didn't take long to find that the posterior of a normal distribution with unknown mean and variance has a closed form solution via conjugacy, with the normal-inverse-gamma distribution. \nThe problem is reformulated as $P(\\mu, \\sigma^2|Y) = P(\\mu|\\sigma^2, Y)P(\\sigma^2|Y)$.\n$P(\\mu|\\sigma^2, Y)$ is estimated with a normal distribution; $P(\\sigma^2|Y)$ with an inverse-gamma distribution. \nIt took me a while to get my head around it, but from these links(1, 2) I was able, I think, to sort how to do this in R. 
\nI started with a data frame made up from a row for each of 33 studies/samples, and columns for the mean, variance, and sample size. I used the mean, variance, and sample size from the first study, in row 1, as my prior information. I then updated this with the information from the next study, calculated the relevant parameters, and sampled from the normal-inverse-gamma to get the distribution of $\\mu$ and $\\sigma^2$. This gets repeated until all 33 studies have been included. \n# Loop start values values\n\n i <- 2\n k <- 1\n\n# Results go here\n\n muL <- list() # mean of the estimated mean distribution\n varL <- list() # variance of the estimated mean distribution\n nL <- list() # sample size\n eVarL <- list() # mean of the estimated variance distribution\n distL <- list() # sampling 10k times from the mean and variance distributions\n\n# Priors, taken from the study in row 1 of the data frame\n\n muPrior <- bayesDf[1, 14] # Starting mean\n nPrior <- bayesDf[1, 10] # Starting sample size\n varPrior <- bayesDf[1, 16]^2 # Starting variance\n\n for (i in 2:nrow(bayesDf)){\n\n# \"New\" Data, Sufficient Statistics needed for parameter estimation\n\n muSamp <- bayesDf[i, 14] # mean\n nSamp <- bayesDf[i, 10] # sample size\n sumSqSamp <- bayesDf[i, 16]^2*(nSamp-1) # sum of squares (variance * (n-1))\n\n# Posteriors\n\n nPost <- nPrior + nSamp\n muPost <- (nPrior * muPrior + nSamp * muSamp) / (nPost) \n sPost <- (nPrior * varPrior) + \n sumSqSamp + \n ((nPrior * nSamp) / (nPost)) * ((muSamp - muPrior)^2)\n varPost <- sPost/nPost\n bPost <- (nPrior * varPrior) + \n sumSqSamp + \n (nPrior * nSamp / (nPost)) * ((muPrior - muSamp)^2)\n# Update \n\n muPrior <- muPost\n nPrior <- nPost\n varPrior <- varPost\n\n# Store\n\n muL[[i]] <- muPost\n varL[[i]] <- varPost\n nL[[i]] <- nPost\n eVarL[[i]] <- (bPost/2) / ((nPost/2) - 1)\n\n# Sample\n\n muDistL <- list() \n varDistL <- list()\n\n for (j in 1:10000){\n varDistL[[j]] <- 1/rgamma(1, nPost/2, bPost/2)\n v <- 1/rgamma(1, nPost/2, bPost/2)\n muDistL[[j]] <- rnorm(1, muPost, v/nPost)\n }\n\n# Store \n\n varDist <- do.call(rbind, varDistL)\n muDist <- do.call(rbind, muDistL)\n dist <- as.data.frame(cbind(varDist, muDist))\n distL[[k]] <- dist\n\n# Advance\n\n k <- k+1 \n i <- i+1\n\n }\n\n var <- do.call(rbind, varL)\n mu <- do.call(rbind, muL)\n n <- do.call(rbind, nL)\n eVar <- do.call(rbind, eVarL)\n normsDf <- as.data.frame(cbind(mu, var, eVar, n)) \n colnames(seDf) <- c(\"mu\", \"var\", \"evar\", \"n\")\n normsDf$order <- c(1:33)\n\nHere is a path diagram showing how the $E(\\mu)$ and $E(\\sigma^2)$ change as each new sample is added. \n\nHere are the desnities based on sampling from the estimated distributions for the mean and variance at each update. \n\nI just wanted to add this in case it is helpful for someone else, and so that people in-the-know can tell me whether this was sensible, flawed, etc. \n\nA:\n\nThe two approaches (meta-analysis and Bayesian updating) are not really that distinct. Meta-analytic models are in fact often framed as Bayesian models, since the idea of adding evidence to prior knowledge (possibly quite vague) about the phenomenon at hand lends itself naturally to a meta-analysis. An article that describes this connection is:\nBrannick, M. T. (2001). Implications of empirical Bayes meta-analysis for test validation. 
Journal of Applied Psychology, 86(3), 468-480.\n(the author uses correlations as the outcome measure for the meta-analysis, but the principle is the same regardless of the measure).\nA more general article on Bayesian methods for meta-analysis would be:\nSutton, A. J., & Abrams, K. R. (2001). Bayesian methods in meta-analysis and evidence synthesis. Statistical Methods in Medical Research, 10(4), 277-303.\nWhat you seem to be after (in addition to some combined estimate) is a prediction/credibility interval that describes where in a future study the true outcome/effect is likely to fall. One can obtain such an interval from a \"traditional\" meta-analysis or from a Bayesian meta-analytic model. The traditional approach is described, for example, in:\nRiley, R. D., Higgins, J. P., & Deeks, J. J. (2011). Interpretation of random effects meta-analyses. British Medical Journal, 342, d549.\nIn the context of a Bayesian model (take, for example, the random-effects model described by equation 6 in the paper by Sutton & Abrams, 2001), one can easily obtain the posterior distribution of $\\theta_i$, where $\\theta_i$ is the true outcome/effect in the $i$th study (since these models are typically estimated using MCMC, one just needs to monitor the chain for $\\theta_i$ after a suitable burn-in period). From that posterior distribution, one can then obtain the credibility interval.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29593,"cells":{"text":{"kind":"string","value":"Q:\n\nWhy does a BigDecimal scale changes if accessed through an association?\n\nI have two Ruby on Rails models Farm and Harvest. A farm belongs to a harvest. Here are the models: \nclass Farm < ActiveRecord::Base\n acts_as_singleton\n belongs_to :harvest\n validates :harvest, presence: true, allow_blank: true\n serialize :harvest_time, Tod::TimeOfDay\n validates :harvest_time, presence: true, allow_blank: true\n validates :hash_rate, presence: true\n validates_with HashRateValidator\nend\n\nclass Harvest < ActiveRecord::Base\n belongs_to :user\n validates :user, presence: true\n validates :date, presence: true\n validates :amount, presence: true\n validates :identifier, presence: true\n validates :amount, numericality: { :greater_than => 0 }\nend\n\nThere is only one Farm (accomplished thanks to the acts as singleton gem). Every time a harvest is done the harvest association from the farm changes, since it always have to point to the latest harvest. Since I am using a Farm as a singleton model I update the Farm using the following code: \n@harvest = Harvest.new(\n :date => DateTime.now,\n :amount => amount,\n :identifier => new_identifier,\n :user => current_user,\n :assigned => false\n)\n\nif @harvest.save\n Farm.instance.update_attributes(:harvest => @harvest)\n byebug\n\nThe weird thins is that the values of the harvest amount ans the amount from the harvest assigned to the farm do not match after this: \n(byebug) Farm.instance.harvest.amount\n435.435\n\n(byebug) @harvest.amount\n435.435345343\n\n(byebug) Farm.instance.harvest.id\n12\n\n(byebug) @harvest.id\n12\n\nThe amount decimal is suposed to have scale to 8 and precision to 6 (from the migration), here is the relevant part of the schema.rb file:\ncreate_table \"harvests\", force: :cascade do |t|\n t.datetime \"date\"\n t.decimal \"amount\", precision: 6, scale: 8\n t.integer \"identifier\"\n t.datetime \"created_at\", null: false\n t.datetime \"updated_at\", null: false\n ...\n end\n\nSo, what's going on here? 
The amount should be the exact same value!\n\nA:\n\nI figured it out. Scale and precision did not make sense. Precision is the amount of digits on the BigDecimal amount, scale is the amount of those digits that appear to the right the decimal point. Since precision was set to 6 scale could not accommodate 8 digits after the decimal point. So when the number came from the database it was truncated, when it came from memory it had all its digits after the decimal point. I fixed it by setting precision to 18 and scale to 8, which means 18 digits in total and 8 of those appearing to the right of the decimal points. \nSqlite allowed the incoherent precision => 6 and scale => 8. Postgres did not.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29594,"cells":{"text":{"kind":"string","value":"Q:\n\nWhat is the logic behind when JavaScript throws a ReferenceError?\n\nI've been using JavaScript for years but have been trying to increase my deep, under-the-hood type knowledge of the language lately. I'm a bit confused about what the logic is behind when JavaScript throws a ReferenceError.\nFor example, none of these throw a referenceError, but still write undefined to the console:\nfunction foobar(foo)\n{\n var bar = foo;\n console.log(bar);\n}\nfoobar();\n\nor\nvar foo = undefined;\nvar bar = foo;\nconsole.log(bar);\n\nor\nvar foo;\nvar bar = foo;\nconsole.log(bar);\n\nbut this obviously does throw a ReferenceError error on the first line without writing to the console:\nvar bar = foo;\nconsole.log(bar);\n\nSo it seems that having a variable in a parameter list or declaring it will stop a referenceError from being thrown - even though the variable is still 'undefined'.\nDoes anyone know what's going on under the hood or what the hard and fast rules are surrounding this? Does anyone know why these aren't considered referenceErrors?\n\nA:\n\nThere's a difference in using a variable that exists but has an undefined value, and using a variable that doesn't exist and was never declared.\nThe latter will create a reference error as you're trying to reference something that doesn't exists and has not been declared.\nOn the other hand, when you do \nvar foo;\n\nfoo does exists, and it has been declared, it's value is just undefined, so you can still reference it without throwing an error.\nIn other words, trying to reference a variable that hasn't been declared will throw a reference error, while referencing declared variables will never throw a reference error, regardless of wether or not a value has been set for that variable.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29595,"cells":{"text":{"kind":"string","value":"Q:\n\nGoogle App Engine Cron Job\n\nI have created a cron.xml file and a servlet which describes the job.\nNow when i compile and login as an admin, local development dashboard doesn't show Cron Jobs link.\n\nA:\n\nLocal development server does not have the Cron Jobs link neither does it execute cron jobs. The actual appengine will show cron jobs and will execute them.\nYou can manually execute cron jobs on local server by visiting their urls. 
e.g.\nhttp://localhost:8888/FindReservedBooksTask.\nBTW the cron.xml file should be in the war/WEB-INF directory.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29596,"cells":{"text":{"kind":"string","value":"Q:\n\nA* Search - least number of hops?\n\nI'm trying to create an A* pathfinding algorithm, however I'm having a little trouble getting off the ground with it. A little background:\nI am by no means versed in pathfinding algorithms, however I did touch upon this subject a couple years ago (I've since forgotten everything I've learned). I play EVE Online, which is an online game about internet spaceships. The developers release data dumps for static information (items in game, solar system locations, etc). I am trying to find the shortest route from solar system A to solar system B.\nTake a look at this map: http://evemaps.dotlan.net/map/UUA-F4 That is one region in the game, with each node being a system. I would like to compute the shortest distance between any two of those systems.\nMy issue: everything that I've read online about A* is talking about incorporating the distance between two nodes (for example, the distance between two cities) to help compute the shortest path. That doesn't help my case, as I'm more interested in the number of hops (node 1 > node 2 > node 3) rather than the distance between those hops. I do not know how to modify the A* algorithm to incorporate this.\nThe information that I have in the database:\nA list of all systems and their neighbors (so, systemX links with systemA and systemB)\nx, y, and z coordinates of all systems in a 3D grid\nIf anyone can point me in the right direction, that would be great. I'm looking to use this in PHP, however I've also started to work in Python a bit so that'll work too. \nExample data can be provided on request if needed.\nEDIT\nAs some have pointed out, the 'cost' associated with each jump would simply be 1. However, with A*, you also need a heuristic that estimates the distance from the current node to the target node. I'm not exactly sure how to go about determining this value, as I'm not sure of the remaining hops. As stated, I do have the 3D coordinates (x,y,z) for every node, but I'm not sure if this could give any insight as the physical distance between each node is not of concern. I do know that no path spans more than 99 hops.\nEDIT 2\nMySQL data for the example region.\nto -> from data: http://pastebin.com/gTuwdr7h\nSystem information (x,y,z cooridinates if needed): http://pastebin.com/Vz3FD3Kz\n\nA:\n\nIf the number of \"hops\" is what matters to you, then consider that to be your distance, meaning that if the two locations are connected by a single hop, the distance is one. \nFor A*, you'll need two things:\n\nThe costs from one location to each neighbor, in your case, this seems to be constant (hops).\nAn heuristic, that estimates the cost of going from your current \"node\" or location to the goal. How you can estimate this depends a lot on your problem. 
It's important that your heuristic doesn't *over*estimates the true cost, or else A* won't be able to guarantee the best result.\n\nA:\n\nTake the upper part of the linked graph:\n\nAssume that the lines represent 2 way (i.e., you can go to or from any linked node) and that the black lines are a 'cost' of 1 and the red lines are a 'cost' of 2.\nThat structure can be represented by the following Python data structure:\ngraph = {'Q-KCK3': {'3C-261':1, 'L-SDU7':1},\n 'L-SDU7': {'Q-KCK3':1, '3C-261':1,'4-IPWK':1},\n '3C-261': {'4-IPWK':1,'9K-VDI':1,'L-SDU7':1,'U8MM-3':1},\n 'U8MM-3': {'9K-VDI':1,'3C-261':1, '9K-VDI':1, 'Q8T-MC':2},\n 'Q8T-MC': {'U8MM-3':2, 'H55-2R':1, 'VM-QFU':2},\n 'H55-2R': {'Q8T-MC':1, '9XI-OX':1, 'A3-PAT':1, 'P6-DBM':1},\n 'P6-DBM': {'A3-PAT':1, 'H55-2R':1},\n 'A3-PAT': {'P6-DBM':1, 'H55-2R':1, '9XI-OX':1,'YRZ-E4':1},\n 'YRZ-E4': {'A3-PAT':1}, \n 'VM-QFU': {'IEZW-V':1, 'PU-128':2},\n 'IEZW-V': {'VM-QFU':1, 'PU-128':1, 'B-DX09':1},\n 'PU-128': {'VM-QFU':1, 'B-DX09':1, 'IEZW-V':1},\n 'B-DX09': {'IEZW-V':1, 'PU-128':1, '1TS-WIN':1},\n '1TS-WIN': {'B-DX09':1, '16-31U':1},\n '16-31U': {'1TS-WIN':1}\n }\n\nNow you can define a recursive function to navigate that data:\ndef find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if start not in graph:\n return []\n paths = []\n for node in graph[start]:\n if node not in path:\n newpaths = find_all_paths(graph, node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths \n\ndef min_path(graph, start, end):\n paths=find_all_paths(graph,start,end)\n mt=10**99\n mpath=[]\n print '\\tAll paths:',paths\n for path in paths:\n t=sum(graph[i][j] for i,j in zip(path,path[1::]))\n print '\\t\\tevaluating:',path, t\n if t{}:{}'.format(i,j,graph[i][j]) for i,j in zip(mpath,mpath[1::]))\n e2=str(sum(graph[i][j] for i,j in zip(mpath,mpath[1::])))\n print 'Best path: '+e1+' Total: '+e2+'\\n' \n\nNow demo:\nmin_path(graph,'Q-KCK3','A3-PAT')\nmin_path(graph,'Q-KCK3','16-31U')\n\nPrints:\n All paths: [['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT']]\n evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'] 7\n evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT'] 6\n evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'] 8\n evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT'] 7\nBest path: Q-KCK3->3C-261:1\n3C-261->U8MM-3:1\nU8MM-3->Q8T-MC:2\nQ8T-MC->H55-2R:1\nH55-2R->A3-PAT:1 Total: 6\n\n All paths: [['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 
'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U']]\n evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 10\n evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 11\n evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 11\n evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 12\n evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 11\n evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 12\n evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 12\n evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 13\nBest path: Q-KCK3->3C-261:1\n3C-261->U8MM-3:1\nU8MM-3->Q8T-MC:2\nQ8T-MC->VM-QFU:2\nVM-QFU->IEZW-V:1\nIEZW-V->B-DX09:1\nB-DX09->1TS-WIN:1\n1TS-WIN->16-31U:1 Total: 10\n\nIf you want the minimum number of hops, just modify min_path to return the shortest list length rather than the minimum total cost of the hops. Or, make the cost of each hop 1. \nHave a look at my previous answer regarding trains. \n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29597,"cells":{"text":{"kind":"string","value":"Q:\n\nTeachable AI Chatbot\n\nI'm starting on AI chatbots and don't know where to actually start.\nwhat I've imagined is something like this:\n\nEmpty chat bot that doesn't know anything\nLearns when user asks question and if the bot doesn't know the answer, it'd ask for it\nRecords all the data learned and parse synonymous questions\n\nExample procedure:\nUser: what is the color of a ripped mango?\nBot: I don't know [to input answer add !@: at the start]\nUser: !@:yellow\nUser: do you know the color of ripped mango?\nBot: yellow\n\nA:\n\nChatbots, or conversational dialogue systems in general, will have to be able to generate natural language and as you might expect, this is not something trivial. The state-of-the-art approaches usually mine conversations of human-human conversations (such as for example conversations on chat platforms like Facebook or Twitter, or even movie dialogs, basically things which are available in large quantities and resemble natural conversation). These conversations are then for example labelled as question-answer pairs, possibly using pretrained word embeddings. \nThis is an active area of research in the field of NLP. An example category of used systems is that of \"End-to-End Sequence-to-Sequence models\" (seq2seq). However, basic seq2seq models have a tendency to produce repetitive and therefore dull responses. More recent papers try to address this using reinforcement learning, as well as techniques like adversarial networks, in order to learn to choose responses. Another technique that improves the system is to extend the context of the conversation by allowing the model to see (more) prior turns, for example by using a hierarchical model.\nIf you don't really know where to start, I think you will find all the basics you will need in this free chapter of \"Speech and Language Processing.\" by Daniel Jurafsky & James H. Martin (August 2017). 
Good luck!\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29598,"cells":{"text":{"kind":"string","value":"Q:\n\nScrap/extract with Java, result from coinmarketcap.com\n\nI need to extract coinmarket cap volume (ex: Market Cap: $306,020,249,332) from top of page with Java, please see picture attached.\n\nI have used jsoup library in Java Eclipse but didn't extract volume. Jsoup extract only other attributes. Probably problem is from a java script library.\nAlso I have used html unit without success:\nimport java.io.IOException;\nimport java.util.List;\n\nimport com.gargoylesoftware.htmlunit.WebClient;\nimport com.gargoylesoftware.htmlunit.html.HtmlAnchor;\nimport com.gargoylesoftware.htmlunit.html.HtmlPage;\n\npublic class Testss {\n public static void main(String\\[\\] args) throws IOException {\n\n String url = \"https://coinmarketcap.com/faq/\";\n\n WebClient client = new WebClient();\n HtmlPage page = client.getPage(url);\n List anchors = page.getByXPath(\"//div\\[@class='col-sm-6 text-center'\\]//a\");\n for (Object obj : anchors) {\n HtmlAnchor a = (HtmlAnchor) obj;\n System.out.println(a.getTextContent().trim());\n }\n }\n}\n\nHow can I extract volume from this site with Java?\nThanks!\n\nA:\n\nCheck the network tab findout the exact request which is fetching the data, In your case its https://files.coinmarketcap.com/generated/stats/global.json\n\nAlso the request URL is the below one\nSo, Fetching the main URL will not give you what you require, For that you have to fetch the data from the request URL directly and parse it using any JSON library. SimpleJSON I can suggest in one of those.\nThe JSON data which you will get after hitting the url.\n\n{\r\n \"bitcoin_percentage_of_market_cap\": 55.95083004655126,\r\n \"active_cryptocurrencies\": 1324,\r\n \"total_volume_usd\": 21503093761,\r\n \"active_markets\": 7009,\r\n \"total_market_cap_by_available_supply_usd\": 301100436864\r\n}\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}},{"rowIdx":29599,"cells":{"text":{"kind":"string","value":"Q:\n\nJquery.Live events for onadd to dom elements\n\nWhat if I want not only to add events for all future added, but also want to put some data in them, execute it as an init event for them. Is there possible to use something from live tools?\n\nA:\n\nNo event exists in the dom for item creation. The only option you have is to use the liveQuery plugin which scans the dom every x milliseconds looking for selector matches. When a new element is found it will run a function.\nI do not really like the overhead of this however it may give you the desired functionality.\n\n"},"meta":{"kind":"string","value":"{\n \"pile_set_name\": \"StackExchange\"\n}"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":295,"numItemsPerPage":100,"numTotalItems":29950,"offset":29500,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1Nzc3NzA2MSwic3ViIjoiL2RhdGFzZXRzL3N1b2x5ZXIvcGlsZV9zdGFja2V4Y2hhbmdlIiwiZXhwIjoxNzU3NzgwNjYxLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.NFl0Xu115FeCZulhlzPaudggyWN1GXQSvdO54F7xEm1H7jqagDadycWb2PMWXMMz5nIXPL9F4NWOYDxpleqiAQ","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Q:

Javers - How to add extra commit metadata?

Currently Javers records the username and the timestamp of the audit in the commit metadata but I would like to store the user id as well. Is it possible to add more fields to Javers commit metadata like user id or ip address?

A:

Yes, commit properties. It's the Map of String properties that you can add to each commit and then use in queries. See example - https://javers.org/documentation/jql-examples/#commit-property-filter
{ "pile_set_name": "StackExchange" }
Q:

How to parse a loosely structured document

I am analyzing data feeds which have data somewhat like this

RAM 4 GB DDR3
RAM 16GB DIMM
memory 4GB DDR3 MHz // no value for MHz
memory 4GB DDR3 1333 MHz // note: the order of MHz is not fixed
ram 6GB, 1333 MHz, DDR3 // comma used as delimiter
Processor Intel Core i7-3612QM
Processor Intel Core i7 2630QM
processor i3-380,2.53 GHz // 380 used for model number instead of 380M, model number separated by '-' and clock speed separated by ','
Processor Core i3-380 2.53 GHz
Processor Intel Ci3 - 2330 (2nd Gen), 2.53 GHz // multiple symbols used as delimiters (',','-')
Hard drive 500GB 5400RPM
Hard Disk Drive 1.5 TB
Hard Disk 256 GB

Now I need to analyze what each specification means: for example, in "ram 6GB, 1333 MHz, DDR3" I need to figure out that 6GB is the capacity, 1333 MHz is the frequency and DDR3 is the type of RAM. But the problem, as you can see, is that these are very irregular (some entries have some fields and don't have others, and sometimes whitespace is used as the separator, sometimes ','s and sometimes '-'s). My first reaction was using regex, but I soon realised that it was stupid. Then I thought that I could split on the separator (',' in the above case), but even the separator is not fixed. Also this approach would be useless for entries like this

memory 4 GB 1333 MHz DDR3

Using whitespace as the separator for this entry would split it into "4", "GB", "1333" and "MHz", when actually "4 GB" and "1333 MHz" each belong together as single values.

Also, how can I programmatically decide that Intel Core i3, Core i3, i3-380 and Ci3 imply Intel Core i3? I understand that I have to tell the library once that Intel Core i3, Core i3 and Ci3 mean the same thing, but later, when analysing the text, it should be able to figure that out. The above lists of entries show how variable the entries can be. Is there some Python library (or one in any other language) that can help me in dealing with these tasks?

A:

If you're able to build a set of classes that directly correspond to each type of entry, then that's probably the way to go. For example, a class for RAM might be:

import re

class Memory:
    def __init__(self, s):
        if not 'RAM' in s and not 'memory' in s:
            raise ValueError("Not a string that describes RAM.")
        # use search (not match) so the capacity can appear anywhere in the string
        self.capacity = int(re.search(r'(\d+) ?GB', s).group(1))

Then just try each class until one fits.
{ "pile_set_name": "StackExchange" }
Q:

Slide up/down layout on click

I am looking to slide up/down a nested layout on its parent layout click. I.e. the parent layout will have a hidden child. On click I would like the parent's height to animate down (slide down) to fit the child layout. On click again I would like the child to animate up (slide up). Basically just animating the parent's height to show/hide the child.

I have found this which looks to work but seems like a lot of code: http://gmariotti.blogspot.com/2013/09/expand-and-collapse-animation.html

I have seen a lot of things using 'animateLayoutChanges' to animate things, however I cannot get that to work. I have tried this:

<LinearLayout
    android:id="@+id/parent"
    android:layout_width="match_parent"
    android:layout_height="wrap_content">

    <TextView
        android:id="@+id/text"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"/>

    <LinearLayout
        android:id="@+id/child"
        android:layout_width="match_parent"
        android:layout_height="match_parent"
        android:visibility="gone"
        android:animateLayoutChanges="true">

        <TextView
            android:id="@+id/message"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:text="Some text to show/hide"/>
    </LinearLayout>
</LinearLayout>

Then in code:

LinearLayout parent = (LinearLayout)findViewById(R.id.parent);
parent.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        LinearLayout child = (LinearLayout)findViewById(R.id.child);
        child.setVisibility(child.getVisibility() == View.VISIBLE ? View.GONE : View.VISIBLE);
    }
});

That sets the visibility of the child view correctly but there is absolutely no animation.

A:

Well, first of all android:animateLayoutChanges affects the child elements. So, obviously, if you are changing the properties of the element itself, it will not be animated. I think you can accomplish what you are trying to do in API 16 by enabling the LayoutTransition.CHANGING animation.

<LinearLayout
    android:id="@+id/parent"
    android:layout_width="match_parent"
    android:layout_height="wrap_content"
    android:orientation="vertical"
    android:animateLayoutChanges="true">

    <TextView
        android:id="@+id/text"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:text="Title"/>

    <TextView
        android:id="@+id/message"
        android:layout_width="wrap_content"
        android:layout_height="0dp"
        android:text="Some text to show/hide"/>
</LinearLayout>

LinearLayout parent = (LinearLayout)findViewById(R.id.parent);
parent.getLayoutTransition().enableTransitionType(LayoutTransition.CHANGING);

View text = findViewById(R.id.text);
text.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        View message = findViewById(R.id.message);
        ViewGroup.LayoutParams params = message.getLayoutParams();
        params.height = params.height == 0 ? ViewGroup.LayoutParams.WRAP_CONTENT : 0;
        message.setLayoutParams(params);
    }
});
{ "pile_set_name": "StackExchange" }
Q:

SBS 2003 - Rename Domain Netbios Name Without Renaming Active Directory Domain Name

I have a client running a Small Business Server 2003 R2 network with a strangely named domain. The fully qualified domain name is a straightforward contoso.local. The domain NetBIOS name (shown as the pre-Windows 2000 domain name), however, is contoso2. While not good esthetically, the domain name mismatch has not presented any real problems until recently. After migrating to Windows 7, when users try to remote into their PCs, the default domain name at the login prompt is contoso (eg "contoso\username"). This is incorrect (it should be "contoso2\..." or "contoso.local\...") and is causing all kinds of grief for the users. So now I am looking at finally fixing this domain name mismatch.

Through my Google research I've found the following resources:

http://www.techieshelp.com/how-to-rename-a-server-2008-domain/
http://social.technet.microsoft.com/wiki/contents/articles/1347.renaming-a-windows-server-2008-active-directory-domain-dsforum2wiki.aspx
http://technet.microsoft.com/en-us/library/cc794869.aspx

I'm not much of an Active Directory expert. These articles only discuss renaming a Server 2008 domain, so I'm not sure what applies to 2003 R2 or not. Seeing also that I'm renaming the Win2000 domain name but not the FQDN, I'm wondering if there are some steps that are changed or that I don't need to do. Thanks!

A:

Sounds as if you want to change the NetBIOS name, but not the DNS name. Windows 2000 domains do not support renames; the domain must have a forest functional level of 2003 or higher. See http://technet.microsoft.com/en-us/library/cc738208%28v=ws.10%29.aspx. You also cannot rename a domain that uses Exchange (see the article). When you do the rename you could change the NetBIOS name and leave the DNS name the same. Build a different test domain with a few VMs and test it out if you can. The rendom tool and more checklists are at http://technet.microsoft.com/en-us/windowsserver/bb405948.aspx
{ "pile_set_name": "StackExchange" }
Q:

Save data to two different resources

I'm trying to save a user's full record, but I depend on two separate tables. Profile data is saved in one resource, and the address in another. How can I write the code so that it saves the profile first and then, from the generated id, saves the address? Is that possible?

[screenshot: Profile form]
[screenshot: Address form]

Here is my Create User code:

export const BarberCreate = (props) => {
  return (
    <Create {...props}>
      <TabbedForm toolbar={<BarberCreateToolbar />}>
        <FormTab label="Perfil">
          <TextInput source="name" />
          <TextInput source="email" />
          <DateInput source="birthday" />
          <TextInput source="phone" placeholder="(99) 99999-9999" />
          <TextInput source="transport" />
        </FormTab>
        <FormTab label="Endereço">
          <TextInput source="street" label="Rua" />
          <TextInput source="city" label="Cidade" />
          <TextInput source="district" label="Bairro" />
        </FormTab>
      </TabbedForm>
    </Create>
  );
};

A:

The best would be to do that on the backend, possibly with transaction support, but if you can't, one way to go on the react-admin side is decorating the dataProvider.

v3

const userSaverDataProvider = dataProvider => ({
  ...dataProvider,
  create: async (resource, params) => {
    if (resource === 'users') {
      const profile = await dataProvider.create('profile', {data: {
        name: params.data.name,
        ....
      }})
      await dataProvider.create('address', {data: {
        profileID: profile.data.id,
        street: params.data.street,
        ...
      }})
      return profile
    }
    return dataProvider.create(resource, params)
  }
})

It may also be necessary to decorate createMany if you do bulk creation of users in react-admin. Also, check out https://github.com/FusionWorks/react-admin-google-maps, it might be useful.
{ "pile_set_name": "StackExchange" }
Q:

Knockout JS - databind to a literal?

Currently I have the following element

<h6 class="header">
    Chance of Precipitation:
    <span data-bind="text: PrecipitationLabel"></span>
</h6>

This works fine, but I don't really need a span tag in my case -- the observable only loads for display, so I don't need to update an element. I know I could bind with a computed/dependent variable that combines the "Chance..." text with PrecipitationLabel, but that takes some of the static markup out of the view, which is not ideal. Is there something similar to Razor's tags to output just a literal?

A:

The functionality you are looking for was added to github 11 days ago. The syntax would be:

<h6 class="header">
    Chance of Precipitation:
    <!-- ko text: PrecipitationLabel --><!-- /ko -->
</h6>

It is not yet included in any published download.

A:

Or you may use this,

<h6 class="header" data-bind="text: 'Chance of Precipitation: ' + PrecipitationLabel()" />
{ "pile_set_name": "StackExchange" }
Q:

Two dimensional strings in C++

I want to write something like 2d strings in C++. I tried with :

vector< vector<string> > table;
int m,n,i,j;
string s;
cin>>n>>m;
for(i=0;i<n;i++)
{
    for(j=0;j<m;j++)
    {
        cin>>s;
        table[i][j] = s;
    }
}
cout << "\n\n\n\n";
for(i=0;i<n;i++)
{
    for(j=0;j<m;j++)
    {
        cout<<table[i][j]<<" ";
    }
    cout<<"\n";
}

no compile errors, but when i enter input like:

10 20
.....#..............
.....#..............
.....#..............
.....#..............
######..............
.......###..........
.......#.#..........
.......###...#######
.............#.....#
.............#######

It gives me segmentation fault. Why ? What's wrong ? And how it should be done so it would work correctly ? Thank you.

A:

The question seems to imply that the data structure needed is a set of n lines with m characters each. There are two ways to think of this -- as an nxm char matrix, or as n m-character vectors (and a string is similar but not identical to vector<char>). So it seems you don't want a vector of vectors of strings, you want either a vector of vectors of chars, or just a vector of strings.

In any event, you have to allocate the appropriate amount of space before using table[i][j] or (slightly more idiomatic c++, but not necessary in this case since m and n are known beforehand) use something like push_back to add to the end.

Note also that the cin>>s reads an entire line from stdin (which makes the vector<string> solution a bit easier to deal with, I think).

A:

When inserting something new into a vector, you can't just allocate by index - you need to use the push_back method or something similar.

for(i=0;i<n;i++)
{
    vector<string> row;
    for(j=0;j<m;j++)
    {
        cin>>s;
        row.push_back(s);
    }
    table.push_back(row);
}
{ "pile_set_name": "StackExchange" }
Q:

Is there a maximum attainable beard thickness?

This other question asks if attuning to a Belt of Dwarvenkind grants the following benefit (and literally nothing else):

you have a 50 percent chance each day at dawn of growing a full beard if you're capable of growing one, or a visibly thicker beard if you already have one.

My question is about the latter part of that bonus (marked in bold). Say you attune to the belt and then wear it for a prolonged period of time. You never shave, so every other day (on average) your beard becomes "visibly thicker". Are there any mechanics governing how this works? Like, is there a maximum attainable beard thickness? Could a player reasonably argue that their beard has become so thick and dense that it acts as natural armor and grants them an AC boost or damage resistance? Would it eventually become an impediment, or even a hazard to the bearded character? For instance, does it become so dense that it's effectively solid and, while possibly granting a defensive boost, it also impedes movement and makes it difficult to breathe? Do the ever-thickening beard fibers ultimately become so dense that they form a singularity and the character dies? Or does it just do the boring thing, and fizzle out somewhere along the way?

This may all be GM fiat. However, the rules as written seem to imply that the beard gets perceptibly thicker every other day without end. If you have a beard, it gets thicker. Each day at dawn (more or less). If a player demands a strict interpretation, how far do (or can) you take it? What makes sense from a fairness standpoint if the player has gone to the effort of attuning an item just for this one particular benefit?

A:

It's asymptotic. There's a finite amount of mass available for the beard (based on how much the wearer eats, the weight their neck can support, etc.). Since the beard must continuously get thicker, it has to approach a limit, perhaps something like
$$b(t) = b_{max}(1-e^{-t/\tau})$$
where t starts at the time one starts growing a beard (so, for a dwarf, at birth).

What's really interesting is that the beard must not just grow thicker, but grow visibly thicker. This implies that dwarves, at least, can perceive infinitesimal differences in beard thickness, which means they can actually see everyone's facial hair growing all the time. Science!

A:

As you already guessed yourself, RAW this would be a GM fiat. If I apply some kind of logic (and a bit of humor) to this magic item concept, the beard will grow as thick as the thickest dwarven beard in the multiverse, because the Belt of Dwarvenkind should never betray its people by giving a non-dwarf the thickest beard there is; Moradin wouldn't approve of that. In the case of a Dwarf user, the belt would elevate his beard to the glorious thickness of Moradin's beard; a great honor for every dwarf.

A:

Hair sheds. When I have long hair, I seem to lose hair at a greater rate. Even if the number of hairs I lose is constant, since the average length of hairs lost is greater, I lose a greater volume of hair.

What we experience with beard growth in normal life without a magical belt is: When I've recently shaved, my beard is, indeed, visibly thicker the next morning. However, once my beard reaches a critical length, it sheds at a rate such that the rate of growth and the rate of shedding hit an equilibrium, and it is no longer visibly thicker the next morning.
Supposing that I have a magical belt that causes my beard to be visibly thicker the next morning, my beard length would have surpassed the rate at which it sheds. Therefore, my beard will no longer be at equilibrium, and will actually become less thick throughout the day. Once the beard becomes thick enough, it would shed at such a rate during the day as to become visibly less thick by evening. By morning, the beard could become visibly thicker than the previous evening, without actually being any thicker than the previous morning. This circumvents the paradox, and is in line with intuitive experience of hair growth.
{ "pile_set_name": "StackExchange" }
Q:

Going from using XMLSerializer to using the XNA Content Pipeline

A well known limitation of using the XNA Content Pipeline is that it is not included in the XNA redistributable. So, if you want to create an editor for your game, the designer must download the whole deal: your editor, your engine, XNA Game Studio and Visual Studio Express. Even then, I'm not sure you can compile your XML data into xnb outside of Visual Studio. So I decided to simply use XMLSerializer, which works fine.

However, I'm thinking that, once all the content of my game is done, prior to release, it would be great if I could convert the whole system into using the XNA Content Pipeline. In my mind, I think that the compiled xnb files would load faster than deserializing XML into objects. Is the conversion possible? More importantly, is it worth it?

Note: my intention is to release the game as an XBOX Live Indie Game.

A:

I'm afraid the conversion in the first place was probably a bad call. Depending on Visual Studio (Express) and XNA Game Studio isn't so bad, because they're free downloads. And I believe the WinForms 2 sample is a good starting point for getting the content pipeline working outside XNA. And loading from XNB should be faster than de-serializing XML.

As for the conversion back - it's probably not worth it. Possibly a better option would be to write a custom content processor that (rather than using XNA's built-in XML importer) loads your XML files and deserializes them into objects as normal. And then outputs those objects using the content pipeline's automatic XNB writing support. Then you can use Content.Load to load those objects back from the output XNB. That way you get to keep all the benefits of your XML format (including that it's already written), as well as all the benefits of using XNB in your game.
{ "pile_set_name": "StackExchange" }
Q: How to filter songs in listView I have a listView filled up with all songs on sdcard, can i filter them by name using an ediText or something else (like a SearchBar) and update the listview?Thanks, this is how I fill up the listview: @SuppressWarnings("deprecation") private void init_phone_music_grid() { System.gc(); String[] proj = { MediaStore.Audio.Media._ID, MediaStore.Audio.Media.DATA, MediaStore.Audio.Media.DISPLAY_NAME, MediaStore.Video.Media.SIZE }; musiccursor = managedQuery(MediaStore.Audio.Media.EXTERNAL_CONTENT_URI, proj, MediaStore.Audio.Media.DURATION + ">= 120000", null, null); count = musiccursor.getCount(); musiclist = (ListView) findViewById(R.id.listView1); musiclist.setAdapter(new MusicAdapter(getApplicationContext())); musiclist.setOnItemClickListener(musicgridlistener); mMediaPlayer = new MediaPlayer(); } private OnItemClickListener musicgridlistener = new OnItemClickListener() { public void onItemClick(AdapterView parent, View v, int position, long id) { System.gc(); music_column_index = musiccursor .getColumnIndexOrThrow(MediaStore.Audio.Media.DATA); musiccursor.moveToPosition(position); String filename = musiccursor.getString(music_column_index); String canzone=filename.replace("/storage/sdcard0/", ""); canzone=canzone.replace("Music/", ""); canzone=canzone.replace("media/", ""); canzone=canzone.replace("Ringtones/", ""); t=(TextView) findViewById(R.id.textView2); t.setWidth(130); t.setSingleLine(true); t.setText(canzone); try { if (mMediaPlayer.isPlaying()) { mMediaPlayer.reset(); } mMediaPlayer.setDataSource(filename); mMediaPlayer.prepare(); mMediaPlayer.start(); } catch (Exception e) { } } }; public class MusicAdapter extends BaseAdapter { private Context mContext; public MusicAdapter(Context c) { mContext = c; } public int getCount() { return count; } public Object getItem(int position) { return position; } public long getItemId(int position) { return position; } public View getView(int position, View convertView, ViewGroup parent) { System.gc(); String id = null; TextView tv; if (convertView == null) { tv = new TextView(mContext.getApplicationContext()); } else{ tv = (TextView) convertView; } musiccursor.moveToPosition(position); music_column_index = musiccursor.getColumnIndexOrThrow(MediaStore.Audio.Media.DISPLAY_NAME); id = musiccursor.getString(music_column_index); tv.setText(id); return tv; } } This is the updated code: protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_player); progressBar = (ProgressBar) findViewById(R.id.progressBar1); init_phone_music_grid(null); //Controllo(); edittext=(EditText) findViewById(R.id.editText1); edittext.addTextChangedListener(new TextWatcher() { @Override public void onTextChanged(CharSequence cs, int arg1, int arg2, int arg3) { } @Override public void beforeTextChanged(CharSequence arg0, int arg1, int arg2, int arg3) { } @Override public void afterTextChanged(Editable arg0) { init_phone_music_grid(edittext.getText().toString()); } }); } @SuppressWarnings("deprecation") private void init_phone_music_grid(String a) { System.gc(); String sortOrder=null; if(a!=null){ sortOrder=MediaStore.Audio.Media.TITLE +a; } String[] proj = { MediaStore.Audio.Media._ID,MediaStore.Audio.Media.DATA,MediaStore.Audio.Media.DISPLAY_NAME, MediaStore.Video.Media.SIZE }; musiccursor = managedQuery(MediaStore.Audio.Media.EXTERNAL_CONTENT_URI, proj, MediaStore.Audio.Media.DURATION + ">= 120000", null, sortOrder); count = musiccursor.getCount(); musiclist = (ListView) 
findViewById(R.id.listView1); musiclist.setAdapter(new MusicAdapter(getApplicationContext())); musiclist.setOnItemClickListener(musicgridlistener); mMediaPlayer = new MediaPlayer(); } private OnItemClickListener musicgridlistener = new OnItemClickListener() { public void onItemClick(AdapterView parent, View v, int position, long id) { System.gc(); music_column_index = musiccursor .getColumnIndexOrThrow(MediaStore.Audio.Media.DATA); musiccursor.moveToPosition(position); String filename = musiccursor.getString(music_column_index); String canzone=filename.replace("/storage/sdcard0/", ""); canzone=canzone.replace("Music/", ""); canzone=canzone.replace("media/", ""); canzone=canzone.replace("Ringtones/", ""); t=(TextView) findViewById(R.id.textView2); t.setWidth(130); t.setSingleLine(true); t.setText(canzone); try { if (mMediaPlayer.isPlaying()) { mMediaPlayer.reset(); } mMediaPlayer.setDataSource(filename); mMediaPlayer.prepare(); mMediaPlayer.start(); } catch (Exception e) { } } }; A: this is how you sort stuffs from mediastore String sortOrder = MediaStore.Audio.Media.TITLE + " ASC"; if you want to sort it by artists or something else..just replace the "TITLE"part with whatever you dersire.. If you find my answer useful,accept it Edit:Sorry i forgot where you will put this sort order thing musiccursor = managedQuery(MediaStore.Audio.Media.EXTERNAL_CONTENT_URI, proj, MediaStore.Audio.Media.DURATION + ">= 120000", null, null); in this line of node..replace the last "null" with sortorder,and you will get sorted list...
{ "pile_set_name": "StackExchange" }
Q: Conditional statement within group I have a dataframe in which I want to make a new column with values based on condition within groups. So for the dataframe below, I want to make a new column n_actions which gives Cond1. for the whole group GROUP the number 2 if a 6 appears in column STEP Cond 2. for the whole group GROUP the number 3 if a 9 appears in column STEP Cond 3. if not a 6 or 9 appears within column STEP for the GROUP, then 1 #dataframe start dataframe <- data.frame(group = c("A", "A", "A", "B", "B", "B", "B", "B", "B", "C", "C", "C", "D", "D", "D", "D", "D", "D", "D", "D", "D"), step = c(1, 2, 3, 1, 2, 3, 4, 5, 6, 1, 2, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9)) # dataframe desired dataframe$n_actions <- c(rep(1, 3), rep(2, 6,), rep(1, 3), rep(3, 9)) A: Another way with dplyr's case_when: library(dplyr) dataframe %>% group_by(group) %>% mutate( n_actions1 = case_when( 9 %in% step ~ 3, 6 %in% step ~ 2, TRUE ~ 1 ) ) Output: # A tibble: 21 x 3 # Groups: group [4] group step n_actions <fct> <dbl> <dbl> 1 A 1 1 2 A 2 1 3 A 3 1 4 B 1 2 5 B 2 2 6 B 3 2 7 B 4 2 8 B 5 2 9 B 6 2 10 C 1 1 11 C 2 1 12 C 3 1 13 D 1 3 14 D 2 3 15 D 3 3 16 D 4 3 17 D 5 3 18 D 6 3 19 D 7 3 20 D 8 3 21 D 9 3
{ "pile_set_name": "StackExchange" }
Q: Bash script for grepping string and placing them into array I'm looking way to make bash script that greps output of command and place strings into array and be able to randomly select 1 string from the array for example every minute and place it as variable, time for randomly selecting needs to be configurable. Command output: string string2 string3 Place of all these strings into array and randomly select one of them and place it as variable desired result: strings -> array <- randomly selecting from array every 1 minute and placing string selected as variable for further use A: In bash, you can use readarray with command substitution to capture newline-separated outputs into an array; for exmaple: readarray -t outputs < <(seq 10) Where I've used seq 10 to produce some output. This results in: $ declare -p outputs declare -a outputs='([0]="1" [1]="2" [2]="3" [3]="4" [4]="5" [5]="6" [6]="7" [7]="8" [8]="9" [9]="10")' To pseudorandomly select one of those elements every minute: while : do element=$(( RANDOM % ${#outputs[@]} )) var=${outputs[$element]} sleep 60 done Noting that bash arrays start at index zero, the $(( )) arithemtic says to use the value of $RANDOM modulo the number of elements in the outputs array.
{ "pile_set_name": "StackExchange" }
Q: Open CV libgtk2.0-dev and pkg-config error mac Hi I am fairly new to OpenCV and I am trying to get OpenCV demo-code for blur detection working on my mac but I get an error every time I run the code. Here is the code I try to run: # import the necessary packages from imutils import paths import argparse import cv2 def variance_of_laplacian(image): # compute the Laplacian of the image and then return the focus # measure, which is simply the variance of the Laplacian return cv2.Laplacian(image, cv2.CV_64F).var() # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--images", required=True, help="path to input directory of images") ap.add_argument("-t", "--threshold", type=float, default=100.0, help="focus measures that fall below this value will be considered 'blurry'") args = vars(ap.parse_args()) # loop over the input images for imagePath in paths.list_images(args["images"]): # load the image, convert it to grayscale, and compute the # focus measure of the image using the Variance of Laplacian # method image = cv2.imread(imagePath) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) fm = variance_of_laplacian(gray) text = "Not Blurry" # if the focus measure is less than the supplied threshold, # then the image should be considered "blurry" if fm < args["threshold"]: text = "Blurry" # show the image cv2.putText(image, "{}: {:.2f}".format(text, fm), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3) cv2.imshow("Image", image) key = cv2.waitKey(0) I run the code by executing: python detect_blur.py --images images This is my terminal output: OpenCV Error: Unspecified error (The function is not implemented. Rebuild the library with Windows, GTK+ 2.x or Carbon support. If you are on Ubuntu or Debian, install libgtk2.0-dev and pkg-config, then re-run cmake or configure script) in cvShowImage, file /Users/travis/build/skvark/opencv-python/opencv/modules/highgui/src/window.cpp, line 583 Traceback (most recent call last): File "detect_blur.py", line 37, in <module> cv2.imshow("Image", image) cv2.error: /Users/travis/build/skvark/opencv-python/opencv/modules/highgui/src/window.cpp:583: error: (-2) The function is not implemented. Rebuild the library with Windows, GTK+ 2.x or Carbon support. If you are on Ubuntu or Debian, install libgtk2.0-dev and pkg-config, then re-run cmake or configure script in function cvShowImage I have already installed GTK+ and pkg-config but I still get the same error. I am not sure what to do next. I am fairly confident the problem is not with the code but with the installation This is the tutorial I followed to install OpenCV http://www.pyimagesearch.com/2016/12/19/install-opencv-3-on-macos-with-homebrew-the-easy-way/ A: You might be using opencv-python , which doesn't support many features. You have to uninstall opencv-python and then reinstall or rebuild OpenCV. pip uninstall opencv-python On mac OS, you can reinstall OpenCV with brew uninstall opencv3 brew install opencv3 --with-ffmpeg -v Make sure you are using correct Python, you might have different versions of Python installed with either brew, pyenv, or anaconda. Check which python you are using by executing this: which python where /usr/local/bin/python is from brew, and you'll know the others from the keyword shown in path.
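As a quick way to see what you actually have installed, the snippet below (an assumption-laden sketch, not part of the original answer) prints the GUI-related lines from OpenCV's build report; a build compiled without a HighGUI backend (GTK, Cocoa, Qt) is consistent with the cvShowImage error above.

import cv2

print(cv2.__version__)
# Print only the GUI-related section of the build report to see which
# windowing backend this OpenCV build was compiled with.
for line in cv2.getBuildInformation().splitlines():
    if any(key in line for key in ("GUI", "GTK", "Cocoa", "QT")):
        print(line.strip())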
{ "pile_set_name": "StackExchange" }
Q: Load the content dynamically by hijax I want to load the page without refreshing. While preferring ajax technique the URL won't crawl in SEO. So I am choosing Hijax techinique to change the content without reload. I am new to this technique. So Kindly anyone suggest the guide for this. Also Say whether it support the dynamic content load? Because I am choosing the content based on the primary key value. Thanks in advance. A: I did something similar using JQuery alone. If you attach an onClick to the links on your website, the search engines won't be able to trigger them as they are JS meaning that they can crawl properly. For a JS enabled browser you can catch the onClick, get the URL of the link clicked and then load that using AJAX into a div of your choice. Here is the JS I used: $(function(){ $("a.ajax_link").click(function(e){ ajaxLink(this, e); }); }); function ajaxLink(item, e) { var container = $("#ajax_container_wrapper"); var link; if (e != null) { e.preventDefault(); } link = $(item).attr("href"); container.load(link); } And the hyperlink would be something like: <a href="www.example.com" class="ajax_link">My Link</a> This will load www.example.com into the ajax_container_wrapper div when the link is clicked. If you only want to load a portion of www.example.com then you can substitute this line: link = $(item).attr("href"); with something like this: link = $(item).attr("href") + " #some_div"; This will load the content of some_div on www.example.com into your ajax_container_wrapper.
{ "pile_set_name": "StackExchange" }
Q: iptables LOG and DROP in one rule I am trying to log outgoing connections with iptables. What I want is, drop and accept connection while logging them also. I have found that -j option takes DROP/REJECT/ACCEPT/LOG. But I want to do something like DROP and LOG or ACCEPT and LOG. Is there a way to achieve this ? A: Although already over a year old, I stumbled across this question a couple of times on other Google search and I believe I can improve on the previous answer for the benefit of others. Short answer is you cannot combine both action in one line, but you can create a chain that does what you want and then call it in a one liner. Let's create a chain to log and accept: iptables -N LOG_ACCEPT And let's populate its rules: iptables -A LOG_ACCEPT -j LOG --log-prefix "INPUT:ACCEPT:" --log-level 6 iptables -A LOG_ACCEPT -j ACCEPT Now let's create a chain to log and drop: iptables -N LOG_DROP And let's populate its rules: iptables -A LOG_DROP -j LOG --log-prefix "INPUT:DROP: " --log-level 6 iptables -A LOG_DROP -j DROP Now you can do all actions in one go by jumping (-j) to you custom chains instead of the default LOG / ACCEPT / REJECT / DROP: iptables -A <your_chain_here> <your_conditions_here> -j LOG_ACCEPT iptables -A <your_chain_here> <your_conditions_here> -j LOG_DROP A: Example: iptables -A INPUT -j LOG --log-prefix "INPUT:DROP:" --log-level 6 iptables -A INPUT -j DROP Log Exampe: Feb 19 14:18:06 servername kernel: INPUT:DROP:IN=eth1 OUT= MAC=aa:bb:cc:dd:ee:ff:11:22:33:44:55:66:77:88 SRC=x.x.x.x DST=x.x.x.x LEN=48 TOS=0x00 PREC=0x00 TTL=117 ID=x PROTO=TCP SPT=x DPT=x WINDOW=x RES=0x00 SYN URGP=0 Other options: LOG Turn on kernel logging of matching packets. When this option is set for a rule, the Linux kernel will print some information on all matching packets (like most IP header fields) via the kernel log (where it can be read with dmesg or syslogd(8)). This is a "non-terminating target", i.e. rule traversal continues at the next rule. So if you want to LOG the packets you refuse, use two separate rules with the same matching criteria, first using target LOG then DROP (or REJECT). --log-level level Level of logging (numeric or see syslog.conf(5)). --log-prefix prefix Prefix log messages with the specified prefix; up to 29 letters long, and useful for distinguishing messages in the logs. --log-tcp-sequence Log TCP sequence numbers. This is a security risk if the log is readable by users. --log-tcp-options Log options from the TCP packet header. --log-ip-options Log options from the IP packet header. --log-uid Log the userid of the process which generated the packet. A: At work, I needed to log and block SSLv3 connections on ports 993 (IMAPS) and 995 (POP3S) using iptables. So, I combined Gert van Dijk's How to take down SSLv3 in your network using iptables firewall? (POODLE) with Prevok's answer and came up with this: iptables -N SSLv3 iptables -A SSLv3 -j LOG --log-prefix "SSLv3 Client Hello detected: " iptables -A SSLv3 -j DROP iptables -A INPUT \ -p tcp \! -f -m multiport --dports 993,995 \ -m state --state ESTABLISHED -m u32 --u32 \ "0>>22&0x3C@ 12>>26&0x3C@ 0 & 0xFFFFFF00=0x16030000 && \ 0>>22&0x3C@ 12>>26&0x3C@ 2 & 0xFF=0x01 && \ 0>>22&0x3C@ 12>>26&0x3C@ 7 & 0xFFFF=0x0300" \ -j SSLv3 Explanation To LOG and DROP, create a custom chain (e.g. SSLv3): iptables -N SSLv3 iptables -A SSLv3 -j LOG --log-prefix "SSLv3 Client Hello detected: " iptables -A SSLv3 -j DROP Then, redirect what you want to LOG and DROP to that chain (see -j SSLv3): iptables -A INPUT \ -p tcp \! 
-f -m multiport --dports 993,995 \ -m state --state ESTABLISHED -m u32 --u32 \ "0>>22&0x3C@ 12>>26&0x3C@ 0 & 0xFFFFFF00=0x16030000 && \ 0>>22&0x3C@ 12>>26&0x3C@ 2 & 0xFF=0x01 && \ 0>>22&0x3C@ 12>>26&0x3C@ 7 & 0xFFFF=0x0300" \ -j SSLv3 Note: mind the order of the rules. Those rules did not work for me until I put them above this one I had on my firewall script: iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
{ "pile_set_name": "StackExchange" }
Q: Transform scale and positioning issue I'm having an issue with the way I have centered my links and the way I want to links to scale on hover. When I hover over the links the transition is effecting how I have centered the links as well as the scale of them. It moves them to the side and scales them but I want to keep them centered. .position{ position: relative; } .absolute_img_links:link, .absolute_img_links:visited{ display: block; position: absolute; top: 50%; left: 50%; width: 70%; background-color: #ff6633; transform: translate(-50%, -50%); color: #ffffff; font-size: 1.4em; padding: 0.5em; transition: transform:scale 1s; } .absolute_img_links:hover{ transform: scale(1.1); } <div class="container" id="bottom_col_margin"> <div class="row"> <div class="col-xs-12 col-sm-4"> <img class="img-responsive position" src="<?php bloginfo('template_directory'); ?>/images/testimonials_img.jpg"> <span class="text_center"><a href="#" class="absolute_img_links">Customer Testimonials</a></span> </div> <div class="col-xs-12 col-sm-4"> <img class="img-responsive position" src="<?php bloginfo('template_directory'); ?>/images/delivery_img.jpg"> <span class="text_center"><a href="#" class="absolute_img_links">Free Delivery</a></span> </div> <div class="col-xs-12 col-sm-4"> <img class="img-responsive position" src="<?php bloginfo('template_directory'); ?>/images/help_guides_img.jpg"> <span class="text_center"><a href="#" class="absolute_img_links">Help & Guides</a></span> </div> </div> </div> A: when you write the code for :hover, and you use transform: , if you don't keep the initial translate(-50%,-50%) and only use scale(1.1) it will understand like the translate becomes 0 . so you need to keep the initial translate values also in the hover state . so the code will become transform:translate(-50%,-50%) scale(1.1) .position{ position: relative; } .absolute_img_links:link, .absolute_img_links:visited{ display: block; position: absolute; top: 50%; left: 50%; width: 70%; background-color: #ff6633; transform: translate(-50%,-50%); color: #ffffff; font-size: 1.4em; padding: 0.5em; transition: 3s; } .absolute_img_links:hover{ transform: translate(-50%,-50%) scale(1.1); } <div class="container" id="bottom_col_margin"> <div class="row"> <div class="col-xs-12 col-sm-4"> <img class="img-responsive position" src="<?php bloginfo('template_directory'); ?>/images/testimonials_img.jpg"> <span class="text_center"><a href="#" class="absolute_img_links">Customer Testimonials</a></span> </div> <div class="col-xs-12 col-sm-4"> <img class="img-responsive position" src="<?php bloginfo('template_directory'); ?>/images/delivery_img.jpg"> <span class="text_center"><a href="#" class="absolute_img_links">Free Delivery</a></span> </div> <div class="col-xs-12 col-sm-4"> <img class="img-responsive position" src="<?php bloginfo('template_directory'); ?>/images/help_guides_img.jpg"> <span class="text_center"><a href="#" class="absolute_img_links">Help & Guides</a></span> </div> </div> </div>
{ "pile_set_name": "StackExchange" }
Q: Using property/variable value from Jenkins I got stuck to a really annoying issue. Looks pretty simple to solve but I can't see what am I doing wrong. All started from JMeter: more than nine parameters from Jenkins. I managed to get the values from Jenkins in Jmeter by spiting an array. String line = "${__P(jenkinsparams)}"; String[] words = line.split(","); String looks like: -Jjenkinsparams="999,999,8443,1433,SQL2012,sa" So I have: words[0] = 999; words[1] = 999; words[2] = 8443; [...] words[5] = sa; This operation is made inside a BeanShell Sampler with 1 thread. How can I use these values as further on? Even in different Thread Groups. I've tried: props.put("SqlIP",words[0]); props.put("SqlInstance", words[1]); but ${__P(SqlIP)} doesn't retrieve the value when used in JDBC Connection Configuration as: jdbc:sqlserver://${__P(SqlIP)}\\${__P(SqlInstance)} How can I use properties/variables to send data from that array to build an JDBC connection? I need them for: SQL IP, SQL instance, SQL username and SQL password. All sent in that array from Jenkins. Thank you A: Because JDBC Connection Configuration is a Configuration element and according to Execution Order it's kicked off before any Beanshell test elements. You will have to come up with another way of setting the value. I'm not aware of any parameters number limit on JMeter level, if you're running an exotic shell or a form of custom JMeter startup script which introduces this limitation you can work it around by putting your configuration into user.properties file or a custom properties file which can be passed via -q parameter, check out Apache JMeter Properties Customization Guide to learn more about setting and overriding JMeter Properties
{ "pile_set_name": "StackExchange" }
Q: Java ArrayList remove duplicates from both the lists I have 2 ArrayList of custom objects. I want to remove duplicate entries from both the ArrayList. The objects have three fields: fName, lName, and id. If the id occurs multiple times, I want to remove from both the lists. How can I do that? I am fine with merging both the lists and removing the 2 duplicate entries as well. A: If you want to merge: simply copy the content of both lists to a map. Then you don't have the duplicates anymore (but you loose your actual ordering): Map<Integer, MyObject> temp = new HashMap<Integer, MyObject>(); for (MyObject obj:firstList) { temp.put(obj.getId(), obj); } for (MyObject obj:secondList) { temp.put(obj.getId(), obj); } List<MyObject> result = new ArrayList<MyObject>(temp.values());
{ "pile_set_name": "StackExchange" }
Q: Is there a faster video rendering solution than using AndroidBitmap_xxx functions? In my native thread I have FFMpeg getting and decoding frames then putting them in a queue. On Java side I have a GLSurfaceView and from Renderer.onDrawFrame I'm calling into the native code passing a bitmap (that bitmap I create only once, then pass it every time). In the native code I get the head of the queue, copy data to the java bitmap using AndroidBitmap_xxx functions, then render that Bitmap on Java side as a texture. I wonder is there a faster way to render video? Shall I do it entirely in the native code, if yes, why it will be faster? Edit: I now don't copy RGB frame pixels to the locked bitmap pixels, rather I decode YUV frame directly into the locked bitmap pixels. This makes rendering significantly faster (because no unneeded memcpy anymore) still the question remains. A: The most effective technique to change the pixels in the texture is called Render-to-Texture and can be done in OpenGL/OpenGL ES via FBOs. On desktop OpenGL you can use pixel buffer objects (PBOs) to manipulate pixel data directly on GPU (but OpenGL ES does not support this yet). On unextended OpenGL you can change the pixels in system memory and then update texture with glTexImage2D/glTexSubImage2D - but this is inefficient last resort solution and should be avoided if possible. glTexSubImage2D is usually much faster since it only updates pixel inside the existing texture, while glTexImage2D creates entirely new texture (as a benefit you can change the size and pixel format of the texture). On the other side, glTexSubImage2D allows to update only parts of the texture. You say that you want it to work with OpenGL ES, so I would propose to do the following steps: replace glTexImage2D() with glTexSubImage2D() - if you gain enough performance that's it, just let it be; implement render-to-texture with FBOs and shaders - it will require far more work to rewrite your code, but will give even better performance. For FBOs the code can look like this: // setup FBO glGenFramebuffers( 1, &FFrameBuffer ); glBindFramebuffer( GL_FRAMEBUFFER, FFrameBuffer ); glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, YourTextureID, 0 ); glBindFramebuffer( GL_FRAMEBUFFER, 0 ); // render to FBO glBindFramebuffer( GL_FRAMEBUFFER, FFrameBuffer ); glViewport( 0, 0, YourTextureWidth, YourTextureHeight ); your rendering code goes here - it will draw directly into the texture glBindFramebuffer( GL_FRAMEBUFFER, 0 ); // cleanup glBindFramebuffer( GL_FRAMEBUFFER, 0 ); glDeleteFramebuffers( 1, &FFrameBuffer ); Keep in mind that not all pixel formats can be rendered to. RGB/RGBA are usually fine.
{ "pile_set_name": "StackExchange" }
Q: Input value not showing up (AngularJS) I've got this controller: app.controller('controlFormulario', function($scope) { this.formulario = []; this.formulario.fecha = new Date(); }); ...this directive: app.directive('formulario', [function() { return { restrict: 'E', // C: class, E: element, M: comments, A: attributes templateUrl: 'modules/formulario.html' }; ... and this view: <div class="form-group"> <label for="fecha">Fecha</label> <input type="fecha" class="form-control" id="fecha" ng-model="fecha" value="{{formCtrl.formulario.fecha | date}}" disabled> </div> As you may guess, the attribute value has the date, perfectly filtered and so. No problem here. The actual problem comes when you see the website, and find out that the input doesn't show up anything. The "value" attribute is perfectly assigned, but it's not showing it inside the input box. Why? Am I doing something wrong? Tried the same with ng-init, ng-value... I'm a newbye on AngularJS, just ended the basic tutorial and I'm trying to practise and get some more knowledge, so maybe I'm missing out something. Any help? A: You need to store the data attribute to the $scope. Check out this link: https://docs.angularjs.org/guide/scope
{ "pile_set_name": "StackExchange" }
Q: What is the meaning of docker ps -a What is the meaning of "-a" in docker ps -a? docker ps lists all running containers in the Docker engine. When we run docker ps -a it lists the stopped containers as well. What is the meaning of "-a"? A: -a is the short form of --all. It shows all containers, both stopped and running. A plain docker ps shows only the running containers; when you provide the -a option, it shows all of them. Please check this for further details: Docker ps options
{ "pile_set_name": "StackExchange" }
Q: What does 'touch' mean in "It opened at a touch"? Clay promptly withdrew again and hurried across to the window. It opened at a touch and gave on to a wide courtyard. A: The window opened very easily. Clay barely needed to touch it, and it opened. The meaning is most likely not literal. Clay might have made a more involved movement than a single touch to open the window, but that is irrelevant for this narrative; the author needed to stress the fact that it opened easily. From the grammar standpoint, touch is a noun here, a singular countable noun, as evidenced by the use of the indefinite article a: "The window opened at one touch", or "at a single touch". Compare with the expression at a glance: At a glance: (idiomatic) Upon cursory examination; an abbreviated review. "At a glance it seems that he is a nice guy, but upon digging deeper the truth emerges." A: The word touch is a very common word used as a verb or a noun. In the sentence presented, it is a countable noun that means a light movement or placement of a part of your body, especially your hand, on the window. The window opened with only a light movement of a part of your body, most probably your hand, and it provided a view of a wide courtyard.
{ "pile_set_name": "StackExchange" }
Q: Default dialog padding What should be the default value for a dialog padding? 8px 10px? A: The Windows UX guidelines tell you to use a 7 DLU padding all the way around on a dialog. A Dialog Unit (DLU) is based on the average size of a character. It's defined so that the "average" character is 4 DLUs wide by 8 DLUs high. The size of an average character changes depending on the font, the font size, and the dpi that the user is currently running. This means that DLUs are not a constant between machines, or between users on the same machine. Different fonts have different aspect ratios. This ends up meaning that the size in pixels of a DLU will be different in the horizontal and vertical directions. Looking at this particular size (dialog padding of 7 dlus): Segoe UI 9pt, 96dpi Tahoma 8pt, 96dpi dlus px py px py ================================================== 1x1 1.75 1.875 1.250 1.625 4x8 7 15 6 13 Definition of DLU: Average character=4x8 7x7 12.25 13.125 8.75 11.375 Dialog box margin - all sides (7) So if the user is running Segoe UI 9pt at 96 dpi (the Windows Vista and 7 default), you want a top and bottom margin of 13px, and a left and right margin of 12px. If the user is running Tahoma 9pt at 96dpi (the Windows 2000 and XP default), you want a top and bottom margin of 11px and a left and right margin of 9px. Other common sizes: MS Sans Serif, 8pt, 96dpi (Windows 9x default) Tahoma 8pt, 96dpi (Windows 2000/XP default) Tahoma 8pt, 120dpi (Windows 2000/XP large fonts) Segoe UI 9pt, 96dpi (Windows Vista/7 default) Segoe UI 9pt, 120 dpi (Windows Vista large fonts default, Windows 7 high-dpi default) Segoe UI 11pt, 96 dpi (what i run at work) Segoe UI 9pt, 137 dpi (what i run at home) And this is why creating user interfaces is hard. It's hard to get it right, and most developers would rather just stick their head in the sand and pretend users didn't have font preferences, or change their dpi setting. And the fact that the question was asked 4 months ago, and then abandoned, seems to confirm that developers don't want to hear the hard answers. A: This depends on the platform and its respective design guidelines. On Windows it's 7 DLU or 11 px: Dialog box margins: 7 DLU on all sides or 11 pixels on all sides —Windows User Experience Interaction Guidelines In Mac OS X, Apple doesn't mandate a specific size of the dialog padding, but instead only recommends Equal margins on both sides and the bottom edge of the window (the window in Figure 16-4 uses a 20-pixel margin in these areas). —Apple Human Interface Guidelines For GNOME it's 12 px: Leave a 12-pixel border between the edge of the window and the nearest controls. —GNOME Human Interface Guidelines 2.2
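To make the DLU arithmetic above concrete, here is a small Python sketch (not from the original answer) that derives the pixel margins from the quoted average-character sizes; the Segoe UI 9pt / 96 dpi figures are the ones given in the table.

# Average character = 4 DLU wide x 8 DLU high; Segoe UI 9pt at 96 dpi measures 7 x 15 px.
avg_char_px_w, avg_char_px_h = 7, 15
dlu_w = avg_char_px_w / 4          # horizontal pixels per DLU
dlu_h = avg_char_px_h / 8          # vertical pixels per DLU

margin_dlu = 7                     # recommended dialog margin on all sides
print(margin_dlu * dlu_w, margin_dlu * dlu_h)   # 12.25 x 13.125 px, i.e. ~12 x 13 px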
{ "pile_set_name": "StackExchange" }
Q: Why is the Riemann integral only defined on compact sets? Every text I look at says a function must be bounded and be defined on a compact set before one can even think about the Riemann integral. Boundedness makes sense, otherwise the Darboux sums could be undefined. However, I don't see where it becomes important that the integral be taken over a compact set. A: If you use the definition without tagged partitions, the reason the interval needs to be compact is that you need the function to obtain suprema and infima on every subinterval on a partition. For example $f(x) = 1/x$ is continuous on $(0,1)$ (so it should be integrable), but it never attains a supremum in the first subinterval of any partition. Even if you use tagged partitions, this problem persists. Again, consider $f(x) = 1/x$ on $(0,1)$. Let $P_n$ be a sequence of partitions such that $P_{n+1}$ is a refinement of $P_n$ for all $n$, and let $t_n$ be the tagged point in the first subinterval of $P_n$. Then $t_n \to 0$ as $n \to \infty$. Hence $f(t_n) \to \infty$ so that the limit of $f(t_n)\Delta_1$ will be infinite for partitions whose mesh size tends to $0$ slower than $f(t_n)$ tends to $\infty$. Hence the Riemann sums will not converge to any finite limit which means, by definition, that $f$ is not integrable. One way to interpret this discussion is that the theorem "If $f$ is continuous on $I$, then $f$ is Riemann integrable on $I$" will no longer be true if we allow non-compact $I$. In fact, this is one of the "deficiencies" that made the Riemann integral unsuitable (along with the more pressing problems regarding convergence for sequences of functions). For Lebesgue integrals, you can use an open set without problems.
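The divergence described in the tagged-partition paragraph can be seen numerically; the following Python sketch (an illustration under the stated choice of tags, not part of the answer) picks the tag in the first subinterval as 1/n^2 and shows that this single term of the Riemann sum already blows up as the mesh 1/n shrinks.

def first_term(n):
    width = 1.0 / n              # mesh of a uniform partition of (0, 1)
    tag = 1.0 / n**2             # tagged point inside the first subinterval (0, 1/n]
    return (1.0 / tag) * width   # f(tag) * width for f(x) = 1/x

for n in (10, 100, 1000, 10000):
    print(n, first_term(n))      # grows like n, so the tagged Riemann sums cannot converge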
{ "pile_set_name": "StackExchange" }
Q: Flutter - Non-scrollable Grid Is there a way to build a grid that's not scrollable in itself and which size is according to its children, the same way we can specify in a row or column mainAxisSize: MainAxisSize.min? To give you the big picture - I'm trying to create a responsive layout that depends on the device's width. It should be split into 2 parts, connected seamlessly via a column. 1) 2 big containers which sizes depend on the screen width, taking into account a small space in between them. Each container's width and height will be the same (square containers). 2) Same idea, but instead have 3 rows, each consisting of 3 smaller containers. This creates a grid. It's very important though that the grid won't be scrollable in itself and that its size will be according to its children. It should only be scrolled together with the rest of the page that's contained in a SingleChildScrollView. Especially since each container's height needs to be the same as its width, I was thinking of going with a combination of rows, columns, and LayoutBuilder - they gives me all the capabilities I need. However, before doing things manually, I was wondering if there's something that could work out of the box. A: Something like this? SingleChildScrollView( child: Column( children: <Widget>[ Row( children: <Widget>[ Expanded( child: Padding( padding: const EdgeInsets.all(10.0), child: AspectRatio( aspectRatio: 1.0, child: Container( width: double.infinity, decoration: BoxDecoration( border: Border.all(width: 3.0, color: Colors.green), ), ), ), ), ), Expanded( child: Padding( padding: const EdgeInsets.all(10.0), child: AspectRatio( aspectRatio: 1.0, child: Container( width: double.infinity, decoration: BoxDecoration( border: Border.all(width: 3.0, color: Colors.green), ), ), ), ), ), ], ), Container( padding: const EdgeInsets.all(10.0), child: GridView.builder( physics: NeverScrollableScrollPhysics(), shrinkWrap: true, gridDelegate: SliverGridDelegateWithFixedCrossAxisCount( crossAxisCount: 3, childAspectRatio: 1.0, mainAxisSpacing: 10.0, crossAxisSpacing: 10.0, ), itemCount: 21, itemBuilder: (context, index) { return Container( decoration: BoxDecoration( border: Border.all(width: 3.0), ), ); }, ), ), ], ), )
{ "pile_set_name": "StackExchange" }
Q: Encrypting as3 flash .swf I'm trying to protect my AS3 .swf Flash file code from decompilation. I cannot spend $$$ on commercial compilers though. How can I encrypt my SWF for free? A: My brutally honest answer: don't even try. If someone has the skill to make use of assets or code from your application, then they're going to have the basic knowledge needed to decompile your SWF and get what they need. If you still want to try, then this is all I can suggest: http://www.kindisoft.com/ Hope this helps... or at least explains why you shouldn't spend time trying.
{ "pile_set_name": "StackExchange" }
Q: integrating webpack with grunt requirejs project I have a project which completely build with Backbone and reruiejs. For production build we are heavily dependent on grunt. For JS modules, i have using grunt requirejs. After evaluating Webpack module loader, i thought of trying it out in my project but not sure to what extent i should use it and how to implement the same. Any reference point or example project ? A: Here is a short, but very informative tutorial by Pete Hunt: https://github.com/petehunt/webpack-howto It goes through building a simple build script. After you get the hang of it, you'll be able to make your own scripts in no time. If you use Webpack, it makes using Grunt & RequireJS redundant & unnecessary because Webpack can do all that for you. Plus, it will be tedious to maintain multiple build scripts in all these tools.
{ "pile_set_name": "StackExchange" }
Q: Inputting a number and returning the product of all the even integers between 1 and that number I'm looking for help on this question: so far my code has gotten me far enough to return the right answer, but it also prints the resultant multiplication along the way, i.e.: (1, 2, 2, 8, 8, 48). Can anyone reshuffle or redo the code so it just outputs the final answer? Thanks in advance! counter=1 product=1 userinput=int (input ("What number: ")) for counter in range (1, userinput): if counter%2==0: product=int (counter*product) counter=counter+1 else: counter=counter+1 print (product) A: That's because print is executed on every iteration; you need to execute it only after the loop ends, which means print must be at the same indentation level as the loop itself: counter=1 product=1 userinput=int (input ("What number: ")) for counter in range (1, userinput): if counter%2==0: product= int(counter*product) print(product)
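For completeness, here is an equivalent, more compact version (a sketch assuming Python 3.8+ for math.prod, not something the answer asked for); math.prod over range(2, n, 2) multiplies exactly the even integers the loop above visits, and returns 1 when there are none.

from math import prod

n = int(input("What number: "))
print(prod(range(2, n, 2)))   # product of the even integers strictly below n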
{ "pile_set_name": "StackExchange" }
Q: "quality" = upvotes/views An interesting measure of a question is not only the total number of views and the total number of upvotes, but their quotient. I (as a user) can calculated this quotient in mind - given the two numbers - but would find it better to be given it directly: prominently, and at first sight. Other opinions? A: I looked up the top questions according to this ratio (among 100+ views questions with a score of 5+). As user7530 predicted, the leaders are generally good, but not necessarily fantastic, questions with specific titles. Putting (rather long) in the title is a winning move in this game. Top 5: Paracompactness of CW complexes (rather long) How low can the approval rating of a majority candidate be? Do we really know the reliability of PrimeQ[n] (for $n>10^{16}$)? Evaluate $\sum_{k=1}^{\infty} \frac{k^2-1}{k^4+k^2+1}$ Smallest order for finite group that needs many elements to generate it A: I can sort of see what the idea would be, but wouldn't the denominator need to be the number of times the question was shown to a user who has enough reputation to vote? (Otherwise a good question would drop in your ranking simply by being linked from Reddit). And this number is not public knowledge and probably not even remembered by the system.
{ "pile_set_name": "StackExchange" }
Q: Why is prediction not plotted? Here is my code in Python 3: from sklearn import linear_model import numpy as np obj = linear_model.LinearRegression() allc = np.array([[0,0],[1,1],[2,2],[3,3],[4,4],[5,5],[6,6]]) X=allc[:,0] X=X.reshape(-1, 1) Y=X.reshape(X.shape[0],-1) obj.fit(X, Y) print(obj.predict(7)) import matplotlib.pyplot as plt plt.scatter(X,Y,color='black') plt.plot(X[0],obj.predict(7),color='black',linewidth=3) plt.show() My plotted data looks this way: After fitting, obj.predict(7) equals [7.] What am I doing wrong? I expected to see 7.7 point being plotted. A: The plot method is taking an array for the X-axis and an array for the Y-axis, and draws a line according to those arrays. You tried to draw a point using a method for lines... For your code to work (I have tested it and it worked) switch this line: plt.plot(X[0],obj.predict(7),color='black',linewidth=3) with this line: plt.scatter(7,obj.predict(7),color='black',linewidth=3) The scatter method will take the point given (7, 7) and put it in the graph just like you wanted. I hope this helped :)
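A self-contained version of the corrected plot, offered as a hedged sketch rather than the answer's exact code: recent scikit-learn releases require a 2-D array for predict, so the single point is passed as [[7]], and the prediction is drawn with scatter as suggested above.

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

X = np.arange(7).reshape(-1, 1)        # 0..6 as a column vector
Y = X.ravel()                          # targets equal to the inputs, as in the question
obj = linear_model.LinearRegression().fit(X, Y)

y7 = obj.predict([[7]])[0]             # 2-D input expected by newer scikit-learn
plt.scatter(X, Y, color='black')
plt.scatter(7, y7, color='red', linewidth=3)   # the predicted point (7, ~7.0)
plt.show()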
{ "pile_set_name": "StackExchange" }
Q: Get DataType of computed column from dacpac When traversing a Dacpac via C# code, I am able to figure out which columns are referenced in a computed column (GetReferenced(Microsoft.SqlServer.Dac.Model.Column.ExpressionDependencies)), and what the expression for the column is (Microsoft.SqlServer.Dac.Model.Column.Expression property). What I cannot find, is the datatype of the computed column. Besides that, I also cannot find a way to retrieve the columns of a FileTable table (although this is a static set of columns). Search engines won't bring me far; there aren't that many resource to be found on this topic. A: I have blogged here how to get the data type of columns: http://sqlserverfunctions.wordpress.com/category/dacfx-2/ I haven't tried it on computed columns. To get columns for a file table, follow the blog but replace Table.Columns ModelRelationshipClass with FileTable.Columns ModelRelationshipClass. Ed
{ "pile_set_name": "StackExchange" }
Q: Figuring out the right SPARQL query I am trying to somehow find all the owl:sameAs properties from a resource link. A simple query would be like SELECT ?x WHERE { <http://dbpedia.org/resource/Tetris> owl:sameAs ?x } However i also would like to get the Yago link mentioned as is owl:sameAs of. Could any one help me out how to do this ? A: You can get the YAGO link like so: SELECT ?x WHERE { ?x owl:sameAs <http://dbpedia.org/resource/Tetris> } Or get both the “incoming” and “outgoing” links in one query: SELECT ?x WHERE { { <http://dbpedia.org/resource/Tetris> owl:sameAs ?x } UNION { ?x owl:sameAs <http://dbpedia.org/resource/Tetris> } }
{ "pile_set_name": "StackExchange" }
Q: What does (a); mean in solidity? The bittrex contract has the following code. What does (a); and (val); mean? contract Token { function balanceOf(address a) returns (uint) { (a); return 0; } function transfer(address a, uint val) returns (bool) { (a); (val); return false; } } A: It doesn't do anything. It just states the variable within brackets, which just evaluates to the value of the variable, but it's assigned to nothing. I compared the gas usage to a version of the contract which has that line commented out and it has identical gas costs to run the function, so it may be ignored by the compiler anyway. They may have added it to silence compiler warnings about the function parameters not being used.
{ "pile_set_name": "StackExchange" }
Q: Laurent series of $\frac{1}{1-\cos{3z}}$ Let the Laurent series of the function $f(z)=\frac{1}{1-\cos{3z}}$ be $\sum_{-\infty}^{\infty}{{a}_{k}{z}^{k}}$. a) Compute ${a}_{-3}$, ${a}_{0}$ and ${a}_{1}$. b) Find the biggest $R$ so that the above Laurent series converges in the domain $0 < |z| < R$. This question is quite similar to the one answered in this post, except we have $3z$ instead of $z$. However I never come up with the same conclusion when doing the calculation myself : \begin{equation} \cos{z} = 1-\frac{{z}^{2}}{2!} + \frac{{z}^{4}}{4!} - \frac{{z}^{6}}{6!} + \dots \end{equation} \begin{align} \Longrightarrow \frac{1}{1-\cos{z}} &= \frac{1}{\frac{{z}^{2}}{2!} - \frac{{z}^{4}}{4!} + \frac{{z}^{6}}{6!} - \dots} \\ &= \frac{2}{{z}^{2}} \frac{1}{1-\frac{2{z}^{2}}{4!} + \frac{2{z}^{4}}{6!} - \dots} \\ &= \frac{2}{{z}^{2}} \frac{1}{1-\left(\frac{2{z}^{2}}{4!} - \frac{2{z}^{4}}{6!} + \dots\right)}\end{align} and with $\frac1{1-z}=1+z+z^2+\ldots$ for $|z|<1$, I get : \begin{align} \frac{1}{1-\cos{z}} &= \frac{2}{z^2}\left(1 + \frac{z^2}{12} - \frac{z^4}{360} + \dots\right) \ &= \frac{2}{z^2} + \frac{1}{6} - \frac{z^2}{180} + \dots \end{align} I started the Laurent series today for an assignment and therefore I am not very proficient with those objects. However I made the calculation many times and I never get what seems to be the right result from Wolfram. This has probably something to do with the convergence of $\frac{2{z}^{2}}{4!} + \frac{2{z}^{4}}{6!} - \dots$ on the disk $\{z \quad | \quad |z| < 2\pi\}$. I would love some help about this. Thank you very much A: Since $f$ is an even function, $a_{-3}=a_1=0$. You have $1-\cos(3z)=\frac92z^2-\frac{27}8z^4+\cdots$. So$$\frac1{1-\cos(3z)}=\frac1{\frac92z^2-\frac{27}8z^4+\cdots}=\frac{a_{-2}}{z^2}+a_0+a_2z^2+\cdots$$and therefore$$1=\left(\frac92z^2-\frac{27}8z^4+\cdots\right)\left(\frac{a_{-2}}{z^2}+a_0+a_2z^2+\cdots\right),$$from which you can deduce that $a_{-2}=\frac29$. The biggest $R$ such that the series converges on $D(0,R)\setminus\{0\}$ is $\frac{2\pi}{3}$, since $\pm\frac{2\pi}3$ are the complex numbers $z$ closest to $0$ such that $1-\cos(3z)=0$.
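If it helps to double-check the bookkeeping, SymPy can produce the same expansion; this is an optional verification sketch in Python, not part of the answer.

import sympy as sp

z = sp.symbols('z')
expr = 1 / (1 - sp.cos(3*z))
print(sp.series(expr, z, 0, 4))
# Expected leading behaviour: 2/(9*z**2) + 1/6 + ..., with no odd powers,
# consistent with a_{-2} = 2/9 and a_{-3} = a_1 = 0.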
{ "pile_set_name": "StackExchange" }
Q: How to match Firefox's about page in extension manifest? I would like to match Firefox's about page like so: "content_scripts": [ { "matches": ["about:debugging"], "js": ["import.js"] } ] The script doesn't run. If I try other matches it works properly on "normal" websites. A: If I'm not mistaken in Chrome, Firefox, Microsoft, Opera, and Vivaldi these are the things you aren't allowed to modify because they go against extension policies. Meaning, You can't do this, sorry. about-scheme chrome-extension-scheme chrome-scheme edge-extension-scheme edge-scheme moz-extension-scheme ms-browser-extension-scheme opera-scheme vivaldi-scheme wyciwyg-scheme (In bold are the ones that will effect Firefox)
{ "pile_set_name": "StackExchange" }
Q: Shell script - Giving error while iterating through array I have been trying to check for an If Condition on one name value in the array defined. I am experiencing this error: main.sh: line 9: [10.247.78.207: command not found main.sh: line 9: [10.247.78.207: command not found Here is my code: #!/bin/bash declare -a names=${names:-(10.247.78.207 10.247.78.206)} for (( i = 0 ; i < ${#names[@]} ; i++ )) do if ["${names[0]}" == "10.247.78.207"] then echo "hello" fi done A: Change ["${names[0]}" == "10.247.78.207"] to [ "${names[0]}" == "10.247.78.207" ]. That is space after [ and before ]. Hope this solves your problem.
{ "pile_set_name": "StackExchange" }
Q: How can I get the IP address and port of a client with PHP? How do I get the IP and port of a client with PHP? I tried the script below but it only gives me the IP address. <?php print $_SERVER['REMOTE_ADDR']; ?> A: To get the port of the connected device you can use $_SERVER['REMOTE_PORT'] $ipAddress = $_SERVER['REMOTE_ADDR']; $port = $_SERVER['REMOTE_PORT']; A: Port is defined in http server (Apache or other and mostly it is 80 or 443) The PHP $_SERVER variables can be checked at this link. I am sure that 'REMOTE_ADDR' returns the IP address from which the user is viewing the current page. But if your server is behind NAT: If you are serving from behind a proxy server, you will almost certainly save time by looking at what these $_SERVER variables do on your machine behind the proxy. // in place of $_SERVER['REMOTE_ADDR'] $_SERVER['HTTP_X_FORWARDED_FOR'] // in place of $_SERVER['SERVER_NAME'] $_SERVER['HTTP_X_FORWARDED_HOST'] && $_SERVER['HTTP_X_FORWARDED_SERVER']
{ "pile_set_name": "StackExchange" }
Q: Effective operator in four-fermion interaction In one book, I have got the following lines which I found myself unable to understand what is effective operator? The paragraph is given below: The weak interaction describes nuclear beta decay, and at low energy it is given by an effective four fermion interaction. Since the effective operator has dimension 6, the coupling constant has inverse mass-squared dimension. A: To make things clear for the specific case you are talking about, think about what is really happening in beta decay. It occurs via the following process: It involves the exchange of a W boson and so to model the true process properly using the standard model requires the three point fermion gauge boson vertices (operators): $$\frac{g}{\sqrt{2}} W^\mu\left(\bar\nu \, \gamma_\mu e + \bar u \gamma_\mu d \right).$$ These are interactions between two fermions and the W boson allowing us to draw the above diagram. Note that the coupling constant $g$ is dimensionless. The contribution of the exchange of the W boson is given by $$\frac{g^2}{E^2-M_W^2}$$ where E is the energy transferred by the W boson. Now at low energies which correspond to small distances the W boson only propagates for a very small distance and it can be approximated by a contact interaction between the fermions: (Note here that we have replaced the individual quarks by $p$ and $n$ but that is not important to the argument). You see that the contribution of the $W$ is reduced to a contact interaction between four fermions! So the effective operator for this interaction now contains a term with four fermions such as $$G_F \, \bar\nu e \bar p n.$$ At low energies $(E^2 \ll M_W^2)$ we find $$\frac{g^2}{E^2-M_W^2} \to \frac{g^2}{M_W^2}$$ and so the new coupling constant (Fermi constant) is given by $$\frac{G_F}{\sqrt{2}}=\frac{g^2}{8M_W^2}$$ which has dimension of inverse mass squared! (The numerical factors are not important for the argument and come out from doing the full calculation properly). I am not sure if it was the use of the word operator in the quote that confused you. Remember that in QFT the fields are operators and so a term in the Lagrangian containing a bunch of fields is often referred to as an operator. If four fermion fields are involved (as above for the contact interaction) then it is called a four fermion operator. If the operator appears from integrating out some other fields (the $W$ in this case) then it is known as an effective operator meaning at low energies it effectively captures the correct physics.
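A rough numeric sanity check of the matching relation G_F/sqrt(2) = g^2/(8 M_W^2) is sketched below in Python, using approximate rounded values (g ~ 0.65, M_W ~ 80.4 GeV, G_F ~ 1.166e-5 GeV^-2); these numbers are illustrative inputs, not precise measured values, and both sides come out near 8.2e-6 GeV^-2.

from math import sqrt

g = 0.65             # weak coupling (dimensionless), approximate
m_w = 80.4           # W boson mass in GeV, approximate
g_fermi = 1.166e-5   # Fermi constant in GeV**-2, approximate

print(g**2 / (8 * m_w**2))   # ~8.2e-6 GeV**-2
print(g_fermi / sqrt(2))     # ~8.2e-6 GeV**-2, so the two sides agree at this level of accuracy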
{ "pile_set_name": "StackExchange" }
Q: Total number of ways of selecting two numbers from the set {1,2,3,4,5..........,3n} so that their sum is divisible by 3 is S. Find S? Total number of ways of selecting two numbers from the set $\{ 1,2,3,4,5..........,3n \}$ so that their sum is divisible by $3$ is $S$. Find $S$? This question came in my test and we were supposed to do it in less than $3$ minutes. I still can't get the answer A: If the first number is divisible by $3$, then so is the other one; we choose two multiples of $3$ out of $n$, total ${n\choose 2}$ possibilities. If the first number (say, $n$) is not divisible by $3$, then the remainder of the second number ($m$) by division by $3$ is different. So either $n$ or $m$ is $1\mod 3$ and the other $2\mod 3$. Since order isn't relevant, we can assume $n\equiv 1\mod 3$ and $m\equiv 2\mod 3$, so that we have ${n\choose 1}{n\choose 1}$ possibilities. Thus, $$S={n\choose 2}+{n\choose 1}{n\choose 1}=\frac12n\left(3n-1\right)$$
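A brute-force check of the closed form S = n(3n-1)/2 against direct counting over {1, ..., 3n}; this is just a verification sketch in Python.

from itertools import combinations

for n in range(1, 8):
    count = sum(1 for a, b in combinations(range(1, 3*n + 1), 2) if (a + b) % 3 == 0)
    assert count == n * (3*n - 1) // 2
    print(n, count)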
{ "pile_set_name": "StackExchange" }
Q: iOS - Unit Testing asynchronous private function in Presenter of MVP Hello I'm trying to unit testing a private function which located in Presenter This is my Presenter Codes and I'm using Networking Singleton Object APIService class MyPresenter { weak var vc: MyProtocol? func attachView(vc: MyProtocol?) { self.vc = vc } func request(_ id: String) { if id.count == 0 { vc?.showIDEmptyAlert() return } fetch(id) } private func fetch(_ id:String) { DispatchQueue.global.async { APIService.shared.fetch(id) { (data, err) in if let err = err { self.vc?.showErrorAlert() return } self.vc?.update(data) } } } } and this is my ViewController codes class MyViewController: UIViewController, MyProtocol { private var presenter: MyPresenter = MyPresenter() override func viewDidLoad() { super.viewDidLoad() presenter.attachView(vc: self) } func showIDEmptyAlert() { self.present .. } func showErrorAlert() { self.present .. } func update(data: String) { self.label.text = data } @IBAction func handleRegisterButton(_ sender: UIButton) { guard let id = idTextField.text else { return } presenter.request(id) } } These are my Presenter and View. And I wrote Test Code like this First, I made Mock PassiveView Like this class MyViewMock: MyProtocol { private (set) var showEmptyIdAlertHasBeenCalled = false private (set) var showErrorAlertHasBeenCalled = false private (set) var updateHasBeenCalled = false func showEmptyIdAlert() { showEmptyIdAlertHasBeenCalled = true } func showErrorAlert() { showErrorAlertHasBeenCalled = true } func update(data: String) { updateHasBeenCalled = true } } So I expected that if I could test Presenter's request(_:) methods with valid id and invalid but because request(_:) didn't get handler parameter and APIService.shared.fetch is asynchronous, I couldn't get correct result by calling request(_:). (Always false) How can I test this kind of Presenter? A: In terms ofXCTests, there is the XCTestExpectation class to test asynchronous functions. But there is an issue in your approach of testing the Presenter. You should use mock for your network service and stub it with expected arguments. It doesn't make sense to call the actual service. Unit tests are the fastest kind of tests. Private methods are a part of black-box which you should not care about its internal structure. Test public interfaces but don't test private methods. If you will try to mock and stub APIService, then you notice it's impossible to do if it's a singleton. You'll end up with injecting service as a dependency into Presenter for the better testability. If the service will be mocked and the stub will be used, then there is no need in using XCTestExpectation, because there won't be any asynchronous code.
{ "pile_set_name": "StackExchange" }
Q: Inno Setup and Copying a file I have an installer that is working just fine. What I would like to do is copy a DLL that is part of the installed application to another folder. For example I am installing the application on E:\Folder, but I would like to copy a DLL from that install to E:\public. Is it possible to have Inno copy a file to another folder as part of an install? A: If you want to copy that library into a folder which is a subfolder of the parent folder selected by the user on the Select Destination Location wizard page, then you should specify the path relative to the {app} folder for the DestDir parameter of your [Files] section entry for that library. In script it would be something like this: [Files] Source: "App.exe"; DestDir: "{app}" Source: "Library.dll"; DestDir: "{app}\..\Public" Just to make it clear, if the user selects e.g. this folder: C:\Program Files (x86)\App Folder Then the library will be copied to this folder: C:\Program Files (x86)\Public
{ "pile_set_name": "StackExchange" }
Q: Web API 2 download file using async Task I need to write a method like below to return a text document (.txt, pdf, .doc, .docx etc) While there are good examples of posting file in Web API 2.0 on the web , I couldn't find a relevant one for just downloading one. (I know how to do it in HttpResponseMessage.) public async Task<IHttpActionResult> GetFileAsync(int FileId) { //just returning file part (no other logic needed) } Does the above needs to be async at all? I am only looking to return stream. (Is that okay?) More importantly before I end up doing the job one way or the otther, I wanted to know what's the "right" way of doing this sort of job... (so approaches and techniques mentioning this would be greatly appreciated).. thanks. A: Right, for your above scenario the action does not need to return an async action result. Here I am creating a custom IHttpActionResult. You can check my comments in the below code here. public IHttpActionResult GetFileAsync(int fileId) { // NOTE: If there was any other 'async' stuff here, then you would need to return // a Task<IHttpActionResult>, but for this simple case you need not. return new FileActionResult(fileId); } public class FileActionResult : IHttpActionResult { public FileActionResult(int fileId) { this.FileId = fileId; } public int FileId { get; private set; } public Task<HttpResponseMessage> ExecuteAsync(CancellationToken cancellationToken) { HttpResponseMessage response = new HttpResponseMessage(); response.Content = new StreamContent(File.OpenRead(@"<base path>" + FileId)); response.Content.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment"); // NOTE: Here I am just setting the result on the Task and not really doing any async stuff. // But let's say you do stuff like contacting a File hosting service to get the file, then you would do 'async' stuff here. return Task.FromResult(response); } } A: Methods are asynchronous if return a Task object, not because are decorated with async keyword. async is only a syntactical sugar to replace this syntax what can get rather complex when there are more tasks combined or more continuations: public Task<int> ExampleMethodAsync() { var httpClient = new HttpClient(); var task = httpClient.GetStringAsync("http://msdn.microsoft.com") .ContinueWith(previousTask => { ResultsTextBox.Text += "Preparing to finish ExampleMethodAsync.\n"; int exampleInt = previousTask.Result.Length; return exampleInt; }); return task; } Original sample with async: http://msdn.microsoft.com/en-us/library/hh156513.aspx async always requires await, this is enforced by compiler. Both implementations are asynchroous, the only difference is that async+await replaces expands the ContinueWith into "synchronous" code. Returning Task from controller methods what do IO (99% of cases I estimate) is important because the runtime can suspend and reuse the request thread to serve other requests while the IO operation is in progress. This lowers the chances of running out of thread pool threads. Here's an article on the topic: http://www.asp.net/mvc/overview/performance/using-asynchronous-methods-in-aspnet-mvc-4 So the answer to your question "Does the above needs to be async at all? I am only looking to return stream. (Is that okay?)" is that it makes no difference to the caller, it only changes how your code looks (but not how it works).
Q: Why does opening a connection throw a distributed transactions error in MySQL? (.NET Connector) I'm opening a connection to a local MySQL server and on the connection.Open() method it throws this error: System.NotSupportedException: MySQL Connector/Net does not currently support distributed transactions. at MySql.Data.MySqlClient.MySqlConnection.EnlistTransaction(Transaction transaction) at MySql.Data.MySqlClient.MySqlConnection.Open() All I'm doing is this: var connection = new MySql.Data.MySqlClient.MySqlConnection(ConfigurationManager.ConnectionStrings["Connection"].ConnectionString); connection.Open(); The connection string in the app.config is <add name="Connection" connectionString="server=localhost;user id=userid;Password=password;database=dbname" providerName="MySql.Data.MySqlClient" /> I don't know why it's trying to enlist the transaction; I haven't specified any transactions, and I only have one MySQL server that I'm connecting to. A: Try adding Enlist=false to your connection string. EDIT: according to the MySQL Connector/.NET documentation, if you set AutoEnlist=false in the connection string it should work: <add name="Connection" connectionString="server=localhost;user id=userid;Password=password;database=dbname;AutoEnlist=false" providerName="MySql.Data.MySqlClient" /> It appears that certain versions of ADO.NET can default to automatically enlisting a connection into an existing transaction. See http://msdn.microsoft.com/en-us/library/ms254973.aspx for more detail, but I suspect that somewhere ADO.NET is confused into thinking that there's an existing transaction going on against some other database.
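If the connection string cannot be changed, another way to keep the connection from seeing an ambient transaction is to open it inside a suppressed TransactionScope. This is only a sketch of that idea, not part of the answer above; it assumes a reference to System.Transactions and reuses the "Connection" connection string name from the question.

// Assumed namespaces: System.Configuration, System.Transactions, MySql.Data.MySqlClient
using (var scope = new TransactionScope(TransactionScopeOption.Suppress))
using (var connection = new MySqlConnection(
           ConfigurationManager.ConnectionStrings["Connection"].ConnectionString))
{
    connection.Open();   // Transaction.Current is null inside the scope, so nothing is enlisted
    // ... run your commands ...
    scope.Complete();
}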
Q: C++ operator overloads and destructors struct Node { int value; Node* next; }; typedef Node* List; const Set operator +(const Set& a, const Set& b) { Set aSet; List newList = mergeListsCopy(a.list, b.list); aSet.list = newList; return aSet; } class Set { public: //method decs private: List list; }; Set::~Set() { list = deleteList(list); } The internals of this code work perfectly fine; mergeListsCopy creates a new list from two singly linked lists and assigns the pointer to the list which is a private variable of aSet. The problem is when aSet is returned, aSet.list is some strange poison address (in this case 0xf). When I ran it through the debugger a Set was created in the scope of the operator overload, but two references to this set were also created locally, both using the symbol aSet; before the return occurred, the program jumped to the destructor, presumably for the extraneous Set, but since there is only one Set it gets destroyed. When I comment out my destructor this problem goes away. What did I do wrong? A: You need to follow the Rule of Three. If you need to explicitly declare either the destructor, copy constructor or copy assignment operator yourself, you probably need to explicitly declare all three of them. It is most likely that temporary nameless objects get created (by calling the implicit compiler-generated copy constructor) during the course of execution of your program, and when those temporary objects get destroyed by a call to the destructor it ends up messing up your linked list.
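To make the Rule of Three concrete, here is a minimal sketch of what Set could declare. The copyList helper is hypothetical (the question only shows mergeListsCopy and deleteList) and is assumed to deep-copy a list; std::swap comes from <utility> (<algorithm> on older compilers).

class Set {
public:
    Set() : list(0) {}

    // Destructor: releases the owned list, as in the question.
    ~Set() { list = deleteList(list); }

    // Copy constructor: deep-copies the other set's list instead of sharing the pointer.
    Set(const Set& other) : list(copyList(other.list)) {}

    // Copy assignment via copy-and-swap: the parameter is already a copy, so swapping is safe.
    Set& operator=(Set other) {
        std::swap(list, other.list);
        return *this;
    }

private:
    List list;
};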
Q: Ingredients for cleaning optical elements I have recently purchased a cleaning solution for precision optics which has the following ingredients (amounts were not specified): Ethyl Alcohol Methylethylketon Aqua Diethyl Ether Isopropyl Typically reagent grade isopropyl alcohol alone is used for cleaning optical elements. So, does the mixture above provide any advantages over using pure isopropyl? (Possible advantages may include a faster dry time, better removal of oils/smudges, and less toxicity) Any explanations would be greatly appreciated. A: It might possibly be intended to dissolve an adherent organic film, such as that left by evaporation of plasticizers from foam packaging. However, it also might well partially dissolve paints and cements used in the lens assembly, leaving yet harder-to-remove dirt on the lens. Unless you have a specific need for those solvents, use distilled water and/or isopropanol, according to B&H.
Q: How do I view the database that EntityFramework created for me? Just starting off with MVC 4 and Entity Framework 5 (4.4 for .net 4). I've defined a couple models and saved something to database. I want to see what's going on behind the scenes, but I can't figure out how to connect to this database. How do I do it? I assume I can connect to it via the Server Explorer somehow, but I'm not sure what options to pick. A: You can get the connection string using DbContext.Database.Connection.ConnectionString - it will tell you everything since it is used to connect to the database. Then in server explorer you should know whether to pick SqlExpress or one of the SqlCompact options.
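A quick way to see that string from code, along the lines of the answer; MyContext below is just a stand-in for whatever your DbContext subclass is called.

// Assumed namespaces: System, plus your EF context's namespace
using (var db = new MyContext())
{
    Console.WriteLine(db.Database.Connection.ConnectionString);
    Console.WriteLine(db.Database.Connection.DataSource);   // server/instance to pick in Server Explorer
}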
Q: Fitness Function - What to do if the value of one chromosome feature is much higher than the others I am trying to write a Fitness Function for my Genetic Algorithm. I have three features (F1, F2, F3 - all numeric variables), which are taken into consideration for rating the chromosome. If all of them are equally important I can write an equation that simply combines them; if their importance differs, I can multiply the features by different constants. That all works if the values of all three features lie in the same interval. But what if F1 and F2 take values in (0,10) and F3, for example, in (0,49)? How could I approximately scale it? Or should I approach it differently? A: Scale all your values to [0..1] Assign a weight / importance to every value Multiply every scaled value by its weight Divide the sum of all values from step 3 by the sum of all weights I don't know which language you are using, so here is some C# code: Feature Class public class Feature { public double Importance { get; set; } public double Value { get; set; } public double LowValue { get; set; } public double HighValue { get; set; } public double ScaledValue { get { // Make sure value is within bounds double intermediate = Math.Max(Math.Min(LowValue, HighValue), Math.Min(Math.Max(LowValue, HighValue), Value)); // Scale from 0 to 1 within defined bounds double scaledValue = (intermediate - LowValue)/(HighValue - LowValue); return scaledValue; } } } Example Calculation // low importance (1), values from [20..-10] (less is better) var f1 = new Feature { Importance = 1, Value = 0, LowValue = 20, HighValue = -10 }; // low importance (1), values from [0..10] (more is better) var f2 = new Feature { Importance = 1, Value = 1, LowValue = 0, HighValue = 10 }; // high importance (5), values from [0..49] var f3 = new Feature { Importance = 5, Value = 25, LowValue = 0, HighValue = 49 }; var features = new[] {f1, f2, f3}; var score = features.Sum(f => f.ScaledValue*f.Importance)/features.Sum(f => f.Importance); Debug.WriteLine(score);
Q: Post increment on set iterator Possible Duplicate: iterator validity ,after erase() call in std::set When I iterate over a set and want to delete certain items the iterators are changed. This results in segfaults as the iteration fails after deletion. How can I overcome this problem? std::set< std::pair<double,unsigned>, comparisonFunction> candidates;' [...] for( auto it = candidates.begin(); it != candidates.end(); ++it) { [...] if ( some constraint satisfied) { candidates.erase(it); } } I encounter a segfault when I use this code. My guess is that this is either due to the corrupted iterators or due to the fact, that the element to be deleted is the last element in some cases. Does a post increment on the iterator overcome this problem? Like this: candidate.erase(it++); A: Use the return value of erase: it = candidates.erase(it); Note that you must not increment it if you erase an element, otherwise your iterator could be invalidated. for( auto it = candidates.begin(); it != candidates.end();) { if ( some constraint satisfied) { it = candidates.erase(it); } else ++it; } Also note that this wasn't possible in C++03, since erase didn't return any iterator. However, since you're using C++11 it shouldn't be a problem. References std::set::erase
Q: How to prevent wasting time with some users How can we avoid wasting time with users that don't deserve our attention, specially new users? Do I need to always open the user profile to check if he is a bad/good user? Example: Question: AdMob Ads on Android Canvas GamePanel User: https://stackoverflow.com/users/2399229/user2399229 I spent some time answering that question, but then the user insulted me (comment already removed). After that I opened the user profile and saw all those negative questions on his profile... =( EDIT: After all these negative points from my question, now I'm becoming a bad meta user, lol! But I really liked the two answers so far! What really upset me was the insult itself... A: New users, even bad ones, deserve our attention. If they haven't found the FAQ, we can point them to it. If they ask bad questions, we can help them understand how to ask better ones. If they post offensive things, we can flag and/or edit the problem. Everyone was a newbie once, and mistakes are to be expected. Part of our job is to help those bad users become good users. If the user does not learn or is being intentionally disruptive, an automatic ban will eventually be enforced. A: George's answer is an excellent point and a good answer. I'm going to approach this from a different angle, as well. You should generally judge whether to help with a question or not based on the question, not the user. If it's a good question, give it a good answer. If not, don't. Try to help if you like, or just leave it alone if not. Don't be afraid to vote (down, close) or flag (if appropriate), if you wish. Note that sometimes, 'bad' users will spontaneously become 'good' ones. People are capable of learning - sometimes at an alarming rate. If someone's post is a good one, answer it; even if their previous contributions have been 'bad', you'll be reinforcing their new, good behavior.
Q: Pending Intent are not Working as Expected I am having a great deal of trouble with Notification and PendingIntent. I am trying to open the Chat activity with the appropriate user_details for the user the message is sent from. That's why, in the Firebase Function, I have passed the from_user_id, which identifies who is sending the message. I am getting correct logs there in FCM, but when I receive a chat notification and open it, it opens the activity without any userName and messages. It opens a new instance of the activity with default values. @Override public void onMessageReceived(RemoteMessage remoteMessage) { super.onMessageReceived(remoteMessage); String notification_title = remoteMessage.getNotification().getTitle(); String notification_message = remoteMessage.getNotification().getBody(); String click_action = remoteMessage.getNotification().getClickAction(); String from_user_id = remoteMessage.getData().get("from_user_id"); NotificationCompat.Builder mBuilder = new NotificationCompat.Builder(this, CHANNEL_ID) .setSmallIcon(R.drawable.chitchat_icon) .setContentTitle(notification_title) .setAutoCancel(true) .setContentText(notification_message); Intent resultIntent = new Intent(click_action); resultIntent.putExtra("user_id", from_user_id); PendingIntent resultPendingIntent = PendingIntent.getActivity( this, 0, resultIntent, PendingIntent.FLAG_UPDATE_CURRENT ); mBuilder.setContentIntent(resultPendingIntent); int mNotificationId = (int) System.currentTimeMillis(); NotificationManager mNotificationManager = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) { mNotificationManager.createNotificationChannel(mChannel); mBuilder.setChannelId(CHANNEL_ID); } mNotificationManager.notify(mNotificationId,mBuilder.build()); } Message payload: const payload = { notification: { title: userName, body: message, icon: "default", click_action : "com.example.chitchat_TARGET_MESSAGE_NOTIFICATION" }, data : { from_user_id : from_user_id } }; My Manifest looks like this: <?xml version="1.0" encoding="utf-8"?> <manifest xmlns:android="http://schemas.android.com/apk/res/android" package="com.example.chitchat"> ... <application android:name=".ChitChat" android:allowBackup="true" android:icon="@drawable/chitchat_icon" android:label="@string/app_name" android:roundIcon="@mipmap/ic_launcher_round" android:supportsRtl="true" android:theme="@style/AppTheme"> <activity android:name=".GroupChatActivity"></activity> <activity android:name=".CallActivity" /> <activity android:name=".ChatActivity" android:parentActivityName=".MainActivity"> <intent-filter> <action android:name="com.example.chitchat_TARGET_MESSAGE_NOTIFICATION" /> <category android:name="android.intent.category.DEFAULT" /> </intent-filter> </activity> <activity android:name=".ProfileActivity"> <intent-filter> <action android:name="com.example.chitchat_TARGET_NOTIFICATION" /> <category android:name="android.intent.category.DEFAULT" /> </intent-filter> </activity> <activity ...
<intent-filter> <action android:name="android.intent.action.MAIN" /> <category android:name="android.intent.category.LAUNCHER" /> </intent-filter> </activity> <activity android:name="com.theartofdev.edmodo.cropper.CropImageActivity" android:theme="@style/Base.Theme.AppCompat" /> <meta-data android:name="com.google.firebase.default_notification_channel_id" android:value="fcm_default_channel" /> <!-- adding --> <service android:name=".FirebaseMessagingService"> <intent-filter> <action android:name="com.google.firebase.MESSAGING_EVENT" /> </intent-filter> </service> </application> ... </manifest> I don't know if adding some other features, like lifecycle event listeners and email verification for registration, has created issues. I am also unable to log the problem; I don't know why. Please suggest an appropriate fix. Thanks A: First of all change resultIntent.putExtra inside onMessageReceived like: resultIntent.putExtra("from_user_id", from_user_id); And in your ChatActivity fetch the user id like below: String user_id = getIntent().getStringExtra("from_user_id"); Hope this solves your problem. Or change your notification payload: const payload = { notification: { title: userName, body: message, icon: "default", click_action : "com.example.chitchat_TARGET_MESSAGE_NOTIFICATION" }, data : { user_id : from_user_id } }; And inside onMessageReceived change: String from_user_id = remoteMessage.getData().get("from_user_id"); To String from_user_id = remoteMessage.getData().get("user_id"); Reason: when the app is in the background, the system generates the notification itself without executing your code inside onMessageReceived. That's why the extras carry exactly the keys of the data payload (here from_user_id, the key you send from the server), not whatever your putExtra call would have set.
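For reference, a small sketch of the receiving side under the first suggestion (same "from_user_id" key). The layout resource and the loadChatForUser helper are placeholders for whatever ChatActivity actually uses to load the user name and messages.

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_chat);

    // Extras put on the PendingIntent (foreground case) or copied from the FCM data
    // payload (background case) both arrive on the launching Intent.
    String userId = getIntent().getStringExtra("from_user_id");
    if (userId != null) {
        loadChatForUser(userId);   // hypothetical helper
    }
}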
Q: What would hold some number of Double[] in Java? A List, a Collection, or something else? I would be storing between 10 and 20 Double[]. I would want to add more, maybe, and retrieve them. A: You mean where you could store your doubles for in-memory usage? Go for an ArrayList. List<Double> list = new ArrayList<Double>(); // store list.add(10.0); list.add(15.0); // Retrieve for (Double d : list) { System.out.println(d); }
Q: Horizontal CSS UL Menu For some reason my CSS styled UL menu won't center horizontally. The CSS to create it is: #navigation { width:79em; height: 2em; position: relative; padding: .5em; font-family: Verdana, Geneva, sans-serif; text-transform: uppercase; font-size: 1em; background-color: #F5F5F5; -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 0 1em 0 #333; -moz-box-shadow: 0 0 1em 0 #333; box-shadow: 0 .25em .3em -.055em #333; } #navigation ul { text-align: center; padding: 0; margin: 0; list-style: none; } #navigation li { display: inline; list-style: none; display: block; float: left; width: 10em; height: 2em; text-align: center; padding: .5em 0 0 0; margin: 0; border-left: .0625em solid #FFF; border-right: .1em solid #CCC; text-shadow: .0625em .0625em .0625em #ffffff; filter: dropshadow(color=#ffffff, offx=1, offy=1); } A: add #navigation { ... margin : 0 auto; }
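In context, the fix amounts to adding auto side margins to the fixed-width bar, roughly like this (only the margin line is new relative to the question's CSS):

#navigation {
    width: 79em;
    margin: 0 auto;   /* auto left/right margins center the fixed-width block in its parent */
}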
Q: Is changing concentration rules for 1h+ buff spells a bad idea? I have noticed that many long duration (1h+) spells in 5E D&D have concentration, effectively meaning the caster cannot cast other concentration spells while using such a spell (or end the spell). For example Barkskin and Alter Self. This seems to make many "buff" spells much less useful, possibly to the point they will never be used except in very special circumstances. I have played and DMed since early red-box basic, and I get that 5E with its bounded accuracy is not like 3.5E where the Wizard would buff up the party with 4-5 spells before they go to work for the day. However it still seems weird that now you would almost never do that or any concentration buffs in 5E. I get that with reaction spells you may not need buffs as much - but I liked the strategy and planning of selecting buffs. I am thinking of a house-rule something like this: Besides Concentration, there is Subconscious Concentration, which works the same (only one subconscious spell at a time, which can be lost if concentration would) - but it would allow you to have 1 Concentration and 1 Subconscious Concentration spell at the same time. Generally longer lasting (1h+? 10 min?) non-damaging utility spells would be Subconscious Concentration instead of Concentration*. This way the Druid could use Barkskin and still cast concentration combat spells and have fun in a fight. However, you would not have a stack of expected standard buffs on the party either, since only one utility spell could be subconsciously concentrated on at a time, which I think would make for interesting choices. *Obviously I would have to evaluate spells for the subconscious mechanic individually. I admit I am on the fence re Magic Weapon since it is damage causing - but I like the idea of the wizard buffing the fighter with a magic weapon as they hunt that Werewolf, while still being able to cast a spell or two during the fight. On the other hand I think scrying would probably stay regular concentration because...it feels right. Is this a good / bad / unbalancing idea? What are some Pros and Cons? Any suggestions for improvement on this house-rule? Edit: Conclusion Thanks to everyone for feedback and sharing your opinions. It seems this house rule might be a bad idea with unbalancing consequences. 1) DMG p263 strongly advises against changing this part of the system. There will probably be balancing issues, not limited to casters outshining non-casters. Haste or fly + a long-term subconscious buff does give me pause. 2a) The wizard still has many non-concentration spells like fireball. 2b) The druid is more limited in options, but with wildshape combinations this could be a design feature rather than a bug. Call lightning with a flying creature comes to mind. If the barkskin concentration limit that got me thinking about this in the first place bugs me, it may be safer to just make barkskin non-concentration rather than risk breaking everything. I am having second thoughts about the wisdom of this house rule. If I do give it or something like it a try, I'll update on how it turns out. A: The game was designed with concentration in mind. If you change how concentration works, it will almost certainly be unbalanced, unless you take steps to actually balance it. That being said, it's your game. Try it, see if you like it. If you don't, then change it. When I am thinking about changing something, I usually search for the most broken scenario I can think of and then I playtest it myself.
You would need to list every single spell viable for this new mechanic and then try stacking different spells together and see what happens. A: You know many Wizard damage spells are instant and can be cast while concentrating on something else, right? e.g. Fireball. You're intended to be able to Haste a party member (or other buff spell) and then blast away. Having Haste plus a "subconscious" longer-term buff up simultaneously and still being able to Fireball seems pretty powerful. I think the main competition for a Wizard's concentration slot isn't damage spells, but rather short-term very strong buffs like Haste. It's more a problem for Druids where most of the damage spells are concentration (Call Lightning, Flaming Sphere), although most of their damage cantrips are still instant. I don't have a lot of experience with this, but yes I think Barkskin is hard to use because of the concentration opportunity cost. Perhaps if you're worried about an ambush? But druids can do a lot of stuff (like wild shape) so I think the existing balance (of many druid spells requiring concentration) is fully intentional. e.g. Call Lightning then wild shape into a bird to stay out of reach while blasting enemies makes it an upside, and saves your spell slots for later healing. It does mean you can't buff. Or the classic is Flaming Sphere + Wildshape so you can attack and move the sphere with a bonus action. Or if you wildshape into something squishy, barkskin first.
Q: change the format of the date inserted by PHP into MySQL database I am trying to insert the current date into a MySQL database in this format: (12/31/2013 10:26:12 PM). I've tried to write some simple code to change the format, but all I get is a syntax error $sql = "INSERT INTO Students VALUES ('','" . $info[$i]['firstname'] . "', '" . $info[$i]['lastname'] . "', '" . $info[$i]['sex'] . "', '" . $info[$i]['major'] . "', '" . $info[$i]['favorite'] . "', '" . $info[$i]['GPA'] "TO_CHAR(SYSDATE(),'dd/mm/yyyy')"; Tell me please what shall I do with it. A: Just try this - you were missing the concatenation operator before the date expression and the closing quote and parenthesis at the end: $sql = "INSERT INTO Students VALUES ('','" . $info[$i]['firstname'] . "', '" . $info[$i]['lastname'] . "', '" . $info[$i]['sex'] . "', '" . $info[$i]['major'] . "', '" . $info[$i]['favorite'] . "', '" . $info[$i]['GPA'] . "', '" . gmdate('m/d/Y g:i:s A') . "')"; You can also replace gmdate with date if you want the server's local time instead of GMT. Have a nice day
Q: X,Y axis skew line how go up/down/sides I have two points, one start position and a goal position(dynamic). I want to spawn players as they would in a formula 1 race. i.e the second a little to the right and back of the first, third left and back of the second and so on. I have already determined the angle so they face to the goal point. I dont know how to move relative to the line on the axes. I think my distance moves it sideways, but im not a 100% sure.. I also am too stupid to figure out how to go perpendicular of the new points, even though it's probably just adding a minus somewhere. Well, I hope someone can help me with this, thanks a lot in advance. Note: The code is in Pawn, a C-like scripting language. new x1 = RaceCheckpoints[0][0]//startpoint x new y1 = RaceCheckpoints[0][1]//startpoint y new x2 = RaceCheckpoints[1][0]//goalpoint x new y2 = RaceCheckpoints[1][1]//goalpoint y new dist = 2; new pos = 0; new x3, y3, x4, y4, a, b, norm; x3 = (x1 + x2) / 2; y3 = (y1 + y2) / 2; a = y1 - y2; b = x2 - x1; norm = sqrt(a*a + b*b); a = a / norm; b = b / norm; x3 = x3 + a * -dist; y3 = y3 + b * -dist; x4 = x3 + a * 2 * dist; y4 = y3 + b * 2 * dist; for(new i;i<MAX_PLAYERS;i++) { if(RaceParticipant[i] != 0) { if(IsPlayerInAnyVehicle(i)) PlayerVehicles[i]=GetPlayerVehicleID(i); else PlayerVehicles[i]=0; if (pos = 0)//left lane { SetPlayerPosFindZ(playerid, x3, y3, RaceCheckpoints[0][2]+10); new angle = atan2(y2 - x3, x2 - y3) * 180 / PI; SetPlayerFacingAngle(i,angle); pos++; } if (pos = 1)//right lane { SetPlayerPosFindZ(playerid, x4, y4, RaceCheckpoints[0][2]+10); new angle = atan2(y2 - x4, x2 - y4) * 180 / PI; SetPlayerFacingAngle(i,angle); pos--; } } } A: Let's say that your goal lies directly in x direction. Your vector between start and goal is then (0, 1) and the angle between it and the x axis is, of course, zero. Let's also say that each car has a row ix and a columns iy and that the first car has row and column 0. The distance of any car to the first car is then xx = - ix * dx - iy * dd; yy = - iy * dy; where dx, dy and dd are the metrics between cars: --------000------------------- | 000 dd | 000 111-------- dx 000 111 | 000 111 | 111 | 111 --------222 222 222 333 222 333 222 333 | 333 | 333 | | |--- dy ---| Now say that your goal lies somewhere else and the the vector between start and goal is (vx, vy). The angle between that vector and the x axis is a. You have to rotate your xx and yy: xx' = cos(a) * xx - sin(a) * yy yy' = sin(a) * xx + cos(a) * yy You could also write this in matrix notation: {P'} = [C] * {P} where {P} and {P'} are your unrotated and rotated points and [C] is the rotation matrix: | cos(a) - sin(a) | [C] = | | | sin(a) cos(a) | Your angle is a = atan2(vy, vx) but you dn't really need the angle here. If you normalise vor vector (vx, vy) so that it is a unit vector, vx and vy are already the cosine and sine of your rotation. The last step is to add your starting point to the rotated positions. Putting all this together (in C, not Pawn): double dx = 8.0; // x distance between 1st and 3rd car double dy = 5.0; // y distance between 1st and 2nd car double dd = 1.5; // x distance between 1st and 2nd car double sx = 118.0; // start point, i.e. 
position of 1st car double sy = 6.0; double gx = 240.0; // goal point double gy = 60.0; int ncar = 8; // number of cars double vx = gx - sx; // vector between start and goal double vy = gy - sy; double vv; double acos; // sine and cosine of the angle between double asin; // (vx, vy) and (1, 0) double cx[ncar]; // car positions double cy[ncar]; int i; vv = sqrt(vx*vx + vy * vy); // normalise vector acos = vx / vv; // determine rotation cosines asin = vy / vv; for (i = 0; i < ncar; i++) { int ix = i / 2; // grid index row int iy = i % 2; // grid index column double xx = - ix * dx - iy * dd; // unrotated car pos, double yy = - iy * dy; // 1st car a (0, 0) cx[i] = sx + acos * xx - asin * yy; cy[i] = sy + asin * xx + acos * yy; }
Q: Basic contact form not displaying input info in email PHP HTML5 I have a basic contact form submitted using PHP. When it is submitted I receive an email, however it does not display the information entered into the fields. That information is sent, though; it is displayed in the address bar of the browser, e.g. ../contact.php?url=antispam&name=dfsaa&number=fdfd&email=fdsafds%40fds.com&message=Please+contact+me+regarding+this Front: <form action="contact.php"> <input class="form-control" id="name" name="name" placeholder="Name" type="text" required> <input class="form-control" id="number" name="number" placeholder="Contact Number" type="text" required> <input class="form-control" id="email" name="email" placeholder="Email" type="email" required> <textarea class="form-control" id="message" name="message" placeholder="Message" rows="5">Please contact me regarding this...</textarea><br> <button class="btn btn-default pull-right" type="submit">Send</button> </form> PHP: <?php $name = @trim(stripslashes($_POST["name"])); $email = @trim(stripslashes($_POST["email"])); $number = @trim(stripslashes($_POST["number"])); $message = @trim(stripslashes($_POST["message"])); $email_from = $email; $email_to = "[email protected]"; $body = "Name: " . $name . "\n\n" . "Email: " . $email . "\n\n" . "Number: " . $number . "\n\n" . "\n\n" . "Message: " . $message; $success = @mail($email_to, $body, "Name: " . $name . "\n\n" . "Email: " . $email . "\n\n" . "Number: " . $number . "\n\n" . "Message: " . $message); ?> <!DOCTYPE HTML> <html lang="en-US"> <head> <script> alert("Thank you for contacting us. A member of our team will be in touch as soon as possible."); </script> <meta HTTP-EQUIV="REFRESH" content="0; url= index.html"> </head> The contact form works perfectly on another site... This one is on a GoDaddy site that I didn't create; I'm just amending it. The email that I receive has all of the field titles but none of the input info. Any ideas why? A: Your form submits with GET by default, not POST as your PHP expects, therefore the data are visible through the $_GET global PHP variable and not $_POST. Since you want POST, modify your form structure: <form action="contact.php"> should be <form action="contact.php" method="post"> And in your PHP, first check that the request method is POST: <?php if (strtoupper($_SERVER['REQUEST_METHOD']) == 'POST') { //Get data and send email $name = trim($_POST['....']); ... }
Q: Android: Bundle.toString() and then create a Bundle from String I'm trying to do the following: // I have a Bundle and convert it to string Bundle _bundle; // Meanwhile I put integers, booleans etc in _bundle String _strBundle = _bundle.toString(); Later in my code I need to create a Bundle from _strBundle. How do I do that? So far I couldn't find any information on this. Of course I don't wish to parse _strBundle myself and hope that the framework already provides a String2Bundle sort of functionality. A: As far as I know there is no way for the framework to know what is stored in your string. The same pattern could have several possible interpretations (string, boolean, integer) and so I don't think that actually even makes sense. In fact I can't imagine why you would need to do it. You already have the bundle in the first place. I suggest you keep the original bundle and use it when you need it, or parse the string and create the bundle yourself (which won't be an easy task if you want to cover all the possibilities). A: If you want to pass data as a String to another place, it's better to use JSONObject; it's very simple to create a JSONObject from a String and vice versa. And it's very similar to the Bundle class! json = new JSONObject(str); str = json.toString(); Also, you can pass it as a String parameter with an Intent or to any other method as a String.
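A rough sketch of the JSON round trip the second answer suggests, for simple value types only; the keys here are purely illustrative and JSONException handling is collapsed into one try/catch.

try {
    // Bundle-like data -> String
    JSONObject json = new JSONObject();
    json.put("count", 3);
    json.put("enabled", true);
    String str = json.toString();

    // String -> Bundle again
    JSONObject parsed = new JSONObject(str);
    Bundle bundle = new Bundle();
    bundle.putInt("count", parsed.getInt("count"));
    bundle.putBoolean("enabled", parsed.getBoolean("enabled"));
} catch (JSONException e) {
    // malformed string or missing keys
}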
Q: Transitioning between two view controllers via a Navigation Controller Hello. I have just started learning Obj-C, and as my first program I am implementing a transition between two views via a Navigation Controller. The buttons are in the navigation bar. Here is what I have in my files: UntitledAppDelegate.m - (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions { RootViewController *rootViewController=[[RootViewController alloc] init]; UINavigationController *navigationController=[[UINavigationController alloc] initWithRootViewController:rootViewController]; self.window.rootViewController=navigationController; [view release]; [label release]; [button release]; [self.window makeKeyAndVisible]; return YES; } RootViewController.m - (void)viewDidLoad { [super viewDidLoad]; self.title=@"My View Controller"; self.navigationController.navigationBarHidden=NO; UIBarButtonItem *item=[[UIBarButtonItem alloc] initWithTitle:@"next view" style:UIBarButtonItemStylePlain target:self action:@selector(secondPage:)]; self.navigationItem.rightBarButtonItem=item; [item release]; } -(void)secondPage:(id)sender { UIBarButtonItem *item=(UIBarButtonItem *)sender; if(self.secondViewController==nil) { SecondViewController *secondView=[[SecondViewController alloc] init]; self.secondViewController=secondView; } [self.navigationController pushViewController:self.secondViewController animated:YES]; } There is also a SecondViewController.m file, but it is empty for now. I get the error "request for member "secondViewController" in something not a structure or union". I have hooked up all the .h files. Can anyone tell me what is wrong here? A: Are you from the past? Remove your release calls; they have not been needed for 2 years now. -(void)secondPage:(id)sender { if(!secondViewController) { secondViewController=[[SecondViewController alloc] initWithNibName:@"SecondViewController" bundle:nil]; } [self.navigationController pushViewController: secondViewController animated:YES]; }
Q: Perl Regexp with an option I am trying to parse this string and grab the number between /TN and the ending forward slash /. Also, if there is a /REF I need to grab the REF # as well. So it could be an either/or situation. Example String: my $acis_string = 'EL41X/TN 333-3333 /DES TAT 313 223-2388 OE508-1-12-13/ORD R1244850301 /CD 02-03-15 /ZKMA 8001 T1ZF PNTCMINEDC0 PNTCMISOH00/SCS DQKUX /TN 333-3330, 6540/RTI 581 /LSO 248 340/REF 28/TGP 581 /ORD C1244888657/CD 02-12-16'; Here was my base regexp: push @matches, [$2, $1] while $acis_string =~ /\/TN (.*?)\/.*?(\/REF (\w+)\/)?/g; Desired Output: $VAR1 = [ [ n/a, '333-3333 ' ], [ 28, '333-3330, 6540' ] ]; Note: There could be many /TN / and /REF in the string. Thanks, Kevin A: You can use the following regex: \/TN\s+([^\/]+)(?:(?!\/TN|\/REF).)*(?:\/REF\s+([^\/]+))? See RegEX DEMO Explanation: \/TN\s+([^\/]+) : Match characters other than / after /TN and put in capture group 1 (?:(?!\/TN|\/REF).)* : Logic to match characters making sure there is no /TN or /REF in them (?:\/REF\s+([^\/]+))? : Optionally match characters other than / after /REF and put in capture group 2
Q: unable to write to CSV after replace I have an input file on which I am performing a string replace operation. I read the file cell by cell, replace the string and then write it back to a new CSV file. input_file = open('/Users/tcssig/Desktop/unstacked2.csv', 'r', encoding='utf-8') output_file = open('/Users/tcssig/Desktop/unstacked3.csv', 'w', encoding='utf-8') writer = csv.writer(output_file , delimiter=' ') reader = csv.reader(input_file) for row in reader: for string in row: data = [string.replace('read','write')] print(data) writer.writerow(data) The above code runs without errors, but I get an empty output file. Example of data: reading reading reading reading interval 0 1 2 3 who axis Mikael X 0 10 20 30 Mikael Y 50 40 30 20 Mikael Z 100 90 80 70 Mike X 0 0.1 0.2 0.3 Mike Y 0.5 0.4 0.3 0.2 Mike Z 1 0.9 0.8 0.7 What am I missing? A: Content of input file: "Roll No" English read Science "Roll No" English Write Science Problem with your code: As mentioned by @Scott, the files are not closed. You are reading cell by cell with for string in row: and replacing the string there. But after the replacement you are writing that cell as a row in your file. For example, the output file with your code looks like: Roll No English read Science This is due to the above-mentioned reason, i.e. you are writing each cell. How to make it work? Comments inline with code import csv input_file = open('mark.csv', 'r', encoding='utf-8') output_file = open('result.csv', 'w', encoding='utf-8') writer = csv.writer(output_file, delimiter=' ') reader = csv.reader(input_file) for row in reader: #Initialize empty list for each row data = [] for string in row: #Replace and add to data list data.append(string.replace('read','write')) #Now write the complete row writer.writerow(data) input_file.close() output_file.close() Output: "Roll No" English write Science "Roll No" English Write Science You can achieve the same thing without the csv module. with open("mark.csv") as input_file: with open("result.csv",'w') as output_file: for line in input_file: new_line = (line.replace("read","write")).replace(","," ") output_file.write(new_line)
Q: Track an instance in C#? Is there a way to track a single instance in C#/.NET in Visual Studio while debugging? I think it would be really useful sometimes. Another way to look at it would be breakpoints on instances rather than code. Therefore, every time my instance is accessed and/or modified the execution stops and I am presented with the line of code which accesses/modifies my instance. In C++ the equivalent would be monitoring the piece of memory where the instance is located, or simply a pointer to the instance. This approach doesn't work with managed code as the objects in .NET are moved around, therefore I need an equivalent of C++ pointers. I am aware of WeakReferences in C# but I am not sure if they are of any use while debugging? Edit1: This question is different from "When debugging, is there a way to tell if an object is a different instance?" as I am not interested in comparing two references, but I want to access a single object. A: There's nothing that I'm aware of out of the box, but VS does support conditional breakpoints. One option would be to: Place breakpoints on all of the methods on your class that you're interested in Debug your code through until the first of these is hit Find the HashCode of the instance Make all of the breakpoints conditional on GetHashCode() == the hash code you previously retrieved Let the application run on until the breakpoint is hit again Look in the Call Stack window to see which line of code is calling your method A little clunky, but it will work...
Q: Why is this meta content redirect not working The default document for the IIS .Net project is a Default.htm file. The following code reflects the contents of the Default.htm file. When it runs, it does not redirect to the Project1 folder, but looks for the Login.aspx in the current directory instead (example: www.website.com/Login.aspx when it should be www.website.com/Project1/Login.aspx). I assumed my url tag was incorrect, however it is without flaw. <html> <head> <meta HTTP-EQUIV="REFRESH" content="0; url=Project1/Login.aspx"> <title>Welcome</title> </head> <body> <p>Loading ...</p> <p>Please click <a href="Project1/Login.aspx">here</a> to login</p> </body> </html> Why does it not look in the Project1 folder for the Login.aspx? A: You need to change to this (add slash at the beginning of url): <meta HTTP-EQUIV="REFRESH" content="0; url=/Project1/Login.aspx">
Q: Will using UUID as list keys cause unnecessary re-renders in React? I have a list of items that doesn't contain enough data to generate a unique key. If I use the uuid library to generate an ID, will a single item change also cause the other items to re-render, since their key will change each time? const people = [ { gender: 'male', firstName: 'david', }, { gender: 'male', firstName: 'david', }, { gender: 'male', firstName: 'joe', }, ] const renderPeople = () => { return people.map(person => { return ( <div key={uuid.v4() /* a new value each time? */ }> <p>{person.gender}</p> <p>{person.firstName}</p> </div> ) }) } some time later... one of the davids changed const people = [ { gender: 'male', firstName: 'david', }, { gender: 'male', firstName: 'davidzz', }, { gender: 'male', firstName: 'joe', }, ] A: <div key={uuid.v4()}> assigns a new key to each <div> on every render, so it is useless. If the array stays the same, the UUID should be generated at array creation. If the array changes, e.g. it is received from an HTTP request, UUIDs for elements with the same content will be generated again. In order to avoid that, the key should be specific to the person entity. It's always preferable to use internal identifiers (database IDs) where available, for unambiguity. If identifiers are not available, the key may depend on element contents: return ( <div key={JSON.stringify(person)}> <p>{person.gender}</p> <p>{person.firstName}</p> </div> ) It's more efficient to hash elements once, at the time the array is created, e.g. with uuid: import uuidv3 from 'uuid/v3'; ... for (const person of people) { person.key = uuidv3(JSON.stringify(person), uuidv3.URL); } Or use a dedicated hashing function like object-hash. Notice that hashing may result in key collisions if there are elements with the same contents.
Q: Record Count functoid returns aggregate count for non-flattend target message I tried to use the Record Count functoid to map the number of sub-records of an record that itself occurs 0 to unbounded to a message with each record containing a field holding the number of sub-records: root+ +root | | +foo+ +foo+ | | +bar+ -RecordCount- barcount | +xyz However my current map aggregates the count of all bar records and returns it in every foo\barcount. Sample source message <root> <foo> <Id>1</Id> <bar> <xyz /> </bar> <bar> <xyz /> </bar> </foo> <foo> <Id>2</Id> <bar> <xyz /> </bar> <bar> <xyz /> </bar> </foo> </root> ... and the result is <root> <foo> <Id>1</Id> <barcount>4</barcount> </foo> <foo> <Id>2</Id> <barcount>4</barcount> </foo> </root> ... whereas I expected <root> <foo> <Id>1</Id> <barcount>2</barcount> </foo> <foo> <Id>2</Id> <barcount>2</barcount> </foo> </root> A: I solved this issue by replacing the Record Count functoid with a Call XSLT Template Scripting functoid. The XSLT template looks like this: <xsl:template name="CountMyBar"> <xsl:param name="fooId" /> <xsl:element name="barcount"> <xsl:value-of select="count(//foo[Id=$fooId]/bar)" /> </xsl:element> </xsl:template> and the input to the scripting functoid is the Id field from foo.
Q: Detect faulty drive in RAID 10 array I've been told that I can only verify my HW RAID array is working perfectly with KVM. However, I want my server to notify me automatically when there is a problem. Is there a way, via SSH (called via system() in PHP), to detect that a drive is having problems? I don't need to identify which drive. I have thought of one theory but I don't know if it will work in practice. If I were to run a PHP script that fopen()s '/dev/[filesystem]' in 'r' mode and seeks every x GB to read 1 byte, then when it seeks to a position of the filesystem that's having problems, it should return an error. Is my thinking correct? I use the XFS filesystem; I have heard of xfs_check, but that needs to be run in read-only mode, which is inconvenient. I use a 3ware RAID controller. A: Install the 3Ware tools (tw_cli) on your machine. After you have installed them, get the id # of the controller (I've never understood the system behind it, for all I know it might be random): $ tw_cli show Ctl Model (V)Ports Drives Units NotOpt RRate VRate BBU ------------------------------------------------------------------------ c0 9550SXU-4LP 4 2 1 0 1 1 - You can then query the array status with $ tw_cli /c0 show Unit UnitType Status %RCmpl %V/I/M Stripe Size(GB) Cache AVrfy ------------------------------------------------------------------------------ u0 RAID-1 OK - - - 74.4951 ON OFF Port Status Unit Size Blocks Serial --------------------------------------------------------------- p0 NOT-PRESENT - - - - p1 NOT-PRESENT - - - - p2 OK u0 74.53 GB 156301488 9QZ07NP2 p3 OK u0 74.53 GB 156301488 9QZ08DS2 Obviously, this will look different on your machine. These examples were lifted from here. To actively verify (scrub) your drives, use $ tw_cli /c0/u0 start verify For automatic notifications, you should set up a monitoring system, e.g. Nagios or Icinga, and use a plugin that checks the health of the array with the help of tw_cli. These plugins work nicely without Nagios/Icinga as well and could easily be used in a minimal monitoring system in the form of a cron job that sends a mail if the plugin doesn't return 0.
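As a rough sketch of the cron-style check described above, using only the tw_cli output format shown in the answer (controller /c0, unit u0); the regex and the mail address are assumptions to adapt to your own setup:

<?php
// Run the same command the answer demonstrates and send a mail if unit u0 is not OK.
$output = shell_exec('tw_cli /c0 show 2>&1');
if ($output === null || !preg_match('/^u0\s+\S+\s+OK\b/m', $output)) {
    mail('admin@example.com', 'RAID unit u0 not OK', "tw_cli reported:\n" . $output);
}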
Q: MasterPage objects returning as null I've got an ASP.net application that is used to display information queried from our ERP system onto various web pages. The main object is called an EpicorUser, and it basically encapsulates all current information about an employee. This object is used to fill in a bunch of various fields on the master page such as Full Name, current activity, clock in/out times, etc. I am trying to pass this object from the MasterPage into the content pages to avoid needlessly querying the WebService that serves this information. The problem is, when I access the object from a ContentPage, it is always null. I know it has been populated because my MasterPage content is all filled in correctly. I am trying to access the MasterPage 'CurrentUser' object from my ContentPage like this: **MasterPage Codebehind:** public EpicorUser CurrentUser; //This object is populated once user has authenticated ///This is in my ContentPage ASPX file so I can reference the MasterPage from codebehind <%@ MasterType VirtualPath="~/Pages/MasterPage/ShopConnect.Master" %> **ContentPage CodeBehind:** string FullName = Master.CurrentUser.UserFileData.FullName; //CurrentUser is null (but it shouldn't be) Strange thing is, I had another content page where this system worked fine. It has also stopped working, and I don't think I have changed anything on the masterpage that could cause this. I had set CurrentUser as a public property so I could access it. I went as far as creating a method to re-populate the object from the master page, and calling it from the code-behind on the content page: **ContentPage code-behind:** EpicorUser CurrentUser = Master.GetCurrentUserObject(); **MasterPage Method being invoked:** public EpicorUser GetCurrentUserObject() { using (PrincipalContext context = new PrincipalContext(ContextType.Domain, "OFFICE")) { UserPrincipal principal = UserPrincipal.FindByIdentity(context, HttpContext.Current.User.Identity.Name); EpicorUser CurrentEmployee = RetrieveUserInfoByWindowsID(principal.SamAccountName); return CurrentUser; //Object is NOT null before the return } } **ContentPage code-behind return:** EpicorUser CurrentUser = Master.GetCurrentUserObject(); //But object is now null once we return Stepping through the code shows me that the CurrentUser object is populated correctly in the MasterPage code behind, but once it is returned to the ContentPage code behind, it is now null! Anyone know where the disconnect is? A: The Content Page is loaded first and then the Master page is loaded. So, your property could be blank when it is accessed in the content page. You can try creating a public method (to return the user object) on the master page and then call the method from the content page. Another option is creating a base page class (which all content pages inherit) with a property that returns the user object, so all pages can access the value. EDIT: public class BasePageClass : System.Web.UI.Page { public List<string> LookupValues { get { if (ViewState["LookupValues"] == null) { /* * create default instance here or retrieve values from Database for one time purpose */ ViewState["LookupValues"] = new List<string>(); } return ViewState["LookupValues"] as List<string>; } } } public partial class WebForm6 : BasePageClass { protected void Page_Load(object sender, EventArgs e) { } protected void MyButton_Click(object sender, EventArgs e) { //access lookup properties List<string> myValues = LookupValues; } }
Q: How to detect if sprite is being touched? I'm trying to figure out how to know if the user is touching a sprite. I simplified my code to the bare bones: on init I create a single sprite called "button" and then I try to detect when the user touches it / stops touching it. This is what I'm trying now: -(id)initWithSize:(CGSize)size { if (self = [super initWithSize:size]) { self.backgroundColor = [SKColor colorWithRed:0 green:0 blue:0 alpha:1.0]; // Add button SKSpriteNode *sprite = [SKSpriteNode spriteNodeWithImageNamed:@"button"]; sprite.name = @"button"; sprite.position = CGPointMake(CGRectGetMidX(self.frame), CGRectGetMidY(self.frame)); [self addChild:sprite]; } return self; } -(void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event { UITouch *touch = [touches anyObject]; CGPoint location = [touch locationInNode:self]; SKNode *node = [self nodeAtPoint:location]; if ([node.name isEqualToString:@"button"]) { NSLog(@"Started touch on sprite"); } } -(void)update:(CFTimeInterval)currentTime { /* Called before each frame is rendered */ } However, though I can tell if a touch starts on the button, I cannot tell if the user ends the touch or moves out of the sprite (or vice versa). How can I do this? A: Add to properties: (allows you to access them in various methods) @property (nonatomic) BOOL touchInSprite; @property (nonatomic) SKSpriteNode * sprite; @end Add methods: - (void) touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event { UITouch * touch = [touches anyObject]; CGPoint location = [touch locationInNode:self]; if ([self.sprite containsPoint: location]) self.touchInSprite = true; else self.touchInSprite = false; } - (void) touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event { UITouch * touch = [touches anyObject]; CGPoint location = [touch locationInNode:self]; if ([self.sprite containsPoint: location]) { self.touchInSprite = true; } else { self.touchInSprite = false; //user stopped touching it } } - (void) touchesEnded:(NSSet *)touches withEvent:(UIEvent *)event { UITouch * touch = [touches anyObject]; CGPoint location = [touch locationInNode:self]; if (self.touchInSprite == true) { //Perform action } }
Q: Python socket error resilience / workaround I have a script running that is testing a series of urls for availability. This is one of the functions. def checkUrl(url): # Only downloads headers, returns status code. p = urlparse(url) conn = httplib.HTTPConnection(p.netloc) conn.request('HEAD', p.path) resp = conn.getresponse() return resp.status Occasionally, the VPS will lose connectivity, the entire script crashes when that occurs. File "/usr/lib/python2.6/httplib.py", line 914, in request self._send_request(method, url, body, headers) File "/usr/lib/python2.6/httplib.py", line 951, in _send_request self.endheaders() File "/usr/lib/python2.6/httplib.py", line 908, in endheaders self._send_output() File "/usr/lib/python2.6/httplib.py", line 780, in _send_output self.send(msg) File "/usr/lib/python2.6/httplib.py", line 739, in send self.connect() File "/usr/lib/python2.6/httplib.py", line 720, in connect self.timeout) File "/usr/lib/python2.6/socket.py", line 561, in create_connection raise error, msg socket.error: [Errno 101] Network is unreachable I'm not at all familiar with handling errors like this in python. What is the appropriate way to keep the script from crashing when network connectivity is temporarily lost? Edit: I ended up with this - feedback? def checkUrl(url): # Only downloads headers, returns status code. try: p = urlparse(url) conn = httplib.HTTPConnection(p.netloc) conn.request('HEAD', p.path) resp = conn.getresponse() return resp.status except IOError, e: if e.errno == 101: print "Network Error" time.sleep(1) checkUrl(url) else: raise I'm not sure I fully understand what raise does though.. A: Problem with your solution as it stands is you're going to run out of stack space if there are too many errors on a single URL (> 1000 by default) due to the recursion. Also, the extra stack frames could make tracebacks hard to read (500 calls to checkURL). I'd rewrite it to be iterative, like so: def checkUrl(url): # Only downloads headers, returns status code. while True: try: p = urlparse(url) conn = httplib.HTTPConnection(p.netloc) conn.request('HEAD', p.path) resp = conn.getresponse() return resp.status except IOError as e: if e.errno == 101: print "Network Error" time.sleep(1) except: raise Also, you want the last clause in your try to be a bare except not an else. Your else only gets executed if control falls through the try suite, which can never happen, since the last statement of the try suite is return. This is very easy to change to allow a limited number of retries. Just change the while True: line to for _ in xrange(5) or however many retries you wish to accept. The function will then return None if it can't connect to the site after 5 attempts. You can have it return something else or raise an exception by adding return or raise SomeException at the very end of the function (indented the same as the for or while line). A: If you just want to handle this Network is unreachable 101, and let other exceptions throw an error, you can do following for example. from errno import ENETUNREACH try: # tricky code goes here except IOError as e: # an IOError exception occurred (socket.error is a subclass) if e.errno == ENETUNREACH: # now we had the error code 101, network unreachable do_some_recovery else: # other exceptions we reraise again raise
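To make the bounded-retry suggestion from the first answer concrete, here is a small sketch in the same Python 2 style as the thread (urlparse, httplib, the errno 101 check); treat it as an illustration rather than a drop-in replacement.

from urlparse import urlparse
from errno import ENETUNREACH
import httplib
import time

def check_url(url, retries=5):
    for _ in xrange(retries):
        try:
            p = urlparse(url)
            conn = httplib.HTTPConnection(p.netloc)
            conn.request('HEAD', p.path)
            return conn.getresponse().status
        except IOError as e:
            if e.errno != ENETUNREACH:   # only retry on "network is unreachable"
                raise
            print "Network Error"
            time.sleep(1)
    return None   # gave up after `retries` attempts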
Q: Get all users, with all their images I have 2 mysql tables : users table table that contains gallery images for each user My users table looks like : id | name --------- 1 Ryan 2 James 3 Dave My user_gallery_images tables looks like : id | user_id | image -------------------- 1 1 image.jpg 2 1 image2.jpg 3 2 image3.jpg 4 2 image4.jpg I was wondering if there was a query that would retrieve all users, and get all the images for that user. The expected result should look like : id | name | images ------------------- 1 Ryan image.jpg,image2.jpg 2 James image3.jpg,image4.jpg 3 Dave Thank you A: You will have to use a LEFT JOIN rather than an INNER JOIN because you want to retrieve David who doesn't have images. And you will need to use GROUP_CONCAT SELECT u.id, u.name, GROUP_CONCAT(image) from users u LEFT JOIN user_gallery_images g ON u.id = g.user_id GROUP by u.id Note this query will work on mysql 5.7 only if you have a PRIMARY KEY on users.id. Will work on mysql < 5.7 regardless of the primary key
Q: R - How to fill a matrix by columns I am trying to fill the columns of a matrix with the subsets of an built-in data frame. The resulting matrix should have dimensions 16 by 11 and each subset is 16 integers long. I have written the following for loop: A <- unique(DNase$Run) z <- matrix(data =NA, ncol=11, nrow =16) for (i in A) { z[,i] <- subset(DNase$density, DNase$Run==i) } and I obtain the following error: Error in [<-(*tmp*, , i, value = c(0.017, 0.018, 0.121, 0.124, 0.206, : no 'dimnames' attribute for array Could anyone kindly explain where the confusion comes from? Many thanks in advance! A: Cheap Solution Since the DNase data.frame is already ordered by the Run factor, we can actually form the desired output matrix with a simple call to matrix(): matrix(DNase$density,16); ## [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] ## [1,] 0.017 0.045 0.070 0.011 0.035 0.086 0.094 0.054 0.032 0.052 0.047 ## [2,] 0.018 0.050 0.068 0.016 0.035 0.103 0.092 0.054 0.043 0.094 0.057 ## [3,] 0.121 0.137 0.173 0.118 0.132 0.191 0.182 0.152 0.142 0.164 0.159 ## [4,] 0.124 0.123 0.165 0.108 0.135 0.189 0.182 0.148 0.155 0.166 0.155 ## [5,] 0.206 0.225 0.277 0.200 0.224 0.272 0.282 0.226 0.239 0.259 0.246 ## [6,] 0.215 0.207 0.248 0.206 0.220 0.277 0.273 0.222 0.242 0.256 0.252 ## [7,] 0.377 0.401 0.434 0.364 0.385 0.440 0.444 0.392 0.420 0.439 0.427 ## [8,] 0.374 0.383 0.426 0.360 0.390 0.426 0.439 0.383 0.395 0.439 0.411 ## [9,] 0.614 0.672 0.703 0.620 0.658 0.686 0.686 0.658 0.624 0.690 0.704 ## [10,] 0.609 0.681 0.689 0.640 0.647 0.676 0.668 0.644 0.705 0.701 0.684 ## [11,] 1.019 1.116 1.067 0.979 1.060 1.062 1.052 1.043 1.046 1.042 0.994 ## [12,] 1.001 1.078 1.077 0.973 1.031 1.072 1.035 1.002 1.026 1.075 0.980 ## [13,] 1.334 1.554 1.629 1.424 1.425 1.424 1.409 1.466 1.398 1.340 1.421 ## [14,] 1.364 1.526 1.479 1.399 1.409 1.459 1.392 1.381 1.405 1.406 1.385 ## [15,] 1.730 1.932 2.003 1.740 1.750 1.768 1.759 1.743 1.693 1.699 1.715 ## [16,] 1.710 1.914 1.884 1.732 1.738 1.806 1.739 1.724 1.729 1.708 1.721 This of course depends on the aforementioned ordering, which can be verified with a call to rle(): do.call(data.frame,rle(levels(DNase$Run)[DNase$Run])); ## lengths values ## 1 16 1 ## 2 16 2 ## 3 16 3 ## 4 16 4 ## 5 16 5 ## 6 16 6 ## 7 16 7 ## 8 16 8 ## 9 16 9 ## 10 16 10 ## 11 16 11 Robust Solution If we don't want to depend on that ordering, we can use reshape() as follows, and we get a nice bonus of column names, if you want that: reshape(cbind(DNase[c('Run','density')],id=ave(c(DNase$Run),DNase$Run,FUN=seq_along)),dir='w',timevar='Run')[-1]; ## density.1 density.2 density.3 density.4 density.5 density.6 density.7 density.8 density.9 density.10 density.11 ## 1 0.017 0.045 0.070 0.011 0.035 0.086 0.094 0.054 0.032 0.052 0.047 ## 2 0.018 0.050 0.068 0.016 0.035 0.103 0.092 0.054 0.043 0.094 0.057 ## 3 0.121 0.137 0.173 0.118 0.132 0.191 0.182 0.152 0.142 0.164 0.159 ## 4 0.124 0.123 0.165 0.108 0.135 0.189 0.182 0.148 0.155 0.166 0.155 ## 5 0.206 0.225 0.277 0.200 0.224 0.272 0.282 0.226 0.239 0.259 0.246 ## 6 0.215 0.207 0.248 0.206 0.220 0.277 0.273 0.222 0.242 0.256 0.252 ## 7 0.377 0.401 0.434 0.364 0.385 0.440 0.444 0.392 0.420 0.439 0.427 ## 8 0.374 0.383 0.426 0.360 0.390 0.426 0.439 0.383 0.395 0.439 0.411 ## 9 0.614 0.672 0.703 0.620 0.658 0.686 0.686 0.658 0.624 0.690 0.704 ## 10 0.609 0.681 0.689 0.640 0.647 0.676 0.668 0.644 0.705 0.701 0.684 ## 11 1.019 1.116 1.067 0.979 1.060 1.062 1.052 1.043 1.046 1.042 0.994 ## 12 1.001 1.078 1.077 0.973 1.031 1.072 1.035 1.002 
1.026 1.075 0.980 ## 13 1.334 1.554 1.629 1.424 1.425 1.424 1.409 1.466 1.398 1.340 1.421 ## 14 1.364 1.526 1.479 1.399 1.409 1.459 1.392 1.381 1.405 1.406 1.385 ## 15 1.730 1.932 2.003 1.740 1.750 1.768 1.759 1.743 1.693 1.699 1.715 ## 16 1.710 1.914 1.884 1.732 1.738 1.806 1.739 1.724 1.729 1.708 1.721 Note that technically the above object is a data.frame, but you can easily coerce to matrix with as.matrix(). Explanation of Your Error The reason why your code is failing is as follows. First, notice that the DNase$Run vector is actually an ordered factor: class(DNase$Run); ## [1] "ordered" "factor" Your A variable will therefore also be an ordered factor, just with the unique values from DNase$Run. Now, when you use a for-loop to iterate over a factor (ordered or otherwise), it uses the levels (character strings) as the iteration value (as opposed to the integer enumeration values that are stored internally). Demo: for (i in factor(letters[1:5])) print(i); ## [1] "a" ## [1] "b" ## [1] "c" ## [1] "d" ## [1] "e" Thus, your i loop variable is being assigned to the levels character strings of DNase$Run. And, since your z matrix has no dimnames, trying to index its columns with a character string is failing with the error message "no 'dimnames' attribute for array".
{ "pile_set_name": "StackExchange" }
Q: Under my constructor for an array, I have a toString method that prints out the contents. But it's telling me it isn't resolved to a variable So my constructor creates an array, and I want my toString method to display the contents. However, I'm getting an error telling me that table[i] can not be resolved to a variable, even though it was created in the constructor. Please help! public int size = 38; public int first = 0; public int last = 2; public int count = 1; public Table() { int[] table = new int[size]; table[0] = first; table [size-1] = last; for(int i = 1; i < size-1; i++){ if(count == first | count == last) count++; table[i] = count; count++; } } public String toString(){ String string = "Wheel: 0"; for(int i = 1; i < size; i++) string = string + "-" + table[i] ; //table[i] CAN NOT BE RESOLVED TO A VARIABLE return string; } A: Your table is defined locally in your constructor. int[] table = new int[size]; You have to declare it outside the constructor: int[] table; public Table() { table = new int[size]; ...
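As an aside that is not part of the original thread, the same local-variable-versus-field distinction can be sketched in Python, where assigning to a plain name inside __init__ likewise creates a local that vanishes when the constructor returns, while assigning through self creates an attribute that every method can see:

class BrokenTable:
    def __init__(self, size=38):
        table = list(range(size))  # local variable: gone once __init__ returns

    def __str__(self):
        return "-".join(str(x) for x in self.table)  # AttributeError: no self.table


class WorkingTable:
    def __init__(self, size=38):
        self.table = list(range(size))  # instance attribute: visible to __str__

    def __str__(self):
        return "-".join(str(x) for x in self.table)


print(WorkingTable(5))  # prints 0-1-2-3-4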
{ "pile_set_name": "StackExchange" }
Q: Optimizing array transposing function I'm working on a homework assignment, and I've been stuck for hours on my solution. The problem we've been given is to optimize the following code, so that it runs faster, regardless of how messy it becomes. We're supposed to use stuff like exploiting cache blocks and loop unrolling. Problem: //transpose a dim x dim matrix into dist by swapping all i,j with j,i void transpose(int *dst, int *src, int dim) { int i, j; for(i = 0; i < dim; i++) { for(j = 0; j < dim; j++) { dst[j*dim + i] = src[i*dim + j]; } } } What I have so far: //attempt 1 void transpose(int *dst, int *src, int dim) { int i, j, id, jd; id = 0; for(i = 0; i < dim; i++, id+=dim) { jd = 0; for(j = 0; j < dim; j++, jd+=dim) { dst[jd + i] = src[id + j]; } } } //attempt 2 void transpose(int *dst, int *src, int dim) { int i, j, id; int *pd, *ps; id = 0; for(i = 0; i < dim; i++, id+=dim) { pd = dst + i; ps = src + id; for(j = 0; j < dim; j++) { *pd = *ps++; pd += dim; } } } Some ideas, please correct me if I'm wrong: I have thought about loop unrolling but I dont think that would help, because we don't know if the NxN matrix has prime dimensions or not. If I checked for that, it would include excess calculations which would just slow down the function. Cache blocks wouldn't be very useful, because no matter what, we will be accessing one array linearly (1,2,3,4) while the other we will be accessing in jumps of N. While we can get the function to abuse the cache and access the src block faster, it will still take a long time to place those into the dst matrix. I have also tried using pointers instead of array accessors, but I don't think that actually speeds up the program in any way. Any help would be greatly appreciated. Thanks A: Cache blocking can be useful. For an example, lets say we have a cache line size of 64 bytes (which is what x86 uses these days). So for a large enough matrix such that it's larger than the cache size, then if we transpose a 16x16 block (since sizeof(int) == 4, thus 16 ints fit in a cache line, assuming the matrix is aligned on a cacheline bounday) we need to load 32 (16 from the source matrix, 16 from the destination matrix before we can dirty them) cache lines from memory and store another 16 lines (even though the stores are not sequential). In contrast, without cache blocking transposing the equivalent 16*16 elements requires us to load 16 cache lines from the source matrix, but 16*16=256 cache lines to be loaded and then stored for the destination matrix. A: Unrolling is useful for large matrixes. You'll need some code to deal with excess elements if the matrix size isn't a multiple of the times you unroll. But this will be outside the most critical loop, so for a large matrix it's worth it. Regarding the direction of accesses - it may be better to read linearly and write in jumps of N, rather than vice versa. This is because read operations block the CPU, while write operations don't (up to a limit). Other suggestions: 1. Can you use parallelization? OpenMP can help (though if you're expected to deliver single CPU performance, it's no good). 2. Disassemble the function and read it, focusing on the innermost loop. You may find things you wouldn't notice in C code. 3. Using decreasing counters (stopping at 0) might be slightly more efficient that increasing counters. 4. The compiler must assume that src and dst may alias (point to the same or overlapping memory), which limits its optimization options. 
If you could somehow tell the compiler that they can't overlap, it could be a great help; in C99 the restrict qualifier is the way to do that.
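Below is a sketch, not part of the assignment's C code, of the cache-blocking (loop-tiling) structure the first answer describes. It is written in Python purely to show the loop shape; the block size of 16 comes from the 64-byte-cache-line / 4-byte-int example above, and any real speedup would of course only appear in the compiled C version.

def transpose_blocked(dst, src, dim, block=16):
    # Walk the matrix in block x block tiles so that, within a tile, both the
    # reads from src and the writes to dst touch only a handful of cache lines.
    for ii in range(0, dim, block):
        for jj in range(0, dim, block):
            for i in range(ii, min(ii + block, dim)):
                for j in range(jj, min(jj + block, dim)):
                    dst[j * dim + i] = src[i * dim + j]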
{ "pile_set_name": "StackExchange" }
Q: the method addHeader (String, String) is undefined for the type HttpGet I have this program: import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.HttpClientBuilder; public class ApplicationRESTFul { public static void main(String[] args) { String url = "http://www.google.com/search?q=httpClient"; HttpClient client = HttpClientBuilder.create().build(); HttpGet request = new HttpGet(url); request.addHeader("Accept", "application/json"); } } But I get this message from Eclipse: the method addHeader (String, String) is undefined for the type HttpGet. I am using this library, and as far as I can see in the documentation, the method should exist (org.apache.httpcomponents.httpclient_4.5): http://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/client/methods/HttpGet.html A: I solved it by adding the httpcore JAR to the classpath. Adding the dependency from Maven pulls in the httpcore JAR as well, not just the httpclient JAR, which is why that approach works too. A: Importing the dependency from Maven, instead of adding the JAR to the classpath by hand, solved the problem: <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> <version>4.3.6</version> </dependency>
{ "pile_set_name": "StackExchange" }
Q: pdb is not working in django doctests So I created the following file (testlib.py) to automatically load all doctests (throughout my nested project directories) into the __tests__ dictionary of tests.py: # ./testlib.py import os, imp, re, inspect from django.contrib.admin import site def get_module_list(start): all_files = os.walk(start) file_list = [(i[0], (i[1], i[2])) for i in all_files] file_dict = dict(file_list) curr = start modules = [] pathlist = [] pathstack = [[start]] while pathstack is not None: current_level = pathstack[len(pathstack)-1] if len(current_level) == 0: pathstack.pop() if len(pathlist) == 0: break pathlist.pop() continue pathlist.append(current_level.pop()) curr = os.sep.join(pathlist) local_files = [] for f in file_dict[curr][1]: if f.endswith(".py") and os.path.basename(f) not in ('tests.py', 'models.py'): local_file = re.sub('\.py$', '', f) local_files.append(local_file) for f in local_files: # This is necessary because some of the imports are repopulating the registry, causing errors to be raised site._registry.clear() module = imp.load_module(f, *imp.find_module(f, [curr])) modules.append(module) pathstack.append([sub_dir for sub_dir in file_dict[curr][0] if sub_dir[0] != '.']) return modules def get_doc_objs(module): ret_val = [] for obj_name in dir(module): obj = getattr(module, obj_name) if callable(obj): ret_val.append(obj_name) if inspect.isclass(obj): ret_val.append(obj_name) return ret_val def has_doctest(docstring): return ">>>" in docstring def get_test_dict(package, locals): test_dict = {} for module in get_module_list(os.path.dirname(package.__file__)): for method in get_doc_objs(module): docstring = str(getattr(module, method).__doc__) if has_doctest(docstring): print "Found doctests(s) " + module.__name__ + '.' + method # import the method itself, so doctest can find it _temp = __import__(module.__name__, globals(), locals, [method]) locals[method] = getattr(_temp, method) # Django looks in __test__ for doctests to run. Some extra information is # added to the dictionary key, because otherwise the info would be hidden. test_dict[method + "@" + module.__file__] = getattr(module, method) return test_dict To give credit where credit is due, much of this came from here In my tests.py file, I have the following code: # ./project/tests.py import testlib, project __test__ = testlib.get_test_dict(project, locals()) All of this works quite well to load my doctests from all of my files and subdirectories. The problem is that when I import and invoke pdb.set_trace() anywhere, this is all I see: (Pdb) l (Pdb) args (Pdb) n (Pdb) n (Pdb) l (Pdb) cont doctest is apparently capturing and mediating the output itself, and is using the output in assessing the tests. So, when the test run completes, I see everything that should have printed out when I was in the pdb shell within doctest's failure report. This happens regardless of whether I invoke pdb.set_trace() inside a doctest line or inside the function or method being tested. Obviously, this is a big drag. Doctests are great, but without an interactive pdb, I cannot debug any of the failures that they are detecting in order to fix them. My thought process is to possibly redirect pdb's output stream to something that circumvents doctest's capture of the output, but I need some help figuring out the low-level io stuff that would be required to do that. Also, I don't even know if it would be possible, and am too unfamiliar with doctest's internals to know where to start. 
Anyone out there have any suggestions, or better, some code that could get this done? A: I was able to get pdb working by subclassing it. I just put the following code at the bottom of my testlib.py file: import sys, pdb class TestPdb(pdb.Pdb): def __init__(self, *args, **kwargs): self.__stdout_old = sys.stdout sys.stdout = sys.__stdout__ pdb.Pdb.__init__(self, *args, **kwargs) def cmdloop(self, *args, **kwargs): sys.stdout = sys.__stdout__ retval = pdb.Pdb.cmdloop(self, *args, **kwargs) sys.stdout = self.__stdout_old return retval def pdb_trace(): debugger = TestPdb() debugger.set_trace(sys._getframe().f_back) In order to use the debugger I just import testlib and call testlib.pdb_trace(), and I am dropped into a fully functional debugger.
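A hypothetical usage sketch (the function and values below are invented for illustration): drop the helper into any function that a doctest exercises and you get an interactive prompt when that doctest runs.

import testlib

def parse_price(text):
    """
    >>> parse_price("$12.50")
    12.5
    """
    testlib.pdb_trace()  # opens a usable (Pdb) prompt in the middle of the doctest run
    return float(text.lstrip("$"))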
{ "pile_set_name": "StackExchange" }
Q: What are the most popular galaxies for which we have images? The only galaxies I can think of (not being an astronomer) are Andromeda and Milky Way. There are 51 near galaxies, but they all pretty much say "satellite of Milky way" or "satellite of Andromeda". There are 100k+ galaxies in the local supercluster, and that page seems to have a better list: Corvus Coma Berenices Ursa Major Virgo Sculptor etc. If you had to rank them in order of prominence in the scientific community or in popular science, wondering what the top 10 or 20 galaxies would be (for which we have photos). I am trying to come up with a list of images for educational purposes that are potentially somewhat familiar to laymen audiences, or which would be useful to introduce to laymen audiences. A: Any such list is going to be terribly subjective. Since I'm an astronomer who studies galaxies, I'll go ahead and throw out a subjective list of the more famous, photogenic, and/or scientifically well-studied galaxies. The first six are in the Local Group (LMC and SMC are satellites of the Milky Way, M32 is a satellite of Andromeda). Milky Way Andromeda (M31) Large Magellanic Cloud Small Magellanic Cloud Triangulum (M33) M32 Sombrero (M104) Pinwheel (M101) Whirlpool (M51a) M64 (Black Eye) M74 (NGC 628) M81 M82 (Cigar) M87 M100 NGC 891 NGC 1068 (M77) NGC 1300 NGC 1365 Centaurus A Cygnus A
{ "pile_set_name": "StackExchange" }
Q: Only one button in a panel with multiple togglebuttons changes color - wxPython I want to set the color of a toggle button of my choice in the panel that I have created. The problem is that in the numerous toggle buttons that I have displayed on my panel when I want to change the color of each one only the color of the last button changes. Here's my code: import wx class Frame(wx.Frame): def __init__(self): wx.Frame.__init__(self,None) self.panel = wx.Panel(self,wx.ID_ANY) self.sizer = wx.BoxSizer(wx.VERTICAL) self.flags_panel = wx.Panel(self, wx.ID_ANY, style = wx.SUNKEN_BORDER) self.sizer.Add(self.flags_panel) self.SetSizer(self.sizer,wx.EXPAND | wx.ALL) self.flags = Flags(self.flags_panel, [8,12]) self.flags.Show() class Flags (wx.Panel): def __init__(self,panel, num_flags = []):#,rows = 0,columns = 0,radius = 0, hspace = 0, vspace = 0,x_start = 0, y_start = 0 wx.Panel.__init__(self,panel,-1, size = (350,700)) num_rows = num_flags[0] num_columns = num_flags[1] x_pos_start = 10 y_pos_start = 10 i = x_pos_start j = y_pos_start buttons = [] for i in range (num_columns): buttons.append('toggle button') self.ButtonValue = False for button in buttons: index = 0 while index != 15: self.Button = wx.ToggleButton(self,-1,size = (10,10), pos = (i,j)) self.Bind(wx.EVT_TOGGLEBUTTON,self.OnFlagCreation, self.Button) self.Button.Show() i += 15 index += 1 j += 15 i = 10 self.Show() def OnFlagCreation(self,event): if not self.ButtonValue: self.Button.SetBackgroundColour('#fe1919') self.ButtonValue = True else: self.Button.SetBackgroundColour('#14e807') self.ButtonValue = False if __name__ == '__main__': app = wx.App(False) frame = Frame() frame.Show() app.MainLoop() A: Your problem is quite simple. The last button is always changed because it's the last button defined: self.Button = wx.ToggleButton(self,-1,size = (10,10), pos = (i,j)) Each time through the for loop, you reassign the self.Button attribute to a different button. What you want to do is extract the button from your event object and change its background color. So change your function to look like this: def OnFlagCreation(self,event): btn = event.GetEventObject() if not self.ButtonValue: btn.SetBackgroundColour('#fe1919') self.ButtonValue = True else: btn.SetBackgroundColour('#14e807') self.ButtonValue = False See also: http://www.blog.pythonlibrary.org/2011/09/20/wxpython-binding-multiple-widgets-to-the-same-handler/
{ "pile_set_name": "StackExchange" }
Q: Internet Explorer 11 issue I am working on Selenium automation with the IE web browser. Sometimes, after the browser is launched, actions are performed very slowly. For example, if I type a user id (abcd), IE enters it one character at a time: a (taking a minute), b (taking a minute), c (taking a minute)... I checked the internet speed and cleared the cache and cookies. It only happens sometimes. Please suggest any solutions. A: Most likely it is due to the 64-bit IE WebDriver. Switch to the 32-bit IE WebDriver and check whether it fixes your issue.
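For what it's worth, here is a minimal sketch of pointing Selenium at the 32-bit driver, assuming the Python bindings and a Selenium 3-style constructor (the question does not say which language is in use, and the driver path is just an example location for a downloaded 32-bit IEDriverServer.exe):

from selenium import webdriver

# use the Win32 build of IEDriverServer rather than the x64 one
driver = webdriver.Ie(executable_path=r"C:\drivers\IEDriverServer_Win32.exe")
driver.get("https://example.com")
# typing via send_keys should now run at normal speed
driver.quit()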
{ "pile_set_name": "StackExchange" }
Q: Perl - undefined subroutine I have the following Perl code: use Email::Sender::Simple; use IO::Socket::SSL; IO::Socket::SSL::set_defaults(SSL_verify_mode => SSL_VERIFY_NONE); Email::Sender::Simple::sendmail($email, { transport => $transport }); When I run it I get this error: Undefined subroutine &Email::Sender::Simple::sendmail called at script.pl line 73. If I change the code to the following, then it works: use Email::Sender::Simple qw(sendmail); sendmail($email, { transport => $transport }); Can someone explain why I had to change the code for sendmail, while I did NOT have to change the code for set_defaults to look like: use IO::Socket::SSL qw(set_defaults); set_defaults(SSL_verify_mode => SSL_VERIFY_NONE); A: Take a look at the source of Email/Sender/Simple.pm. There is no sendmail subroutine defined in that module. Instead, if you look at the header, you'll see: use Sub::Exporter -setup => { exports => { sendmail => Sub::Exporter::Util::curry_class('send'), try_to_sendmail => Sub::Exporter::Util::curry_class('try_to_send'), }, }; I'm not familiar with Sub::Exporter, but I did notice this description: The biggest benefit of Sub::Exporter over existing exporters (including the ubiquitous Exporter.pm) is its ability to build new coderefs for export, rather than to simply export code identical to that found in the exporting package. Oh... So, the purpose of using Sub::Exporter is to export subroutine names that aren't subroutines in your package. If you're interested, you can read the Sub::Exporter tutorial; it can export code under names that differ from anything actually defined in the package. Thus, Email::Sender::Simple::sendmail isn't a real subroutine in the package, but sendmail can still be exported (here, as a curried call to the class's send method).
{ "pile_set_name": "StackExchange" }
Q: Change password page I'm having trouble with this PHP code; it seems logical to me, but as I'm new to PHP and MySQL, I am obviously wrong. I'm trying to set up a change password page for an assignment, and I can't see where I have gone wrong. The code is as follows: session_start(); if(isset($_SESSION['uname'])){ echo "Welcome " . $_SESSION['uname']; } require_once 'PHP/Constants.php'; $conn = new MySQLi(DB_SERVER, DB_USER, DB_PASSWORD, DB_NAME) or die ('There was a problem connecting to the database'); $query = "SELECT * FROM user"; $result = mysqli_query($conn, $query); while ($pwdReq = mysqli_fetch_array($result)){ if ($pwdReq['Password'] == $_POST['oldPwd']) { if ($_POST['confPwd'] == $_POST['newPwd']){ $change = "INSERT INTO user(Password) VALUES ('newPwd')"; $pwdChange = mysqli_query($conn, $change); } else return "The new passwords do not match!"; } else return "Please enter a correct password!"; } The body of my page is as follows: <form method="post" action=""> <h2>Change Password</h2> <p> <label for="oldPwd">Old Password:</label> <input type="password" name="oldPwd" /> </p> <p> <label for="newPwd">New Password:</label> <input type="password" name="newPwd" /> </p> <p> <label for="confPwd">Confirm Password:</label> <input type="password" name="confPwd" /> </p> <p> <input type="submit" id="submit" value="Submit" name="submit" /> </p> </form> When the page runs, all I get is the following: Notice: Undefined index: oldPwd in C:\Program Files (x86)\xampp\htdocs\Assignment\change_password.php on line 11 Thank you in advance for any help I receive - Nick A: Always check your POST values before you do anything. You should select the single record that matches uname, and prepare your queries to avoid SQL injection. Here's an improved version of your script: <?php session_start(); if (isset($_SESSION['uname'])) { echo "Welcome " . $_SESSION['uname']; } if (isset($_POST['oldPwd']) && isset($_POST['newPwd']) && isset($_POST['confPwd'])) { //Values are set require_once 'PHP/Constants.php'; $conn = new MySQLi(DB_SERVER, DB_USER, DB_PASSWORD, DB_NAME) or die ('There was a problem connecting to the database'); //Select user from DB where username matches Session $query = "SELECT * FROM user WHERE uname = ?"; //prepare query $stmt = mysqli_prepare($conn, $query); mysqli_stmt_bind_param($stmt, "s", $_SESSION['uname']); mysqli_stmt_execute($stmt); $result = mysqli_stmt_get_result($stmt); $row = mysqli_fetch_assoc($result); //get the password from DB $pwdReq = $row['Password']; if ($pwdReq == $_POST['oldPwd']) { if ($_POST['confPwd'] == $_POST['newPwd']) { $change = "INSERT INTO user(Password) VALUES (?)"; $stmt = mysqli_prepare($conn, $change); mysqli_stmt_bind_param($stmt, "s", $_POST['newPwd']); mysqli_stmt_execute($stmt); echo "Password has been changed"; } else { echo "The new password does not match confirmation"; } } else { echo "Old password not matching the database"; } } else { echo "oldPwd, newPwd, or confPwd is not set"; } ?> If oldPwd, newPwd, or confPwd is not set, you will need to figure out why. That is no longer PHP's fault; you will need to look at your HTML and make sure the script is receiving these values.
{ "pile_set_name": "StackExchange" }
Q: Why is the order of white/grey matter different in the brain and spinal cord? In the brain proper, grey matter forms the outer layer of the brain, and white matter forms the inner layer. In the spine, this is reversed: white matter forms the outer layer of the spine, and grey matter the inner layer. Is there a developmental or functional reason for this? A: I'll tackle this question from a functional point of view. Gray matter consists of cell bodies; white matter consists of myelinated fiber tracts. In the brain, the gray matter is basically the cortex, and the white matter lies mainly underneath it. The cortex is the place where all the higher mental processing takes place (Fig. 1). Fig. 1. Cortical functions. Source: Penn Medicine The white matter in the brain connects the various parts of the cortex so that information can be transported for further processing and integration. Fig. 2. Central white matter. Source: NIH Medline Since the cortex is the 'processor', it makes sense to connect its parts subcortically (more efficient, as it leads to shorter connections). However, the cortex has expanded very late in evolution, so this 'endpoint reasoning' can be contested: from an evolutionary perspective, more cortex was needed and hence it was expanded right where it happened to be, namely in the outer part of the brain. In the spinal cord things are pretty much reversed; grey matter within, white matter around it (Fig. 3). Fig. 3. Section through the spinal cord with central grey matter and surrounding white matter. Source: University of Michigan The white matter, again, is formed by various tracts (Fig. 4), and the grey matter by the parts that process information (Fig. 5). Fig. 4. Section through the spinal cord showing the spinal tracts forming the white matter. Source: Biology.SE Fig. 5. Section through the spinal cord showing the spinal reflex arcs forming the gray matter. Source: APSU Biology The white matter in the spinal cord constitutes the various sensory and motor pathways to and from the brain, respectively. The gray matter constitutes basic processing nuclei that form the reflex arcs in the spinal cord. These reflex arcs process incoming sensory information (e.g. pain) and govern motor output (e.g., pulling the hand away from the fire). Again, the structure makes sense in terms of efficiency, as the reflex arcs combine the sensory and motor tracts to govern reflexes, and processing them from within saves space.
{ "pile_set_name": "StackExchange" }
Q: Manually set the properties of new objects created from datagridview premise: I have a class "User" which has a property of type list to represent a list of "Office" associated with the user. I have a BindingSource associated to the list of "Office" in which the grid is hooked to insert new elements. Everything seems to work correctly, when I click on the new line and write a value in the list is added to the new object "Office" The problem is that in addition to the data entered by the user I want to insert some default value automatically (example: a guid), I know I could do this with a hidden column in my grid, but I do not like too much as a solution and I would like to work directly on the object . I tried with the event DataBindingComplete(object sender, DataGridViewBindingCompleteEventArgs e) https://msdn.microsoft.com/en-us/library/system.windows.forms.datagridview.databindingcomplete%28v=vs.110%29.aspx This is called after the new object "office" was added to the list but I can not recover it, because I do not know which row is inserted but only a generic word "ItemAdded" I also tried using the method DefaultValuesNeeded(object sender, System.Windows.Forms.DataGridViewRowEventArgs e) https://msdn.microsoft.com/en-us/library/system.windows.forms.datagridview.defaultvaluesneeded%28v=vs.110%29.aspx?cs-save-lang=1&cs-lang=csharp#code-snippet-1 but this to me is called before my object "office" I is added to the list and I have no way to have it in order to set the default. Long story short: how do I manually set the values on the properties of an object added to a list automatically from gridview without using hidden columns? A: If the Property "guid" is generated from the Entity Framework, then you can add a partial class such as: public partial class Office { public Office() { guid=Guid.newGuid(); } } and add a constructor there which populates the guid property whenever a new office is created.
{ "pile_set_name": "StackExchange" }
Q: Font Boosting Issue in Android Lollipop We recently noticed font boosting issue in Android Lollipop OS. If user modifies font size in settings Menu, application is reloading if it is already running and all fonts are modified based on selected font size in device settings. If we are setting below metatag viewport, issue is not exist in iOS and android OS < 5.0 > <meta name="viewport" content="width=device-width, initial-scale=1, > maximum-scale=1, user-scalable=no"> Is there any option to resolve this issue????? Note : 1. This kind of issue is not available in iOS and Android < 5.0. 2. There is no issue for thin application for Android >= 5.0 as well. (Only webpage is reloading whenever there is a change in device font settings) A: The issue is getting resolved by setting below value in webview. webView.getSettings().setTextZoom(100); Root cause: For Android OS < 5.0 TextZoom is having default value as 100. But in Lollipop OS, value is taken from font settings in device if it is not overwritten in application. If we are setting textZoom as 100, font settings changes is not affecting application.
{ "pile_set_name": "StackExchange" }
Q: Stack smashing detected and no source for getenv I'm having the weirdest problem ever when programming in C. My function sometimes runs and other times it doesn't, and even though I tried searching for these errors (stack smashing detected and no source for getenv) I can't seem to find the answer to why it fails sometimes. I tried debugging it and it only has a problem in the last character (a "}"), so it runs all those other functions (and they have been tested a lot separately) but sometimes it just falls apart and doesn't run the last function (it works, that I can guarantee, because some other times it even runs inside this function). Also the few last times I ran the function it gave Segmentation Fault even though sometimes it ran all the way to the end. Is there any way I can debug this problem? Here goes my code: void main(int argc, char * argv[]) { FILE * fin=fopen(argv[1],"r"); char v[1024]; int col; matrix m=emptyS(); while(fscanf(fin, "%s",v)!=EOF) { int i=0; int * w = (int*) malloc(sizeof (int)); int str=strlen(v); int size=0; while(i<str) { char a[4]; int y; for(y=0;y<4;y++) a[y]='\0'; int x=0; while(v[i]!=',') { a[x]=v[i]; i++; x++; } i++; size++; w=realloc(w,size*sizeof(int)); w[size-1]=atoi(a); } m=add(m,w,size); col=size; } fclose(fin); graphW wg=discWD(col-1); int k=0; while(k<(col-2)) { int j=k+1; while(j<(col-1)) { wg=add_Wedge(wg,k,j,mi(m,k,j,col)); j++; } k++; } int* ms=MST(wg); graph gbayes=tree(wg,ms); bayes b=newBN(gbayes,m); FILE * fout=fopen(argv[2],"w"); serialize(b,fout); fclose(fout); } Thank you in advance! A: You don't check x for going out of bounds. Valgrind would have told you. while(i<str) { char a[4] = { 0 }; int x=0; while(v[i]!=',') { a[x]=v[i]; /* here you access a[x] without check for x */ i++; x++; /* Here it may go >= 4*/ } i++; size++; w=realloc(w,size*sizeof(int)); w[size-1]=atoi(a); }
{ "pile_set_name": "StackExchange" }
Q: Show that $|h(x)-h(y)|≤|x-y|$ Let $$h(x)=x/(1+|x|)$$ Show that $$|h(x)-h(y)|≤|x-y|$$ I have no idea to start. But I can see that $h(x)≤x$. Logically, the required inequality will follow from this. But I cannot apply it to $y$. A: Suppose that $0 < x < y$. You have $$\vert h(x)-h(y) \vert = \left \vert\frac{x}{1+x}-\frac{y}{1+y}\right\vert=\frac{\vert x-y \vert}{(1+x)(1+y)} \le \vert x -y \vert.$$ Then study the case $x < y <0$. And finally the last one $x < 0 < y$
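For completeness, here is one way to finish the two remaining cases the answer leaves to the reader. For $x < y < 0$ we have $|x| = -x$ and $|y| = -y$, so $$\vert h(x)-h(y)\vert=\left\vert\frac{x}{1-x}-\frac{y}{1-y}\right\vert=\frac{\vert x-y\vert}{(1-x)(1-y)}\le\vert x-y\vert,$$ since $(1-x)(1-y)\ge 1$ when $x,y<0$. For $x < 0 < y$ we have $h(x)<0<h(y)$, so $$\vert h(x)-h(y)\vert=\frac{y}{1+y}+\frac{-x}{1-x}\le y+(-x)=\vert x-y\vert,$$ using $\frac{y}{1+y}\le y$ and $\frac{-x}{1-x}\le -x$.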
{ "pile_set_name": "StackExchange" }
Q: What is the purpose of the colon after the flag in some shell command options? See this example, taken from the O'Reilly book Classic Shell Scripting: sort -t: -k1,1 /etc/passwd Why is there a : after the t? It doesn't seem to be needed, nor documented in man, but I keep seeing it in examples. A: It's just the argument to the -t option, specifying that fields are separated by colons in the input file.
{ "pile_set_name": "StackExchange" }
Q: When should I answer/close "Why doesn't my code work?" questions? I am a little confused about how to deal with "Why doesn't my code work?" questions. On one hand, they appear to be acceptable under the MVCe rules. On the other hand, I fail to see how they could possibly be useful to anyone, even the OP, after the OP learns the answer. They can't ever be closed as a duplicate, because each person's code is a 'snowflake'. And half the time, the bug(s) are really trivial (slight logic error or tool misuse). So if these are actually bad questions, what should I be doing about them? If they aren't bad questions, how do they contribute to the value of SO? A: If the question is just a code dump and a "why doesn't this work", then there's a close reason specifically for that: Questions seeking debugging help ("why isn't this code working?") must include the desired behavior, a specific problem or error and the shortest code necessary to reproduce it in the question itself. Questions without a clear problem statement are not useful to other readers. See: How to create a Minimal, Complete, and Verifiable example. If the question contains a code snippet, an explanation of what it should do, an explanation of why it's not doing what it should be doing, and the code snippet is capable of reproducing that erroneous behavior, then it's a good question. It could be closed as a duplicate of any other question with the same problem, and others could be closed as a duplicate of it. Such a question could also have other problems (the description of the problem or desired behavior may not be clear, the scope of the problem could be Too Broad, it could be just off topic, etc.) and if the post has any other problems, feel free to address them as appropriate.
{ "pile_set_name": "StackExchange" }
Q: xpath - if else expression in Python I have the following table structure: In the gender column when a value exists, the gender is displayed between the tag, but when the tag does not exist it is not displayed and the value is a special character &nbsp; <TABLE class="first"> <TR> <TD></TD> <TD></TD> <TD></TD> <TD></TD> </TR> <TR VALIGN="top"> <TD></TD> <TD><DIV>NAME</DIV></TD> <TD><DIV>AGE</DIV></TD> <TD><DIV>GENDER</DIV></TD> </TR> <TR VALIGN="top"> <TD></TD> <TD><DIV>MARIA</DIV></TD> <TD><DIV>25</DIV></TD> <TD><DIV>F</DIV></TD> </TR> <TR VALIGN="top"> <TD></TD> <TD><DIV>JOHN</DIV></TD> <TD><DIV>22</DIV></TD> <TD>&nbsp;</TD> </TR> <TR VALIGN="top"> <TD></TD> <TD><DIV>PAUL</DIV></TD> <TD><DIV>36</DIV></TD> <TD>&nbsp;</TD> </TR> <TR VALIGN="top"> <TD></TD> <TD><DIV>DEREK</DIV></TD> <TD><DIV>16</DIV></TD> <TD><DIV>M</DIV></TD> </TR> </TABLE> I'm doing the following: for table in result.xpath('//table[@class="first"]'): for i, tr in enumerate(table.xpath('//tr')): for j, td in enumerate(tr.xpath('td/div/|td')): if td.text == '&nbsp;': print '---' else: print td.text How to print '---' if the character &nbsp exists in td.text? A: &nbsp; is an entity reference to the no-break space character (Unicode code point: U+00A0). To test if the text content of an element is equal to that character, you can use this: if td.text == u'\u00A0': Complete demonstration: from lxml import html table = html.parse("table.html") for tr in table.xpath('//tr'): for td in tr.xpath('td/div|td'): if td.text == u'\u00A0': print 'BLANK VALUE' else: print td.text Output: None None None None None None NAME None AGE None GENDER None None MARIA None 25 None F None None JOHN None 22 BLANK VALUE None None PAUL None 36 BLANK VALUE None None DEREK None 16 None M
{ "pile_set_name": "StackExchange" }
Q: "I have X in my blood" arguments Lately, I have been hearing the argument "Oh, well I have Indian(Native American) in my blood, and 'redskin' does not offend me so why change the name?" coming from people who claim their great-great-great grandmother was from one of the hundreds of tribes from the area. I know it's been an argument made before, and I've always found it to be silly. There are some obvious arguments to be made from that statement: They haven't experienced life the same way as someone who is 100% native american, or that one person from a large group is not a representative of said group. But now that I am hearing this claim being made more and more, I was wondering what fallacy this would fall under? It seems to be similar to this argument, "I have X friends so I can't be racist against X people." But I feel it is a bit more than that? A: This can be a case of the Cherry Picking fallacy: Cherry picking, suppressing evidence, or the fallacy of incomplete evidence is the act of pointing to individual cases or data that seem to confirm a particular position, while ignoring a significant portion of related cases or data that may contradict that position. The person making the argument picks a case which supports his point, ignoring the cases that don't support his point. A: In my experience, this is not a logical error, so much as an unarticulated moral premise. The speaker holds an underlying assumption here that if it is possible not to be bothered by this kind of thing, one is morally obligated not be bothered by it, or at least to do one's best to not act upon being bothered. Using himself as evidence this is possible, he is ashamed of the offended members of his group, who are not trying hard enough to get along with the rest of the world. He is presuming the ultimate value of some version of democratic social harmony: non-interference in the majority's autonomy, 'Give the benefit of the doubt' or 'Go along, get along'. And this is in the foreground to a degree that he denies consideration to more important moral concerns. You can tell the difference between this and suppression of evidence or 'cherry picking' because, if you render the argument statistical, it does not matter unless the numbers are hugely predominant, and often even then. He is not unaware he is an exception, he embraces the exceptionality and considers it morally superior.
{ "pile_set_name": "StackExchange" }
Q: How to calculate difference of time between two records using Scala? I want to calculate time difference between events of a session using Scala. -- GIVEN Source is a csv file as shown below: HEADER "session","events","timestamp","Records" DATA "session_1","event_1","2015-01-01 10:10:00",100 "session_1","event_2","2015-01-01 11:00:00",500 "session_1","event_3","2015-01-01 11:30:00",300 "session_1","event_4","2015-01-01 11:45:00",300 "session_2","event_1","2015-01-01 10:10:00",100 "session_2","event_2","2015-01-01 11:00:00",500 REQUIRED OUTPUT HEADER "session","events","time_spent_in_minutes","total_records" DATA "session_1","event_1","50",100 "session_1","event_2","30",600 "session_1","event_3","15",900 "session_1","event_4","0",1200 "session_2","event_1","50",100 "session_2","event_2","0",600 Where time_spend_in_minutes is difference between current_event and next event for a given session. Header is not required in target but good to have. I am new to Scala so here what i have so far: $ cat test.csv "session_1","event_1","2015-01-01 10:10:00",100 "session_1","event_2","2015-01-01 11:00:00",500 "session_1","event_3","2015-01-01 11:30:00",300 "session_1","event_4","2015-01-01 11:45:00",300 "session_2","event_1","2015-01-01 10:10:00",100 "session_2","event_2","2015-01-01 11:00:00",500 scala> val sessionFile = sc.textFile("test.csv"). map(_.split(',')). map(e => (e(1).trim, Sessions(e(0).trim,e(1).trim,e(2).trim,e(3).trim.toInt))). foreach(println) ("event_1",Sessions("session_2","event_1","2015-01-01 10:10:00",100)) ("event_1",Sessions("session_1","event_1","2015-01-01 10:10:00",100)) ("event_2",Sessions("session_2","event_2","2015-01-01 11:00:00",500)) ("event_2",Sessions("session_1","event_2","2015-01-01 11:00:00",500)) ("event_3",Sessions("session_1","event_3","2015-01-01 11:30:00",300)) ("event_4",Sessions("session_1","event_4","2015-01-01 11:45:00",300)) sessionFile: Unit = () scala> A: Here is a solution that uses joda time library. val input = """"session_1","event_1","2015-01-01 10:10:00",100 "session_1","event_2","2015-01-01 11:00:00",500 "session_1","event_3","2015-01-01 11:30:00",300 "session_1","event_4","2015-01-01 11:45:00",300 "session_2","event_1","2015-01-01 10:10:00",100 "session_2","event_2","2015-01-01 11:00:00",500""" Create RDD from text input, can be read from file using sc.textFile import org.joda.time.format._ import org.joda.time._ def strToTime(s: String):Long = { DateTimeFormat.forPattern(""""yyyy-MM-dd HH:mm:ss"""") .parseDateTime(s).getMillis()/1000 } val r1 = sc.parallelize(input.split("\n")) .map(_.split(",")) .map(x => (x(0), (x(1), x(2), x(3)))) .groupBy(_._1) .map(_._2.map{ case(s, (e, timestr, r)) => (s, (e, strToTime(timestr), r))} .toArray .sortBy( z => z match { case (session, (event, time, records)) => time})) Converted time from "2015-01-01 10:10:00" to seconds from epoch, and sorted by time. val r2 = r1.map(x => x :+ { val y = x.last; y match { case (session, (event, time, records)) => (session, (event, time, "0")) }}) Added an extra event in each session, with all params same as last event of session except record count. This allows time-duration calculation to provide "0" in last event. Use sliding to get pairs of events. val r3 = r2.map(x => x.sliding(2).toArray) val r4 = r3.map(x => x.map{ case Array((s1, (e1, t1, c1)), (s2, (e2, t2, c2))) => (s1, (e1, (t2 - t1)/60, c1)) } ) Use scan to add records-count in incremental way. 
val r5 = r4.map(x => x.zip(x.map{ case (s, (e, t, r)) => r.toInt} .scan(0)(_+_) .drop(1))) val r6 = r5.map(x => x.map{ case ((s, (e, t, r)), recordstillnow) => s"${s},${e},${t},${recordstillnow}" }) val r7 = r6.flatMap(x => x) r7.collect.mkString("\n") //"session_2","event_1",50,100 //"session_2","event_2",0,600 //"session_1","event_1",50,100 //"session_1","event_2",30,600 //"session_1","event_3",15,900 //"session_1","event_4",0,1200
{ "pile_set_name": "StackExchange" }
Q: Order By in Ascending Order I have used the following query for a popup LOV in Apex: select VEN_INVOICE_REFNO as display_value, VEN_INVOICE_REFNO as return_value from VENDORINVOICE order by 1 asc; I want to show the values (as in the diagram) in numeric order, i.e. in ascending order. I have tried all the ways I know of, but it doesn't work. A: It seems that your field's datatype is a string; convert it to numeric and then apply the order by: select CAST(VEN_INVOICE_REFNO AS INTEGER) as display_value, VEN_INVOICE_REFNO as return_value from VENDORINVOICE order by 1 asc;
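To illustrate the underlying issue with made-up values (not taken from the Apex page itself): a text column sorts lexicographically, character by character, so "10" comes before "2"; ordering by the numeric value fixes it. A quick Python sketch:

refnos = ["10", "2", "1", "21", "3"]
print(sorted(refnos))           # ['1', '10', '2', '21', '3']  -- string order
print(sorted(refnos, key=int))  # ['1', '2', '3', '10', '21']  -- numeric order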
{ "pile_set_name": "StackExchange" }
Q: How to make a cobweb diagram I am struggling to make a cobweb diagram for the function $$x_{t+1}=\frac{8x_t}{1+2x_t}$$ I understand that when making the cobweb diagram I have to draw the line $y=x$, but where I have trouble is in how to draw the function on the graph. I am given the point $x_0 = 0.5$, so I plug this into the function and get $x_1 = 2$, and then I keep plugging in points. Do I graph points like $(0.5, 2)$ or $(0, 0.5)$? A: Let the expression on the right be $g(x)$. The method converges (sometimes) to the solution of the equation $x=g(x)$. The cobweb diagram illustrates the movement from the first guess $x_0$ to the second guess $x_1$, and so on. Draw a line from $(x_0,0)$ up to $(x_0,x_1)$. Then across to $(x_1,x_1)$. Then up or down to $(x_1,x_2)$. Then across to $(x_2,x_2)$. And so on.
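A small sketch, not part of the original answer, that carries out the recipe above in Python for $x_{t+1}=\frac{8x_t}{1+2x_t}$ starting at $x_0=0.5$; it just prints the corner points of the cobweb so you can see which points get plotted:

def g(x):
    return 8 * x / (1 + 2 * x)

x = 0.5
points = [(x, 0.0)]          # start on the x-axis at x0
for _ in range(6):
    y = g(x)
    points.append((x, y))    # vertical segment up/down to the curve y = g(x)
    points.append((y, y))    # horizontal segment across to the line y = x
    x = y

for p in points:
    print("(%.4f, %.4f)" % p)
# the iterates 0.5, 2, 3.2, 3.4595, ... approach the fixed point x = 3.5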
{ "pile_set_name": "StackExchange" }
Q: Why size of classes is larger in case of virtual inheritance? Virtual base class is a way of preventing multiple instances of a given class appearing in an inheritance hierarchy when using multiple inheritance . Then for the following classes class level0 { int a; public : level0(); }; class level10:virtual public level0 { int b; public : level10(); }; class level11 :virtual public level0 { int c; public : level11(); }; class level2 :public level10,public level11 { int d; public: level2(); }; I got following sizes of the classes size of level0 4 size of level10 12 size of level11 12 size of level2 24 but when I removed virtual from inheritance of level10 and level11 I got following output sizeof level0 4 sizeof level10 8 sizeof level11 8 sizeof level2 20 If virtual inheritance prevents multiple instances of a base class, then why size of classes is greater in case of virtual inheritance? A: Because when using virtual inheritence, the compiler will create* a vtable to point to the correct offsets for the various classes, and a pointer to that vtable is stored along with the class. "Will create" -- vtables are not dictated by the Standard, but the behaviors implied by virtual inheritence is. Most compilers use vtables to implement the functionality dictated by the Standard.
{ "pile_set_name": "StackExchange" }
Q: Combining information from multiple studies to estimate the mean and variance of normally distributed data - Bayesian vs meta-analytic approaches I have reviewed a set of papers, each reporting the observed mean and SD of a measurement of $X$ in its respective sample of known size, $n$. I want to make the best possible guess about the likely distribution of the same measure in a new study that I am designing, and how much uncertainty is in that guess. I am happy to assume $X \sim N(\mu, \sigma^2$). My first thought was meta-analysis, but the models typically employed focus on point estimates and corresponding confidence intervals. However, I want to say something about the full distribution of $X$, which in this case would also including making a guess about the variance, $\sigma^2$. I have been reading about possible Bayeisan approaches to estimating the complete set of parameters of a given distribution in light of prior knowledge. This generally makes more sense to me, but I have zero experience with Bayesian analysis. This also seems like a straightforward, relatively simple problem to cut my teeth on. 1) Given my problem, which approach makes the most sense and why? Meta-analysis or a Bayesian approach? 2) If you think the Bayesian approach is best, can you point me to a way to implement this (preferably in R)? Related question EDITS: I have been trying to work this out in what I think is a 'simple' Bayesian manner. As I stated above, I am not just interested in the estimated mean, $\mu$, but also the variance,$\sigma^2$, in light of prior information, i.e. $P(\mu, \sigma^2|Y)$ Again, I know nothing about Bayeianism in practice, but it didn't take long to find that the posterior of a normal distribution with unknown mean and variance has a closed form solution via conjugacy, with the normal-inverse-gamma distribution. The problem is reformulated as $P(\mu, \sigma^2|Y) = P(\mu|\sigma^2, Y)P(\sigma^2|Y)$. $P(\mu|\sigma^2, Y)$ is estimated with a normal distribution; $P(\sigma^2|Y)$ with an inverse-gamma distribution. It took me a while to get my head around it, but from these links(1, 2) I was able, I think, to sort how to do this in R. I started with a data frame made up from a row for each of 33 studies/samples, and columns for the mean, variance, and sample size. I used the mean, variance, and sample size from the first study, in row 1, as my prior information. I then updated this with the information from the next study, calculated the relevant parameters, and sampled from the normal-inverse-gamma to get the distribution of $\mu$ and $\sigma^2$. This gets repeated until all 33 studies have been included. 
# Loop start values values i <- 2 k <- 1 # Results go here muL <- list() # mean of the estimated mean distribution varL <- list() # variance of the estimated mean distribution nL <- list() # sample size eVarL <- list() # mean of the estimated variance distribution distL <- list() # sampling 10k times from the mean and variance distributions # Priors, taken from the study in row 1 of the data frame muPrior <- bayesDf[1, 14] # Starting mean nPrior <- bayesDf[1, 10] # Starting sample size varPrior <- bayesDf[1, 16]^2 # Starting variance for (i in 2:nrow(bayesDf)){ # "New" Data, Sufficient Statistics needed for parameter estimation muSamp <- bayesDf[i, 14] # mean nSamp <- bayesDf[i, 10] # sample size sumSqSamp <- bayesDf[i, 16]^2*(nSamp-1) # sum of squares (variance * (n-1)) # Posteriors nPost <- nPrior + nSamp muPost <- (nPrior * muPrior + nSamp * muSamp) / (nPost) sPost <- (nPrior * varPrior) + sumSqSamp + ((nPrior * nSamp) / (nPost)) * ((muSamp - muPrior)^2) varPost <- sPost/nPost bPost <- (nPrior * varPrior) + sumSqSamp + (nPrior * nSamp / (nPost)) * ((muPrior - muSamp)^2) # Update muPrior <- muPost nPrior <- nPost varPrior <- varPost # Store muL[[i]] <- muPost varL[[i]] <- varPost nL[[i]] <- nPost eVarL[[i]] <- (bPost/2) / ((nPost/2) - 1) # Sample muDistL <- list() varDistL <- list() for (j in 1:10000){ varDistL[[j]] <- 1/rgamma(1, nPost/2, bPost/2) v <- 1/rgamma(1, nPost/2, bPost/2) muDistL[[j]] <- rnorm(1, muPost, v/nPost) } # Store varDist <- do.call(rbind, varDistL) muDist <- do.call(rbind, muDistL) dist <- as.data.frame(cbind(varDist, muDist)) distL[[k]] <- dist # Advance k <- k+1 i <- i+1 } var <- do.call(rbind, varL) mu <- do.call(rbind, muL) n <- do.call(rbind, nL) eVar <- do.call(rbind, eVarL) normsDf <- as.data.frame(cbind(mu, var, eVar, n)) colnames(seDf) <- c("mu", "var", "evar", "n") normsDf$order <- c(1:33) Here is a path diagram showing how the $E(\mu)$ and $E(\sigma^2)$ change as each new sample is added. Here are the desnities based on sampling from the estimated distributions for the mean and variance at each update. I just wanted to add this in case it is helpful for someone else, and so that people in-the-know can tell me whether this was sensible, flawed, etc. A: The two approaches (meta-analysis and Bayesian updating) are not really that distinct. Meta-analytic models are in fact often framed as Bayesian models, since the idea of adding evidence to prior knowledge (possibly quite vague) about the phenomenon at hand lends itself naturally to a meta-analysis. An article that describes this connection is: Brannick, M. T. (2001). Implications of empirical Bayes meta-analysis for test validation. Journal of Applied Psychology, 86(3), 468-480. (the author uses correlations as the outcome measure for the meta-analysis, but the principle is the same regardless of the measure). A more general article on Bayesian methods for meta-analysis would be: Sutton, A. J., & Abrams, K. R. (2001). Bayesian methods in meta-analysis and evidence synthesis. Statistical Methods in Medical Research, 10(4), 277-303. What you seem to be after (in addition to some combined estimate) is a prediction/credibility interval that describes where in a future study the true outcome/effect is likely to fall. One can obtain such an interval from a "traditional" meta-analysis or from a Bayesian meta-analytic model. The traditional approach is described, for example, in: Riley, R. D., Higgins, J. P., & Deeks, J. J. (2011). Interpretation of random effects meta-analyses. British Medical Journal, 342, d549. 
In the context of a Bayesian model (take, for example, the random-effects model described by equation 6 in the paper by Sutton & Abrams, 2001), one can easily obtain the posterior distribution of $\theta_i$, where $\theta_i$ is the true outcome/effect in the $i$th study (since these models are typically estimated using MCMC, one just needs to monitor the chain for $\theta_i$ after a suitable burn-in period). From that posterior distribution, one can then obtain the credibility interval.
{ "pile_set_name": "StackExchange" }
Q: Why does a BigDecimal scale change if accessed through an association? I have two Ruby on Rails models, Farm and Harvest. A farm belongs to a harvest. Here are the models: class Farm < ActiveRecord::Base acts_as_singleton belongs_to :harvest validates :harvest, presence: true, allow_blank: true serialize :harvest_time, Tod::TimeOfDay validates :harvest_time, presence: true, allow_blank: true validates :hash_rate, presence: true validates_with HashRateValidator end class Harvest < ActiveRecord::Base belongs_to :user validates :user, presence: true validates :date, presence: true validates :amount, presence: true validates :identifier, presence: true validates :amount, numericality: { :greater_than => 0 } end There is only one Farm (accomplished thanks to the acts_as_singleton gem). Every time a harvest is done, the harvest association on the farm changes, since it always has to point to the latest harvest. Since I am using Farm as a singleton model, I update the Farm using the following code: @harvest = Harvest.new( :date => DateTime.now, :amount => amount, :identifier => new_identifier, :user => current_user, :assigned => false ) if @harvest.save Farm.instance.update_attributes(:harvest => @harvest) byebug The weird thing is that the harvest amount and the amount of the harvest assigned to the farm do not match after this: (byebug) Farm.instance.harvest.amount 435.435 (byebug) @harvest.amount 435.435345343 (byebug) Farm.instance.harvest.id 12 (byebug) @harvest.id 12 The amount decimal is supposed to have a scale of 8 and a precision of 6 (from the migration); here is the relevant part of the schema.rb file: create_table "harvests", force: :cascade do |t| t.datetime "date" t.decimal "amount", precision: 6, scale: 8 t.integer "identifier" t.datetime "created_at", null: false t.datetime "updated_at", null: false ... end So, what's going on here? The amount should be the exact same value! A: I figured it out. My scale and precision did not make sense together. Precision is the total number of digits in the BigDecimal value; scale is the number of those digits that appear to the right of the decimal point. Since precision was set to 6, the scale could not accommodate 8 digits after the decimal point. So when the number came from the database it was truncated, while when it came from memory it still had all its digits after the decimal point. I fixed it by setting the precision to 18 and the scale to 8, which means 18 digits in total, 8 of which appear to the right of the decimal point. SQLite allowed the incoherent precision => 6, scale => 8 combination; Postgres did not.
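A rough illustration of the same precision effect using Python's decimal module (not Rails; its prec setting counts total significant digits, which is close in spirit to the SQL precision discussed above):

from decimal import Context

print(Context(prec=6).create_decimal("435.435345343"))   # 435.435 -- truncated to 6 digits
print(Context(prec=18).create_decimal("435.435345343"))  # 435.435345343 -- fits comfortably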
{ "pile_set_name": "StackExchange" }
Q: What is the logic behind when JavaScript throws a ReferenceError? I've been using JavaScript for years but have lately been trying to increase my deep, under-the-hood knowledge of the language. I'm a bit confused about the logic behind when JavaScript throws a ReferenceError. For example, none of these throw a ReferenceError, but they still write undefined to the console: function foobar(foo) { var bar = foo; console.log(bar); } foobar(); or var foo = undefined; var bar = foo; console.log(bar); or var foo; var bar = foo; console.log(bar); but this obviously does throw a ReferenceError on the first line without writing to the console: var bar = foo; console.log(bar); So it seems that having a variable in a parameter list or declaring it will stop a ReferenceError from being thrown - even though the variable is still 'undefined'. Does anyone know what's going on under the hood, or what the hard and fast rules are surrounding this? Does anyone know why these aren't considered ReferenceErrors? A: There's a difference between using a variable that exists but has an undefined value, and using a variable that doesn't exist and was never declared. The latter will raise a ReferenceError, because you're trying to reference something that doesn't exist and has not been declared. On the other hand, when you write var foo; foo does exist and has been declared; its value is just undefined, so you can still reference it without throwing an error. In other words, trying to reference a variable that hasn't been declared will throw a ReferenceError, while referencing declared variables will never throw one, regardless of whether or not a value has been set for that variable.
{ "pile_set_name": "StackExchange" }
Q: Google App Engine Cron Job I have created a cron.xml file and a servlet which describes the job. Now when I compile and log in as an admin, the local development dashboard doesn't show the Cron Jobs link. A: The local development server does not have the Cron Jobs link, nor does it execute cron jobs. The actual App Engine will show cron jobs and will execute them. You can manually execute cron jobs on the local server by visiting their URLs, e.g. http://localhost:8888/FindReservedBooksTask. BTW, the cron.xml file should be in the war/WEB-INF directory.
{ "pile_set_name": "StackExchange" }
Q: A* Search - least number of hops? I'm trying to create an A* pathfinding algorithm, however I'm having a little trouble getting off the ground with it. A little background: I am by no means versed in pathfinding algorithms, however I did touch upon this subject a couple years ago (I've since forgotten everything I've learned). I play EVE Online, which is an online game about internet spaceships. The developers release data dumps for static information (items in game, solar system locations, etc). I am trying to find the shortest route from solar system A to solar system B. Take a look at this map: http://evemaps.dotlan.net/map/UUA-F4 That is one region in the game, with each node being a system. I would like to compute the shortest distance between any two of those systems. My issue: everything that I've read online about A* is talking about incorporating the distance between two nodes (for example, the distance between two cities) to help compute the shortest path. That doesn't help my case, as I'm more interested in the number of hops (node 1 > node 2 > node 3) rather than the distance between those hops. I do not know how to modify the A* algorithm to incorporate this. The information that I have in the database: A list of all systems and their neighbors (so, systemX links with systemA and systemB) x, y, and z coordinates of all systems in a 3D grid If anyone can point me in the right direction, that would be great. I'm looking to use this in PHP, however I've also started to work in Python a bit so that'll work too. Example data can be provided on request if needed. EDIT As some have pointed out, the 'cost' associated with each jump would simply be 1. However, with A*, you also need a heuristic that estimates the distance from the current node to the target node. I'm not exactly sure how to go about determining this value, as I'm not sure of the remaining hops. As stated, I do have the 3D coordinates (x,y,z) for every node, but I'm not sure if this could give any insight as the physical distance between each node is not of concern. I do know that no path spans more than 99 hops. EDIT 2 MySQL data for the example region. to -> from data: http://pastebin.com/gTuwdr7h System information (x,y,z cooridinates if needed): http://pastebin.com/Vz3FD3Kz A: If the number of "hops" is what matters to you, then consider that to be your distance, meaning that if the two locations are connected by a single hop, the distance is one. For A*, you'll need two things: The costs from one location to each neighbor, in your case, this seems to be constant (hops). An heuristic, that estimates the cost of going from your current "node" or location to the goal. How you can estimate this depends a lot on your problem. It's important that your heuristic doesn't *over*estimates the true cost, or else A* won't be able to guarantee the best result. A: Take the upper part of the linked graph: Assume that the lines represent 2 way (i.e., you can go to or from any linked node) and that the black lines are a 'cost' of 1 and the red lines are a 'cost' of 2. 
That structure can be represented by the following Python data structure:

graph = {
    'Q-KCK3': {'3C-261': 1, 'L-SDU7': 1},
    'L-SDU7': {'Q-KCK3': 1, '3C-261': 1, '4-IPWK': 1},
    '3C-261': {'4-IPWK': 1, '9K-VDI': 1, 'L-SDU7': 1, 'U8MM-3': 1},
    'U8MM-3': {'9K-VDI': 1, '3C-261': 1, 'Q8T-MC': 2},
    'Q8T-MC': {'U8MM-3': 2, 'H55-2R': 1, 'VM-QFU': 2},
    'H55-2R': {'Q8T-MC': 1, '9XI-OX': 1, 'A3-PAT': 1, 'P6-DBM': 1},
    'P6-DBM': {'A3-PAT': 1, 'H55-2R': 1},
    'A3-PAT': {'P6-DBM': 1, 'H55-2R': 1, '9XI-OX': 1, 'YRZ-E4': 1},
    'YRZ-E4': {'A3-PAT': 1},
    'VM-QFU': {'IEZW-V': 1, 'PU-128': 2},
    'IEZW-V': {'VM-QFU': 1, 'PU-128': 1, 'B-DX09': 1},
    'PU-128': {'VM-QFU': 1, 'B-DX09': 1, 'IEZW-V': 1},
    'B-DX09': {'IEZW-V': 1, 'PU-128': 1, '1TS-WIN': 1},
    '1TS-WIN': {'B-DX09': 1, '16-31U': 1},
    '16-31U': {'1TS-WIN': 1},
}

Now you can define a recursive function to navigate that data:

def find_all_paths(graph, start, end, path=[]):
    # Depth-first enumeration of every cycle-free path from start to end.
    path = path + [start]
    if start == end:
        return [path]
    if start not in graph:
        return []
    paths = []
    for node in graph[start]:
        if node not in path:
            newpaths = find_all_paths(graph, node, end, path)
            for newpath in newpaths:
                paths.append(newpath)
    return paths

def min_path(graph, start, end):
    # Evaluate every path and keep the one with the lowest total cost.
    paths = find_all_paths(graph, start, end)
    mt = 10**99
    mpath = []
    print('\tAll paths:', paths)
    for path in paths:
        t = sum(graph[i][j] for i, j in zip(path, path[1:]))
        print('\t\tevaluating:', path, t)
        if t < mt:
            mt = t
            mpath = path
    e1 = '\n'.join('{}->{}:{}'.format(i, j, graph[i][j]) for i, j in zip(mpath, mpath[1:]))
    e2 = str(sum(graph[i][j] for i, j in zip(mpath, mpath[1:])))
    print('Best path: ' + e1 + ' Total: ' + e2 + '\n')

Now demo:

min_path(graph, 'Q-KCK3', 'A3-PAT')
min_path(graph, 'Q-KCK3', '16-31U')

Prints:

    All paths: [['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT']]
        evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'] 7
        evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT'] 6
        evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'P6-DBM', 'A3-PAT'] 8
        evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'H55-2R', 'A3-PAT'] 7
Best path: Q-KCK3->3C-261:1
3C-261->U8MM-3:1
U8MM-3->Q8T-MC:2
Q8T-MC->H55-2R:1
H55-2R->A3-PAT:1 Total: 6

    All paths: [['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'], ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U']]
        evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 10
        evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 11
        evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 11
        evaluating: ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 12
        evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 11
        evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 12
        evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'B-DX09', '1TS-WIN', '16-31U'] 12
        evaluating: ['Q-KCK3', 'L-SDU7', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'PU-128', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U'] 13
Best path: Q-KCK3->3C-261:1
3C-261->U8MM-3:1
U8MM-3->Q8T-MC:2
Q8T-MC->VM-QFU:2
VM-QFU->IEZW-V:1
IEZW-V->B-DX09:1
B-DX09->1TS-WIN:1
1TS-WIN->16-31U:1 Total: 10

If you want the minimum number of hops, just modify min_path to return the path with the shortest list length rather than the minimum total cost, or make the cost of each hop 1. Have a look at my previous answer regarding trains.
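Because every jump costs the same, the minimum-hop route can also be found with a plain breadth-first search, which is what A* effectively reduces to when every edge has cost 1 and the heuristic is zero. Below is a minimal sketch over the same graph dictionary as above; the function name and the example output are illustrative, not part of the original answer.

from collections import deque

def fewest_hops(graph, start, goal):
    # Breadth-first search over the adjacency dict: the first time we reach
    # the goal we have used the fewest possible hops.
    if start == goal:
        return [start]
    visited = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        # Some systems appear only as neighbors, so fall back to an empty dict.
        for neighbor in graph.get(path[-1], {}):
            if neighbor in visited:
                continue
            if neighbor == goal:
                return path + [neighbor]
            visited.add(neighbor)
            queue.append(path + [neighbor])
    return None

print(fewest_hops(graph, 'Q-KCK3', '16-31U'))
# e.g. ['Q-KCK3', '3C-261', 'U8MM-3', 'Q8T-MC', 'VM-QFU', 'IEZW-V', 'B-DX09', '1TS-WIN', '16-31U']

Unlike find_all_paths, this never enumerates every possible path, so it stays fast on large maps. An admissible A* heuristic, such as the straight-line distance to the goal divided by the length of the longest single jump, would prune the search further without ever overestimating the remaining hop count.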
{ "pile_set_name": "StackExchange" }
Q: Teachable AI Chatbot
I'm starting on AI chatbots and don't know where to actually start. What I've imagined is something like this:
An empty chatbot that doesn't know anything
It learns when the user asks a question: if the bot doesn't know the answer, it asks for one
It records all the data it has learned and parses synonymous questions

Example procedure:
User: what is the color of a ripe mango?
Bot: I don't know [to input answer add !@: at the start]
User: !@:yellow
User: do you know the color of a ripe mango?
Bot: yellow

A: Chatbots, or conversational dialogue systems in general, have to be able to generate natural language, and as you might expect, this is not trivial. State-of-the-art approaches usually mine large collections of human-human conversation (for example chat platforms such as Facebook or Twitter, or movie dialogs: things that are available in large quantities and resemble natural conversation). These conversations are then labelled, for example as question-answer pairs, possibly using pretrained word embeddings. This is an active area of research in NLP. One widely used category of systems is end-to-end sequence-to-sequence (seq2seq) models. However, basic seq2seq models have a tendency to produce repetitive and therefore dull responses, so more recent papers try to address this using reinforcement learning, as well as techniques like adversarial networks, in order to learn to choose responses. Another technique that improves such systems is to extend the context of the conversation by allowing the model to see more prior turns, for example by using a hierarchical model. If you don't really know where to start, I think you will find all the basics you need in the free chapter of "Speech and Language Processing" by Daniel Jurafsky & James H. Martin (August 2017). Good luck!
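Before reaching for seq2seq models, the exact behaviour sketched in the question can be prototyped with a plain dictionary. The following is a minimal Python sketch under those assumptions; the !@: convention comes from the example procedure, while normalize() is only a crude, hypothetical stand-in for real synonym handling.

def normalize(question):
    # Crude stand-in for "parse synonymous questions": lower-case, drop the
    # question mark and a few filler words, keep the content words.
    words = question.lower().replace('?', '').split()
    fillers = {'do', 'you', 'know', 'what', 'is', 'the', 'a', 'of'}
    return ' '.join(w for w in words if w not in fillers)

def teachable_bot():
    knowledge = {}        # normalized question -> taught answer
    last_question = None  # the most recent question asked
    while True:
        text = input('User: ')
        if text.startswith('!@:'):
            if last_question is not None:
                # Store the taught answer under the last question asked.
                knowledge[last_question] = text[3:].strip()
        else:
            last_question = normalize(text)
            print('Bot:', knowledge.get(last_question, "I don't know [to input answer add !@: at the start]"))

teachable_bot()

With this, "what is the color of a ripe mango?" and "do you know the color of a ripe mango?" both normalize to "color ripe mango", so the taught answer is returned for either phrasing, matching the example dialogue.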
{ "pile_set_name": "StackExchange" }
Q: Scrape/extract result from coinmarketcap.com with Java
I need to extract the CoinMarketCap figure shown at the top of the page (e.g. Market Cap: $306,020,249,332) with Java; please see the attached picture. I have used the jsoup library in Java (Eclipse), but it didn't extract this value; jsoup only extracts other attributes. The problem probably comes from a JavaScript library. I also tried HtmlUnit without success:

import java.io.IOException;
import java.util.List;
import com.gargoylesoftware.htmlunit.WebClient;
import com.gargoylesoftware.htmlunit.html.HtmlAnchor;
import com.gargoylesoftware.htmlunit.html.HtmlPage;

public class Testss {
    public static void main(String[] args) throws IOException {
        String url = "https://coinmarketcap.com/faq/";
        WebClient client = new WebClient();
        HtmlPage page = client.getPage(url);
        List<?> anchors = page.getByXPath("//div[@class='col-sm-6 text-center']//a");
        for (Object obj : anchors) {
            HtmlAnchor a = (HtmlAnchor) obj;
            System.out.println(a.getTextContent().trim());
        }
    }
}

How can I extract this value from the site with Java? Thanks!

A: Check the network tab to find the exact request that fetches the data. In your case it is https://files.coinmarketcap.com/generated/stats/global.json. Fetching the main URL will not give you what you need; instead, fetch the data from that request URL directly and parse it with any JSON library (SimpleJSON is one I can suggest). The JSON data you will get after hitting that URL:

{
    "bitcoin_percentage_of_market_cap": 55.95083004655126,
    "active_cryptocurrencies": 1324,
    "total_volume_usd": 21503093761,
    "active_markets": 7009,
    "total_market_cap_by_available_supply_usd": 301100436864
}
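The two steps the answer describes (request the JSON endpoint, then read the fields you need) are the same in any language; in Java, any HTTP client plus a JSON library such as the suggested SimpleJSON would follow them. As a compact illustration only, here is a Python sketch; the endpoint and field names are taken from the answer above and may no longer exist on the live site.

import json
from urllib.request import urlopen

# Endpoint and field names come from the answer above; they may have changed since.
url = 'https://files.coinmarketcap.com/generated/stats/global.json'
with urlopen(url) as response:
    stats = json.loads(response.read().decode('utf-8'))

market_cap = stats['total_market_cap_by_available_supply_usd']
print('Market Cap: ${:,}'.format(market_cap))  # e.g. Market Cap: $301,100,436,864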
{ "pile_set_name": "StackExchange" }
Q: jQuery .live() events for on-add to DOM elements
What if I want not only to attach events to all elements added in the future, but also to put some data on them and run an init routine for them? Is it possible to do this with something from the .live() tools?

A: No event exists in the DOM for element creation. The only option you have is the liveQuery plugin, which scans the DOM every x milliseconds looking for selector matches and runs a function when a new matching element is found. I don't really like the overhead of this, but it may give you the desired functionality.
{ "pile_set_name": "StackExchange" }