index (int64, 0–10k) | blob_id (string, length 40) | step-1 (string, 13–984k chars) | step-2 (string, 6–1.23M chars, nullable ⌀) | step-3 (string, 15–1.34M chars, nullable ⌀) | step-4 (string, 30–1.34M chars, nullable ⌀) | step-5 (string, 64–1.2M chars, nullable ⌀) | step-ids (sequence, length 1–5) |
---|---|---|---|---|---|---|---|
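The rows below are verbatim samples. Purely as an illustration (not part of the source), a table with this schema could be inspected with the Hugging Face `datasets` library as in the sketch below; the dataset path is a placeholder, not the real dataset name.

```python
# Minimal sketch, assuming the table is published as a Hugging Face dataset.
# "user/masked-code-steps" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/masked-code-steps", split="train")  # hypothetical path

row = ds[300]
print(row["blob_id"])   # 40-character blob hash
print(row["step-ids"])  # e.g. [2, 3, 4, 5, 8]
for col in ("step-1", "step-2", "step-3", "step-4", "step-5"):
    cell = row[col]
    # step-2 .. step-5 may be null (⌀); step-1 is always present
    print(col, "->", None if cell is None else f"{len(cell)} chars")
```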
300 | 5ddfeb49c16a7452c99126f1a837f3c0bed0ec10 | <mask token>
def getExternalLinks(bs, excludeUrl):
externalLinks = []
for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +
excludeUrl + ').)*$')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in externalLinks:
externalLinks.append(link.attrs['href'])
return externalLinks
<mask token>
def followExternalOnly(startingSite):
externalLink = getRandomExternalLink(startingSite)
print(f'Random external link is: {externalLink}')
followExternalOnly(externalLink)
| <mask token>
def getExternalLinks(bs, excludeUrl):
externalLinks = []
for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +
excludeUrl + ').)*$')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in externalLinks:
externalLinks.append(link.attrs['href'])
return externalLinks
<mask token>
def getAllExternalLinks(siteUrl):
html = requests.get(siteUrl)
domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'
bs = BeautifulSoup(html.text, 'html.parser')
internalLinks = getInternalLinks(bs, domain)
externalLinks = getExternalLinks(bs, domain)
for link in externalLinks:
if link not in allExtLinks:
allExtLinks.add(link)
print(link)
for link in internalLinks:
if link not in allIntLinks:
allIntLinks.add(link)
getAllExternalLinks(link)
def followExternalOnly(startingSite):
externalLink = getRandomExternalLink(startingSite)
print(f'Random external link is: {externalLink}')
followExternalOnly(externalLink)
| <mask token>
def getInternalLinks(bs, includeUrl):
includeUrl = (
f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')
internalLinks = []
for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in internalLinks:
if link.attrs['href'].startswith('/'):
internalLinks.append(includeUrl + link.attrs['href'])
else:
internalLinks.append(link.attrs['href'])
return internalLinks
def getExternalLinks(bs, excludeUrl):
externalLinks = []
for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +
excludeUrl + ').)*$')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in externalLinks:
externalLinks.append(link.attrs['href'])
return externalLinks
<mask token>
def getAllExternalLinks(siteUrl):
html = requests.get(siteUrl)
domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'
bs = BeautifulSoup(html.text, 'html.parser')
internalLinks = getInternalLinks(bs, domain)
externalLinks = getExternalLinks(bs, domain)
for link in externalLinks:
if link not in allExtLinks:
allExtLinks.add(link)
print(link)
for link in internalLinks:
if link not in allIntLinks:
allIntLinks.add(link)
getAllExternalLinks(link)
def followExternalOnly(startingSite):
externalLink = getRandomExternalLink(startingSite)
print(f'Random external link is: {externalLink}')
followExternalOnly(externalLink)
| <mask token>
def getInternalLinks(bs, includeUrl):
includeUrl = (
f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')
internalLinks = []
for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in internalLinks:
if link.attrs['href'].startswith('/'):
internalLinks.append(includeUrl + link.attrs['href'])
else:
internalLinks.append(link.attrs['href'])
return internalLinks
def getExternalLinks(bs, excludeUrl):
externalLinks = []
for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +
excludeUrl + ').)*$')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in externalLinks:
externalLinks.append(link.attrs['href'])
return externalLinks
def getRandomExternalLink(startingPage):
html = requests.get(startingPage)
bs = BeautifulSoup(html.text, 'html.parser')
externalLinks = getExternalLinks(bs, urlparse(startingPage).netloc)
if len(externalLinks) == 0:
print('No external links, looking around the site for one.')
domain = (
f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'
)
internalLinks = getInternalLinks(bs, domain)
return getRandomExternalLink(internalLinks[random.randint(0, len(
internalLinks) - 1)])
else:
return externalLinks[random.randint(0, len(externalLinks) - 1)]
<mask token>
def getAllExternalLinks(siteUrl):
html = requests.get(siteUrl)
domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'
bs = BeautifulSoup(html.text, 'html.parser')
internalLinks = getInternalLinks(bs, domain)
externalLinks = getExternalLinks(bs, domain)
for link in externalLinks:
if link not in allExtLinks:
allExtLinks.add(link)
print(link)
for link in internalLinks:
if link not in allIntLinks:
allIntLinks.add(link)
getAllExternalLinks(link)
def followExternalOnly(startingSite):
externalLink = getRandomExternalLink(startingSite)
print(f'Random external link is: {externalLink}')
followExternalOnly(externalLink)
| import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import re
import datetime
import random
pages = set()
# Retrieve a list of all Internal links foound on a page.
def getInternalLinks(bs, includeUrl):
includeUrl = f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}'
internalLinks = []
# Finds all links thhat begin with a "/"
for link in bs.find_all('a',
href=re.compile('^(/|.*'+includeUrl+')')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in internalLinks:
if link.attrs['href'].startswith('/'):
internalLinks.append(includeUrl+link.attrs['href'])
else:
internalLinks.append(link.attrs['href'])
return internalLinks
# Retrieves a list of all external links found on a pagee.
def getExternalLinks(bs, excludeUrl):
externalLinks = []
# Finds all links that starts with "http" that do
# not contain the current URL
for link in bs.find_all('a',
href=re.compile('^(http|www)((?!'+excludeUrl+').)*$')):
if link.attrs['href'] is not None:
if link.attrs['href'] not in externalLinks:
externalLinks.append(link.attrs['href'])
return externalLinks
def getRandomExternalLink(startingPage):
html = requests.get(startingPage)
bs = BeautifulSoup(html.text, 'html.parser')
externalLinks = getExternalLinks(bs,
urlparse(startingPage).netloc)
if len(externalLinks) == 0:
print('No external links, looking around the site for one.')
domain = f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'
internalLinks = getInternalLinks(bs, domain)
return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks)-1)])
else:
return externalLinks[random.randint(0, len(externalLinks)-1)]
# Collects a list of all external URLs found on the site
allExtLinks = set()
allIntLinks = set()
def getAllExternalLinks(siteUrl):
html = requests.get(siteUrl)
domain = f"{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}"
bs = BeautifulSoup(html.text, 'html.parser')
internalLinks = getInternalLinks(bs, domain)
externalLinks = getExternalLinks(bs, domain)
for link in externalLinks:
if link not in allExtLinks:
allExtLinks.add(link)
print(link)
for link in internalLinks:
if link not in allIntLinks:
allIntLinks.add(link)
getAllExternalLinks(link)
def followExternalOnly(startingSite):
externalLink = getRandomExternalLink(startingSite)
print(f"Random external link is: {externalLink}")
followExternalOnly(externalLink)
| [
2,
3,
4,
5,
8
] |
301 | 1e1f918ba24f5a5f13b9b01289ebfda65bae572d | def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
<mask token>
| def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
def input_as_int():
return list(map(int, input().split()))
<mask token>
| def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
def input_as_int():
return list(map(int, input().split()))
<mask token>
print(ans)
| def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
def input_as_int():
return list(map(int, input().split()))
R, C, K = input_as_int()
N = int(input())
print(ans)
| null | [
2,
3,
4,
5
] |
302 | 04538cc5c9c68582cc9aa2959faae2d7547ab2ee | <mask token>
def write_to_file(file, line):
file.write(line + '\n')
<mask token>
| try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
<mask token>
def write_to_file(file, line):
file.write(line + '\n')
def cat_map():
catmap = {}
id = 1
f = open('cat')
cat = set([s.strip() for s in list(f.readlines())])
for i in cat:
catmap[i] = id
id = id + 1
return catmap
<mask token>
for vespaadd in root:
document = vespaadd.find('document')
if document != None:
subject = document.find('subject')
content = document.find('content')
maincat = document.find('maincat')
if subject == None:
continue
if content == None:
content = subject
if maincat == None:
continue
write_to_file(cnn, data_helpers.clean_str(subject.text))
write_to_file(lstm, data_helpers.clean_str(content.text))
write_to_file(cat, data_helpers.clean_str(maincat.text))
cnn.close()
lstm.close()
cat.close()
| try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
<mask token>
def write_to_file(file, line):
file.write(line + '\n')
def cat_map():
catmap = {}
id = 1
f = open('cat')
cat = set([s.strip() for s in list(f.readlines())])
for i in cat:
catmap[i] = id
id = id + 1
return catmap
tree = ET.ElementTree(file='test.xml')
root = tree.getroot()
cnn = open('cnn', 'a')
lstm = open('lstm', 'a')
cat = open('cat', 'a')
for vespaadd in root:
document = vespaadd.find('document')
if document != None:
subject = document.find('subject')
content = document.find('content')
maincat = document.find('maincat')
if subject == None:
continue
if content == None:
content = subject
if maincat == None:
continue
write_to_file(cnn, data_helpers.clean_str(subject.text))
write_to_file(lstm, data_helpers.clean_str(content.text))
write_to_file(cat, data_helpers.clean_str(maincat.text))
cnn.close()
lstm.close()
cat.close()
| try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import data_helpers
def write_to_file(file, line):
file.write(line + '\n')
def cat_map():
catmap = {}
id = 1
f = open('cat')
cat = set([s.strip() for s in list(f.readlines())])
for i in cat:
catmap[i] = id
id = id + 1
return catmap
tree = ET.ElementTree(file='test.xml')
root = tree.getroot()
cnn = open('cnn', 'a')
lstm = open('lstm', 'a')
cat = open('cat', 'a')
for vespaadd in root:
document = vespaadd.find('document')
if document != None:
subject = document.find('subject')
content = document.find('content')
maincat = document.find('maincat')
if subject == None:
continue
if content == None:
content = subject
if maincat == None:
continue
write_to_file(cnn, data_helpers.clean_str(subject.text))
write_to_file(lstm, data_helpers.clean_str(content.text))
write_to_file(cat, data_helpers.clean_str(maincat.text))
cnn.close()
lstm.close()
cat.close()
| try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import data_helpers
def write_to_file(file,line):
file.write(line+"\n")
def cat_map():
catmap={}
id=1
f=open("cat")
cat=set([s.strip() for s in list(f.readlines())])
for i in cat:
catmap[i]=id
id=id+1
return catmap
tree = ET.ElementTree(file="test.xml")
root = tree.getroot()
cnn=open("cnn","a")
lstm=open("lstm","a")
cat=open("cat","a")
for vespaadd in root:
document = vespaadd.find("document")
if(document!=None):
subject = document.find("subject")
content = document.find("content")
maincat = document.find("maincat")
if(subject==None):
continue
if(content==None):
content=subject
if(maincat==None):
continue
write_to_file(cnn,data_helpers.clean_str(subject.text))
write_to_file(lstm,data_helpers.clean_str(content.text))
write_to_file(cat,data_helpers.clean_str(maincat.text))
cnn.close()
lstm.close()
cat.close() | [
1,
3,
4,
5,
6
] |
303 | 631323e79f4fb32611d7094af92cff8f923fa996 | <mask token>
| <mask token>
def _adjacent(word1, word2):
"""
Returns True if the input words differ by only a single character;
returns False otherwise.
>>> _adjacent('phone','phony')
True
>>> _adjacent('stone','money')
False
"""
| def word_ladder(start_word, end_word, dictionary_file='words5.dict'):
"""
Returns a list satisfying the following properties:
1. the first element is `start_word`
2. the last element is `end_word`
3. elements at index i and i+1 are `_adjacent`
4. all elements are entries in the `dictionary_file` file
For example, running the command
```
word_ladder('stone','money')
```
may give the output
```
['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']
```
but the possible outputs are not unique,
so you may also get the output
```
['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']
```
(We cannot use doctests here because the outputs are not unique.)
Whenever it is impossible to generate a word ladder between the two words,
the function returns `None`.
HINT:
See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.
"""
<mask token>
def _adjacent(word1, word2):
"""
Returns True if the input words differ by only a single character;
returns False otherwise.
>>> _adjacent('phone','phony')
True
>>> _adjacent('stone','money')
False
"""
| def word_ladder(start_word, end_word, dictionary_file='words5.dict'):
"""
Returns a list satisfying the following properties:
1. the first element is `start_word`
2. the last element is `end_word`
3. elements at index i and i+1 are `_adjacent`
4. all elements are entries in the `dictionary_file` file
For example, running the command
```
word_ladder('stone','money')
```
may give the output
```
['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']
```
but the possible outputs are not unique,
so you may also get the output
```
['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']
```
(We cannot use doctests here because the outputs are not unique.)
Whenever it is impossible to generate a word ladder between the two words,
the function returns `None`.
HINT:
See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.
"""
def verify_word_ladder(ladder):
"""
Returns True if each entry of the input list is adjacent to its neighbors;
otherwise returns False.
>>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])
True
>>> verify_word_ladder(['stone', 'shone', 'phony'])
False
"""
def _adjacent(word1, word2):
"""
Returns True if the input words differ by only a single character;
returns False otherwise.
>>> _adjacent('phone','phony')
True
>>> _adjacent('stone','money')
False
"""
| #!/bin/python3
def word_ladder(start_word, end_word, dictionary_file='words5.dict'):
'''
Returns a list satisfying the following properties:
1. the first element is `start_word`
2. the last element is `end_word`
3. elements at index i and i+1 are `_adjacent`
4. all elements are entries in the `dictionary_file` file
For example, running the command
```
word_ladder('stone','money')
```
may give the output
```
['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']
```
but the possible outputs are not unique,
so you may also get the output
```
['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']
```
(We cannot use doctests here because the outputs are not unique.)
Whenever it is impossible to generate a word ladder between the two words,
the function returns `None`.
HINT:
See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.
'''
def verify_word_ladder(ladder):
'''
Returns True if each entry of the input list is adjacent to its neighbors;
otherwise returns False.
>>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])
True
>>> verify_word_ladder(['stone', 'shone', 'phony'])
False
'''
def _adjacent(word1, word2):
'''
Returns True if the input words differ by only a single character;
returns False otherwise.
>>> _adjacent('phone','phony')
True
>>> _adjacent('stone','money')
False
'''
| [
0,
1,
2,
3,
4
] |
304 | 0a528fb7fe4a318af8bd3111e8d67f6af6bd7416 | <mask token>
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
<mask token>
| <mask token>
class TreeNode:
<mask token>
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:
if not node:
return depth, node
left_depth, left_lca = self.get_lca(node.left, depth + 1)
right_depth, right_lca = self.get_lca(node.right, depth + 1)
if left_depth == right_depth:
return left_depth, node
if left_depth > right_depth:
return left_depth, left_lca
return right_depth, right_lca
| <mask token>
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:
if not node:
return depth, node
left_depth, left_lca = self.get_lca(node.left, depth + 1)
right_depth, right_lca = self.get_lca(node.right, depth + 1)
if left_depth == right_depth:
return left_depth, node
if left_depth > right_depth:
return left_depth, left_lca
return right_depth, right_lca
| from typing import Tuple
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:
if not node:
return depth, node
left_depth, left_lca = self.get_lca(node.left, depth + 1)
right_depth, right_lca = self.get_lca(node.right, depth + 1)
if left_depth == right_depth:
return left_depth, node
if left_depth > right_depth:
return left_depth, left_lca
return right_depth, right_lca
| null | [
2,
4,
5,
6
] |
305 | 371762a6e3f8b8ed14742a70a709da224ae6712b | <mask token>
def bfs(graph, start):
result = []
queue = []
seen = set()
queue.append(start)
seen.add(start)
while len(queue):
vertex = queue.pop(0)
nodes = graph[vertex]
for node in nodes:
if node not in seen:
queue.append(node)
seen.add(node)
result.append(vertex)
return result
<mask token>
| <mask token>
def get_all_edge(graph):
result = []
for k, v in graph.items():
for i in v:
if sorted((k, i)) not in result:
result.append(sorted((k, i)))
return result
def bfs(graph, start):
result = []
queue = []
seen = set()
queue.append(start)
seen.add(start)
while len(queue):
vertex = queue.pop(0)
nodes = graph[vertex]
for node in nodes:
if node not in seen:
queue.append(node)
seen.add(node)
result.append(vertex)
return result
<mask token>
| <mask token>
def get_all_edge(graph):
result = []
for k, v in graph.items():
for i in v:
if sorted((k, i)) not in result:
result.append(sorted((k, i)))
return result
def bfs(graph, start):
result = []
queue = []
seen = set()
queue.append(start)
seen.add(start)
while len(queue):
vertex = queue.pop(0)
nodes = graph[vertex]
for node in nodes:
if node not in seen:
queue.append(node)
seen.add(node)
result.append(vertex)
return result
if __name__ == '__main__':
graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '4', '5'], '3': [
'1', '4'], '4': ['2', '3'], '5': ['2']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
['0', '1', '2', '3', '4', '5']
[['1', '3'], ['3', '4'], ['2', '5'], ['1', '2'], ['0', '1'], ['2', '4']]
['1', '3']
disjoint set
1, 3
['3', '4'] -> 一个点在集合中,另一个不在集合中
disjoint set
1, 3, 4
['2', '5'] -> 两个点都不在同一集合中
disjoint set 1
1, 3, 4
disjoint set 2
2, 5
['1', '2'] -> 两个点分别在不同的集合中,合并集合
disjoint set
1, 3, 4, 2, 5
['0', '1'] -> 一个点在集合中,另一个不在集合中
disjoint set
1, 3, 4, 2, 5, 0
['2', '4'] -> 两个点都在同一个集合中,说明有环
"""
graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '5'], '3': ['1',
'4'], '4': ['3'], '5': ['2']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['3', '4'], ['1', '3'], ['0', '1'], ['1', '2'], ['2', '5']]
['0', '1', '2', '3', '5', '4']
['3', '4']
disjoint set
3, 4
['1', '3']
disjoint set
3, 4, 1
['0', '1']
disjoint set
3, 4, 1, 0
['1', '2']
disjoint set
3, 4, 1, 0, 2
['2', '5']
disjoint set
3, 4, 1, 0, 2, 5
图中无环
"""
graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['2', '3'], ['0', '1'], ['1', '2']]
['0', '1', '2', '3']
['2', '3']
disjoint set
2, 3
['0', '1']
disjoint set1
2, 3
disjoint set2
0, 1
['1', '2']
disjoint set
0, 1, 2, 3
链表中无环
"""
graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2', '4',
'6'], '4': ['3', '5'], '5': ['4', '6'], '6': ['3', '5']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['2', '3'], ['5', '6'], ['3', '4'], ['0', '1'], ['1', '2'], ['3', '6'], ['4', '5']]
['0', '1', '2', '3', '4', '6', '5']
['2', '3']
disjoint set
2, 3
['5', '6']
disjoint set1
2, 3
disjoint set2
5, 6
['3', '4']
disjoint set1
2, 3, 4
disjoint set2
5, 6
['0', '1']
disjoint set1
2, 3, 4
disjoint set2
5, 6
disjoint set3
0, 1
['1', '2']
disjoint set1
2, 3, 4, 1, 0
disjoint set2
5, 6
['3', '6']
disjoint set
2, 3, 4, 1, 0, 5, 6
['4', '5'] 链表中有环
"""
| import random
<mask token>
def get_all_edge(graph):
result = []
for k, v in graph.items():
for i in v:
if sorted((k, i)) not in result:
result.append(sorted((k, i)))
return result
def bfs(graph, start):
result = []
queue = []
seen = set()
queue.append(start)
seen.add(start)
while len(queue):
vertex = queue.pop(0)
nodes = graph[vertex]
for node in nodes:
if node not in seen:
queue.append(node)
seen.add(node)
result.append(vertex)
return result
if __name__ == '__main__':
graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '4', '5'], '3': [
'1', '4'], '4': ['2', '3'], '5': ['2']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
['0', '1', '2', '3', '4', '5']
[['1', '3'], ['3', '4'], ['2', '5'], ['1', '2'], ['0', '1'], ['2', '4']]
['1', '3']
disjoint set
1, 3
['3', '4'] -> 一个点在集合中,另一个不在集合中
disjoint set
1, 3, 4
['2', '5'] -> 两个点都不在同一集合中
disjoint set 1
1, 3, 4
disjoint set 2
2, 5
['1', '2'] -> 两个点分别在不同的集合中,合并集合
disjoint set
1, 3, 4, 2, 5
['0', '1'] -> 一个点在集合中,另一个不在集合中
disjoint set
1, 3, 4, 2, 5, 0
['2', '4'] -> 两个点都在同一个集合中,说明有环
"""
graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '5'], '3': ['1',
'4'], '4': ['3'], '5': ['2']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['3', '4'], ['1', '3'], ['0', '1'], ['1', '2'], ['2', '5']]
['0', '1', '2', '3', '5', '4']
['3', '4']
disjoint set
3, 4
['1', '3']
disjoint set
3, 4, 1
['0', '1']
disjoint set
3, 4, 1, 0
['1', '2']
disjoint set
3, 4, 1, 0, 2
['2', '5']
disjoint set
3, 4, 1, 0, 2, 5
图中无环
"""
graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['2', '3'], ['0', '1'], ['1', '2']]
['0', '1', '2', '3']
['2', '3']
disjoint set
2, 3
['0', '1']
disjoint set1
2, 3
disjoint set2
0, 1
['1', '2']
disjoint set
0, 1, 2, 3
链表中无环
"""
graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2', '4',
'6'], '4': ['3', '5'], '5': ['4', '6'], '6': ['3', '5']}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['2', '3'], ['5', '6'], ['3', '4'], ['0', '1'], ['1', '2'], ['3', '6'], ['4', '5']]
['0', '1', '2', '3', '4', '6', '5']
['2', '3']
disjoint set
2, 3
['5', '6']
disjoint set1
2, 3
disjoint set2
5, 6
['3', '4']
disjoint set1
2, 3, 4
disjoint set2
5, 6
['0', '1']
disjoint set1
2, 3, 4
disjoint set2
5, 6
disjoint set3
0, 1
['1', '2']
disjoint set1
2, 3, 4, 1, 0
disjoint set2
5, 6
['3', '6']
disjoint set
2, 3, 4, 1, 0, 5, 6
['4', '5'] 链表中有环
"""
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
"""
检查图数据和结构或者链表数据结构中是否存在环
https://www.youtube.com/watch?v=YKE4Vd1ysPI
"""
def get_all_edge(graph):
result = []
for k, v in graph.items():
for i in v:
if sorted((k, i)) not in result:
result.append(sorted((k, i)))
return result
def bfs(graph, start):
result = []
queue = []
seen = set()
queue.append(start)
seen.add(start)
while len(queue):
vertex = queue.pop(0)
nodes = graph[vertex]
for node in nodes:
if node not in seen:
queue.append(node)
seen.add(node)
result.append(vertex)
return result
if __name__ == '__main__':
graph = {
'0': ['1'],
'1': ['0', '2', '3'],
'2': ['1', '4', '5'],
'3': ['1', '4'],
'4': ['2', '3'],
'5': ['2'],
}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0')) # ['0', '1', '2', '3', '4', '5']
"""
['0', '1', '2', '3', '4', '5']
[['1', '3'], ['3', '4'], ['2', '5'], ['1', '2'], ['0', '1'], ['2', '4']]
['1', '3']
disjoint set
1, 3
['3', '4'] -> 一个点在集合中,另一个不在集合中
disjoint set
1, 3, 4
['2', '5'] -> 两个点都不在同一集合中
disjoint set 1
1, 3, 4
disjoint set 2
2, 5
['1', '2'] -> 两个点分别在不同的集合中,合并集合
disjoint set
1, 3, 4, 2, 5
['0', '1'] -> 一个点在集合中,另一个不在集合中
disjoint set
1, 3, 4, 2, 5, 0
['2', '4'] -> 两个点都在同一个集合中,说明有环
"""
graph = {
'0': ['1'],
'1': ['0', '2', '3'],
'2': ['1', '5'],
'3': ['1', '4'],
'4': ['3'],
'5': ['2'],
}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0')) # ['0', '1', '2', '3', '4', '5']
"""
[['3', '4'], ['1', '3'], ['0', '1'], ['1', '2'], ['2', '5']]
['0', '1', '2', '3', '5', '4']
['3', '4']
disjoint set
3, 4
['1', '3']
disjoint set
3, 4, 1
['0', '1']
disjoint set
3, 4, 1, 0
['1', '2']
disjoint set
3, 4, 1, 0, 2
['2', '5']
disjoint set
3, 4, 1, 0, 2, 5
图中无环
"""
graph = {
'0': ['1'],
'1': ['0', '2'],
'2': ['1', '3'],
'3': ['2']
}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['2', '3'], ['0', '1'], ['1', '2']]
['0', '1', '2', '3']
['2', '3']
disjoint set
2, 3
['0', '1']
disjoint set1
2, 3
disjoint set2
0, 1
['1', '2']
disjoint set
0, 1, 2, 3
链表中无环
"""
graph = {
'0': ['1'],
'1': ['0', '2'],
'2': ['1', '3'],
'3': ['2', '4', '6'],
'4': ['3', '5'],
'5': ['4', '6'],
'6': ['3', '5']
}
all_edge = get_all_edge(graph)
random.shuffle(all_edge)
print(all_edge)
print(bfs(graph, '0'))
"""
[['2', '3'], ['5', '6'], ['3', '4'], ['0', '1'], ['1', '2'], ['3', '6'], ['4', '5']]
['0', '1', '2', '3', '4', '6', '5']
['2', '3']
disjoint set
2, 3
['5', '6']
disjoint set1
2, 3
disjoint set2
5, 6
['3', '4']
disjoint set1
2, 3, 4
disjoint set2
5, 6
['0', '1']
disjoint set1
2, 3, 4
disjoint set2
5, 6
disjoint set3
0, 1
['1', '2']
disjoint set1
2, 3, 4, 1, 0
disjoint set2
5, 6
['3', '6']
disjoint set
2, 3, 4, 1, 0, 5, 6
['4', '5'] 链表中有环
"""
| [
1,
2,
3,
4,
5
] |
306 | 7f7ebc6d3d69fbb19071c63a9ab235ad01f1d414 | <mask token>
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
<mask token>
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
<mask token>
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
<mask token>
| <mask token>
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
<mask token>
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
def compute_test_accuracy(model):
test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(
test_tags, tag_to_id)
predicted_tag_probabilities = model.predict(test_words, verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
numerator = np.sum(np.logical_and(predicted_tags == test_tags,
test_words != 0))
denominator = np.sum(test_words != 0)
return float(numerator) / denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
<mask token>
| <mask token>
sys.path.append('..')
<mask token>
helpers.mask_busy_gpus(wait=False)
<mask token>
nltk.download('brown')
nltk.download('universal_tagset')
<mask token>
for sentence in data:
words, tags = zip(*sentence)
word_counts.update(words)
<mask token>
print('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /
sum(word_counts.values())))
<mask token>
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
<mask token>
print('Word ids:')
print(to_matrix(batch_words, word_to_id))
print('Tag ids:')
print(to_matrix(batch_tags, tag_to_id))
<mask token>
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
def compute_test_accuracy(model):
test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(
test_tags, tag_to_id)
predicted_tag_probabilities = model.predict(test_words, verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
numerator = np.sum(np.logical_and(predicted_tags == test_tags,
test_words != 0))
denominator = np.sum(test_words != 0)
return float(numerator) / denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
<mask token>
model.add(L.InputLayer([None], dtype='int32'))
model.add(L.Embedding(len(all_words), 50))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.Conv1D(128, 2, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 3, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 4, padding='same', activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256, activation='tanh')))
model.add(L.Dropout(0.25))
<mask token>
model.add(stepwise_dense)
model.summary()
model.compile('adam', 'categorical_crossentropy')
model.fit_generator(generate_batches(train_data), len(train_data) /
BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)
<mask token>
print("""
Final accuracy: %.5f""" % acc)
model.save_weights('LSTM_gpu_trained_weights_1layer.h5')
| <mask token>
sys.path.append('..')
<mask token>
helpers.mask_busy_gpus(wait=False)
<mask token>
nltk.download('brown')
nltk.download('universal_tagset')
data = nltk.corpus.brown.tagged_sents(tagset='universal')
all_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.',
'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']
data = np.array([[(word.lower(), tag) for word, tag in sentence] for
sentence in data])
<mask token>
train_data, test_data = train_test_split(data, test_size=0.25, random_state=42)
<mask token>
word_counts = Counter()
for sentence in data:
words, tags = zip(*sentence)
word_counts.update(words)
all_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(
10000)))[0])
print('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /
sum(word_counts.values())))
<mask token>
word_to_id = defaultdict(lambda : 1, {word: i for i, word in enumerate(
all_words)})
tag_to_id = {tag: i for i, tag in enumerate(all_tags)}
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
batch_words, batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])
print('Word ids:')
print(to_matrix(batch_words, word_to_id))
print('Tag ids:')
print(to_matrix(batch_tags, tag_to_id))
<mask token>
BATCH_SIZE = 32
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
def compute_test_accuracy(model):
test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(
test_tags, tag_to_id)
predicted_tag_probabilities = model.predict(test_words, verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
numerator = np.sum(np.logical_and(predicted_tags == test_tags,
test_words != 0))
denominator = np.sum(test_words != 0)
return float(numerator) / denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
model = keras.models.Sequential()
model = keras.models.Sequential()
model.add(L.InputLayer([None], dtype='int32'))
model.add(L.Embedding(len(all_words), 50))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.Conv1D(128, 2, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 3, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 4, padding='same', activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256, activation='tanh')))
model.add(L.Dropout(0.25))
stepwise_dense = L.Dense(len(all_tags), activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
model.add(stepwise_dense)
model.summary()
model.compile('adam', 'categorical_crossentropy')
model.fit_generator(generate_batches(train_data), len(train_data) /
BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)
acc = compute_test_accuracy(model)
print("""
Final accuracy: %.5f""" % acc)
model.save_weights('LSTM_gpu_trained_weights_1layer.h5')
| import sys
sys.path.append("..")
import helpers
helpers.mask_busy_gpus(wait=False)
import nltk
import numpy as np
nltk.download('brown')
nltk.download('universal_tagset')
data = nltk.corpus.brown.tagged_sents(tagset='universal')
all_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']
data = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ])
from sklearn.cross_validation import train_test_split
train_data,test_data = train_test_split(data,test_size=0.25,random_state=42)
from collections import Counter
word_counts = Counter()
for sentence in data:
words,tags = zip(*sentence)
word_counts.update(words)
all_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0])
#print(all_words)
#let's measure what fraction of data words are in the dictionary
print("Coverage = %.5f"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))
from collections import defaultdict
word_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)})
tag_to_id = {tag:i for i,tag in enumerate(all_tags)}
def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len,lines))
matrix = np.empty([len(lines),max_len],dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]
matrix[i,:len(line_ix)] = line_ix
return matrix.T if time_major else matrix
batch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])
print("Word ids:")
print(to_matrix(batch_words,word_to_id))
print("Tag ids:")
print(to_matrix(batch_tags,tag_to_id))
import keras
import keras.layers as L
from keras.utils.np_utils import to_categorical
BATCH_SIZE=32
def generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):
assert isinstance(sentences,np.ndarray),"Make sure sentences is q numpy array"
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0,len(indices)-1,batch_size):
batch_indices = indices[start:start+batch_size]
batch_words,batch_tags = [],[]
for sent in sentences[batch_indices]:
words,tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words,word_to_id,max_len,pad)
batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad)
batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,))
yield batch_words,batch_tags_1hot
def compute_test_accuracy(model):
test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id)
#predict tag probabilities of shape [batch,time,n_tags]
predicted_tag_probabilities = model.predict(test_words,verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
#compute accurary excluding padding
numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0)))
denominator = np.sum(test_words != 0)
return float(numerator)/denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs=None):
sys.stdout.flush()
print("\nMeasuring validation accuracy...")
acc = compute_test_accuracy(self.model)
print("\nValidation accuracy: %.5f\n"%acc)
sys.stdout.flush()
model = keras.models.Sequential()
model = keras.models.Sequential()
model.add(L.InputLayer([None],dtype='int32'))
model.add(L.Embedding(len(all_words),50))
model.add(L.TimeDistributed(L.Dense(96,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96,activation='tanh')))
model.add(L.Dropout(0.25))
#model.add(L.Conv1D(32,3,padding='same',activation='tanh'))
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
#
#
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.Conv1D(128,2,padding='same',activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128,3,padding='same',activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128,4,padding='same',activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256,activation='tanh')))
model.add(L.Dropout(0.25))
#model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
#model.add(L.Dropout(0.25))
stepwise_dense = L.Dense(len(all_tags),activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
model.add(stepwise_dense)
model.summary()
model.compile('adam','categorical_crossentropy')
model.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE,
callbacks=[EvaluateAccuracy()], epochs=50,)
acc = compute_test_accuracy(model)
print("\nFinal accuracy: %.5f"%acc)
model.save_weights("LSTM_gpu_trained_weights_1layer.h5")
| [
4,
5,
6,
7,
9
] |
307 | 8698aedc5c8671f46c73898a7188440254b79bbf | <mask token>
class Environment:
@abstractmethod
def __init__(self, agent):
pass
<mask token>
<mask token>
<mask token>
| <mask token>
class Environment:
@abstractmethod
def __init__(self, agent):
pass
<mask token>
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
| <mask token>
class Environment:
@abstractmethod
def __init__(self, agent):
pass
@abstractmethod
def execute_step(self, n=1):
pass
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
| from abc import abstractmethod
class Environment:
@abstractmethod
def __init__(self, agent):
pass
@abstractmethod
def execute_step(self, n=1):
pass
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
| null | [
2,
4,
5,
6
] |
308 | 1be510e6715d21e814c48fe05496704e9a65d554 | <mask token>
| <mask token>
c = Client()
| from end import Client
c = Client()
| null | null | [
0,
1,
2
] |
309 | 6b727cdfc684db4ba919cd5390fe45de43a806fe | <mask token>
| <mask token>
for var in var_list:
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
<mask token>
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
<mask token>
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
<mask token>
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')
| <mask token>
data_root = '../data/synthetic/standard/'
var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']
eke = 0.01
output = []
diagnostic_functions = [basic_stats]
for var in var_list:
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'hs'
diagnostic_functions = [hs_spectral_slope]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'cur'
diagnostic_functions = [flow_stats]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
ds = xr.merge(output)
df = ds.to_dataframe()
df = df.reset_index()
data = df.to_xarray()
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')
| import glob
import xarray as xr
from model_diagnostics import *
data_root = '../data/synthetic/standard/'
var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']
eke = 0.01
output = []
diagnostic_functions = [basic_stats]
for var in var_list:
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'hs'
diagnostic_functions = [hs_spectral_slope]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'cur'
diagnostic_functions = [flow_stats]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
ds = xr.merge(output)
df = ds.to_dataframe()
df = df.reset_index()
data = df.to_xarray()
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')
| import glob
import xarray as xr
from model_diagnostics import *
data_root = '../data/synthetic/standard/'
var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']
eke = 0.01
##########################
output = []
diagnostic_functions = [basic_stats]
for var in var_list:
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var,diagnostic_functions))
print("processing %s" %os.path.basename(f))
var = 'hs'
diagnostic_functions = [hs_spectral_slope]
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var,diagnostic_functions))
print("processing %s" %os.path.basename(f))
var = 'cur'
diagnostic_functions = [flow_stats]
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print("processing %s" %os.path.basename(f))
ds = xr.merge(output)
df = ds.to_dataframe()
df = df.reset_index()
data = df.to_xarray()
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc'%eke, mode='w')
| [
0,
1,
2,
3,
4
] |
310 | c66f4ee5719f764c8c713c23815302c00b6fb9af | <mask token>
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
return apology("You can't affort this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
<mask token>
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
<mask token>
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
return apology('must comfirm password', 400)
elif request.form.get('confirmation') != request.form.get('password'):
return apology('Password not matches', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
<mask token>
| <mask token>
@app.after_request
def after_request(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
<mask token>
@app.route('/')
@login_required
def index():
"""Show portfolio of stocks"""
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])
cash_ = cash[0]['cash']
display = []
total_share = 0
for row in rows:
symbol = str(row['symbol'])
print(symbol)
name = lookup(symbol)['name']
shares = int(row['amount'])
price = float(lookup(symbol)['price'])
total = float(shares) * price
total_share += total
display.append({'symbol': symbol, 'name': name, 'shares': shares,
'price': price, 'total': total})
total_money = total_share + cash[0]['cash']
return render_template('index.html', display=display, total_money=
total_money, cash=cash_)
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
return apology("You can't affort this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
<mask token>
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Log user in"""
session.clear()
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 403)
elif not request.form.get('password'):
return apology('must provide password', 403)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 1 or not check_password_hash(rows[0]['hash'],
request.form.get('password')):
return apology('invalid username and/or password', 403)
session['user_id'] = rows[0]['id']
return redirect('/')
else:
return render_template('login.html')
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
@app.route('/quote', methods=['GET', 'POST'])
@login_required
def quote():
"""Get stock quote."""
if request.method == 'POST':
quote = lookup(request.form.get('symbol'))
if quote == None:
return apology('Invalid symbol', 400)
price = usd(quote['price'])
return render_template('quoted.html', quote=quote, price=price)
else:
return render_template('quote.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
            return apology('must confirm password', 400)
elif request.form.get('confirmation') != request.form.get('password'):
            return apology('passwords do not match', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
<mask token>
@app.route('/HAX', methods=['GET', 'POST'])
@login_required
def HAX():
if request.method == 'POST':
total = request.form.get('HAX')
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect('/')
else:
return render_template('HAX.html')
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
<mask token>
| <mask token>
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.after_request
def after_request(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
app.jinja_env.filters['usd'] = usd
app.config['SESSION_FILE_DIR'] = mkdtemp()
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
db = SQL('sqlite:///finance.db')
if not os.environ.get('API_KEY'):
raise RuntimeError('API_KEY not set')
@app.route('/')
@login_required
def index():
"""Show portfolio of stocks"""
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])
cash_ = cash[0]['cash']
display = []
total_share = 0
for row in rows:
symbol = str(row['symbol'])
print(symbol)
name = lookup(symbol)['name']
shares = int(row['amount'])
price = float(lookup(symbol)['price'])
total = float(shares) * price
total_share += total
display.append({'symbol': symbol, 'name': name, 'shares': shares,
'price': price, 'total': total})
total_money = total_share + cash[0]['cash']
return render_template('index.html', display=display, total_money=
total_money, cash=cash_)
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
            return apology("You can't afford this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
@app.route('/history')
@login_required
def history():
"""Show history of transactions"""
rows = db.execute('SELECT * FROM record ORDER BY t1')
return render_template('history.html', rows=rows)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Log user in"""
session.clear()
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 403)
elif not request.form.get('password'):
return apology('must provide password', 403)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 1 or not check_password_hash(rows[0]['hash'],
request.form.get('password')):
return apology('invalid username and/or password', 403)
session['user_id'] = rows[0]['id']
return redirect('/')
else:
return render_template('login.html')
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
@app.route('/quote', methods=['GET', 'POST'])
@login_required
def quote():
"""Get stock quote."""
if request.method == 'POST':
quote = lookup(request.form.get('symbol'))
if quote == None:
return apology('Invalid symbol', 400)
price = usd(quote['price'])
return render_template('quoted.html', quote=quote, price=price)
else:
return render_template('quote.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
            return apology('must confirm password', 400)
elif request.form.get('confirmation') != request.form.get('password'):
            return apology('passwords do not match', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
@app.route('/sell', methods=['GET', 'POST'])
@login_required
def sell():
"""Sell shares of stock"""
if request.method == 'POST':
if not request.form.get('shares'):
            return apology('Please enter how many shares you want to sell', 400)
sell = request.form.get('symbol')
shares = request.form.get('shares')
amount = db.execute(
'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'
, session['user_id'], sell)
if amount[0]['amount'] < int(shares):
            return apology("You don't own that many shares", 400)
quote = lookup(sell)
price = quote['price']
total = int(price) * int(shares)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))"
, session['user_id'], int(shares) * -1, quote['symbol'], price)
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
return render_template('sell.html', rows=rows)
@app.route('/HAX', methods=['GET', 'POST'])
@login_required
def HAX():
if request.method == 'POST':
total = request.form.get('HAX')
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect('/')
else:
return render_template('HAX.html')
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
| import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.after_request
def after_request(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
app.jinja_env.filters['usd'] = usd
app.config['SESSION_FILE_DIR'] = mkdtemp()
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
db = SQL('sqlite:///finance.db')
if not os.environ.get('API_KEY'):
raise RuntimeError('API_KEY not set')
@app.route('/')
@login_required
def index():
"""Show portfolio of stocks"""
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])
cash_ = cash[0]['cash']
display = []
total_share = 0
for row in rows:
symbol = str(row['symbol'])
print(symbol)
name = lookup(symbol)['name']
shares = int(row['amount'])
price = float(lookup(symbol)['price'])
total = float(shares) * price
total_share += total
display.append({'symbol': symbol, 'name': name, 'shares': shares,
'price': price, 'total': total})
total_money = total_share + cash[0]['cash']
return render_template('index.html', display=display, total_money=
total_money, cash=cash_)
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
            return apology("You can't afford this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
@app.route('/history')
@login_required
def history():
"""Show history of transactions"""
rows = db.execute('SELECT * FROM record ORDER BY t1')
return render_template('history.html', rows=rows)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Log user in"""
session.clear()
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 403)
elif not request.form.get('password'):
return apology('must provide password', 403)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 1 or not check_password_hash(rows[0]['hash'],
request.form.get('password')):
return apology('invalid username and/or password', 403)
session['user_id'] = rows[0]['id']
return redirect('/')
else:
return render_template('login.html')
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
@app.route('/quote', methods=['GET', 'POST'])
@login_required
def quote():
"""Get stock quote."""
if request.method == 'POST':
quote = lookup(request.form.get('symbol'))
if quote == None:
return apology('Invalid symbol', 400)
price = usd(quote['price'])
return render_template('quoted.html', quote=quote, price=price)
else:
return render_template('quote.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
            return apology('must confirm password', 400)
elif request.form.get('confirmation') != request.form.get('password'):
            return apology('passwords do not match', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
@app.route('/sell', methods=['GET', 'POST'])
@login_required
def sell():
"""Sell shares of stock"""
if request.method == 'POST':
if not request.form.get('shares'):
            return apology('Please enter how many shares you want to sell', 400)
sell = request.form.get('symbol')
shares = request.form.get('shares')
amount = db.execute(
'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'
, session['user_id'], sell)
if amount[0]['amount'] < int(shares):
            return apology("You don't own that many shares", 400)
quote = lookup(sell)
price = quote['price']
total = int(price) * int(shares)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))"
, session['user_id'], int(shares) * -1, quote['symbol'], price)
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
return render_template('sell.html', rows=rows)
@app.route('/HAX', methods=['GET', 'POST'])
@login_required
def HAX():
if request.method == 'POST':
total = request.form.get('HAX')
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect('/')
else:
return render_template('HAX.html')
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
| import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"])
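    # transactions are signed: buys are stored as positive share counts and sells as negative,
    # so SUM(transactions) above is the net holding per symbol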
cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"])
cash_=cash[0]["cash"]
    #store all the data in a list of dicts so it's easier to pass to the html template
display=[]
total_share=0
for row in rows:
symbol=str(row["symbol"])
print(symbol)
name=lookup(symbol)["name"]
shares=int(row["amount"])
price=float(lookup(symbol)["price"])
total=float(shares) *price
total_share+=total
display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total})
total_money=total_share+cash[0]["cash"]
return render_template("index.html",display=display,total_money=total_money,cash=cash_)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == "POST":
# Ensure symbol was submitted
if not request.form.get("symbol"):
return apology("must provide symbol", 400)
# Ensure shares was submitted
elif not request.form.get("shares"):
return apology("must provide shares", 400)
if not request.form.get("shares").isdigit():
return apology("must be integer",400)
elif int(request.form.get("shares"))<1 :
return apology("must be positive integer", 400)
elif lookup(request.form.get("symbol"))==None:
return apology("Must be a valid symbol",400)
#ensure money>price
quote=lookup(request.form.get("symbol"))
shares=request.form.get("shares")
cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"])
if cash[0]["cash"]<int(quote["price"])*int(shares):
            return apology("You can't afford this/these",400)
#BUY, STORE DATA IN REPOSITORY AND RECORD
#record this transaction
db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))",session["user_id"],int(shares),quote["symbol"],float(quote["price"]))
#deduct the cash
total=int(quote["price"])*int(shares)
db.execute("UPDATE users SET cash=cash- (?) WHERE id=?",total,session["user_id"])
return redirect("/")
else:
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
rows=db.execute("SELECT * FROM record ORDER BY t1")
return render_template("history.html",rows=rows)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
if request.method=="POST":
quote=lookup(request.form.get("symbol"))
if quote==None:
return apology("Invalid symbol",400)
price=usd(quote["price"])
return render_template("quoted.html",quote=quote,price=price)
else:
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 400)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 400)
        # Ensure password confirmation was submitted
        elif not request.form.get("confirmation"):
            return apology("must confirm password", 400)
        # Ensure passwords match
        elif request.form.get("confirmation") != request.form.get("password"):
            return apology("passwords do not match",400)
# Ensure username is new(unique)
rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))
if len(rows) != 0:
return apology("username used", 400)
db.execute("INSERT INTO users (username,hash) VALUES (?,?)",request.form.get("username"),generate_password_hash(request.form.get("password")))
# Redirect user to home page
return redirect("/")
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
if request.method=='POST':
#parameter is not filled
if not request.form.get("shares"):
            return apology("Please enter how many shares you want to sell",400)
        #check that the shares (amount) being sold do not exceed the owner's holdings
sell=request.form.get("symbol")
shares=request.form.get("shares")
amount=db.execute("SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions",session["user_id"],sell)
if amount[0]["amount"]<int(shares):
            return apology("You don't own that many shares",400)
#record sell and add cash amount
quote=lookup(sell)
price=quote["price"]
total=int(price)*int(shares)
db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))",session["user_id"],(int(shares)*-1),quote["symbol"],price)
db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"])
return redirect("/")
else:
rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"])
return render_template("sell.html",rows=rows)
@app.route("/HAX", methods=["GET", "POST"])
@login_required
def HAX():
#add free monei boiiii
if request.method=="POST":
total=request.form.get("HAX")
db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect("/")
else:
return render_template("HAX.html")
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
| [
3,
9,
13,
14,
15
] |
311 | 097a87f7f1346e5db1599e59680232912348aef7 | <mask token>
| <mask token>
class hrsalaryRule(models.Model):
<mask token>
<mask token>
| <mask token>
class hrsalaryRule(models.Model):
_inherit = 'hr.salary.rule'
is_tax_fdfp = fields.Boolean('Est un impôt FDFP')
| from odoo import api, models, fields, _
class hrsalaryRule(models.Model):
_inherit = 'hr.salary.rule'
is_tax_fdfp = fields.Boolean('Est un impôt FDFP')
| # -*- coding:utf-8 -*-
from odoo import api, models, fields, _
class hrsalaryRule(models.Model):
_inherit = "hr.salary.rule"
is_tax_fdfp = fields.Boolean("Est un impôt FDFP") | [
0,
1,
2,
3,
4
] |
312 | a126b1775ffe1ba1aebc288ce17fac8ada0b0756 | <mask token>
def quitScreen():
messagebox.showinfo('collecting data', '點擊視窗開始分析')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
<mask token>
def Result_Print():
window = Tk()
window.title('分析結果')
window.geometry('600x900')
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab1, text='交叉配對結果')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab2, text='配方1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab3, text='配方2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab4, text='配方3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab5, text='最接近配方')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab1, text='顏色分類結果')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab2, text='配方1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab3, text='配方2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab4, text='配方3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab5, text='最接近配方')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
n = 8
global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '只找到少數資料\n 已存在test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('最接近的為1', filtdf.head(1))
<mask token>
def main():
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
<mask token>
| <mask token>
def quitScreen():
messagebox.showinfo('collecting data', '點擊視窗開始分析')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
<mask token>
def Result_Print():
window = Tk()
window.title('分析結果')
window.geometry('600x900')
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab1, text='交叉配對結果')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab2, text='配方1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab3, text='配方2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab4, text='配方3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab5, text='最接近配方')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab1, text='顏色分類結果')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab2, text='配方1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab3, text='配方2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab4, text='配方3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab5, text='最接近配方')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
n = 8
global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
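                # keep candidate rows that share the sampled colour class, per-channel average, or RGB sum;
                # the Rdif/Gdif/Bdif columns added next score how far each candidate is from the sample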
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '只找到少數資料\n 已存在test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('最接近的為1', filtdf.head(1))
def color_def(BAvr, GAvr, RAvr):
global color
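    # rough classifier: pick a colour name from which BGR channel dominates and by how much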
if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:
color = 'White'
return color
elif BAvr >= GAvr and BAvr >= RAvr:
if BAvr - GAvr > 3 and BAvr - RAvr >= 3:
color = 'Blue'
return color
elif BAvr - GAvr < 3:
color = 'Cyan'
return color
else:
color = 'Purple'
return color
elif GAvr >= RAvr and GAvr >= BAvr:
if GAvr - RAvr > 3 or GAvr - BAvr > 3:
color = 'Green'
return color
elif GAvr - RAvr < 3:
color = 'Yellow'
return color
else:
color = 'Cyan'
return color
elif RAvr >= GAvr and RAvr >= BAvr:
if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:
color = 'Red'
return color
elif RAvr - GAvr < 3:
color = 'Yellow'
return color
else:
color = 'Purple'
return color
else:
color = 'White'
<mask token>
def main():
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
<mask token>
| <mask token>
refPt = []
PtBGR = []
r = []
g = []
b = []
refPt = []
Serial = []
PtBGR = []
r1 = []
r2 = []
r3 = []
r4 = []
rate = []
rate2 = []
rate3 = []
r6 = []
r7 = []
r8 = []
r9 = []
add = []
add2 = []
add3 = []
color_name = []
locate = []
brand = []
boolean = False
root = tk.Tk()
root.geometry('400x200')
root.configure(background='white')
def quitScreen():
messagebox.showinfo('collecting data', '點擊視窗開始分析')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
img = PhotoImage(file='buttons/QJsmall.png')
panel = tk.Label(root, image=img)
panel.grid(row=0, column=0, columnspan=3)
labelmode = tk.Label(root, text="""請輸入圖片完整名稱
ex:104432 w7.jpg""", bg='white')
labelmode.configure(font=('微軟正黑體', 10))
labelmode.grid(row=1)
text = tk.Text(root, width=20, height=1)
text.insert('insert', '.jpg')
text.configure(font=('微軟正黑體', 10))
text.grid(row=1, column=2)
labelmode2 = tk.Label(root, text="""請輸入讀取資料庫名稱
ex:PureColorBig.csv""", bg=
'white')
labelmode2.configure(font=('微軟正黑體', 10))
labelmode2.grid(row=2)
text2 = tk.Text(root, width=20, height=1)
text2.insert('insert', 'PureColorBig.csv')
text2.configure(font=('微軟正黑體', 10))
text2.grid(row=2, column=2)
img_confirm = PhotoImage(file='buttons/confirm.png')
img_start = PhotoImage(file='buttons/start.png')
btnRead = tk.Button(root, image=img_confirm, text=' ', relief='flat',
command=getTextInput)
btnRead.grid(row=5, column=1)
btnRead2 = tk.Button(root, image=img_start, text=' ', relief='flat',
command=quitScreen)
btnRead2.grid(row=5, column=2)
root.mainloop()
def Result_Print():
window = Tk()
window.title('分析結果')
window.geometry('600x900')
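    # two tab groups: tablayout holds the colour-classification matches, tablayout2 the pixel cross-match results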
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab1, text='交叉配對結果')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab2, text='配方1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab3, text='配方2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab4, text='配方3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab5, text='最接近配方')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab1, text='顏色分類結果')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab2, text='配方1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab3, text='配方2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab4, text='配方3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab5, text='最接近配方')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
n = 8
global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '只找到少數資料\n 已存在test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('最接近的為1', filtdf.head(1))
def color_def(BAvr, GAvr, RAvr):
global color
if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:
color = 'White'
return color
elif BAvr >= GAvr and BAvr >= RAvr:
if BAvr - GAvr > 3 and BAvr - RAvr >= 3:
color = 'Blue'
return color
elif BAvr - GAvr < 3:
color = 'Cyan'
return color
else:
color = 'Purple'
return color
elif GAvr >= RAvr and GAvr >= BAvr:
if GAvr - RAvr > 3 or GAvr - BAvr > 3:
color = 'Green'
return color
elif GAvr - RAvr < 3:
color = 'Yellow'
return color
else:
color = 'Cyan'
return color
elif RAvr >= GAvr and RAvr >= BAvr:
if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:
color = 'Red'
return color
elif RAvr - GAvr < 3:
color = 'Yellow'
return color
else:
color = 'Purple'
return color
else:
        color = 'White'
        return color
img = cv2.imdecode(np.fromfile('.pure\\%s' % result, dtype=np.uint8), -1)
cv2.namedWindow('mouse_callback')
cv2.setMouseCallback('mouse_callback', CircleCallback)
def main():
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| import cv2
import numpy as np
import pandas as pd
import tkinter as tk
import random
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import Scale, Tk
from tkinter.ttk import Notebook
refPt = []
PtBGR = []
r = []
g = []
b = []
refPt = []
Serial = []
PtBGR = []
r1 = []
r2 = []
r3 = []
r4 = []
rate = []
rate2 = []
rate3 = []
r6 = []
r7 = []
r8 = []
r9 = []
add = []
add2 = []
add3 = []
color_name = []
locate = []
brand = []
boolean = False
root = tk.Tk()
root.geometry('400x200')
root.configure(background='white')
def quitScreen():
messagebox.showinfo('collecting data', '點擊視窗開始分析')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
img = PhotoImage(file='buttons/QJsmall.png')
panel = tk.Label(root, image=img)
panel.grid(row=0, column=0, columnspan=3)
labelmode = tk.Label(root, text="""請輸入圖片完整名稱
ex:104432 w7.jpg""", bg='white')
labelmode.configure(font=('微軟正黑體', 10))
labelmode.grid(row=1)
text = tk.Text(root, width=20, height=1)
text.insert('insert', '.jpg')
text.configure(font=('微軟正黑體', 10))
text.grid(row=1, column=2)
labelmode2 = tk.Label(root, text="""請輸入讀取資料庫名稱
ex:PureColorBig.csv""", bg=
'white')
labelmode2.configure(font=('微軟正黑體', 10))
labelmode2.grid(row=2)
text2 = tk.Text(root, width=20, height=1)
text2.insert('insert', 'PureColorBig.csv')
text2.configure(font=('微軟正黑體', 10))
text2.grid(row=2, column=2)
img_confirm = PhotoImage(file='buttons/confirm.png')
img_start = PhotoImage(file='buttons/start.png')
btnRead = tk.Button(root, image=img_confirm, text=' ', relief='flat',
command=getTextInput)
btnRead.grid(row=5, column=1)
btnRead2 = tk.Button(root, image=img_start, text=' ', relief='flat',
command=quitScreen)
btnRead2.grid(row=5, column=2)
root.mainloop()
def Result_Print():
window = Tk()
window.title('分析結果')
window.geometry('600x900')
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab1, text='交叉配對結果')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab2, text='配方1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab3, text='配方2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab4, text='配方3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab5, text='最接近配方')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab1, text='顏色分類結果')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab2, text='配方1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab3, text='配方2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab4, text='配方3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab5, text='最接近配方')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
n = 8
global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '只找到少數資料\n 已存在test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('最接近的為1', filtdf.head(1))
def color_def(BAvr, GAvr, RAvr):
global color
if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:
color = 'White'
return color
elif BAvr >= GAvr and BAvr >= RAvr:
if BAvr - GAvr > 3 and BAvr - RAvr >= 3:
color = 'Blue'
return color
elif BAvr - GAvr < 3:
color = 'Cyan'
return color
else:
color = 'Purple'
return color
elif GAvr >= RAvr and GAvr >= BAvr:
if GAvr - RAvr > 3 or GAvr - BAvr > 3:
color = 'Green'
return color
elif GAvr - RAvr < 3:
color = 'Yellow'
return color
else:
color = 'Cyan'
return color
elif RAvr >= GAvr and RAvr >= BAvr:
if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:
color = 'Red'
return color
elif RAvr - GAvr < 3:
color = 'Yellow'
return color
else:
color = 'Purple'
return color
else:
        color = 'White'
        return color
img = cv2.imdecode(np.fromfile('.pure\\%s' % result, dtype=np.uint8), -1)
cv2.namedWindow('mouse_callback')
cv2.setMouseCallback('mouse_callback', CircleCallback)
def main():
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| import cv2
import numpy as np
import pandas as pd
import tkinter as tk
import random
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import Scale,Tk
from tkinter.ttk import Notebook
refPt = []
PtBGR=[]
r=[]
g=[]
b=[]
refPt = []
Serial=[]
PtBGR=[]
r1=[]
r2=[]
r3=[]
r4=[]
rate=[]
rate2=[]
rate3=[]
r6=[]
r7=[]
r8=[]
r9=[]
add=[]
add2=[]
add3=[]
color_name=[]
locate=[]
brand=[]
boolean=False
root = tk.Tk()
root.geometry("400x200")
root.configure(background='white')
def quitScreen():
messagebox.showinfo("collecting data", "點擊視窗開始分析")
root.destroy()
root2=Tk()
root2.destroy()
def getTextInput():
global result,result2
result=text.get(1.0, tk.END+"-1c")
result2=text2.get(1.0, tk.END+"-1c")
img = PhotoImage(file="buttons/QJsmall.png")
panel = tk.Label(root, image = img)
panel.grid(row=0,column=0,columnspan=3)
labelmode = tk.Label(root,text = "請輸入圖片完整名稱\n ex:104432 w7.jpg",bg="white")
labelmode.configure(font=("微軟正黑體", 10))
labelmode.grid(row=1)
text=tk.Text(root, width=20,height=1)
text.insert("insert",".jpg")
text.configure(font=("微軟正黑體", 10))
text.grid(row=1,column=2)
labelmode2 = tk.Label(root,text = "請輸入讀取資料庫名稱\n ex:PureColorBig.csv",bg="white")
labelmode2.configure(font=("微軟正黑體", 10))
labelmode2.grid(row=2)
text2=tk.Text(root, width=20,height=1)
text2.insert("insert","PureColorBig.csv")
text2.configure(font=("微軟正黑體", 10))
text2.grid(row=2,column=2)
img_confirm=PhotoImage(file="buttons/confirm.png")
img_start=PhotoImage(file="buttons/start.png")
btnRead=tk.Button(root, image=img_confirm,text=" ",relief='flat',
command=getTextInput)
btnRead.grid(row=5,column=1)
btnRead2=tk.Button(root, image=img_start,text=" ",relief='flat',
command=quitScreen)
btnRead2.grid(row=5,column=2)
root.mainloop()
def Result_Print():
window=Tk()
window.title("分析結果")
window.geometry("600x900")
frame2=Frame(window)
frame2.pack(fill="both")
tablayout=Notebook(frame2)
tablayout2=Notebook(frame2)
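    # tablayout2 holds the cross-matching result tabs, tablayout the color-classification tabs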
    # Cross matching
ntab1=Frame(tablayout2)
ntab1.pack(fill="both")
for row in range(len(name_n)):
for column in range(1):
label=Label(ntab1,width=25,height=2,text=name_n[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=column,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
for row in range(len(name_n)):
for column in range(1):
label=Label(ntab1,width=5,height=2,text="%s" %rate_n[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
for row in range(len(name_n)):
for column in range(1):
label=Label(ntab1,width=12,height=2,text="% 相似程度",bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=2,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
tablayout2.add(ntab1,text="交叉配對結果")
ntab2=Frame(tablayout2)
ntab2.pack(fill="both")
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab2,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab2,width=22,height=1,text=row_nf3[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
tablayout2.add(ntab2,text="配方1")
ntab3=Frame(tablayout2)
ntab3.pack(fill="both")
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab3,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab3,width=22,height=1,text=row_nf32[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
tablayout2.add(ntab3,text="配方2")
ntab4=Frame(tablayout2)
ntab4.pack(fill="both")
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab4,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab4,width=22,height=1,text=row_nf33[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
tablayout2.add(ntab4,text="配方3")
ntab5=Frame(tablayout2)
ntab5.pack(fill="both")
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab5,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
for row in range(len(ncol)):
for column in range(1):
label=Label(ntab5,width=22,height=1,text=row_nf3[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
ntab1.grid_columnconfigure(column,weight=1)
tablayout2.add(ntab5,text="最接近配方")
    # Color classification
tab1=Frame(tablayout)
tab1.pack(fill="both")
for row in range(len(name)):
for column in range(1):
label=Label(tab1,width=25,height=2,text=name[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=column,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
for row in range(len(name)):
for column in range(1):
label=Label(tab1,width=5,height=2,text="%s" %rate[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
for row in range(len(name)):
for column in range(1):
label=Label(tab1,width=12,height=2,text="% 相似程度",bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=2,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
tablayout.add(tab1,text="顏色分類結果")
tab2=Frame(tablayout)
tab2.pack(fill="both")
for row in range(len(col)):
for column in range(1):
label=Label(tab2,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
for row in range(len(col)):
for column in range(1):
label=Label(tab2,width=22,height=1,text=row_df3[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
tablayout.add(tab2,text="配方1")
tab3=Frame(tablayout)
tab3.pack(fill="both")
for row in range(len(col)):
for column in range(1):
label=Label(tab3,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
for row in range(len(col)):
for column in range(1):
label=Label(tab3,width=22,height=1,text=row_df32[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
tablayout.add(tab3,text="配方2")
tab4=Frame(tablayout)
tab4.pack(fill="both")
for row in range(len(col)):
for column in range(1):
label=Label(tab4,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
for row in range(len(col)):
for column in range(1):
label=Label(tab4,width=22,height=1,text=row_df33[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
tablayout.add(tab4,text="配方3")
tab5=Frame(tablayout)
tab5.pack(fill="both")
for row in range(len(col)):
for column in range(1):
label=Label(tab5,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
for row in range(len(col)):
for column in range(1):
label=Label(tab5,width=22,height=1,text=row_text[row],bg="black",fg="white",padx=1,pady=1)
label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1)
tab1.grid_columnconfigure(column,weight=1)
tablayout.add(tab5,text="最接近配方")
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event,x,y,flags,param):
n=8
global refPt,PtBGR,w,h,Serial,r1,r2,r3,r4,rate,rate2,rate3,r6,r7,r8,r9,add,add2,add3,color,b,g,r,df3,name,rate,col,row_text
global row_df3,row_df32,row_df33,row_text2,row_nf3,row_nf32,nf3,row_nf33,name_n,rate_n,ncol
if event == cv2.EVENT_LBUTTONDOWN:
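        # On a left click: sample n random pixels, average their BGR values, then
        # match the sample against the database in two ways (cross matching and color classification)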
        # n below is the number of sampled points; a larger n is more accurate, but values above 1000 are generally not recommended
n=500
for c in range(0,n):
c+=1
            # If n is changed, change the 499 below to n-1
ranx=(random.randint(0,499))
rany=(random.randint(0,499))
refPt.append((ranx,rany))
b, g, r = img[ranx,rany]
PtBGR.append((b,g,r))
#print(PtBGR[0:n])
b=[x[0] for x in PtBGR]
g=[x[1] for x in PtBGR]
r=[x[2] for x in PtBGR]
if len(refPt)==n:
BAvr=(round(sum(b[0:n])/n))
GAvr=(round(sum(g[0:n])/n))
RAvr=(round(sum(r[0:n])/n))
SumRGB=(BAvr+GAvr+RAvr)
SumAvr=(round(SumRGB/3))
color_def(BAvr,GAvr,RAvr)
color_name.append(color)
AvrRGB={'R':RAvr,'G':GAvr,'B':BAvr,'Sum':SumRGB,'Avr':SumAvr,'color':color_name}
df_test = pd.DataFrame(AvrRGB,index=[0])
dfread = pd.read_csv(".data base\\%s" %(result2))
dfread['A']= round((dfread['R'] + dfread['G'] + dfread['B'])/3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
                # Cross-comparison method
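                # Each sampled (R, G, B) triple is inner-joined with the database rows;
                # serial numbers are then ranked by how many sampled pixels they match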
nf=pd.DataFrame(list(zip(r,g,b)),columns=['R','G','B'])
nfread=dfread[['Serial no','R','G','B']]
loan=pd.merge(nf,nfread)
group=loan.groupby('Serial no')
Newnf=group.count()
Newnf['P']=round((Newnf['R']/Newnf['R'].sum())* 100)
Newnf=Newnf.sort_values(by=['R'],ascending=False)
Rate=Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.columns.values]
nf2=pd.DataFrame(Newnf.to_records())
nf2=nf2.head(5)
print(nf2)
if(len(nf2['Serial no'])==0):
i=0
j=0
k=0
elif(len(nf2['Serial no'])==1):
i=nf2.at[0,'Serial no']
j=0
k=0
elif(len(nf2['Serial no'])==2):
i=nf2.at[0,'Serial no']
j=nf2.at[1,'Serial no']
k=0
else:
i=nf2.at[0,'Serial no']
j=nf2.at[1,'Serial no']
k=nf2.at[2,'Serial no']
print(k)
nf3=dfread.loc[(dfread['Serial no']==i)].head(1)
nf4=dfread.loc[(dfread['Serial no']==j)].head(1)
nf5=dfread.loc[(dfread['Serial no']==k)].head(1)
nf3=nf3.drop(['R','G','B','color','A','S'],axis=1)
nf4=nf4.drop(['R','G','B','color','A','S'],axis=1)
nf5=nf5.drop(['R','G','B','color','A','S'],axis=1)
nf=pd.concat([nf3, nf4,nf5])
nf.to_csv(".data base\\test_result2.csv",index=False,encoding="utf_8_sig")
print(nf)
ncol=list(nf.columns)
if(len(nf2['Serial no'])==0):
root=tk.Tk()
root.withdraw()
messagebox.showinfo("失敗", "未找到符合資料")
elif(len(nf2['Serial no'])==1):
row_nf3=nf3.iloc[0].tolist()
row_nf32=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']
row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']
elif(len(nf2['Serial no'])==2):
row_nf3=nf3.iloc[0].tolist()
row_nf32=nf4.iloc[0].tolist()
row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']
else:
row_nf3=nf3.iloc[0].tolist()
row_nf32=nf4.iloc[0].tolist()
print(row_nf32)
row_nf33=nf5.iloc[0].tolist()
name_n=nf['Serial no'].tolist()
rate_n=Rate
                # Color classification method
                # (Adjustable) For a looser comparison, delete the two ''' lines surrounding the block below
'''
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
'''
                # (Adjustable) For a stricter comparison, delete the two ''' lines surrounding the block below
'''
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
'''
                # and add a # at the start of the line below
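                # Default filter: keep rows matching the color name, the per-pixel average, or the RGB sum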
newdf=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf.insert(1,'Rdif',newdf[['R']].add(-RAvr))
newdf.insert(2,'Gdif',newdf[['G']].add(-GAvr))
newdf.insert(3,'Bdif',newdf[['B']].add(-BAvr))
newdf.insert(4,'Adif',abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5,'Sdif',abs(newdf[['S']].add(-SumRGB)))
df=newdf.sort_values(by=['Sdif', 'Adif'], ascending=True).head(100)
df.insert(1,'dalta',abs(df['Rdif']+df['Gdif']+df['Bdif']))
df=df.sort_values(by=['dalta'],ascending=True)
data=df[['Serial no','color']]
group=data.groupby('Serial no')
datacount=group.count()
df=df.merge(datacount,left_on='Serial no',right_index=True)
df=df.sort_values(by=['color_y'],ascending=False)
df3=df.drop_duplicates('Serial no', keep='first', inplace=False).head()
print(df3)
df3.to_csv(".data base\\test_result.csv",index=False,encoding="utf_8_sig")
if df3.empty ==True:
root=tk.Tk()
root.withdraw()
messagebox.showinfo("失敗", "未找到符合資料")
elif len(df3)<=2:
root=tk.Tk()
root.withdraw()
messagebox.showinfo("失敗", "只找到少數資料\n 已存在test_result")
else:
Zero=df3.loc[(df3['Rdif']==0)&(df3['Gdif']==0)&(df3['Bdif']==0)]
Zero=Zero.head(3)
if Zero.empty==False:
Zero=Zero.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)
name=df3['Serial no'].tolist()
rate=df3['color_y'].tolist()
col=list(Zero.columns)
row_text=Zero.iloc[0].tolist()
df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)
row_df3=df3.iloc[0].tolist()
row_df32=df3.iloc[1].tolist()
row_df33=df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf=df3.loc[(df3['A']>=SumAvr)]
filtdf=filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head()
Neg_filtdf=df3.loc[(df3['A']<SumAvr)]
Neg_filtdf=Neg_filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head()
if Neg_filtdf.empty==True and filtdf.empty ==True:
root=tk.Tk()
root.withdraw()
messagebox.showinfo("失敗", "未找到符合資料")
else:
filtdf=filtdf.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)
name=df3['Serial no'].tolist()
rate=df3['color_y'].tolist()
col=list(filtdf.columns)
row_text=filtdf.iloc[0].tolist()
df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)
row_df3=df3.iloc[0].tolist()
row_df32=df3.iloc[1].tolist()
row_df33=df3.iloc[2].tolist()
Result_Print()
print("最接近的為1",filtdf.head(1))
def color_def(BAvr,GAvr,RAvr):
global color
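    # Map the averaged BGR values to a coarse color name used to pre-filter the database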
if abs(int(BAvr)-int(GAvr))<=1 and abs(int(BAvr)-int(RAvr))<=1:
color='White'
return color
elif BAvr>=GAvr and BAvr>=RAvr:
if BAvr-GAvr>3 and BAvr-RAvr>=3:
color='Blue'
return color
elif BAvr-GAvr<3:
color='Cyan'
return color
else:
color='Purple'
return color
elif GAvr>=RAvr and GAvr>=BAvr:
if GAvr-RAvr>3 or GAvr-BAvr>3:
color='Green'
return color
elif GAvr-RAvr<3:
color='Yellow'
return color
else:
color='Cyan'
return color
elif RAvr>=GAvr and RAvr>=BAvr:
if RAvr-GAvr>=3 and RAvr-BAvr>=3:
color='Red'
return color
elif RAvr-GAvr<3:
color='Yellow'
return color
else:
color='Purple'
return color
else:
        color='White'
        return color
#img=cv2.imdecode(np.fromfile(r"D:\桌面\JA Material\JA-material\pure\%s" % (result),dtype=np.uint8),-1)
img=cv2.imdecode(np.fromfile(r".pure\%s" % (result),dtype=np.uint8),-1)
cv2.namedWindow('mouse_callback')
# bind the callback function to window
cv2.setMouseCallback('mouse_callback',CircleCallback)
def main():
while (True):
cv2.imshow('mouse_callback',img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| [
5,
6,
8,
9,
10
] |
313 | dca36de5556b120b8b93eac0ad7b971ad735d907 | <mask token>
| <mask token>
def GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, disp=
False, callback=None, **kwargs):
x = x0.copy()
iteration = 0
opt_arg = {'f': f, 'grad_f': gradf}
for key in kwargs:
opt_arg[key] = kwargs[key]
while True:
gradient = -gradf(x)
alpha = line_search(x, gradient, **opt_arg)
x = x + alpha * gradient
if callback is not None:
callback(x)
iteration += 1
if disp:
print('Current function val =', f(x))
print('Current gradient norm = ', np.linalg.norm(gradf(x)))
if np.linalg.norm(gradf(x)) < epsilon:
break
if iteration >= num_iter:
break
res = {'x': x, 'num_iter': iteration, 'tol': np.linalg.norm(gradf(x))}
return res
<mask token>
| <mask token>
def GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, disp=
False, callback=None, **kwargs):
x = x0.copy()
iteration = 0
opt_arg = {'f': f, 'grad_f': gradf}
for key in kwargs:
opt_arg[key] = kwargs[key]
while True:
gradient = -gradf(x)
alpha = line_search(x, gradient, **opt_arg)
x = x + alpha * gradient
if callback is not None:
callback(x)
iteration += 1
if disp:
print('Current function val =', f(x))
print('Current gradient norm = ', np.linalg.norm(gradf(x)))
if np.linalg.norm(gradf(x)) < epsilon:
break
if iteration >= num_iter:
break
res = {'x': x, 'num_iter': iteration, 'tol': np.linalg.norm(gradf(x))}
return res
def backtracking(x, descent_dir, **kwargs):
f = kwargs['f']
grad_f = kwargs['grad_f']
if kwargs['method'] == 'Armijo':
beta1 = kwargs['beta1']
rho = kwargs['rho']
alpha = 1
while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x
).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)):
alpha *= rho
return alpha
| import numpy as np
def GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, disp=
False, callback=None, **kwargs):
x = x0.copy()
iteration = 0
opt_arg = {'f': f, 'grad_f': gradf}
for key in kwargs:
opt_arg[key] = kwargs[key]
while True:
gradient = -gradf(x)
alpha = line_search(x, gradient, **opt_arg)
x = x + alpha * gradient
if callback is not None:
callback(x)
iteration += 1
if disp:
print('Current function val =', f(x))
print('Current gradient norm = ', np.linalg.norm(gradf(x)))
if np.linalg.norm(gradf(x)) < epsilon:
break
if iteration >= num_iter:
break
res = {'x': x, 'num_iter': iteration, 'tol': np.linalg.norm(gradf(x))}
return res
def backtracking(x, descent_dir, **kwargs):
f = kwargs['f']
grad_f = kwargs['grad_f']
if kwargs['method'] == 'Armijo':
beta1 = kwargs['beta1']
rho = kwargs['rho']
alpha = 1
while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x
).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)):
alpha *= rho
return alpha
| import numpy as np
def GradientDescent(f, gradf, x0, epsilon, num_iter, line_search,
disp=False, callback=None, **kwargs):
x = x0.copy()
iteration = 0
opt_arg = {"f": f, "grad_f": gradf}
for key in kwargs:
opt_arg[key] = kwargs[key]
while True:
gradient = -gradf(x)
alpha = line_search(x, gradient, **opt_arg)
x = x + alpha * gradient
if callback is not None:
callback(x)
iteration += 1
if disp:
print("Current function val =", f(x))
print("Current gradient norm = ", np.linalg.norm(gradf(x)))
if np.linalg.norm(gradf(x)) < epsilon:
break
if iteration >= num_iter:
break
res = {"x": x, "num_iter": iteration, "tol": np.linalg.norm(gradf(x))}
return res
def backtracking(x, descent_dir, **kwargs):
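    # Armijo backtracking: shrink alpha by rho until the sufficient-decrease condition
    # f(x + alpha*d) < f(x) + beta1*alpha*grad_f(x).dot(d) holds (and the value is not NaN)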
f = kwargs["f"]
grad_f = kwargs["grad_f"]
if kwargs["method"] == "Armijo":
beta1 = kwargs["beta1"]
rho = kwargs["rho"]
alpha = 1
while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)):
alpha *= rho
return alpha
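# A minimal usage sketch (hypothetical quadratic objective; the extra keyword
# arguments are the ones backtracking() reads from **kwargs):
# f = lambda x: 0.5 * x.dot(x)
# gradf = lambda x: x
# res = GradientDescent(f, gradf, np.ones(2), epsilon=1e-6, num_iter=100,
#                       line_search=backtracking,
#                       method="Armijo", beta1=1e-4, rho=0.5)
# print(res["x"], res["num_iter"], res["tol"])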
| [
0,
1,
2,
3,
4
] |
314 | 4711adcc7c95993ec13b9d06fa674aa064f79bfd | <mask token>
class Net(torch.nn.Module):
<mask token>
<mask token>
| <mask token>
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device
=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print(
'Are you sure dropout_prob is supposed to be greater than 0.5?'
)
self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',
pretrained=True)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',
pretrained=True)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
<mask token>
| <mask token>
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device
=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print(
'Are you sure dropout_prob is supposed to be greater than 0.5?'
)
self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',
pretrained=True)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',
pretrained=True)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
def forward(self, inputs):
first_images = inputs['image1'].to(self.device)
first_text = inputs['text1']
first_length = inputs['length1'].to(self.device)
first_categories = inputs['categories1'].to(self.device)
first_days_posted = inputs['days_posted1'].to(self.device)
second_images = inputs['image2'].to(self.device)
second_text = inputs['text2']
second_length = inputs['length2'].to(self.device)
second_categories = inputs['categories2'].to(self.device)
second_days_posted = inputs['days_posted2'].to(self.device)
image_tensor_one = self.resnet.forward(first_images)
image_tensor_two = self.resnet.forward(second_images)
text_features1 = torch.Tensor()
text_features2 = torch.Tensor()
text_features1 = text_features1.to(self.device)
text_features2 = text_features2.to(self.device)
for text in first_text:
first_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(first_tokens)
feature_means = torch.mean(features, dim=1)
text_features1 = torch.cat([text_features1, feature_means])
for text in second_text:
second_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(second_tokens)
feature_means = torch.mean(features, dim=1)
text_features2 = torch.cat([text_features2, feature_means])
concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)
concat_tensor = torch.squeeze(concat_tensor)
concat_tensor = torch.cat((text_features1, text_features2,
concat_tensor), 1)
additional_features = torch.cat([torch.reshape(first_length, (-1, 1
)), torch.reshape(second_length, (-1, 1)), torch.reshape(
first_days_posted, (-1, 1)), torch.reshape(second_days_posted,
(-1, 1))], dim=1)
concat_tensor = torch.cat([concat_tensor, additional_features.float
(), first_categories.float(), second_categories.float()], dim=1)
x = concat_tensor
zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)
) if self.drops is None else zip(self.bns, self.fcs, self.drops)
for i, (bn, fc, drop) in enumerate(zipped_layers):
x = bn(x)
if drop is not None:
x = drop(x)
if i == len(self.bns) - 1:
x = fc(x)
else:
x = F.relu(fc(x))
return x
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device
=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print(
'Are you sure dropout_prob is supposed to be greater than 0.5?'
)
self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',
pretrained=True)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',
pretrained=True)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
def forward(self, inputs):
first_images = inputs['image1'].to(self.device)
first_text = inputs['text1']
first_length = inputs['length1'].to(self.device)
first_categories = inputs['categories1'].to(self.device)
first_days_posted = inputs['days_posted1'].to(self.device)
second_images = inputs['image2'].to(self.device)
second_text = inputs['text2']
second_length = inputs['length2'].to(self.device)
second_categories = inputs['categories2'].to(self.device)
second_days_posted = inputs['days_posted2'].to(self.device)
image_tensor_one = self.resnet.forward(first_images)
image_tensor_two = self.resnet.forward(second_images)
text_features1 = torch.Tensor()
text_features2 = torch.Tensor()
text_features1 = text_features1.to(self.device)
text_features2 = text_features2.to(self.device)
for text in first_text:
first_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(first_tokens)
feature_means = torch.mean(features, dim=1)
text_features1 = torch.cat([text_features1, feature_means])
for text in second_text:
second_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(second_tokens)
feature_means = torch.mean(features, dim=1)
text_features2 = torch.cat([text_features2, feature_means])
concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)
concat_tensor = torch.squeeze(concat_tensor)
concat_tensor = torch.cat((text_features1, text_features2,
concat_tensor), 1)
additional_features = torch.cat([torch.reshape(first_length, (-1, 1
)), torch.reshape(second_length, (-1, 1)), torch.reshape(
first_days_posted, (-1, 1)), torch.reshape(second_days_posted,
(-1, 1))], dim=1)
concat_tensor = torch.cat([concat_tensor, additional_features.float
(), first_categories.float(), second_categories.float()], dim=1)
x = concat_tensor
zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)
) if self.drops is None else zip(self.bns, self.fcs, self.drops)
for i, (bn, fc, drop) in enumerate(zipped_layers):
x = bn(x)
if drop is not None:
x = drop(x)
if i == len(self.bns) - 1:
x = fc(x)
else:
x = F.relu(fc(x))
return x
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print("Are you sure dropout_prob is supposed to be greater than 0.5?")
# Load Roberta
self.roberta = torch.hub.load(
"pytorch/fairseq", "roberta.base", pretrained=True
)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
# Load ResNet
resnet_full = torch.hub.load(
"pytorch/vision:v0.6.0", "resnet18", pretrained=True
)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
# for param in self.resnet.parameters():
# param.requires_grad = False
# self.resnet.eval()
# self.lstm = nn.LSTM(input_size=768, hidden_size=768 * 2)
# self.lstm.eval()
# Layers
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
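        # First-layer input size: two ResNet vectors (2*512), two mean-pooled RoBERTa
        # feature vectors (2*768), two 10-dim category vectors (2*10), plus a length
        # and a days-posted scalar for each item (2*2)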
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
def forward(self, inputs):
first_images = inputs["image1"].to(self.device)
first_text = inputs["text1"]
first_length = inputs["length1"].to(self.device)
first_categories = inputs["categories1"].to(self.device)
first_days_posted = inputs["days_posted1"].to(self.device)
second_images = inputs["image2"].to(self.device)
second_text = inputs["text2"]
second_length = inputs["length2"].to(self.device)
second_categories = inputs["categories2"].to(self.device)
second_days_posted = inputs["days_posted2"].to(self.device)
# Resnet
image_tensor_one = self.resnet.forward(first_images)
image_tensor_two = self.resnet.forward(second_images)
# Roberta
text_features1 = torch.Tensor()
text_features2 = torch.Tensor()
text_features1 = text_features1.to(self.device)
text_features2 = text_features2.to(self.device)
for text in first_text:
first_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(first_tokens)
feature_means = torch.mean(features, dim=1)
# features = torch.reshape(features, (-1, 1,768))
# output, (hn, cn) = self.lstm(features)
# cn = torch.reshape(cn, (1, 768 * 2))
text_features1 = torch.cat([text_features1, feature_means])
for text in second_text:
second_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(second_tokens)
# print("DIMENSION OF FEATURES ", features.shape)
feature_means = torch.mean(features, dim=1)
# features = torch.reshape(features, (-1, 1,768))
# output, (hn, cn) = self.lstm(features)
# cn = torch.reshape(cn, (1, 768 * 2))
# print("DIMENSION OF FEATURES ", features.shape)
text_features2 = torch.cat([text_features2, feature_means])
# Concatenated tensor
concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)
concat_tensor = torch.squeeze(concat_tensor)
concat_tensor = torch.cat((text_features1, text_features2, concat_tensor), 1)
additional_features = torch.cat(
[
torch.reshape(first_length, (-1, 1)),
torch.reshape(second_length, (-1, 1)),
torch.reshape(first_days_posted, (-1, 1)),
torch.reshape(second_days_posted, (-1, 1)),
],
dim=1,
)
concat_tensor = torch.cat(
[
concat_tensor,
additional_features.float(),
first_categories.float(),
second_categories.float(),
],
dim=1,
)
x = concat_tensor
zipped_layers = (
zip(self.bns, self.fcs, [None] * len(self.bns))
if self.drops is None
else zip(self.bns, self.fcs, self.drops)
)
for i, (bn, fc, drop) in enumerate(zipped_layers):
x = bn(x)
if drop is not None:
x = drop(x)
if i == len(self.bns) - 1:
x = fc(x)
else:
x = F.relu(fc(x))
return x
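# A minimal usage sketch (hypothetical shapes; the dict keys are the ones forward()
# reads, duplicated with suffixes 1 and 2 for the two items being compared):
# net = Net(layer_sizes=[256, 128, 2], dropout_prob=0.2, device="cpu")
# batch = {"image1": torch.randn(4, 3, 224, 224), "text1": ["some text"] * 4,
#          "length1": torch.ones(4), "categories1": torch.zeros(4, 10),
#          "days_posted1": torch.ones(4),
#          "image2": torch.randn(4, 3, 224, 224), "text2": ["other text"] * 4,
#          "length2": torch.ones(4), "categories2": torch.zeros(4, 10),
#          "days_posted2": torch.ones(4)}
# logits = net(batch)  # shape (4, 2) with the default layer_sizes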
| [
1,
2,
3,
4,
5
] |
315 | 3479276d4769518aa60dcd4e1bb41a8a1a7d6517 | <mask token>
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
    'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimensions, a value of -1 can be fed, e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and leave the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
    given bounding box contained in BOUNDING_BOXES_FILE. The images are
    resampled with spline interpolation
    of degree --order (default=3) and the segmentations are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
<mask token>
| <mask token>
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
    'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimensions, a value of -1 can be fed, e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and leave the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
| <mask token>
path_in = 'data/hecktor_nii/'
path_out = 'data/resampled/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
    'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimensions, a value of -1 can be fed, e.g. --resampling 1.0 1.0 -1 will resample the x and y axes at 1 mm/px and leave the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
| import os
from multiprocessing import Pool
import glob
import click
import logging
import pandas as pd
from src.resampling.resampling import Resampler
path_in = 'data/hecktor_nii/'
path_out = 'data/resampled/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
    'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimensions, a value of -1 can be fed, e.g. --resampling 1.0 1.0 -1 will resample the x and y axes at 1 mm/px and leave the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
| import os
from multiprocessing import Pool
import glob
import click
import logging
import pandas as pd
from src.resampling.resampling import Resampler
# Default paths
path_in = 'data/hecktor_nii/'
path_out = 'data/resampled/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores',
type=click.INT,
default=12,
help='The number of workers for parallelization.')
@click.option('--resampling',
type=click.FLOAT,
nargs=3,
default=(1, 1, 1),
              help='Expect 3 positive floats describing the output '
              'resolution of the resampling. To avoid resampling '
              'on one or more dimensions, a value of -1 can be fed, '
              'e.g. --resampling 1.0 1.0 -1 will resample the x '
              'and y axes at 1 mm/px and leave the z axis untouched.')
@click.option('--order',
type=click.INT,
nargs=1,
default=3,
help='The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores, resampling,
order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [
f for f in glob.glob(input_folder + '/**/*.nii.gz', recursive=True)
]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
| [
1,
2,
3,
4,
5
] |
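Row 315 is a click command that resamples NIFTI volumes inside per-patient bounding boxes using a multiprocessing pool. A hedged sketch of driving it from Python with click's CliRunner so the invocation stays testable; the module name resample_cli is an assumption (the row does not name its file), and the default data paths are only meaningful if that folder layout exists.

from click.testing import CliRunner

from resample_cli import main  # hypothetical module name for the row's script

runner = CliRunner()
result = runner.invoke(
    main,
    [
        'data/hecktor_nii/',   # INPUT_FOLDER (must exist, click checks exists=True)
        'data/resampled/',     # OUTPUT_FOLDER (created by main if missing)
        'data/bbox.csv',       # BOUNDING_BOXES_FILE with a PatientID column
        '--cores', '4',
        '--resampling', '1.0', '1.0', '-1',  # leave the z axis untouched
        '--order', '3',
    ],
)
print(result.exit_code)
print(result.output)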
316 | d5beff74e3746c77cbaf6b8233b822ed1a86701e | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('task', '0022_taskrecycle_create_date')]
operations = [migrations.RemoveField(model_name='ansibleextravars',
name='playbook'), migrations.RemoveField(model_name=
'ansibleplaybook', name='project'), migrations.DeleteModel(name=
'CrontabTask'), migrations.DeleteModel(name='TaskHistory'),
migrations.DeleteModel(name='TaskRecycle'), migrations.RemoveField(
model_name='taskscript', name='project'), migrations.DeleteModel(
name='AnsibleExtravars'), migrations.DeleteModel(name=
'AnsiblePlaybook'), migrations.DeleteModel(name='AnsibleProject'),
migrations.DeleteModel(name='TaskProject'), migrations.DeleteModel(
name='TaskScript')]
| from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('task', '0022_taskrecycle_create_date')]
operations = [migrations.RemoveField(model_name='ansibleextravars',
name='playbook'), migrations.RemoveField(model_name=
'ansibleplaybook', name='project'), migrations.DeleteModel(name=
'CrontabTask'), migrations.DeleteModel(name='TaskHistory'),
migrations.DeleteModel(name='TaskRecycle'), migrations.RemoveField(
model_name='taskscript', name='project'), migrations.DeleteModel(
name='AnsibleExtravars'), migrations.DeleteModel(name=
'AnsiblePlaybook'), migrations.DeleteModel(name='AnsibleProject'),
migrations.DeleteModel(name='TaskProject'), migrations.DeleteModel(
name='TaskScript')]
| # Generated by Django 2.2.2 on 2021-01-23 04:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('task', '0022_taskrecycle_create_date'),
]
operations = [
migrations.RemoveField(
model_name='ansibleextravars',
name='playbook',
),
migrations.RemoveField(
model_name='ansibleplaybook',
name='project',
),
migrations.DeleteModel(
name='CrontabTask',
),
migrations.DeleteModel(
name='TaskHistory',
),
migrations.DeleteModel(
name='TaskRecycle',
),
migrations.RemoveField(
model_name='taskscript',
name='project',
),
migrations.DeleteModel(
name='AnsibleExtravars',
),
migrations.DeleteModel(
name='AnsiblePlaybook',
),
migrations.DeleteModel(
name='AnsibleProject',
),
migrations.DeleteModel(
name='TaskProject',
),
migrations.DeleteModel(
name='TaskScript',
),
]
| [
0,
1,
2,
3,
4
] |
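Row 316 is an auto-generated Django migration that removes two foreign keys and then deletes the task app's remaining models. A small sketch of applying or rolling it back from Python instead of manage.py, using django.core.management.call_command; the settings module name is an assumption.

import os

import django
from django.core.management import call_command

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')  # hypothetical settings path
django.setup()

# Apply all pending migrations for the task app, including this one.
call_command('migrate', 'task')

# To undo the deletions (only works while the deleted models still exist in code):
# call_command('migrate', 'task', '0022_taskrecycle_create_date')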
317 | 77763f501c6776969d2594f987e5d7ab7d4377fb | <mask token>
@attach_common
class TalkBotThread(QThread):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
<mask token>
<mask token>
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
<mask token>
| <mask token>
@attach_common
class TalkBotThread(QThread):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
<mask token>
def output_bot_type(self):
parent = self.parent
msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot
), ' bot: {}'.format(parent.bot.__class__.__name__
), ' lister: {}'.format(parent.lister.__class__.__name__
), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
<mask token>
| <mask token>
@attach_common
class TalkBotThread(QThread):
parent = None
bot = None
lister = None
text_target = ''
tokens_start_of_text = []
tokens_of_text = []
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
def update_bot_type(self):
parent = self.parent
parent.is_bot_ready = False
parent.show_bot_not_ready_msg()
self.type_bot = parent.type_bot
self.select_token_list()
self.select_bot()
parent.bot = self.bot
parent.lister = self.lister
parent.is_bot_ready = True
parent.show_bot_ready_msg()
self.output_bot_type()
def output_bot_type(self):
parent = self.parent
msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot
), ' bot: {}'.format(parent.bot.__class__.__name__
), ' lister: {}'.format(parent.lister.__class__.__name__
), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
<mask token>
| import time
from PyQt5.QtCore import QThread
from common import attach_common
from database_downloader import DatabaseDownload
from ai_list_memorize import MemorizeList
from ai_list_morpheme import MorphemeList
from ai_list_ngram import NgramList
from ai_list_none import NoneList
from ai_bot_memorize import MemorizeBot
from ai_bot_morpheme import MorphemeBot
from ai_bot_ngram import NgramBot
from ai_bot_none import NoneBot
@attach_common
class TalkBotThread(QThread):
parent = None
bot = None
lister = None
text_target = ''
tokens_start_of_text = []
tokens_of_text = []
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
def update_bot_type(self):
parent = self.parent
parent.is_bot_ready = False
parent.show_bot_not_ready_msg()
self.type_bot = parent.type_bot
self.select_token_list()
self.select_bot()
parent.bot = self.bot
parent.lister = self.lister
parent.is_bot_ready = True
parent.show_bot_ready_msg()
self.output_bot_type()
def output_bot_type(self):
parent = self.parent
msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot
), ' bot: {}'.format(parent.bot.__class__.__name__
), ' lister: {}'.format(parent.lister.__class__.__name__
), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
if __name__ == '__main__':
from gui_talkbot import MainWindow
TestClass = MainWindow
import sys
from PyQt5.QtWidgets import QApplication
qapp = QApplication(sys.argv)
window = TestClass()
window.show()
code = qapp.exec()
sys.exit(code)
| import time
from PyQt5.QtCore import (
QThread,
)
from common import attach_common
from database_downloader import DatabaseDownload
from ai_list_memorize import MemorizeList
from ai_list_morpheme import MorphemeList
from ai_list_ngram import NgramList
from ai_list_none import NoneList
from ai_bot_memorize import MemorizeBot
from ai_bot_morpheme import MorphemeBot
from ai_bot_ngram import NgramBot
from ai_bot_none import NoneBot
@attach_common
class TalkBotThread(QThread):
parent = None
bot = None
lister = None
text_target = ''
tokens_start_of_text = []
tokens_of_text = []
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
def update_bot_type(self):
parent = self.parent
parent.is_bot_ready = False
parent.show_bot_not_ready_msg()
self.type_bot = parent.type_bot
self.select_token_list()
self.select_bot()
parent.bot = self.bot
parent.lister = self.lister
parent.is_bot_ready = True
parent.show_bot_ready_msg()
self.output_bot_type()
def output_bot_type(self):
parent = self.parent
msgs = (
'TalkBot:',
' id: {}'.format(parent.type_bot),
' bot: {}'.format(parent.bot.__class__.__name__),
' lister: {}'.format(parent.lister.__class__.__name__),
' tokens: {}'.format(str(parent.lister.get_token_list())[:60]),
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(
num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(
num_of_gram=3,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(
num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(
num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
if self.type_bot == 1:
self.bot = NgramBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
if self.type_bot == 2:
self.bot = MorphemeBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
if self.type_bot == 3:
self.bot = MemorizeBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
if __name__ == "__main__":
from gui_talkbot import MainWindow
TestClass = MainWindow
import sys
from PyQt5.QtWidgets import QApplication
qapp = QApplication(sys.argv)
window = TestClass()
window.show()
code = qapp.exec()
sys.exit(code)
| [
6,
7,
9,
11,
12
] |
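Row 317's TalkBotThread never builds its own GUI: it polls a parent window for type_bot and is_app_close and calls back into it (show_bot_ready_msg, update_bot_msg_to_proper_latest_status, and so on). A minimal sketch of wiring the thread to such a window; the module name talkbot_thread is an assumption, and MainWindow is presumed to define the attributes and callbacks the thread touches, as its __main__ block suggests.

import sys

from PyQt5.QtWidgets import QApplication

from gui_talkbot import MainWindow        # imported the same way as in the row's __main__ block
from talkbot_thread import TalkBotThread  # hypothetical module name for the row's file

app = QApplication(sys.argv)
window = MainWindow()

thread = TalkBotThread()
thread.parent = window      # the thread reads type_bot / is_app_close from here
window.bot_thread = thread  # keep a reference so the QThread is not garbage-collected
thread.start()              # executes TalkBotThread.run() on a background thread

window.show()
sys.exit(app.exec())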
318 | d6a760774b45454c959c2932d7b28deee7f81872 | <mask token>
| <mask token>
def submissions_to_user_submission_activities_dfs(submissions_df: DataFrame
) ->Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert 'submissionHistory' in submissions_df.columns
assert 'id' in submissions_df.columns
assert 'courseId' in submissions_df.columns
assert 'courseWorkId' in submissions_df.columns
submissions_df['submissionHistory'] = submissions_df['submissionHistory'
].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',
'courseWorkId']].agg('-'.join, axis=1)
submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',
'submissionHistory', 'AssignmentIdentifier', 'CreateDate',
'LastModifiedDate']]
history_df = submissions_df.explode(column='submissionHistory')
history_df = history_df['submissionHistory'].apply(Series).merge(history_df
, left_index=True, right_index=True, how='outer')
history_df.drop(columns=['submissionHistory'], inplace=True)
user_submission_df = concat([history_df, history_df['stateHistory'].
apply(Series)], axis=1)
user_submission_df.dropna(subset=['stateHistory'], inplace=True)
user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[
'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,
axis=1)
user_submission_df = user_submission_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',
'actorUserId', 'CreateDate', 'LastModifiedDate']]
user_submission_df = user_submission_df.rename(columns={
'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',
'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}
)
user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE
if 'gradeHistory' in history_df:
grade_history_df = concat([history_df, history_df['gradeHistory'].
apply(Series)], axis=1)
grade_history_df.dropna(subset=['gradeHistory'], inplace=True)
grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[
'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.
join, axis=1)
grade_history_df = grade_history_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]
grade_history_df = grade_history_df.rename(columns={
'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':
'ActivityStatus', 'courseId': 'LMSSectionIdentifier',
'actorUserId': 'LMSUserIdentifier'})
grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE
user_submission_df = user_submission_df.append(grade_history_df)
user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],
inplace=True)
user_submission_df['ActivityTimeInMinutes'] = ''
user_submission_df['Content'] = ''
user_submission_df['SourceSystem'] = SOURCE_SYSTEM
user_submission_df['SourceCreateDate'] = ''
user_submission_df['SourceLastModifiedDate'] = ''
result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([
'LMSSectionIdentifier'])))
return result
| <mask token>
ACTIVITY_TYPE_STATE = 'Submission State Change'
ACTIVITY_TYPE_GRADE = 'Submission Grade Change'
def submissions_to_user_submission_activities_dfs(submissions_df: DataFrame
) ->Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert 'submissionHistory' in submissions_df.columns
assert 'id' in submissions_df.columns
assert 'courseId' in submissions_df.columns
assert 'courseWorkId' in submissions_df.columns
submissions_df['submissionHistory'] = submissions_df['submissionHistory'
].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',
'courseWorkId']].agg('-'.join, axis=1)
submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',
'submissionHistory', 'AssignmentIdentifier', 'CreateDate',
'LastModifiedDate']]
history_df = submissions_df.explode(column='submissionHistory')
history_df = history_df['submissionHistory'].apply(Series).merge(history_df
, left_index=True, right_index=True, how='outer')
history_df.drop(columns=['submissionHistory'], inplace=True)
user_submission_df = concat([history_df, history_df['stateHistory'].
apply(Series)], axis=1)
user_submission_df.dropna(subset=['stateHistory'], inplace=True)
user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[
'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,
axis=1)
user_submission_df = user_submission_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',
'actorUserId', 'CreateDate', 'LastModifiedDate']]
user_submission_df = user_submission_df.rename(columns={
'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',
'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}
)
user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE
if 'gradeHistory' in history_df:
grade_history_df = concat([history_df, history_df['gradeHistory'].
apply(Series)], axis=1)
grade_history_df.dropna(subset=['gradeHistory'], inplace=True)
grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[
'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.
join, axis=1)
grade_history_df = grade_history_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]
grade_history_df = grade_history_df.rename(columns={
'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':
'ActivityStatus', 'courseId': 'LMSSectionIdentifier',
'actorUserId': 'LMSUserIdentifier'})
grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE
user_submission_df = user_submission_df.append(grade_history_df)
user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],
inplace=True)
user_submission_df['ActivityTimeInMinutes'] = ''
user_submission_df['Content'] = ''
user_submission_df['SourceSystem'] = SOURCE_SYSTEM
user_submission_df['SourceCreateDate'] = ''
user_submission_df['SourceLastModifiedDate'] = ''
result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([
'LMSSectionIdentifier'])))
return result
| import json
from typing import Dict
from pandas import DataFrame, concat, Series
from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM
ACTIVITY_TYPE_STATE = 'Submission State Change'
ACTIVITY_TYPE_GRADE = 'Submission Grade Change'
def submissions_to_user_submission_activities_dfs(submissions_df: DataFrame
) ->Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert 'submissionHistory' in submissions_df.columns
assert 'id' in submissions_df.columns
assert 'courseId' in submissions_df.columns
assert 'courseWorkId' in submissions_df.columns
submissions_df['submissionHistory'] = submissions_df['submissionHistory'
].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',
'courseWorkId']].agg('-'.join, axis=1)
submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',
'submissionHistory', 'AssignmentIdentifier', 'CreateDate',
'LastModifiedDate']]
history_df = submissions_df.explode(column='submissionHistory')
history_df = history_df['submissionHistory'].apply(Series).merge(history_df
, left_index=True, right_index=True, how='outer')
history_df.drop(columns=['submissionHistory'], inplace=True)
user_submission_df = concat([history_df, history_df['stateHistory'].
apply(Series)], axis=1)
user_submission_df.dropna(subset=['stateHistory'], inplace=True)
user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[
'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,
axis=1)
user_submission_df = user_submission_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',
'actorUserId', 'CreateDate', 'LastModifiedDate']]
user_submission_df = user_submission_df.rename(columns={
'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',
'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}
)
user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE
if 'gradeHistory' in history_df:
grade_history_df = concat([history_df, history_df['gradeHistory'].
apply(Series)], axis=1)
grade_history_df.dropna(subset=['gradeHistory'], inplace=True)
grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[
'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.
join, axis=1)
grade_history_df = grade_history_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]
grade_history_df = grade_history_df.rename(columns={
'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':
'ActivityStatus', 'courseId': 'LMSSectionIdentifier',
'actorUserId': 'LMSUserIdentifier'})
grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE
user_submission_df = user_submission_df.append(grade_history_df)
user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],
inplace=True)
user_submission_df['ActivityTimeInMinutes'] = ''
user_submission_df['Content'] = ''
user_submission_df['SourceSystem'] = SOURCE_SYSTEM
user_submission_df['SourceCreateDate'] = ''
user_submission_df['SourceLastModifiedDate'] = ''
result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([
'LMSSectionIdentifier'])))
return result
| # SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import json
from typing import Dict
from pandas import DataFrame, concat, Series
from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM
ACTIVITY_TYPE_STATE = "Submission State Change"
ACTIVITY_TYPE_GRADE = "Submission Grade Change"
def submissions_to_user_submission_activities_dfs(
submissions_df: DataFrame,
) -> Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert "submissionHistory" in submissions_df.columns
assert "id" in submissions_df.columns
assert "courseId" in submissions_df.columns
assert "courseWorkId" in submissions_df.columns
# convert json-like submissionHistory string to list of dicts
submissions_df["submissionHistory"] = submissions_df["submissionHistory"].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df["AssignmentIdentifier"] = submissions_df[
["courseId", "courseWorkId"]
].agg("-".join, axis=1)
submissions_df = submissions_df[["id", "courseId", "courseWorkId", "submissionHistory", "AssignmentIdentifier", "CreateDate", "LastModifiedDate"]]
# explode submissionHistory lists into rows with other columns duplicated
history_df = submissions_df.explode(column="submissionHistory") # type: ignore
# expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns
history_df = history_df["submissionHistory"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')
history_df.drop(columns=["submissionHistory"], inplace=True)
# expand stateHistory (can assume exists, should always have at least one "CREATED" entry)
user_submission_df = concat([history_df, history_df["stateHistory"].apply(Series)], axis=1)
user_submission_df.dropna(subset=["stateHistory"], inplace=True)
# enrich stateHistory
user_submission_df["SourceSystemIdentifier"] = "S-" + user_submission_df[
["courseId", "courseWorkId", "id", "stateTimestamp"]
].agg("-".join, axis=1)
user_submission_df = user_submission_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"stateTimestamp",
"state",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
user_submission_df = user_submission_df.rename(
columns={
"stateTimestamp": "ActivityDateTime",
"state": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
user_submission_df["ActivityType"] = ACTIVITY_TYPE_STATE
# expand gradeHistory if exists
if "gradeHistory" in history_df:
grade_history_df = concat([history_df, history_df["gradeHistory"].apply(Series)], axis=1)
grade_history_df.dropna(subset=["gradeHistory"], inplace=True)
# enrich gradeHistory
grade_history_df["SourceSystemIdentifier"] = "G-" + grade_history_df[
["courseId", "courseWorkId", "id", "gradeTimestamp"]
].agg("-".join, axis=1)
grade_history_df = grade_history_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"gradeTimestamp",
"gradeChangeType",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
grade_history_df = grade_history_df.rename(
columns={
"gradeTimestamp": "ActivityDateTime",
"gradeChangeType": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
grade_history_df["ActivityType"] = ACTIVITY_TYPE_GRADE
# combine with stateHistory
user_submission_df = user_submission_df.append(grade_history_df)
# teacher actions can show up on student histories and vice-versa
user_submission_df.drop_duplicates(subset=["SourceSystemIdentifier"], inplace=True)
# finish with common columns
user_submission_df["ActivityTimeInMinutes"] = ""
user_submission_df["Content"] = ""
user_submission_df["SourceSystem"] = SOURCE_SYSTEM
user_submission_df["SourceCreateDate"] = "" # No create date available from API
user_submission_df["SourceLastModifiedDate"] = "" # No modified date available from API
# group by section id as a Dict of DataFrames
result: Dict[str, DataFrame] = dict(
tuple(user_submission_df.groupby(["LMSSectionIdentifier"]))
)
return result
| [
0,
1,
2,
3,
4
] |
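The mapper in row 318 only needs a Submission DataFrame whose submissionHistory column is a json-like string (single quotes) of history entries, plus the id/courseId/courseWorkId/CreateDate/LastModifiedDate columns its asserts and joins require. A hedged sketch of calling it on a tiny hand-built frame; the import path is an assumption, the values are invented, and only stateHistory entries are included so the optional gradeHistory branch is skipped.

from pandas import DataFrame

# Module path is an assumption; only the constants import is shown in the row itself.
from edfi_google_classroom_extractor.mapping.user_submission_activities import (
    submissions_to_user_submission_activities_dfs,
)

history = "[{'stateHistory': {'state': 'CREATED', 'stateTimestamp': '2021-01-01T10:00:00Z', 'actorUserId': '111'}}]"

submissions_df = DataFrame(
    {
        'id': ['sub-1', 'sub-2'],
        'courseId': ['course-9', 'course-9'],
        'courseWorkId': ['work-1', 'work-2'],
        'submissionHistory': [history, history],
        'CreateDate': ['2021-01-02', '2021-01-02'],
        'LastModifiedDate': ['2021-01-03', '2021-01-03'],
    }
)

result = submissions_to_user_submission_activities_dfs(submissions_df)
for section_id, activities in result.items():
    # One DataFrame per LMSSectionIdentifier, here a single 'course-9' group.
    print(section_id, len(activities), list(activities['ActivityType'].unique()))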
319 | bd3b1263d7d657fe2edd3c7198f63821a3d1d1e5 | <mask token>
class RandomIPv4Waiter(WaiterInterface):
<mask token>
<mask token>
def generator(self):
while self.limit_generate != 0:
randomIPv4 = generateRandomIPv4()
yield randomIPv4, self.ports
if self.limit_generate != -1:
self.limit_generate -= 1
<mask token>
| <mask token>
class RandomIPv4Waiter(WaiterInterface):
"""
    HostPortWaiter which generates random IPv4 addresses
"""
def __init__(self, options):
self.ports = options['ports']
self.limit_generate = options.get('limit_generate', -1)
def generator(self):
while self.limit_generate != 0:
randomIPv4 = generateRandomIPv4()
yield randomIPv4, self.ports
if self.limit_generate != -1:
self.limit_generate -= 1
<mask token>
| <mask token>
class RandomIPv4Waiter(WaiterInterface):
"""
    HostPortWaiter which generates random IPv4 addresses
"""
def __init__(self, options):
self.ports = options['ports']
self.limit_generate = options.get('limit_generate', -1)
def generator(self):
while self.limit_generate != 0:
randomIPv4 = generateRandomIPv4()
yield randomIPv4, self.ports
if self.limit_generate != -1:
self.limit_generate -= 1
def generateRandomIPv4():
"""
    Helper method to generate a random IPv4 address
"""
return '.'.join(map(str, (random.randint(0, 255) for _ in range(4))))
| import random
from . import WaiterInterface
class RandomIPv4Waiter(WaiterInterface):
"""
    HostPortWaiter which generates random IPv4 addresses
"""
def __init__(self, options):
self.ports = options['ports']
self.limit_generate = options.get('limit_generate', -1)
def generator(self):
while self.limit_generate != 0:
randomIPv4 = generateRandomIPv4()
yield randomIPv4, self.ports
if self.limit_generate != -1:
self.limit_generate -= 1
def generateRandomIPv4():
"""
    Helper method to generate a random IPv4 address
"""
return '.'.join(map(str, (random.randint(0, 255) for _ in range(4))))
| import random
from . import WaiterInterface
class RandomIPv4Waiter(WaiterInterface):
"""
    HostPortWaiter which generates random IPv4 addresses
"""
def __init__(self, options):
self.ports = options['ports']
self.limit_generate = options.get('limit_generate', -1)
def generator(self):
while self.limit_generate != 0:
randomIPv4 = generateRandomIPv4()
yield (randomIPv4, self.ports)
if self.limit_generate != -1:
self.limit_generate -= 1
def generateRandomIPv4():
"""
    Helper method to generate a random IPv4 address
"""
return ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
| [
2,
4,
5,
6,
7
] |
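A short usage sketch for row 319's waiter: the options dict supplies the port list and an optional limit_generate, and generator() yields (host, ports) pairs until the limit is exhausted (-1, the default, never stops). The module path is an assumption, since the row only shows a relative import of WaiterInterface.

from waiters.random_ipv4 import RandomIPv4Waiter  # hypothetical module path

waiter = RandomIPv4Waiter({'ports': [22, 80, 443], 'limit_generate': 3})

# Prints three random IPv4 addresses, each paired with the same port list.
for host, ports in waiter.generator():
    print(host, ports)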
320 | 5830a6001d7db50002c44aede6fb10938fa01dd1 | import nltk
class Text(object):
def __init__(self, text):
self.text = text
self.words = nltk.word_tokenize(text)
self.sents = nltk.sent_tokenize(text)
class Passage(Text):
def __init__(self, title, story, questions):
Text.__init__(self,story)
self.title = title
self.questions = questions
def display(self):
print self.title + '\n'
print self.text + '\n\n***\n'
for q in self.questions:
print '\n' + q.text + ' (' + q.qtype + ')'
for a in q.answers:
print '\t' + a.text
print '\n\tCorrect Answer: ' + q.correct_answer.text
class Question(Text):
def __init__(self, qtext, qtype, answers, correct_answer):
Text.__init__(self,qtext)
self.qtype = qtype
self.answers = answers
self.correct_answer = correct_answer
class Answer(Text):
def __init__(self, atext):
Text.__init__(self,atext) | null | null | null | null | [
0
] |
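Row 320's classes wrap nltk tokenization around a reading-comprehension passage and its questions. A small sketch of building and displaying one; it assumes the classes above are in scope, that nltk's punkt models are installed (nltk.download('punkt')), and, because display() uses Python 2 print statements, that it runs under Python 2.

answers = [Answer('Paris'), Answer('Lyon')]
question = Question('What is the capital of France?', 'factoid', answers, answers[0])
passage = Passage('Capitals',
                  'Paris is the capital of France. Lyon is another large city.',
                  [question])
passage.display()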
321 | d2c31d9c3cc66b43966cfd852582539d4e4bea17 | <mask token>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
<mask token>
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
<mask token>
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
<mask token>
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
<mask token>
| <mask token>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
<mask token>
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
<mask token>
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()
enter_download_cas_1 = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'
).click()
return 0
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 = []
trlist_table_on = web.find_element_by_class_name('is-scrolling-none')
trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
ls.append(trlist_td[col])
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
if ls[0] == cause:
if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:
if j == 0:
ls_2.append(ls[2])
elif j == 1:
ls_2.append(ls[3])
return ls_2
def search_data_down(cause, clinique, path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique, path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique, path)
print(clinique + '--' + cause + '已下载完成!')
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
def search_data_on(cause, city, area, clinique, excel_time, path):
"""核心处理流程"""
time.sleep(2)
number = pagination()
"""判断待处理下标是否为0"""
if number == 0:
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + '--' + cause + ' 下载失败!')
reset()
else:
"""判断待处理里是否小于20条数据"""
if 0 < number <= 20:
"""如果小于进行查找"""
pending__search_data = search_data(cause, city, area, clinique,
excel_time)
"""判断有没有找到"""
if len(pending__search_data) == 0:
"""没找到"""
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city,
area, clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city,
area, clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause,
city, area, clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + '--' +
cause + ' 下载失败!')
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""找到了"""
pending__search_data[0].click()
search_data_down(cause, clinique, path)
<mask token>
| <mask token>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
def confirm_cause(cause):
"""选则症状"""
enter_symptom = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'
).click()
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')
enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')
for i in range(len(enter_on_symptom)):
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_symptom = enter_on.find_elements_by_tag_name('li')
if enter_on_symptom[i].text == cause:
enter_on_symptom[i].click()
break
return 0
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
def pending():
"""待处理"""
enter_pending = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'
).click()
return 0
<mask token>
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()
enter_download_cas_1 = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'
).click()
return 0
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 = []
trlist_table_on = web.find_element_by_class_name('is-scrolling-none')
trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
ls.append(trlist_td[col])
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
if ls[0] == cause:
if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:
if j == 0:
ls_2.append(ls[2])
elif j == 1:
ls_2.append(ls[3])
return ls_2
def search_data_down(cause, clinique, path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique, path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique, path)
print(clinique + '--' + cause + '已下载完成!')
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
def search_data_on(cause, city, area, clinique, excel_time, path):
"""核心处理流程"""
time.sleep(2)
number = pagination()
"""判断待处理下标是否为0"""
if number == 0:
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + '--' + cause + ' 下载失败!')
reset()
else:
"""判断待处理里是否小于20条数据"""
if 0 < number <= 20:
"""如果小于进行查找"""
pending__search_data = search_data(cause, city, area, clinique,
excel_time)
"""判断有没有找到"""
if len(pending__search_data) == 0:
"""没找到"""
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city,
area, clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city,
area, clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause,
city, area, clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + '--' +
cause + ' 下载失败!')
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""找到了"""
pending__search_data[0].click()
search_data_down(cause, clinique, path)
<mask token>
| <mask token>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
def confirm_cause(cause):
"""选则症状"""
enter_symptom = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'
).click()
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')
enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')
for i in range(len(enter_on_symptom)):
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_symptom = enter_on.find_elements_by_tag_name('li')
if enter_on_symptom[i].text == cause:
enter_on_symptom[i].click()
break
return 0
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
def pending():
"""待处理"""
enter_pending = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'
).click()
return 0
def accomplish():
"""已完成"""
enter__accomplish = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'
).click()
return 0
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()
enter_download_cas_1 = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'
).click()
return 0
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 = []
trlist_table_on = web.find_element_by_class_name('is-scrolling-none')
trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
ls.append(trlist_td[col])
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
if ls[0] == cause:
if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:
if j == 0:
ls_2.append(ls[2])
elif j == 1:
ls_2.append(ls[3])
return ls_2
def search_data_down(cause, clinique, path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique, path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique, path)
print(clinique + '--' + cause + '已下载完成!')
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
def search_data_on(cause, city, area, clinique, excel_time, path):
"""核心处理流程"""
time.sleep(2)
number = pagination()
"""判断待处理下标是否为0"""
if number == 0:
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause, city, area,
clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + '--' + cause + ' 下载失败!')
reset()
else:
"""判断待处理里是否小于20条数据"""
if 0 < number <= 20:
"""如果小于进行查找"""
pending__search_data = search_data(cause, city, area, clinique,
excel_time)
"""判断有没有找到"""
if len(pending__search_data) == 0:
"""没找到"""
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + '--' + cause + ' 下载失败!')
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city,
area, clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city,
area, clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause,
city, area, clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + '--' +
cause + ' 下载失败!')
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + '--' + cause +
' 下载失败!')
reset()
else:
"""找到了"""
pending__search_data[0].click()
search_data_down(cause, clinique, path)
if __name__ == '__main__':
download_revers = []
"""初始化"""
url = input('请输入文件的绝对路径:')
path = 'D:\\林钟\\下载'
Chrome = 'D:\\PYthon\\webdrivers\\chromedriver.exe'
time1 = time.time()
"""登录页面"""
deconnexion(Chrome)
print('已登陆')
menu_lien()
print('已跳转')
"""读取表格"""
excel = vb.load_workbook(url)
sheet = excel['1-每日监控告警明细']
subscript = 1
for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):
for cell in i:
if cell.value in ['3', 3, '高']:
"""初始化数值"""
cause = sheet['I' + str(cell.row)].value
city = sheet['E' + str(cell.row)].value
area = sheet['F' + str(cell.row)].value
clinique = sheet['G' + str(cell.row)].value
excel_time = sheet['D' + str(cell.row)].value
"""搜索"""
try:
confirm_area(city, area)
confirm_tiem(excel_time)
confirm_cause(cause)
search()
except:
try:
web.refresh()
print('刷新成功')
confirm_area(city, area)
confirm_tiem(excel_time)
confirm_cause(cause)
search()
except Exception as e:
print('刷新失败!', format(e))
"""查找数据"""
search_data_on(cause, city, area, clinique, excel_time, path)
"""打印最终结果"""
print('')
print('<-----------下面是下载失败的----------->')
for i in download_revers:
print(i)
print('已全部下载完毕')
time2 = time.time()
print('用时:{:.2f} 秒'.format(time2 - time1))
| from selenium import webdriver
import time
import datetime
import os
import openpyxl as vb
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome) #公司电脑
# web = webdriver.Chrome(r'D:\python\webdrivers\chromedriver.exe') #自己的电脑
web.maximize_window()
web.implicitly_wait(10) # 最大运行时间不超过10秒
web.get('http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain')
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input') # 获得账号和密码
password = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys("cdc1234cdc")
enter = web.find_element_by_xpath("/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button")
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
"/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article")
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input").click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_AnHui_on =enter_AnHui_on_on[0].find_element_by_class_name("el-scrollbar__view")
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name("el-scrollbar__view")
enter_AnHui = enter_AnHui_on.find_element_by_tag_name("li")
enter_AnHui_down =enter_AnHui.find_element_by_class_name("el-radio__input")
web.execute_script("arguments[0].click();", enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
try:
enter_city_on_on =enter_on.find_elements_by_class_name("el-scrollbar")
enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap")
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap")
enter_city = enter_city_on.find_elements_by_tag_name("li")
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
enter_city_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap")
enter_city = enter_city_on.find_elements_by_tag_name("li")
if enter_city[i].text ==city:
enter_city_down = enter_city[i].find_element_by_class_name("el-radio__input")
web.execute_script("arguments[0].click();", enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
try:
enter_area_on_on =enter_on.find_elements_by_class_name("el-scrollbar")
enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap")
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap")
enter_area = enter_area_on.find_elements_by_tag_name("li")
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
enter_area_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap")
enter_area = enter_area_on.find_elements_by_tag_name("li")
if enter_area[i].text ==area:
enter_area_down = enter_area[i].find_element_by_class_name("el-radio__input")
web.execute_script("arguments[0].click();", enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time =confirm_time_on(time)
enter_time = web.find_elements_by_class_name("el-range-input")
for i in enter_time:
i.send_keys(time)
return 0
def confirm_cause(cause):
"""选则症状"""
enter_symptom = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input").click()
enter_on = web.find_element_by_class_name("is-multiple")
enter_on_1 =enter_on.find_element_by_class_name("el-scrollbar")
enter_on_symptom = enter_on_1.find_elements_by_tag_name("li")
for i in range(len(enter_on_symptom)):
enter_on = web.find_element_by_class_name("is-multiple")
enter_on_symptom = enter_on.find_elements_by_tag_name("li")
if enter_on_symptom[i].text == cause:
enter_on_symptom[i].click()
break
return 0
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/button[1]").click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath("/html/body/div/section/main/div/div[3]/button[2]").click()
return 0
def pending():
"""待处理"""
enter_pending = web.find_element_by_xpath(
"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]").click()
return 0
def accomplish():
"""已完成"""
enter__accomplish = web.find_element_by_xpath(
"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]").click()
return 0
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
"/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]").click()
enter_download_cas_1 = web.find_element_by_xpath(
"/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]").click()
return 0
def resetting_excel(cause, clinique, path="D:\林钟\下载"):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + "\\" + "外呼结果导出表格.xlsx"
if cause =="发热伴畏寒|寒战":
cause ="发热伴畏寒寒战'"
if cause == "畏寒|寒战":
cause = "畏寒寒战'"
dst = path + "\\" + clinique + "--" + cause + ".xlsx"
os.rename(src, dst)
except (FileExistsError):
files = os.listdir(path)
src = path + "\\" + "外呼结果导出表格.xlsx"
if cause =="发热伴畏寒|寒战":
cause ="发热伴畏寒寒战'"
if cause == "畏寒|寒战":
cause = "畏寒寒战'"
dst = path + "\\" + clinique + "--" + cause + ".xlsx"
os.remove(dst)
os.rename(src, dst)
return 0
def pagination(): #获取当前界面一共有多少条数据
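    # read the pagination summary label and return the total record count as an int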
pagination__total = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[5]/span[1]")
a = int(pagination__total.text[2:-2])
return a
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 =[] #存储最终点击的元素,如果为空则说明没找到。
trlist_table_on = web.find_element_by_class_name("is-scrolling-none")
trlist_table = trlist_table_on.find_element_by_class_name("el-table__body")
trlist_tr = trlist_table.find_elements_by_tag_name("tr")
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name("el-table__body")
trlist_tr = trlist_table.find_elements_by_tag_name("tr")
trlist_td = trlist_tr[row].find_elements_by_tag_name("td")
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
ls.append((trlist_td[col]))
trlist_td = trlist_tr[row].find_elements_by_tag_name("td")
if ls[0] == cause:
if ls[1] == ("安徽省/" + city + "/" + area + "/" + clinique):
if j == 0:
# ls[2].click()
ls_2.append(ls[2])
elif j == 1:
# ls[3].click()
ls_2.append(ls[3])
return ls_2
def search_data_down(cause,clinique,path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique,path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique,path)
print(clinique + "--" + cause + "已下载完成!")
def tourne_page():
enter_tourne_page =web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[5]/button[2]/i").click()
return ""
def search_data_on(cause, city, area, clinique, excel_time,path):
"""核心处理流程"""
time.sleep(2)
number = pagination()
"""判断待处理下标是否为0"""
if number == 0 :
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause,clinique,path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause,clinique,path)
else:
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""判断待处理里是否小于20条数据"""
if 0 < number <= 20:
"""如果小于进行查找"""
pending__search_data = search_data(cause, city, area, clinique, excel_time)
"""判断有没有找到"""
if len(pending__search_data) == 0:
"""没找到"""
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""找到了"""
pending__search_data[0].click()
search_data_down(cause,clinique,path)
# elif 20< number <= 40:
# pending__search_data = search_data(cause, city, area, clinique, excel_time)
# """判断有没有找到"""
# if len(pending__search_data) == 0:
if __name__ == "__main__":
download_revers = []
"""初始化"""
url = input("请输入文件的绝对路径:") #文件路径
path = "D:\林钟\下载" # 下载路径
Chrome = r'D:\PYthon\webdrivers\chromedriver.exe' #驱动路径
time1 = time.time()
"""登录页面"""
deconnexion(Chrome)
print("已登陆")
menu_lien()
print("已跳转")
"""读取表格"""
excel = vb.load_workbook(url)
sheet = excel["1-每日监控告警明细"]
subscript = 1
for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):
for cell in i:
if cell.value in ["3", 3, "高"]:
"""初始化数值"""
cause = sheet["I" + str(cell.row)].value
city = sheet["E" + str(cell.row)].value
area = sheet["F" + str(cell.row)].value
clinique = sheet["G" + str(cell.row)].value
excel_time = sheet["D" + str(cell.row)].value
"""搜索"""
try:
confirm_area(city, area)
confirm_tiem(excel_time)
confirm_cause(cause)
search()
except:
try:
web.refresh() # 刷新方法 refresh
print('刷新成功')
confirm_area(city, area)
confirm_tiem(excel_time)
confirm_cause(cause)
search()
except Exception as e:
print("刷新失败!", format(e))
"""查找数据"""
search_data_on(cause, city, area, clinique, excel_time, path)
"""打印最终结果"""
print("")
print("<-----------下面是下载失败的----------->")
for i in download_revers:
print(i)
print("已全部下载完毕")
time2 = time.time()
print("用时:{:.2f} 秒".format(time2-time1)) | [
10,
14,
16,
18,
20
] |
322 | 213ab22a269abc8180524462a8966e5d929ef7d1 | <mask token>
| <mask token>
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = '{0}_{1}.markdown'
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding='utf-8') as f:
return markdown.markdown(f.read())
| <mask token>
def get_json_file(filename, lang='en'):
"""
Get the contents of a JSON file.
"""
filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)
with open(filepath, 'r') as f:
return json.loads(f.read())
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = '{0}_{1}.markdown'
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding='utf-8') as f:
return markdown.markdown(f.read())
| import os
import json
import codecs
import markdown
from flask import current_app
def get_json_file(filename, lang='en'):
"""
Get the contents of a JSON file.
"""
filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)
with open(filepath, 'r') as f:
return json.loads(f.read())
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = '{0}_{1}.markdown'
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding='utf-8') as f:
return markdown.markdown(f.read())
| import os
import json
import codecs
import markdown
from flask import current_app
def get_json_file(filename, lang='en'):
"""
Get the contents of a JSON file.
"""
filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)
with open(filepath, 'r') as f:
return json.loads(f.read())
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = "{0}_{1}.markdown"
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
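    # if a French version is missing, fall back to the English markdown file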
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding="utf-8") as f:
return markdown.markdown(f.read())
| [
0,
1,
2,
3,
4
] |
323 | 398263b65fd98003f27020e46ae38e913dc5dd45 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Brice Chou'
import os
import lib
import sys
import time
import getopt
import training
try:
import cv2
import h5py
except Exception as e:
error_info = 'Please install h5py/cv2 tools first. Error: {}.\n'.format(e)
print('\033[0;31m%s\033[0m' % error_info)
quit()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def run():
# Set the window name
window_name = __author__
# Get a reference to webcam #-1 (the last one)
video_capture = cv2.VideoCapture(-1)
# Initialize some variables
unknown_folder_path = os.path.abspath('unknown')
i = lib.get_file_max_number(unknown_folder_path)
filerd = h5py.File('database/training_encodings.hdf5', 'r')
# Image encodings mode
encodings_mode = 'large'
# Temp to save predict result name
face_names = []
# Save the screen locations and encodings to find a person
screen_locations = []
screen_encodings = []
# Save the training data from database
training_names = []
training_eigenvalues = []
process_this_frame = True
for key in filerd.keys():
training_names.append(filerd[key].name.split('/')[-1])
training_eigenvalues.append(filerd[key].value)
filerd.close()
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
        # Resize frame of video to 1/2 size

# for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings
# in the current frame of video
screen_locations = lib.face_locations(small_frame, 1,
'hog')
screen_encodings = lib.face_encodings(small_frame, None,
1, encodings_mode)
face_names = []
# How manay faces in the screen
detected_face_length = len(screen_locations)
info = 'We detected \033[0;32m{}\033[0m faces in the screen.\n'
print(info.format(detected_face_length))
if detected_face_length >= 1:
for screen_encoding in screen_encodings:
# Compare the locations and get the face's name
name = lib.compare_faces(training_eigenvalues,
training_names,
screen_encoding, 0.31)
face_names.append(name)
# Auto save the unknown images
if '' == name:
img_file_path = '{}/{}.jpg'.format(
unknown_folder_path, i)
cv2.imwrite(img_file_path, frame)
i += 1
time.sleep(0.15)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(screen_locations, face_names):
        # Scale the face locations back up, since the frame we detected in was scaled to 1/2 size
top *= 2
right *= 2
bottom *= 2
left *= 2
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
if '' != name:
# Draw a label with a name below the face
# # cv2.cv.CV_FILLED
cv2.rectangle(frame, (left - 60, bottom + 30),
(right + 60, bottom - 10), (0, 0, 255),
cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left - 50, bottom + 20),
font, 1, (255, 255, 255), 1)
# Display the resulting image
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
# cv2.cv.CV_WINDOW_FULLSCREEN
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
cv2.imshow(window_name, frame)
key = cv2.waitKey(1)
if key == ord('s'):
label = 'cache/{}.jpg'.format(i)
cv2.imwrite(label, frame)
i += 1
elif key == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
argv_list = argv[1:]
opts, args = getopt.getopt(argv_list, 'h', ['help'])
arg = argv_list[0]
if 'run' == arg:
run()
elif 'save' == arg:
training.save()
elif 'move' == arg:
training.move()
elif 'detect' == arg:
training.detect()
elif 'catch' == arg:
if 2 == len(argv_list):
training.catch(argv_list[1])
else:
training.catch()
elif 'rotate' == arg:
if 2 == len(argv_list):
training.rotate(amount=int(argv_list[1]))
else:
training.rotate()
        except getopt.error as msg:
            raise Usage(msg)
    except Usage as err:
        print(err.msg, file=sys.stderr)
        print('for help use --help', file=sys.stderr)
return 2
if __name__ == '__main__':
lib.initial_project_folder()
sys.exit(main())
| null | null | null | null | [
0
] |
324 | 606e40dd073c3efc95ef01a08466fd536a28f140 | from slistener import SListener
from slistener import track
import datetime
import time, tweepy, sys
import json
import re
#def tweet_collector():
consumer_key='qpUR91PwjvChszV0VFgrc4Hje'
consumer_secret='q9mPUZE2OsFbaqKUF32ZsY1ry4anZ1k8pNSne56wc3HInmERFu'
access_token='2845943577-R0g6YRlrdEqSFb2mKy5HXuByQPdpq4TLGrPkmSs'
access_token_secret='ed5emUSxHENLtqN8nLYvGkbipKAEemFd0fgjsXNPC8GED'
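# authenticate with Twitter via OAuth 1.0a, then open a filtered stream on the imported track terms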
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
listen = SListener(api)
stream = tweepy.Stream(auth, listen)
print "Streaming started..."
global track
try:
stream.filter(track = track)
except:
stream.disconnect()
| null | null | null | null | [
0
] |
325 | 86ca94820c05b3f63f4a733b6d1fa7eb9dea6a5d | <mask token>
| source_root_dir = '/home/songsong/image_transport_ws/src'
whitelisted_packages = ''.split(';') if '' != '' else []
blacklisted_packages = ''.split(';') if '' != '' else []
underlay_workspaces = (
'/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic'
.split(';') if
'/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic'
!= '' else [])
| # generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/songsong/image_transport_ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic".split(';') if "/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic" != "" else []
| null | null | [
0,
1,
2
] |
326 | 095d7abfc8297e0bf741a4ebb351a7776055623f | ''' The previous code does not correcly compute the stiffening coefficients
This program uses the clustering data to re-compute the stiffening coefficients '''
import glob
import sys
import time
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
def LoadClusterHistogram(inputfile):
f = open(inputfile)
data = []
while True:
fields = f.readline().strip().split(',')
if len(fields)>1:
nAtomsInCluster = float(fields[0])
nClusters = float(fields[1])
data.append((nAtomsInCluster, nClusters))
else:
break
return data
def NIntsBetweenTerminalGroupsMax(nGroups):
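    # all pairwise contacts among nGroups terminal groups: n(n-1)/2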
return nGroups*(nGroups-1)*0.5
def NIntsBetweenTerminalGroupsMin(nGroups):
return nGroups - 1
def NTerminalGroupsInCluster(nAtomsInCluster, moltype):
nAtomsPerGroup = {'EtOCSMethyl': 1.0, 'EtOCSVinyl': 2.0, 'EtOCSPhenyl': 6.0}
return int(nAtomsInCluster/nAtomsPerGroup[moltype])
def ComputeStiffening(data, moltype):
# the min and max number of interactions between pairs of terminal groups
nAtomIntsPerPairOfGroupsMin = {'EtOCSMethyl': 1, 'EtOCSVinyl': 1, 'EtOCSPhenyl': 4}
nAtomIntsPerPairOfGroupsMax = {'EtOCSMethyl': 1, 'EtOCSVinyl': 4, 'EtOCSPhenyl': 36}
nStericInteractionsMin = 0 # gamma_min
nStericInteractionsMax = 0 # gamma_max
for cluster in data:
nAtomsInCluster, nClusters = cluster
nTerminalGroups = NTerminalGroupsInCluster(nAtomsInCluster, moltype)
nGroupIntsMin = NIntsBetweenTerminalGroupsMin(nTerminalGroups)
nGroupIntsMax = NIntsBetweenTerminalGroupsMax(nTerminalGroups)
nStericInteractionsMin += nGroupIntsMin * nAtomIntsPerPairOfGroupsMin[moltype] * nClusters
nStericInteractionsMax += nGroupIntsMax * nAtomIntsPerPairOfGroupsMax[moltype] * nClusters
return (nStericInteractionsMin, nStericInteractionsMax)
def ComputeStiffeningOH(data):
nStericInteractionsMin = 0 # gamma_min
nStericInteractionsMax = 0 # gamma_max
for cluster in data:
nAtomsInCluster, nClusters = cluster
nStericInteractionsMin += (nAtomsInCluster-1)*nClusters
nStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters
return (nStericInteractionsMin, nStericInteractionsMax)
def ComputeStiffeningCoeffs(data):
nStericInteractionsMin = 0 # gamma_min
nStericInteractionsMax = 0 # gamma_max
for cluster in data:
nAtomsInCluster, nClusters = cluster
nStericInteractionsMin += (nAtomsInCluster-1)*nClusters
nStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters
return (nStericInteractionsMin, nStericInteractionsMax)
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
if len(sys.argv) < 2:
print 'Usage:'
print ' python %s <precursor type> [OH - False]' %sys.argv[0]
exit()
moltype = sys.argv[1]
if len(sys.argv) > 2:
OHGroups = True
else:
OHGroups = False
t0 = time.time()
# get all the relevant files and process each network
inputfiles = glob.glob('{}_*.txt'.format(moltype))
# write all the results to the same file
f = open('steric_interactions.txt', 'w')
f.write('Filename : gamma_min, gamma_max\n')
for inputfile in inputfiles:
print 'Working with %s...' %inputfile
data = LoadClusterHistogram(inputfile)
gamma_min, gamma_max = ComputeStiffeningCoeffs(data)
# if OHGroups:
# gamma_min, gamma_max = ComputeStiffeningOH(data)
# else:
# gamma_min, gamma_max = ComputeStiffening(data, moltype)
f.write('%s : %.4f, %.4f\n' %(inputfile, gamma_min, gamma_max))
f.close()
print 'Analyzed network in %.4f seconds.' %(time.time()-t0) | null | null | null | null | [
0
] |
327 | 1f27b697985c7417e6d8d978703175a415c6c57d | <mask token>
| <mask token>
print('%.2f' % ukupanPut)
| <mask token>
r = float(input())
p = int(input())
obim = 2 * r * math.pi
ukupanPut = p * obim
ukupanPut = ukupanPut * 0.01
print('%.2f' % ukupanPut)
| import math
r = float(input())
p = int(input())
obim = 2 * r * math.pi
ukupanPut = p * obim
ukupanPut = ukupanPut * 0.01
print('%.2f' % ukupanPut)
| import math
r = float(input())
p = int(input())
obim = 2 * r * math.pi
ukupanPut = p * obim
# centimetre pretvaramo u metre
ukupanPut = ukupanPut * 0.01
print("%.2f" % ukupanPut)
| [
0,
1,
2,
3,
4
] |
328 | 71a9c9b8f47dcfbecc154c44d5a72ddbd852145a | <mask token>
| def randomizer(n, garrafa_vidro, lata_metal, copo_plastico, bola_papel,
maça_organico):
lixos = [garrafa_vidro, lata_metal, copo_plastico, bola_papel,
maça_organico]
return lixos[n]
| null | null | null | [
0,
1
] |
329 | 02e711dfc122007c74949cd9f86e2aeb9d334871 | <mask token>
class Adaline:
<mask token>
def fit(self, X, Y):
X = np.hstack((np.ones((X.shape[0], 1)), X))
self.w = np.random.uniform(-1, 1, (X.shape[1], 1))
for n in range(self.n_iter):
y = X.dot(self.w)
error = Y - y
self.w += self.eta * X.T.dot(error)
cost = 1.0 / 2 * np.sum(error ** 2)
self.error.append(cost)
return self
<mask token>
| <mask token>
class Adaline:
def __init__(self, eta=0.0001, n_iter=2000):
self.eta = eta
self.n_iter = n_iter
self.error = []
def fit(self, X, Y):
X = np.hstack((np.ones((X.shape[0], 1)), X))
self.w = np.random.uniform(-1, 1, (X.shape[1], 1))
for n in range(self.n_iter):
y = X.dot(self.w)
error = Y - y
self.w += self.eta * X.T.dot(error)
cost = 1.0 / 2 * np.sum(error ** 2)
self.error.append(cost)
return self
<mask token>
| <mask token>
class Adaline:
def __init__(self, eta=0.0001, n_iter=2000):
self.eta = eta
self.n_iter = n_iter
self.error = []
def fit(self, X, Y):
X = np.hstack((np.ones((X.shape[0], 1)), X))
self.w = np.random.uniform(-1, 1, (X.shape[1], 1))
for n in range(self.n_iter):
y = X.dot(self.w)
error = Y - y
self.w += self.eta * X.T.dot(error)
cost = 1.0 / 2 * np.sum(error ** 2)
self.error.append(cost)
return self
def predict(self, X):
X = np.hstack((np.ones((X.shape[0], 1)), X))
Y_hat = X.dot(self.w)
return Y_hat
| import numpy as np
class Adaline:
def __init__(self, eta=0.0001, n_iter=2000):
self.eta = eta
self.n_iter = n_iter
self.error = []
def fit(self, X, Y):
X = np.hstack((np.ones((X.shape[0], 1)), X))
self.w = np.random.uniform(-1, 1, (X.shape[1], 1))
for n in range(self.n_iter):
y = X.dot(self.w)
error = Y - y
self.w += self.eta * X.T.dot(error)
cost = 1.0 / 2 * np.sum(error ** 2)
self.error.append(cost)
return self
def predict(self, X):
X = np.hstack((np.ones((X.shape[0], 1)), X))
Y_hat = X.dot(self.w)
return Y_hat
| import numpy as np
class Adaline:
def __init__(self, eta = 0.0001, n_iter = 2000):
self.eta = eta
self.n_iter = n_iter
self.error = []
def fit(self, X, Y):
X = np.hstack((np.ones((X.shape[0],1)), X))
self.w = np.random.uniform(-1, 1, (X.shape[1], 1))
for n in range(self.n_iter):
y = X.dot(self.w)
error = Y - y
self.w += self.eta * X.T.dot(error)
cost = 1./2 * np.sum(error**2)
self.error.append(cost)
return self
def predict(self, X):
X = np.hstack((np.ones((X.shape[0],1)), X))
Y_hat = X.dot(self.w)
return Y_hat
| [ 2, 3, 4, 5, 6 ] |
330 | 6bf1d410a33e3b2535e39e4f8c5c7f8278b3de67 | <mask token>
| <mask token>
def save_card(word, image_path, filepath='data/cards/', filename=None):
"""Функция для генерации и сохранения изображения
Возвращает filepath+filename
Параметры:
word - слово, чей контент будет на карточке
image - задний фон изображения
filepath - путь для хранения изображения
filename - имя изображения
"""
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(content=content, image=image, auth_font=fonts.
aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font
=fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.
word_font, thumb_font=fonts.thumb_font)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
<mask token>
| <mask token>
def save_card(word, image_path, filepath='data/cards/', filename=None):
"""Функция для генерации и сохранения изображения
Возвращает filepath+filename
Параметры:
word - слово, чей контент будет на карточке
image - задний фон изображения
filepath - путь для хранения изображения
filename - имя изображения
"""
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(content=content, image=image, auth_font=fonts.
aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font
=fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.
word_font, thumb_font=fonts.thumb_font)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(),
f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
| from PIL import Image
from src import urbandictionary_api
from src.card.cardDrawer import CardDrawer
from src.card.cardModel import CardModel
from src.repository import Repository
from src.urbandictionary_api import get_random_word
def save_card(word, image_path, filepath='data/cards/', filename=None):
"""Функция для генерации и сохранения изображения
Возвращает filepath+filename
Параметры:
word - слово, чей контент будет на карточке
image - задний фон изображения
filepath - путь для хранения изображения
filename - имя изображения
"""
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(content=content, image=image, auth_font=fonts.
aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font
=fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.
word_font, thumb_font=fonts.thumb_font)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(),
f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
| from PIL import Image
from src import urbandictionary_api
from src.card.cardDrawer import CardDrawer
from src.card.cardModel import CardModel
from src.repository import Repository
from src.urbandictionary_api import get_random_word
def save_card(word, image_path, filepath='data/cards/', filename=None):
'''Функция для генерации и сохранения изображения
Возвращает filepath+filename
Параметры:
word - слово, чей контент будет на карточке
image - задний фон изображения
filepath - путь для хранения изображения
filename - имя изображения
'''
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(
content=content,
image=image,
auth_font=fonts.aut_font,
cat_font=fonts.cat_font,
def_font=fonts.def_font,
ex_font=fonts.ex_font,
rect_font=fonts.rect_font,
word_font=fonts.word_font,
thumb_font=fonts.thumb_font
)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
| [ 0, 1, 2, 3, 4 ] |
331 | a096e811e50e25e47a9b76b1f813c51f4307bbfe | <mask token>
| <mask token>
class DrinkFilter(django_filters.FilterSet):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Meta:
model = Drinks
fields = ['name', 'brands']
<mask token>
| <mask token>
class DrinkFilter(django_filters.FilterSet):
BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand.
objects.all())
name = django_filters.CharFilter(lookup_expr='icontains')
price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt'
)
price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt'
)
likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt'
)
likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt'
)
brands = django_filters.MultipleChoiceFilter(field_name='brand__name',
choices=BRAND_CHOICES)
class Meta:
model = Drinks
fields = ['name', 'brands']
<mask token>
| import django_filters
from .models import Drinks, Brand
class DrinkFilter(django_filters.FilterSet):
BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand.
objects.all())
name = django_filters.CharFilter(lookup_expr='icontains')
price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt'
)
price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt'
)
likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt'
)
likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt'
)
brands = django_filters.MultipleChoiceFilter(field_name='brand__name',
choices=BRAND_CHOICES)
class Meta:
model = Drinks
fields = ['name', 'brands']
<mask token>
| import django_filters
from .models import Drinks, Brand
class DrinkFilter(django_filters.FilterSet):
BRAND_CHOICES = tuple(
(brand.name, brand.name) for brand in Brand.objects.all())
name = django_filters.CharFilter(lookup_expr='icontains')
price_lt = django_filters.NumberFilter(field_name='price',
lookup_expr='lt')
price_gt = django_filters.NumberFilter(field_name='price',
lookup_expr='gt')
likes_lt = django_filters.NumberFilter(field_name='likes',
lookup_expr='lt')
likes_gt = django_filters.NumberFilter(field_name='likes',
lookup_expr='gt')
brands = django_filters.MultipleChoiceFilter(field_name='brand__name',
choices=BRAND_CHOICES)
class Meta:
model = Drinks
fields = ['name', 'brands']
"""
f = F({'date_after': '2016-01-01', 'date_before': '2016-02-01'})
""" | [
0,
1,
2,
3,
4
] |
332 | 3ea42e7ad5301314a39bf522280c084342cd18c5 | <mask token>
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
| <mask token>
class IndexView(View):
<mask token>
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
| <mask token>
class IndexView(View):
def dispatch_request(self):
return render_template('index.html')
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
| from flask import render_template, request, Response
from flask.views import MethodView, View
from flask.views import View
from repo import ClassifierRepo
from services import PredictDigitService
from settings import CLASSIFIER_STORAGE
class IndexView(View):
def dispatch_request(self):
return render_template('index.html')
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
| null | [ 2, 3, 4, 5 ] |
333 | 0c97569c77fb3598d83eba607960328bb2134dd2 | <mask token>
| <mask token>
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
<mask token>
print('DCCNet training script')
<mask token>
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default=
'datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default=
'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help=
'number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help=
'training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default=
'checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default=
'../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help=
'number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help=
'experiment name')
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,
5, 5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,
16, 1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size', type=int, default=25, help=
'kernel size in sce.')
parser.add_argument('--sce_hidden_dim', type=int, default=1024, help=
'hidden dim in sce')
parser.add_argument('--scaleloss_weight', type=float, default=1.0, help=
'whether use scale loss, if use the weight for scale loss')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,
default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,
default=[16, 16, 1], help='channels in dynamic fusion net')
<mask token>
print(args)
print('Creating CNN model...')
<mask token>
if args.fe_finetune_params > 0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(
):
p.requires_grad = True
print('Trainable parameters:')
<mask token>
for i, param in enumerate(model.named_parameters()):
name, p = param
if p.requires_grad:
count += 1
print(str(count) + ': ' + name + '\t' + str(p.shape) + '\t')
print(model)
print('using Adam optimizer')
<mask token>
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: ' + checkpoint_name)
<mask token>
def process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,
batch_preprocessing_fn, use_cuda=True, log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode == 'train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model, tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
epoch_loss += loss_np
if mode == 'train':
loss.backward()
optimizer.step()
else:
loss = None
if batch_idx % log_interval == 0:
print(mode.capitalize() +
' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'
.format(epoch, batch_idx, len(dataloader), 100.0 *
batch_idx / len(dataloader), loss_np, time.time() - st))
epoch_loss /= len(dataloader)
print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
<mask token>
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs + 1):
st = time.time()
train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,
optimizer, dataloader, batch_preprocessing_fn, log_interval=1)
time_train = time.time() - st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,
optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time() - st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=
model, verbose=False)
time_valpck = time.time() - st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch - 1] = val_pck_curepoch
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.
state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':
train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,
'best_val_pck': best_val_pck}, is_best, checkpoint_name,
save_all_epochs=False)
message = (
"""Epoch{} Train_loss{:.6f} cost time{:.1f} Val_loss{:.6f} cost time{:.1f} Val_pck{:.6f} cost time{:.1f}
"""
.format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,
time_valloss, val_pck_curepoch, time_valpck))
print(message)
with open(log_name, 'a') as log_file:
log_file.write('%s\n' % message)
print('Done!')
| <mask token>
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print('DCCNet training script')
parser = argparse.ArgumentParser(description='Compute PF Pascal matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default=
'datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default=
'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help=
'number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help=
'training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default=
'checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default=
'../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help=
'number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help=
'experiment name')
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,
5, 5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,
16, 1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size', type=int, default=25, help=
'kernel size in sce.')
parser.add_argument('--sce_hidden_dim', type=int, default=1024, help=
'hidden dim in sce')
parser.add_argument('--scaleloss_weight', type=float, default=1.0, help=
'whether use scale loss, if use the weight for scale loss')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,
default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,
default=[16, 16, 1], help='channels in dynamic fusion net')
args = parser.parse_args()
print(args)
print('Creating CNN model...')
model = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,
ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.
ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=
args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.
att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.
att_scale_ncons_channels)
model = nn.DataParallel(model)
if args.fe_finetune_params > 0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(
):
p.requires_grad = True
print('Trainable parameters:')
count = 0
for i, param in enumerate(model.named_parameters()):
name, p = param
if p.requires_grad:
count += 1
print(str(count) + ': ' + name + '\t' + str(p.shape) + '\t')
print(model)
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()
), lr=args.lr)
cnn_image_size = args.image_size, args.image_size
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
val_nocoordinates_csv = 'val_pairs_nocoords.csv'
val_csv = 'image_pairs/val_pairs.csv'
normalization_tnf = NormalizeImageDict(['source_image', 'target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)
dataset = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=train_csv, output_size=cnn_image_size)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
num_workers=0)
dataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)
dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,
shuffle=True, num_workers=4)
dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size,
eval_dataset_path=args.dataset_image_path, csv_file=val_csv)
checkpoint_dir = os.path.join(args.result_model_dir, args.exp_name)
checkpoint_name = os.path.join(args.result_model_dir, args.exp_name,
datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.
result_model_fn + '.pth.tar')
log_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +
args.exp_name + '.txt')
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: ' + checkpoint_name)
best_val_pck = float('-inf')
loss_fn = lambda model, batch: weak_loss(model, batch, normalization=
'softmax', scaleloss_weight=args.scaleloss_weight)
def process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,
batch_preprocessing_fn, use_cuda=True, log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode == 'train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model, tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
epoch_loss += loss_np
if mode == 'train':
loss.backward()
optimizer.step()
else:
loss = None
if batch_idx % log_interval == 0:
print(mode.capitalize() +
' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'
.format(epoch, batch_idx, len(dataloader), 100.0 *
batch_idx / len(dataloader), loss_np, time.time() - st))
epoch_loss /= len(dataloader)
print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
train_loss = np.zeros(args.num_epochs)
val_loss = np.zeros(args.num_epochs)
val_pcks = np.zeros(args.num_epochs)
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs + 1):
st = time.time()
train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,
optimizer, dataloader, batch_preprocessing_fn, log_interval=1)
time_train = time.time() - st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,
optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time() - st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=
model, verbose=False)
time_valpck = time.time() - st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch - 1] = val_pck_curepoch
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.
state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':
train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,
'best_val_pck': best_val_pck}, is_best, checkpoint_name,
save_all_epochs=False)
message = (
"""Epoch{} Train_loss{:.6f} cost time{:.1f} Val_loss{:.6f} cost time{:.1f} Val_pck{:.6f} cost time{:.1f}
"""
.format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,
time_valloss, val_pck_curepoch, time_valpck))
print(message)
with open(log_name, 'a') as log_file:
log_file.write('%s\n' % message)
print('Done!')
| from __future__ import print_function, division
import os
from os.path import exists, join, basename, dirname
from os import makedirs
import numpy as np
import datetime
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from lib.dataloader import DataLoader
from lib.im_pair_dataset import ImagePairDataset
from lib.normalization import NormalizeImageDict
from lib.torch_util import save_checkpoint
from lib.torch_util import BatchTensorToVars
from lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader
from models.model_dynamic import DCCNet
from models.loss_dynamic import weak_loss
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print('DCCNet training script')
parser = argparse.ArgumentParser(description='Compute PF Pascal matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default=
'datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default=
'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help=
'number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help=
'training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default=
'checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default=
'../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help=
'number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help=
'experiment name')
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,
5, 5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,
16, 1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size', type=int, default=25, help=
'kernel size in sce.')
parser.add_argument('--sce_hidden_dim', type=int, default=1024, help=
'hidden dim in sce')
parser.add_argument('--scaleloss_weight', type=float, default=1.0, help=
'whether use scale loss, if use the weight for scale loss')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,
default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,
default=[16, 16, 1], help='channels in dynamic fusion net')
args = parser.parse_args()
print(args)
print('Creating CNN model...')
model = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,
ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.
ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=
args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.
att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.
att_scale_ncons_channels)
model = nn.DataParallel(model)
if args.fe_finetune_params > 0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(
):
p.requires_grad = True
print('Trainable parameters:')
count = 0
for i, param in enumerate(model.named_parameters()):
name, p = param
if p.requires_grad:
count += 1
print(str(count) + ': ' + name + '\t' + str(p.shape) + '\t')
print(model)
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()
), lr=args.lr)
cnn_image_size = args.image_size, args.image_size
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
val_nocoordinates_csv = 'val_pairs_nocoords.csv'
val_csv = 'image_pairs/val_pairs.csv'
normalization_tnf = NormalizeImageDict(['source_image', 'target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)
dataset = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=train_csv, output_size=cnn_image_size)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
num_workers=0)
dataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)
dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,
shuffle=True, num_workers=4)
dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size,
eval_dataset_path=args.dataset_image_path, csv_file=val_csv)
checkpoint_dir = os.path.join(args.result_model_dir, args.exp_name)
checkpoint_name = os.path.join(args.result_model_dir, args.exp_name,
datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.
result_model_fn + '.pth.tar')
log_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +
args.exp_name + '.txt')
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: ' + checkpoint_name)
best_val_pck = float('-inf')
loss_fn = lambda model, batch: weak_loss(model, batch, normalization=
'softmax', scaleloss_weight=args.scaleloss_weight)
def process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,
batch_preprocessing_fn, use_cuda=True, log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode == 'train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model, tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
epoch_loss += loss_np
if mode == 'train':
loss.backward()
optimizer.step()
else:
loss = None
if batch_idx % log_interval == 0:
print(mode.capitalize() +
' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'
.format(epoch, batch_idx, len(dataloader), 100.0 *
batch_idx / len(dataloader), loss_np, time.time() - st))
epoch_loss /= len(dataloader)
print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
train_loss = np.zeros(args.num_epochs)
val_loss = np.zeros(args.num_epochs)
val_pcks = np.zeros(args.num_epochs)
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs + 1):
st = time.time()
train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,
optimizer, dataloader, batch_preprocessing_fn, log_interval=1)
time_train = time.time() - st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,
optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time() - st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=
model, verbose=False)
time_valpck = time.time() - st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch - 1] = val_pck_curepoch
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.
state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':
train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,
'best_val_pck': best_val_pck}, is_best, checkpoint_name,
save_all_epochs=False)
message = (
"""Epoch{} Train_loss{:.6f} cost time{:.1f} Val_loss{:.6f} cost time{:.1f} Val_pck{:.6f} cost time{:.1f}
"""
.format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,
time_valloss, val_pck_curepoch, time_valpck))
print(message)
with open(log_name, 'a') as log_file:
log_file.write('%s\n' % message)
print('Done!')
| from __future__ import print_function, division
import os
from os.path import exists, join, basename, dirname
from os import makedirs
import numpy as np
import datetime
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from lib.dataloader import DataLoader
from lib.im_pair_dataset import ImagePairDataset
from lib.normalization import NormalizeImageDict
from lib.torch_util import save_checkpoint
from lib.torch_util import BatchTensorToVars
from lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader
# import DCCNet
from models.model_dynamic import DCCNet
from models.loss_dynamic import weak_loss
# Seed and CUDA
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print('DCCNet training script')
# Argument parsing
parser = argparse.ArgumentParser(description='Compute PF Pascal matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help='training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default='checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default='../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help='experiment name')
# DCCNet args
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size',type=int,default=25,help='kernel size in sce.')
parser.add_argument('--sce_hidden_dim',type=int,default=1024,help='hidden dim in sce')
parser.add_argument('--scaleloss_weight',type=float,default=1.0,help='whether use scale loss, if use the weight for scale loss')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in dynamic fusion net')
args = parser.parse_args()
print(args)
# Create model
print('Creating CNN model...')
model = DCCNet(use_cuda=use_cuda,
checkpoint=args.checkpoint,
ncons_kernel_sizes=args.ncons_kernel_sizes,
ncons_channels=args.ncons_channels,
sce_kernel_size=args.sce_kernel_size,
sce_hidden_dim=args.sce_hidden_dim,
att_scale_ncons_kernel_sizes=args.att_scale_ncons_kernel_sizes,
att_scale_ncons_channels=args.att_scale_ncons_channels,
)
#Multi-GPU support
model = nn.DataParallel(model)
# Set which parts of the model to train
if args.fe_finetune_params>0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i+1)].parameters():
p.requires_grad=True
print('Trainable parameters:')
count = 0
for i,param in enumerate(model.named_parameters()):
name,p = param
if p.requires_grad:
count+=1
print(str(count)+": "+name+"\t"+str(p.shape)+"\t")
print(model)
# Optimizer
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
cnn_image_size=(args.image_size,args.image_size)
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
#val_pairs_nocoords.csv: for compute loss, with flip column in csv, no coordinates
#val_pairs.csv: for compute pck, with coordinates
val_nocoordinates_csv = 'val_pairs_nocoords.csv'
val_csv = 'image_pairs/val_pairs.csv'
normalization_tnf = NormalizeImageDict(['source_image','target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)
# Dataset and dataloader
dataset = Dataset(transform=normalization_tnf,
dataset_image_path=args.dataset_image_path,
dataset_csv_path=args.dataset_csv_path,
dataset_csv_file = train_csv,
output_size=cnn_image_size,
)
dataloader = DataLoader(dataset, batch_size=args.batch_size,
shuffle=True,
num_workers=0)
dataset_val = Dataset(transform=normalization_tnf,
dataset_image_path=args.dataset_image_path,
dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=val_nocoordinates_csv,
output_size=cnn_image_size)
# compute val loss
dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,
shuffle=True, num_workers=4)
# compute val pck
dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size, eval_dataset_path=args.dataset_image_path, csv_file=val_csv) #load pfpascal val dataset
# Define checkpoint name
checkpoint_dir = os.path.join(args.result_model_dir,args.exp_name)
checkpoint_name = os.path.join(args.result_model_dir,args.exp_name,
datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")+'_'+args.result_model_fn + '.pth.tar')
log_name = os.path.join(args.result_model_dir,args.exp_name, 'logmain_'+args.exp_name+'.txt')
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: '+checkpoint_name)
# Train
best_val_pck = float("-inf")
loss_fn = lambda model,batch: weak_loss(model, batch, normalization='softmax', scaleloss_weight=args.scaleloss_weight)
# define epoch function
def process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,use_cuda=True,log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode=='train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model,tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
#loss_np = loss.data.cpu().numpy()
epoch_loss += loss_np
if mode=='train':
loss.backward()
optimizer.step()
else:
loss=None
if batch_idx % log_interval == 0:
print(mode.capitalize()+' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'.format(
epoch, batch_idx , len(dataloader),
100. * batch_idx / len(dataloader), loss_np,time.time()-st))
epoch_loss /= len(dataloader)
print(mode.capitalize()+' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
train_loss = np.zeros(args.num_epochs)
val_loss = np.zeros(args.num_epochs)
val_pcks = np.zeros(args.num_epochs)
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs+1):
st = time.time()
train_loss_curepoch = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,log_interval=1)
time_train = time.time()-st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn, optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time()-st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck,model=model,verbose=False)
time_valpck = time.time()-st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch-1] = val_pck_curepoch
# remember best loss
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
save_checkpoint({
'epoch': epoch,
'args': args,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'train_loss': train_loss,
'val_loss': val_loss,
'val_pck': val_pcks,
'best_val_pck':best_val_pck,
}, is_best,checkpoint_name,save_all_epochs=False)
message = 'Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n'.format\
(epoch, train_loss_curepoch, time_train, val_loss_curepoch, time_valloss,val_pck_curepoch,time_valpck,)
print(message)
with open(log_name, "a") as log_file:
log_file.write('%s\n' % message)
print('Done!')
| [ 0, 2, 3, 4, 5 ] |
334 | a65dfca1773c1e4101ebfb953e0f617a2c345695 | <mask token>
| def merge(self, intervals):
intervals.sort()
arr = []
for i in intervals:
if len(arr) == 0 or arr[-1][1] < i[0]:
arr.append(i)
else:
arr[-1][1] = max(arr[-1][1], i[1])
return arr
| null | null | null | [ 0, 1 ] |
335 | 49005500b299ca276f663fe8431bb955e5585bbd | <mask token>
| <mask token>
while True:
net = Net.FeedForwardNet(input_count=784, layers=[100, 10],
activation_function=Net.FeedForwardNet.leaky_relu)
try:
epoch_num = int(input('Epoch_num:'))
batch_size = int(input('Batch_size:'))
learning_rate = float(input('Learning rate:'))
inertion_factor = float(input('Inertion factor:'))
except:
print('Parse error')
continue
for i in range(epoch_num):
batch_in, batch_out = net.generate_random_batch(in_values,
out_values, batch_size)
net.forward_propagation(batch_in)
net.backpropagation(batch_out, learning_rate=learning_rate,
inertion_factor=inertion_factor)
if i % 50 == 0:
print()
output = net.forward_propagation(in_testing_values)
if net.check_total_squared_error(output_values=
out_testing_values, epsilon=1000, verbose=True):
break
output_numbers = mnist_parser.one_hots_to_ints(output)
correct = np.sum(out_gt_numbers_test == output_numbers)
print('Epoch: ', i, ' br tocnih:', correct, '/', output_numbers
.size, '(', correct / output_numbers.size, '%)')
output = net.forward_propagation(in_testing_values)
conf_mat = net.calculate_confusion_matrix(out_testing_values)
output_numbers = mnist_parser.one_hots_to_ints(output)
correct = np.sum(out_gt_numbers_test == output_numbers)
print('Correct:', correct, '/', output_numbers.size, '(', correct /
output_numbers.size, '%)')
print(conf_mat)
save = int(input('Save?(1/0)'))
if save == 1:
name = input('Save as?')
net.save_state(name)
exit = int(input('Exit?(1/0)'))
if exit == 1:
break
| <mask token>
in_values = np.load('MNIST/mnist_train_images.npy')
out_values = np.load('MNIST/mnist_train_labels.npy')
out_gt_numbers = mnist_parser.one_hots_to_ints(out_values)
in_testing_values = np.load('MNIST/mnist_test_images.npy')
out_testing_values = np.load('MNIST/mnist_test_labels.npy')
out_gt_numbers_test = mnist_parser.one_hots_to_ints(out_testing_values)
while True:
net = Net.FeedForwardNet(input_count=784, layers=[100, 10],
activation_function=Net.FeedForwardNet.leaky_relu)
try:
epoch_num = int(input('Epoch_num:'))
batch_size = int(input('Batch_size:'))
learning_rate = float(input('Learning rate:'))
inertion_factor = float(input('Inertion factor:'))
except:
print('Parse error')
continue
for i in range(epoch_num):
batch_in, batch_out = net.generate_random_batch(in_values,
out_values, batch_size)
net.forward_propagation(batch_in)
net.backpropagation(batch_out, learning_rate=learning_rate,
inertion_factor=inertion_factor)
if i % 50 == 0:
print()
output = net.forward_propagation(in_testing_values)
if net.check_total_squared_error(output_values=
out_testing_values, epsilon=1000, verbose=True):
break
output_numbers = mnist_parser.one_hots_to_ints(output)
correct = np.sum(out_gt_numbers_test == output_numbers)
print('Epoch: ', i, ' br tocnih:', correct, '/', output_numbers
.size, '(', correct / output_numbers.size, '%)')
output = net.forward_propagation(in_testing_values)
conf_mat = net.calculate_confusion_matrix(out_testing_values)
output_numbers = mnist_parser.one_hots_to_ints(output)
correct = np.sum(out_gt_numbers_test == output_numbers)
print('Correct:', correct, '/', output_numbers.size, '(', correct /
output_numbers.size, '%)')
print(conf_mat)
save = int(input('Save?(1/0)'))
if save == 1:
name = input('Save as?')
net.save_state(name)
exit = int(input('Exit?(1/0)'))
if exit == 1:
break
| import Net
import mnist_parser
import numpy as np
in_values = np.load('MNIST/mnist_train_images.npy')
out_values = np.load('MNIST/mnist_train_labels.npy')
out_gt_numbers = mnist_parser.one_hots_to_ints(out_values)
in_testing_values = np.load('MNIST/mnist_test_images.npy')
out_testing_values = np.load('MNIST/mnist_test_labels.npy')
out_gt_numbers_test = mnist_parser.one_hots_to_ints(out_testing_values)
while True:
net = Net.FeedForwardNet(input_count=784, layers=[100, 10],
activation_function=Net.FeedForwardNet.leaky_relu)
try:
epoch_num = int(input('Epoch_num:'))
batch_size = int(input('Batch_size:'))
learning_rate = float(input('Learning rate:'))
inertion_factor = float(input('Inertion factor:'))
except:
print('Parse error')
continue
for i in range(epoch_num):
batch_in, batch_out = net.generate_random_batch(in_values,
out_values, batch_size)
net.forward_propagation(batch_in)
net.backpropagation(batch_out, learning_rate=learning_rate,
inertion_factor=inertion_factor)
if i % 50 == 0:
print()
output = net.forward_propagation(in_testing_values)
if net.check_total_squared_error(output_values=
out_testing_values, epsilon=1000, verbose=True):
break
output_numbers = mnist_parser.one_hots_to_ints(output)
correct = np.sum(out_gt_numbers_test == output_numbers)
print('Epoch: ', i, ' br tocnih:', correct, '/', output_numbers
.size, '(', correct / output_numbers.size, '%)')
output = net.forward_propagation(in_testing_values)
conf_mat = net.calculate_confusion_matrix(out_testing_values)
output_numbers = mnist_parser.one_hots_to_ints(output)
correct = np.sum(out_gt_numbers_test == output_numbers)
print('Correct:', correct, '/', output_numbers.size, '(', correct /
output_numbers.size, '%)')
print(conf_mat)
save = int(input('Save?(1/0)'))
if save == 1:
name = input('Save as?')
net.save_state(name)
exit = int(input('Exit?(1/0)'))
if exit == 1:
break
| import Net
import mnist_parser
import numpy as np
#To use this model it is required to download the MNIST database
#The donwloaded base is then needet parse to numpy using mnist_parser.parse_to_npy method
#The files genetared using mnist_parser.parse_to_npy are then loaded using np.load
in_values = np.load("MNIST/mnist_train_images.npy")
out_values = np.load("MNIST/mnist_train_labels.npy")
out_gt_numbers=mnist_parser.one_hots_to_ints(out_values)
in_testing_values = np.load("MNIST/mnist_test_images.npy")
out_testing_values = np.load("MNIST/mnist_test_labels.npy")
out_gt_numbers_test=mnist_parser.one_hots_to_ints(out_testing_values)
while(True):
net = Net.FeedForwardNet(input_count=784, layers=[100, 10], activation_function=Net.FeedForwardNet.leaky_relu)
try:
epoch_num=int(input("Epoch_num:"))
batch_size=int(input("Batch_size:")) #30
learning_rate=float(input("Learning rate:")) #0.001
inertion_factor=float(input("Inertion factor:")) #0.5
# max_error=float(input("Maximum error"))
except:
print("Parse error")
continue
for i in range(epoch_num):
batch_in,batch_out=net.generate_random_batch(in_values,out_values,batch_size)
net.forward_propagation(batch_in)
net.backpropagation(batch_out, learning_rate=learning_rate, inertion_factor=inertion_factor)
# print("X:",net.X[-1])
# net.stochastic_backpropagation(batch_out, learning_rate=learning_rate)
if i % 50 == 0:
print()
output=net.forward_propagation(in_testing_values)
if net.check_total_squared_error(output_values=out_testing_values, epsilon=1000, verbose=True):
break
output_numbers=mnist_parser.one_hots_to_ints(output)
correct=np.sum( out_gt_numbers_test == output_numbers)
print("Epoch: ", i, " br tocnih:",correct,"/",output_numbers.size,"(",correct/output_numbers.size,"%)")
output=net.forward_propagation(in_testing_values)
conf_mat=net.calculate_confusion_matrix(out_testing_values)
output_numbers = mnist_parser.one_hots_to_ints(output)
correct=np.sum(out_gt_numbers_test == output_numbers)
print("Correct:",correct,"/",output_numbers.size,"(",correct/output_numbers.size ,"%)")
print(conf_mat)
save=int(input("Save?(1/0)"))
if(save == 1):
name=input("Save as?")
net.save_state(name)
exit=int(input("Exit?(1/0)"))
if(exit == 1):
break
| [ 0, 1, 2, 3, 4 ] |
336 | 219929d52b5f1a0690590e83b41d2b4f0b2b3a51 | <mask token>
| <mask token>
def sort(list):
for i in range(len(list) - 1):
if list[i] > list[i + 1]:
a = list[i]
list[i] = list[i + 1]
list[i + 1] = a
print(list)
<mask token>
| <mask token>
def sort(list):
for i in range(len(list) - 1):
if list[i] > list[i + 1]:
a = list[i]
list[i] = list[i + 1]
list[i + 1] = a
print(list)
sort(list)
| list = [3, 1, 2, 5, 4, 7, 6]
def sort(list):
for i in range(len(list) - 1):
if list[i] > list[i + 1]:
a = list[i]
list[i] = list[i + 1]
list[i + 1] = a
print(list)
sort(list)
| null | [ 0, 1, 2, 3 ] |
337 | e884ce5878de75afe93085e2310b4b8d5953963a | <mask token>
| <mask token>
print(str(bool(re.search(regex, raw_input()))).lower())
| <mask token>
regex = '^\\d{2}(-?)\\d{2}\\1\\d{2}\\1\\d{2}$'
<mask token>
print(str(bool(re.search(regex, raw_input()))).lower())
| <mask token>
regex = '^\\d{2}(-?)\\d{2}\\1\\d{2}\\1\\d{2}$'
import re
print(str(bool(re.search(regex, raw_input()))).lower())
| '''
Created on 13 Dec 2016
@author: hpcosta
'''
# https://www.hackerrank.com/challenges/backreferences-to-failed-groups
regex = r"^\d{2}(-?)\d{2}\1\d{2}\1\d{2}$" # Do not delete 'r'.
import re
print(str(bool(re.search(regex, raw_input()))).lower())
# Task
#
# You have a test string S.
# Your task is to write a regex which will match S, with following condition(s):
#
# S consists of 8 digits.
# S may have "-" separator such that string S gets divided in 4 parts, with each part having exactly two digits. (Eg. 12-34-56-78)
# Valid
#
# 12345678
# 12-34-56-87
# Invalid
#
# 1-234-56-78
# 12-45-7810 | [ 0, 1, 2, 3, 4 ] |
338 | 9951588f581c5045154a77535b36d230d586d8a5 | from OpenSSL import SSL, crypto
from twisted.internet import ssl, reactor
from twisted.internet.protocol import Factory, Protocol
import os
from time import time
class Echo(Protocol):
def dataReceived(self, data):
print "Data received: " + data
# define cases
options = {
"generate": self.generateCertificate,
"sign": self.signCertificate
}
tmp = data.split(';')
method = tmp.pop(0)
print "method is " + method
#TODO: catch unknown cases
# delegate case to method
result = options[method](tmp)
self.transport.write(result)
def generateCertificate(self, userDataList):
# generate a key-pair with RSA and 2048 bits
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
# create a new certificate of x509 structure
x509 = crypto.X509()
# X509Name type
subject = self.setSubject(x509.get_subject(), userDataList)
#x509.set_subject(subject)
# list of (name, value) tuples
subComponents = subject.get_components()
for (name, value) in subComponents:
print name + " is " + value
# cert is valid immediately
x509.gmtime_adj_notBefore(0)
# cert gets invalid after 10 years
x509.gmtime_adj_notAfter(10*365*24*60*60)
#TODO: load our CA root cert(PKCS12 type) and set subject as issuer
# set issuer (CA) data
x509.set_issuer(x509.get_subject())
print "Issuer set - ACTUALLY SELF-SIGNED MODE!!!"
# set user public key
x509.set_pubkey(pkey)
#TODO: which algorithm to use? (replace with sha512)
#TODO: replace key with CA private key
# sign the certificate
x509.sign(pkey, 'sha256')
print "Certificate signed - ACTUALLY SELF-SIGNED MODE!!!"
# create a new PKCS12 object
pkcs12 = crypto.PKCS12()
# set the new user certificate
pkcs12.set_certificate(x509)
# insert user private key
pkcs12.set_privatekey(pkey)
# create a dump of PKCS12 and return
return pkcs12.export()
def setSubject(self, subject, data):
#subjectVariables = {
# "C": subject.C,
# "ST": subject.ST,
# "L": subject.L,
# "O": subject.O,
# "OU": subject.OU,
# "CN": subject.CN
#}
for d in data:
s = d.split('=')
variable = s[0]
value = s[1]
print "Setting variable " + variable + " to " + value + " on subject"
#subjectVariables[variable] = value
if variable == "C":
subject.C = value
elif variable == "ST":
subject.ST = value
elif variable == "L":
subject.L = value
elif variable == "O":
subject.O = value
elif variable == "OU":
subject.OU = value
elif variable == "CN":
subject.CN = value
return subject
def signCertificate(self, certData):
x509 = crypto.X509()
pkcs12 = crypto.load_pkcs12(certData)
req = pkcs12.get_certificate()
x509.set_subject(req.get_subject())
x509.set_pubkey(req.get_pubkey())
#issuer aus Datei setzen
# cert is valid immediately
x509.gmtime_adj_notBefore(0)
# cert gets invalid after 10 years
x509.gmtime_adj_notAfter(10*365*24*60*60)
x509.sign(pkey, 'sha256')
pkcs12.set_certificate(x509)
return pkcs12.export()
def verifyCallback(connection, x509, errnum, errdepth, ok):
if not ok:
print 'invalid cert from subject:', x509.get_subject()
return False
else:
print "Certs are fine", x509.get_subject()
return True
def getTimestamp():
return str(int(round(time() * 1000)))
def addTimestamp(millis, name):
print millis + '_' + name
if __name__ == '__main__':
factory = Factory()
factory.protocol = Echo
os.system("echo 'Server started...'")
myContextFactory = ssl.DefaultOpenSSLContextFactory(
'keys/ca-key.pem', 'keys/ca-root.pem'
)
ctx = myContextFactory.getContext()
# SSL.VERIFY_PEER: Verifizierung des verwendeten SSL-Certs vorraussetzen (default=true)
# VERIFY_FAIL_IF_NO_PEER_CERT: Vorgang wird abgebrochen, wenn die Verbindung ohne Zertifikat
# verwendet wird (setzt obigen Parameer vorraus!)
ctx.set_verify(
SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
verifyCallback
)
# Since we have self-signed certs we have to explicitly
# tell the server to trust them.
ctx.load_verify_locations("keys/ca-root.pem")
reactor.listenSSL(8000, factory, myContextFactory)
reactor.run()
| null | null | null | null | [ 0 ] |
339 | 302accfd5001a27c7bbe6081856d43dbec704168 | <mask token>
| <mask token>
@dp.message_handler(commands='upload', user_id=ADMINS, state='*')
async def upload_profile(command_msg: Message, state: FSMContext):
profile_msg = command_msg.reply_to_message
admin = command_msg.from_user
param = command_msg.get_args()
if not profile_msg:
await command_msg.answer('Чтобы загрузить анкету сделай на неё REPLY')
return
elif param != 'g' and param != 'b':
await command_msg.answer(
'Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>'
)
return
other_bot = profile_msg.forward_from
if not other_bot or other_bot.id != 1234060895:
await profile_msg.reply(
'Загружать анкеты можно только из нашего БотаX :)')
return
elif not profile_msg.photo and not profile_msg.video or not profile_msg.caption:
await profile_msg.reply(
'Загружать нужно именно анкету, а не части анкеты')
return
profile_data = text.get_parse_data(profile_msg.caption)
if profile_msg.photo:
media_id = profile_msg.photo[-1].file_id
with_video = False
else:
media_id = profile_msg.video.file_id
with_video = True
profile_data.update(id=random.randint(1, 100000), username='f',
media_id=media_id, with_video=with_video, sex=1 if param == 'g' else 2)
await db.add_user(**profile_data)
await profile_msg.reply('Пользователь {}-{} успешно добавлен ✅'.format(
profile_data['user_nick'], profile_data['id']))
logging.info(
f"Admin @{admin.username}-{admin.id} successfully added fake {profile_data['user_nick']}-{profile_data['id']} "
)
@dp.message_handler(commands='get_msg_info', user_id=ADMINS, state='*')
async def get_msg_info(command_msg: Message, state: FSMContext):
msg = command_msg.reply_to_message
await command_msg.delete()
if not msg:
await command_msg.answer('Нужно делать реплай на сообщение.')
return
state = await state.get_state()
await msg.reply(
f"""Эхо в состоянии <code>{state}</code>.
Содержание сообщения:
<code>{msg}</code>
content_type = {msg.content_type}
entities={msg.entities}"""
)
@dp.message_handler(commands='ban_user', user_id=ADMINS, state='*')
async def ban_user(command_msg: Message, state: FSMContext):
ban_user_id = command_msg.get_args()
admin = command_msg.from_user
await command_msg.delete()
if not ban_user_id or not ban_user_id.isdecimal():
await command_msg.answer(f'Формат команды: /ban_user user_id')
return
ban_user_id = int(ban_user_id)
is_banned = await db.ban_user(ban_user_id)
if not is_banned:
await command_msg.answer(
f'Пользователя с таким <user_id> не существует')
return
await redis_commands.ban_user(ban_user_id)
await command_msg.answer('Пользователь({}) успешно забанен 😎'.format(
ban_user_id))
logging.info(f'Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}')
@dp.message_handler(commands='unban_user', user_id=ADMINS, state='*')
async def unban_user(command_msg: Message, state: FSMContext):
unban_user_id = command_msg.get_args()
admin = command_msg.from_user
await command_msg.delete()
if not unban_user_id or not unban_user_id.isdecimal():
await command_msg.answer(f'Формат команды: /unban_user user_id')
return
unban_user_id = int(unban_user_id)
is_unbanned = await db.unban_user(unban_user_id)
if not is_unbanned:
await command_msg.answer(
f'Пользователя с таким <user_id> не существует')
return
await redis_commands.unban_user(unban_user_id)
await command_msg.answer('Пользователь({}) успешно разбанен 👻'.format(
unban_user_id))
logging.info(
f'Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}')
@dp.message_handler(commands='clean_old_likes', user_id=ADMINS, state='*')
async def clean_old_likes(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
await command_msg.delete()
count = await db.clean_old_likes(interval=24)
await command_msg.answer(
'Было успешно удалено {} старых лайков(за {} hours)'.format(count, 24))
logging.info(
f'Admin @{admin.username}-{admin.id} delete old likes(count={count})')
@dp.message_handler(commands='say_to_all_now_go', user_id=ADMINS, state='*')
async def say_to_all(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
msg = command_msg.reply_to_message
await command_msg.delete()
if not msg:
await command_msg.answer(
'Чтобы воспользоваться этой командой сделай REPLY')
return
active_user_ids = await db.get_all_users(active=True)
delete_bot_count = 0
for user_id in active_user_ids:
try:
await dp.bot.copy_message(chat_id=user_id, from_chat_id=
command_msg.chat.id, message_id=msg.message_id)
await asyncio.sleep(0.05)
except BotBlocked as exc:
await db.update_user(user_id, active=False)
await redis_commands.clear_user(user_id)
await redis_commands.clear_search_ids(user_id)
delete_bot_count += 1
await msg.reply(
'Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})'
.format(len(active_user_ids) - delete_bot_count, delete_bot_count))
logging.info(
f'Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})'
)
@dp.message_handler(commands='show_state_statistic', user_id=ADMINS, state='*')
async def show_state_statistic(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
statistic = dict()
await command_msg.delete()
states_list = await storage.get_states_list()
for states_item in states_list:
chat_id, user_id = states_item
state_text = await storage.get_state(chat=chat_id, user=user_id,
default='Deactivate bot')
try:
statistic[state_text] += 1
except KeyError:
statistic.update({state_text: 1})
out_text = '<b>Статичктика по пользователям:</b>\n\n'
for state_text, count_users in statistic.items():
out_text += (
f'В состоянии {state_text} — {count_users} пользователей\n\n')
await command_msg.answer(out_text)
logging.info(f'For Admin @{admin.username}-{admin.id} show state statistic'
)
@rate_limit(3)
@dp.message_handler(commands='show_info', user_id=ADMINS, state='*')
async def show_info(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
await command_msg.delete()
await cur_bot_info(for_chat_id=command_msg.chat.id)
logging.info(f'For admin @{admin.username}-{admin.id} SHOW INFO(command)')
@dp.callback_query_handler(active_menu_callback.filter(), chat_id=
ADMIN_CHAT_ID, state='*')
async def change_active(call: CallbackQuery, state: FSMContext,
callback_data: dict):
active = not bool(int(callback_data['active']))
user_id = int(callback_data['user_id'])
admin = call.from_user
profile_msg = call.message
if active:
await db.unban_user(user_id)
await redis_commands.unban_user(user_id)
else:
await db.ban_user(user_id)
await redis_commands.ban_user(user_id)
await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(
user_id=user_id, active=active))
await call.answer()
logging.info(
f'Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}'
)
| import asyncio
import logging
import random
from aiogram.dispatcher import FSMContext
from aiogram.types import ContentTypes, Message, CallbackQuery
from aiogram.utils.exceptions import BotBlocked
import keyboards
from data.config import ADMINS, ADMIN_CHAT_ID
from keyboards.inline.activate_menu import active_menu_callback
from loader import dp, db, storage
from utils import text
from utils.db_api import redis_commands
from utils.jobs import cur_bot_info
from utils.misc import rate_limit
@dp.message_handler(commands='upload', user_id=ADMINS, state='*')
async def upload_profile(command_msg: Message, state: FSMContext):
profile_msg = command_msg.reply_to_message
admin = command_msg.from_user
param = command_msg.get_args()
if not profile_msg:
await command_msg.answer('Чтобы загрузить анкету сделай на неё REPLY')
return
elif param != 'g' and param != 'b':
await command_msg.answer(
'Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>'
)
return
other_bot = profile_msg.forward_from
if not other_bot or other_bot.id != 1234060895:
await profile_msg.reply(
'Загружать анкеты можно только из нашего БотаX :)')
return
elif not profile_msg.photo and not profile_msg.video or not profile_msg.caption:
await profile_msg.reply(
'Загружать нужно именно анкету, а не части анкеты')
return
profile_data = text.get_parse_data(profile_msg.caption)
if profile_msg.photo:
media_id = profile_msg.photo[-1].file_id
with_video = False
else:
media_id = profile_msg.video.file_id
with_video = True
profile_data.update(id=random.randint(1, 100000), username='f',
media_id=media_id, with_video=with_video, sex=1 if param == 'g' else 2)
await db.add_user(**profile_data)
await profile_msg.reply('Пользователь {}-{} успешно добавлен ✅'.format(
profile_data['user_nick'], profile_data['id']))
logging.info(
f"Admin @{admin.username}-{admin.id} successfully added fake {profile_data['user_nick']}-{profile_data['id']} "
)
@dp.message_handler(commands='get_msg_info', user_id=ADMINS, state='*')
async def get_msg_info(command_msg: Message, state: FSMContext):
msg = command_msg.reply_to_message
await command_msg.delete()
if not msg:
await command_msg.answer('Нужно делать реплай на сообщение.')
return
state = await state.get_state()
await msg.reply(
f"""Эхо в состоянии <code>{state}</code>.
Содержание сообщения:
<code>{msg}</code>
content_type = {msg.content_type}
entities={msg.entities}"""
)
@dp.message_handler(commands='ban_user', user_id=ADMINS, state='*')
async def ban_user(command_msg: Message, state: FSMContext):
ban_user_id = command_msg.get_args()
admin = command_msg.from_user
await command_msg.delete()
if not ban_user_id or not ban_user_id.isdecimal():
await command_msg.answer(f'Формат команды: /ban_user user_id')
return
ban_user_id = int(ban_user_id)
is_banned = await db.ban_user(ban_user_id)
if not is_banned:
await command_msg.answer(
f'Пользователя с таким <user_id> не существует')
return
await redis_commands.ban_user(ban_user_id)
await command_msg.answer('Пользователь({}) успешно забанен 😎'.format(
ban_user_id))
logging.info(f'Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}')
@dp.message_handler(commands='unban_user', user_id=ADMINS, state='*')
async def unban_user(command_msg: Message, state: FSMContext):
unban_user_id = command_msg.get_args()
admin = command_msg.from_user
await command_msg.delete()
if not unban_user_id or not unban_user_id.isdecimal():
await command_msg.answer(f'Формат команды: /unban_user user_id')
return
unban_user_id = int(unban_user_id)
is_unbanned = await db.unban_user(unban_user_id)
if not is_unbanned:
await command_msg.answer(
f'Пользователя с таким <user_id> не существует')
return
await redis_commands.unban_user(unban_user_id)
await command_msg.answer('Пользователь({}) успешно разбанен 👻'.format(
unban_user_id))
logging.info(
f'Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}')
@dp.message_handler(commands='clean_old_likes', user_id=ADMINS, state='*')
async def clean_old_likes(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
await command_msg.delete()
count = await db.clean_old_likes(interval=24)
await command_msg.answer(
'Было успешно удалено {} старых лайков(за {} hours)'.format(count, 24))
logging.info(
f'Admin @{admin.username}-{admin.id} delete old likes(count={count})')
@dp.message_handler(commands='say_to_all_now_go', user_id=ADMINS, state='*')
async def say_to_all(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
msg = command_msg.reply_to_message
await command_msg.delete()
if not msg:
await command_msg.answer(
'Чтобы воспользоваться этой командой сделай REPLY')
return
active_user_ids = await db.get_all_users(active=True)
delete_bot_count = 0
for user_id in active_user_ids:
try:
await dp.bot.copy_message(chat_id=user_id, from_chat_id=
command_msg.chat.id, message_id=msg.message_id)
await asyncio.sleep(0.05)
except BotBlocked as exc:
await db.update_user(user_id, active=False)
await redis_commands.clear_user(user_id)
await redis_commands.clear_search_ids(user_id)
delete_bot_count += 1
await msg.reply(
'Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})'
.format(len(active_user_ids) - delete_bot_count, delete_bot_count))
logging.info(
f'Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})'
)
@dp.message_handler(commands='show_state_statistic', user_id=ADMINS, state='*')
async def show_state_statistic(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
statistic = dict()
await command_msg.delete()
states_list = await storage.get_states_list()
for states_item in states_list:
chat_id, user_id = states_item
state_text = await storage.get_state(chat=chat_id, user=user_id,
default='Deactivate bot')
try:
statistic[state_text] += 1
except KeyError:
statistic.update({state_text: 1})
    out_text = '<b>Статистика по пользователям:</b>\n\n'
for state_text, count_users in statistic.items():
out_text += (
f'В состоянии {state_text} — {count_users} пользователей\n\n')
await command_msg.answer(out_text)
logging.info(f'For Admin @{admin.username}-{admin.id} show state statistic'
)
@rate_limit(3)
@dp.message_handler(commands='show_info', user_id=ADMINS, state='*')
async def show_info(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
await command_msg.delete()
await cur_bot_info(for_chat_id=command_msg.chat.id)
logging.info(f'For admin @{admin.username}-{admin.id} SHOW INFO(command)')
@dp.callback_query_handler(active_menu_callback.filter(), chat_id=
ADMIN_CHAT_ID, state='*')
async def change_active(call: CallbackQuery, state: FSMContext,
callback_data: dict):
active = not bool(int(callback_data['active']))
user_id = int(callback_data['user_id'])
admin = call.from_user
profile_msg = call.message
if active:
await db.unban_user(user_id)
await redis_commands.unban_user(user_id)
else:
await db.ban_user(user_id)
await redis_commands.ban_user(user_id)
await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(
user_id=user_id, active=active))
await call.answer()
logging.info(
f'Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}'
)
| import asyncio
import logging
import random
from aiogram.dispatcher import FSMContext
from aiogram.types import ContentTypes, Message, CallbackQuery
from aiogram.utils.exceptions import BotBlocked
import keyboards
from data.config import ADMINS, ADMIN_CHAT_ID
from keyboards.inline.activate_menu import active_menu_callback
from loader import dp, db, storage
from utils import text
from utils.db_api import redis_commands
from utils.jobs import cur_bot_info
from utils.misc import rate_limit
@dp.message_handler(commands="upload", user_id=ADMINS, state="*")
async def upload_profile(command_msg: Message, state: FSMContext):
profile_msg = command_msg.reply_to_message
admin = command_msg.from_user
param = command_msg.get_args()
if not profile_msg:
await command_msg.answer("Чтобы загрузить анкету сделай на неё REPLY")
return
elif param != "g" and param != "b":
await command_msg.answer("Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>")
return
other_bot = profile_msg.forward_from
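    # only profiles forwarded from the known source bot (id 1234060895) are accepted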
if not other_bot or other_bot.id != 1234060895:
await profile_msg.reply("Загружать анкеты можно только из нашего БотаX :)")
return
elif (not profile_msg.photo and not profile_msg.video) or not profile_msg.caption:
await profile_msg.reply("Загружать нужно именно анкету, а не части анкеты")
return
profile_data = text.get_parse_data(profile_msg.caption)
if profile_msg.photo:
media_id = profile_msg.photo[-1].file_id
with_video = False
else:
media_id = profile_msg.video.file_id
with_video = True
profile_data.update(
id=random.randint(1, 100000),
username="f",
media_id=media_id,
with_video=with_video,
sex=1 if param == "g" else 2
)
await db.add_user(**profile_data)
await profile_msg.reply("Пользователь {}-{} успешно добавлен ✅"
"".format(profile_data["user_nick"], profile_data["id"]))
logging.info(f"Admin @{admin.username}-{admin.id} successfully "
f"added fake {profile_data['user_nick']}-{profile_data['id']} ")
@dp.message_handler(commands="get_msg_info", user_id=ADMINS, state="*")
async def get_msg_info(command_msg: Message, state: FSMContext):
msg = command_msg.reply_to_message
await command_msg.delete()
if not msg:
await command_msg.answer("Нужно делать реплай на сообщение.")
return
state = await state.get_state()
await msg.reply(f"Эхо в состоянии <code>{state}</code>.\n"
f"\nСодержание сообщения:\n"
f"\n<code>{msg}</code>\n"
f"\ncontent_type = {msg.content_type}\n"
f"\nentities={msg.entities}")
@dp.message_handler(commands="ban_user", user_id=ADMINS, state="*")
async def ban_user(command_msg: Message, state: FSMContext):
ban_user_id = command_msg.get_args()
admin = command_msg.from_user
await command_msg.delete()
if not ban_user_id or not ban_user_id.isdecimal():
await command_msg.answer(f"Формат команды: /ban_user user_id")
return
ban_user_id = int(ban_user_id)
is_banned = await db.ban_user(ban_user_id)
if not is_banned:
await command_msg.answer(f"Пользователя с таким <user_id> не существует")
return
await redis_commands.ban_user(ban_user_id)
await command_msg.answer("Пользователь({}) успешно забанен 😎".format(ban_user_id))
logging.info(f"Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}")
@dp.message_handler(commands="unban_user", user_id=ADMINS, state="*")
async def unban_user(command_msg: Message, state: FSMContext):
unban_user_id = command_msg.get_args()
admin = command_msg.from_user
await command_msg.delete()
if not unban_user_id or not unban_user_id.isdecimal():
await command_msg.answer(f"Формат команды: /unban_user user_id")
return
unban_user_id = int(unban_user_id)
is_unbanned = await db.unban_user(unban_user_id)
if not is_unbanned:
await command_msg.answer(f"Пользователя с таким <user_id> не существует")
return
await redis_commands.unban_user(unban_user_id)
await command_msg.answer("Пользователь({}) успешно разбанен 👻".format(unban_user_id))
logging.info(f"Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}")
@dp.message_handler(commands="clean_old_likes", user_id=ADMINS, state="*")
async def clean_old_likes(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
await command_msg.delete()
count = await db.clean_old_likes(interval=24)
await command_msg.answer("Было успешно удалено {} старых лайков(за {} hours)".format(count, 24))
logging.info(f"Admin @{admin.username}-{admin.id} delete old likes(count={count})")
@dp.message_handler(commands="say_to_all_now_go", user_id=ADMINS, state="*")
async def say_to_all(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
msg = command_msg.reply_to_message
await command_msg.delete()
if not msg:
await command_msg.answer("Чтобы воспользоваться этой командой сделай REPLY")
return
active_user_ids = await db.get_all_users(active=True) # [375766905, 997319478]
delete_bot_count = 0
for user_id in active_user_ids:
try:
await dp.bot.copy_message(
chat_id=user_id,
from_chat_id=command_msg.chat.id,
message_id=msg.message_id
)
await asyncio.sleep(0.05)
except BotBlocked as exc:
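            # the user has blocked the bot: mark them inactive and clear their cached data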
await db.update_user(user_id, active=False)
await redis_commands.clear_user(user_id)
await redis_commands.clear_search_ids(user_id)
delete_bot_count += 1
await msg.reply("Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})"
"".format(len(active_user_ids) - delete_bot_count, delete_bot_count))
logging.info(f"Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})")
@dp.message_handler(commands="show_state_statistic", user_id=ADMINS, state="*")
async def show_state_statistic(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
statistic = dict()
await command_msg.delete()
states_list = await storage.get_states_list()
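    # tally how many users sit in each FSM state; users without a stored state count as "Deactivate bot"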
for states_item in states_list:
chat_id, user_id = states_item
state_text = await storage.get_state(chat=chat_id, user=user_id, default="Deactivate bot")
try:
statistic[state_text] += 1
except KeyError:
statistic.update({state_text: 1})
out_text = "<b>Статичктика по пользователям:</b>\n\n"
for state_text, count_users in statistic.items():
out_text += f"В состоянии {state_text} — {count_users} пользователей\n\n"
await command_msg.answer(out_text)
logging.info(f"For Admin @{admin.username}-{admin.id} show state statistic")
@rate_limit(3)
@dp.message_handler(commands="show_info", user_id=ADMINS, state="*")
async def show_info(command_msg: Message, state: FSMContext):
admin = command_msg.from_user
await command_msg.delete()
await cur_bot_info(for_chat_id=command_msg.chat.id)
logging.info(f"For admin @{admin.username}-{admin.id} SHOW INFO(command)")
@dp.callback_query_handler(active_menu_callback.filter(), chat_id=ADMIN_CHAT_ID, state="*")
async def change_active(call: CallbackQuery, state: FSMContext, callback_data: dict):
active = not bool(int(callback_data["active"]))
user_id = int(callback_data["user_id"])
admin = call.from_user
profile_msg = call.message
if active:
await db.unban_user(user_id)
await redis_commands.unban_user(user_id)
else:
await db.ban_user(user_id)
await redis_commands.ban_user(user_id)
await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(user_id=user_id, active=active))
await call.answer()
logging.info(f"Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}")
| null | [
0,
1,
2,
3
] |
340 | de925b8f6bd31bfdfd1f04628659847b0761899d | <mask token>
class Solution:
<mask token>
<mask token>
| <mask token>
class Solution:
def letterCombinations(self, digits: str) ->List[str]:
d = {(2): 'abc', (3): 'def', (4): 'ghi', (5): 'jkl', (6): 'mno', (7
): 'pqrs', (8): 'tuv', (9): 'wxyz'}
def merge(body, digits):
if len(digits) == 0:
ans.append(body)
return
else:
for c in d[int(digits[0])]:
merge(body + c, digits[1:])
ans = []
merge('', digits)
return ans if len(ans) != 1 else []
<mask token>
| <mask token>
class Solution:
def letterCombinations(self, digits: str) ->List[str]:
d = {(2): 'abc', (3): 'def', (4): 'ghi', (5): 'jkl', (6): 'mno', (7
): 'pqrs', (8): 'tuv', (9): 'wxyz'}
def merge(body, digits):
if len(digits) == 0:
ans.append(body)
return
else:
for c in d[int(digits[0])]:
merge(body + c, digits[1:])
ans = []
merge('', digits)
return ans if len(ans) != 1 else []
print(Solution().letterCombinations(''))
| <mask token>
from typing import List
class Solution:
def letterCombinations(self, digits: str) ->List[str]:
d = {(2): 'abc', (3): 'def', (4): 'ghi', (5): 'jkl', (6): 'mno', (7
): 'pqrs', (8): 'tuv', (9): 'wxyz'}
def merge(body, digits):
if len(digits) == 0:
ans.append(body)
return
else:
for c in d[int(digits[0])]:
merge(body + c, digits[1:])
ans = []
merge('', digits)
return ans if len(ans) != 1 else []
print(Solution().letterCombinations(''))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@author: Allen(Zifeng) An
@course:
@contact: [email protected]
@file: 17. Letter Combinations of a Phone Number.py
@time: 2020/2/2 21:18
'''
from typing import List
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
d={2:'abc',
3:'def',
4:'ghi',
5:'jkl',
6:'mno',
7:'pqrs',
8:'tuv',
9:'wxyz'
}
def merge(body,digits):
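            # depth-first build: once no digits remain, the accumulated body is a complete combination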
if len(digits)==0:
ans.append(body)
return
else:
for c in d[int(digits[0])]:
merge(body+c,digits[1:])
# arr=[]
ans=[]
# for digit in digits:
# arr.append(list(d[int(digit)]))
# print(arr)
merge('',digits)
return ans if len(ans)!=1 else []
print(Solution().letterCombinations(''))
#
# class Solution:
# def letterCombinations(self, digits: str) -> List[str]:
# d={2:'abc',
# 3:'def',
# 4:'ghi',
# 5:'jkl',
# 6:'mno',
# 7:'pqrs',
# 8:'tuv',
# 9:'wxyz'
# }
#
# cmb=[''] if len(digits)!=0 else []
#
# for digit in digits:
# cmb=[p+q for p in cmb for q in d[int(digit)]]
#
# return cmb
# print(Solution().letterCombinations('23'))
| [
1,
2,
3,
4,
5
] |
341 | 6531833a4fe57c15c0668cee9015c7d43491427a | /home/openerp/production/extra-addons/productivity_analysis/report/productivity_analysis.py | null | null | null | null | [
0
] |
342 | b3d9013ab6facb8dd9361e2a0715a8ed0cdfeaba | <mask token>
| <mask token>
def get_version():
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', [
'cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description
)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
<mask token>
| <mask token>
def get_version():
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', [
'cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description
)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
setup(name='cmakelint', version=get_version(), packages=['cmakelint'],
scripts=['bin/cmakelint'], entry_points={'console_scripts': [
'cmakelint = cmakelint.main:main']}, install_requires=[''], author=
'Richard Quirk', author_email='[email protected]', url=
'https://github.com/richq/cmake-lint', download_url=
'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],
classifiers=['Topic :: Software Development',
'Programming Language :: Other', 'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License'], description=
'Static code checker for CMake files', long_description=
'cmakelint parses CMake files and reports style issues.', license=
'Apache 2.0')
| from setuptools import setup
import imp
def get_version():
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', [
'cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description
)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
setup(name='cmakelint', version=get_version(), packages=['cmakelint'],
scripts=['bin/cmakelint'], entry_points={'console_scripts': [
'cmakelint = cmakelint.main:main']}, install_requires=[''], author=
'Richard Quirk', author_email='[email protected]', url=
'https://github.com/richq/cmake-lint', download_url=
'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],
classifiers=['Topic :: Software Development',
'Programming Language :: Other', 'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License'], description=
'Static code checker for CMake files', long_description=
'cmakelint parses CMake files and reports style issues.', license=
'Apache 2.0')
| from setuptools import setup
import imp
def get_version():
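    # load cmakelint/__version__.py via the legacy imp module and return its VERSION constant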
ver_file = None
try:
ver_file, pathname, description = imp.find_module('__version__', ['cmakelint'])
vermod = imp.load_module('__version__', ver_file, pathname, description)
version = vermod.VERSION
return version
finally:
if ver_file is not None:
ver_file.close()
setup(name='cmakelint',
version=get_version(),
packages=['cmakelint'],
scripts=['bin/cmakelint'],
entry_points={
'console_scripts': [
'cmakelint = cmakelint.main:main'
]
},
install_requires=[''],
author="Richard Quirk",
author_email="[email protected]",
url="https://github.com/richq/cmake-lint",
download_url="https://github.com/richq/cmake-lint",
keywords=["cmake", "lint"],
classifiers=[
"Topic :: Software Development",
"Programming Language :: Other",
"Programming Language :: Python",
"License :: OSI Approved :: Apache Software License"],
description="Static code checker for CMake files",
long_description="""cmakelint parses CMake files and reports style issues.""",
license="Apache 2.0")
| [
0,
1,
2,
3,
4
] |
343 | 359db73de2c2bb5967723dfb78f98fb84b337b9d | <mask token>
class Arrow(Sprite):
def __init__(self, color, screen, character, click_position):
Sprite.__init__(self)
self.color = color
self.screen = screen
self.character = character
self.click_position = click_position
width, height = 2, 2
self.image = Surface([width, height])
self.image.fill(color)
draw.rect(self.image, color, [0, 0, width, height])
self.rect = self.image.get_rect()
self.released = False
self.release_speed = None
self.release_position = None
self.angle: Degree = None
self.t = 0
self.is_moving_right = False
self.stopped = False
<mask token>
def get_center(self):
return self.rect.center
def update(self):
if self.rect.y >= FLOOR_Y:
self.stopped = True
if not self.released:
return
speed = self.release_speed
t = self.t
g = 0.98
vx = cos(self.angle) * speed
vy = -sin(self.angle) * speed + 0.5 * g * t * t
self.rect.x += vx
self.rect.y += vy
self.t += 0.1
<mask token>
def get_catheuses(self):
click_x, click_y = self.click_position
release_x, release_y = self.release_position
adjacent = abs(release_x - click_x)
opposite = abs(release_y - click_y)
return adjacent, opposite
def get_release_angle(self, adjacent: float, opposite: float) ->Degree:
if adjacent == 0:
return 90
return degrees(atan(opposite / adjacent))
<mask token>
def get_line_length(self, adjacent, opposite) ->float:
return sqrt(adjacent ** 2 + opposite ** 2)
def get_release_speed(self, length) ->float:
if length > 100:
return ARROW_MAX_SPEED
return ARROW_MAX_SPEED * (length / 100)
def hit(self):
self.stopped = True
| <mask token>
class Arrow(Sprite):
def __init__(self, color, screen, character, click_position):
Sprite.__init__(self)
self.color = color
self.screen = screen
self.character = character
self.click_position = click_position
width, height = 2, 2
self.image = Surface([width, height])
self.image.fill(color)
draw.rect(self.image, color, [0, 0, width, height])
self.rect = self.image.get_rect()
self.released = False
self.release_speed = None
self.release_position = None
self.angle: Degree = None
self.t = 0
self.is_moving_right = False
self.stopped = False
<mask token>
def get_center(self):
return self.rect.center
def update(self):
if self.rect.y >= FLOOR_Y:
self.stopped = True
if not self.released:
return
speed = self.release_speed
t = self.t
g = 0.98
vx = cos(self.angle) * speed
vy = -sin(self.angle) * speed + 0.5 * g * t * t
self.rect.x += vx
self.rect.y += vy
self.t += 0.1
def release(self, release_position):
if self.released:
return
self.released = True
click_x, click_y = self.click_position
self.release_position = release_x, release_y = release_position
if release_x < click_x:
self.is_moving_right = True
adjacent, opposite = self.get_catheuses()
angle: Degree = self.get_release_angle(adjacent, opposite)
aiming_down = release_y < click_y
self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down,
self.is_moving_right)
length = self.get_line_length(adjacent, opposite)
self.release_speed = self.get_release_speed(length)
def get_catheuses(self):
click_x, click_y = self.click_position
release_x, release_y = self.release_position
adjacent = abs(release_x - click_x)
opposite = abs(release_y - click_y)
return adjacent, opposite
def get_release_angle(self, adjacent: float, opposite: float) ->Degree:
if adjacent == 0:
return 90
return degrees(atan(opposite / adjacent))
<mask token>
def get_line_length(self, adjacent, opposite) ->float:
return sqrt(adjacent ** 2 + opposite ** 2)
def get_release_speed(self, length) ->float:
if length > 100:
return ARROW_MAX_SPEED
return ARROW_MAX_SPEED * (length / 100)
def hit(self):
self.stopped = True
| <mask token>
class Arrow(Sprite):
def __init__(self, color, screen, character, click_position):
Sprite.__init__(self)
self.color = color
self.screen = screen
self.character = character
self.click_position = click_position
width, height = 2, 2
self.image = Surface([width, height])
self.image.fill(color)
draw.rect(self.image, color, [0, 0, width, height])
self.rect = self.image.get_rect()
self.released = False
self.release_speed = None
self.release_position = None
self.angle: Degree = None
self.t = 0
self.is_moving_right = False
self.stopped = False
<mask token>
def get_center(self):
return self.rect.center
def update(self):
if self.rect.y >= FLOOR_Y:
self.stopped = True
if not self.released:
return
speed = self.release_speed
t = self.t
g = 0.98
vx = cos(self.angle) * speed
vy = -sin(self.angle) * speed + 0.5 * g * t * t
self.rect.x += vx
self.rect.y += vy
self.t += 0.1
def release(self, release_position):
if self.released:
return
self.released = True
click_x, click_y = self.click_position
self.release_position = release_x, release_y = release_position
if release_x < click_x:
self.is_moving_right = True
adjacent, opposite = self.get_catheuses()
angle: Degree = self.get_release_angle(adjacent, opposite)
aiming_down = release_y < click_y
self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down,
self.is_moving_right)
length = self.get_line_length(adjacent, opposite)
self.release_speed = self.get_release_speed(length)
def get_catheuses(self):
click_x, click_y = self.click_position
release_x, release_y = self.release_position
adjacent = abs(release_x - click_x)
opposite = abs(release_y - click_y)
return adjacent, opposite
def get_release_angle(self, adjacent: float, opposite: float) ->Degree:
if adjacent == 0:
return 90
return degrees(atan(opposite / adjacent))
def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:
bool, is_moving_right: bool) ->Radian:
adjustment: Degree = 0
if is_moving_right:
if aiming_down:
adjustment = 180 + 2 * (90 - angle)
elif aiming_down:
adjustment = 180
else:
adjustment = 2 * (90 - angle)
return radians(angle + adjustment)
def get_line_length(self, adjacent, opposite) ->float:
return sqrt(adjacent ** 2 + opposite ** 2)
def get_release_speed(self, length) ->float:
if length > 100:
return ARROW_MAX_SPEED
return ARROW_MAX_SPEED * (length / 100)
def hit(self):
self.stopped = True
| from math import degrees, sqrt, sin, cos, atan, radians
from pygame import Surface, draw
from pygame.sprite import Sprite
from constants import ARROW_MAX_SPEED, FLOOR_Y
from game_types import Radian, Degree
class Arrow(Sprite):
def __init__(self, color, screen, character, click_position):
Sprite.__init__(self)
self.color = color
self.screen = screen
self.character = character
self.click_position = click_position
width, height = 2, 2
self.image = Surface([width, height])
self.image.fill(color)
draw.rect(self.image, color, [0, 0, width, height])
self.rect = self.image.get_rect()
self.released = False
self.release_speed = None
self.release_position = None
self.angle: Degree = None
self.t = 0
self.is_moving_right = False
self.stopped = False
def set_center(self):
if self.released:
return
x, y = self.character.get_center()
self.rect.x = x
self.rect.y = y
def get_center(self):
return self.rect.center
def update(self):
if self.rect.y >= FLOOR_Y:
self.stopped = True
if not self.released:
return
speed = self.release_speed
t = self.t
g = 0.98
vx = cos(self.angle) * speed
vy = -sin(self.angle) * speed + 0.5 * g * t * t
self.rect.x += vx
self.rect.y += vy
self.t += 0.1
def release(self, release_position):
if self.released:
return
self.released = True
click_x, click_y = self.click_position
self.release_position = release_x, release_y = release_position
if release_x < click_x:
self.is_moving_right = True
adjacent, opposite = self.get_catheuses()
angle: Degree = self.get_release_angle(adjacent, opposite)
aiming_down = release_y < click_y
self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down,
self.is_moving_right)
length = self.get_line_length(adjacent, opposite)
self.release_speed = self.get_release_speed(length)
def get_catheuses(self):
click_x, click_y = self.click_position
release_x, release_y = self.release_position
adjacent = abs(release_x - click_x)
opposite = abs(release_y - click_y)
return adjacent, opposite
def get_release_angle(self, adjacent: float, opposite: float) ->Degree:
if adjacent == 0:
return 90
return degrees(atan(opposite / adjacent))
def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:
bool, is_moving_right: bool) ->Radian:
adjustment: Degree = 0
if is_moving_right:
if aiming_down:
adjustment = 180 + 2 * (90 - angle)
elif aiming_down:
adjustment = 180
else:
adjustment = 2 * (90 - angle)
return radians(angle + adjustment)
def get_line_length(self, adjacent, opposite) ->float:
return sqrt(adjacent ** 2 + opposite ** 2)
def get_release_speed(self, length) ->float:
if length > 100:
return ARROW_MAX_SPEED
return ARROW_MAX_SPEED * (length / 100)
def hit(self):
self.stopped = True
| from math import degrees, sqrt, sin, cos, atan, radians
from pygame import Surface, draw
from pygame.sprite import Sprite
from constants import ARROW_MAX_SPEED, FLOOR_Y
from game_types import Radian, Degree
class Arrow(Sprite):
def __init__(self, color, screen, character, click_position):
Sprite.__init__(self)
self.color = color
self.screen = screen
self.character = character
self.click_position = click_position
width, height = 2, 2
self.image = Surface([width, height])
self.image.fill(color)
draw.rect(self.image, color, [0, 0, width, height])
self.rect = self.image.get_rect()
self.released = False
self.release_speed = None
self.release_position = None
self.angle: Degree = None
self.t = 0
self.is_moving_right = False
self.stopped = False
def set_center(self):
if self.released:
return
x, y = self.character.get_center()
self.rect.x = x
self.rect.y = y
def get_center(self):
return self.rect.center
def update(self):
if self.rect.y >= FLOOR_Y:
self.stopped = True
if not self.released:
return
speed = self.release_speed
t = self.t
g = 0.980
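        # simple projectile motion: horizontal velocity stays constant, gravity adds to the vertical drop over time t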
vx = cos(self.angle) * speed
vy = -sin(self.angle) * speed + 0.5 * g * t * t
# print("angle {} -sin(angle) {:0<25} 0.5 * g * t * t {:0<25}".format(self.angle, -sin(self.angle), 0.5*g*t*t))
self.rect.x += vx
self.rect.y += vy
self.t += 0.1
def release(self, release_position):
if self.released:
return
self.released = True
click_x, click_y = self.click_position
self.release_position = release_x, release_y = release_position
if release_x < click_x:
self.is_moving_right = True
adjacent, opposite = self.get_catheuses()
angle: Degree = self.get_release_angle(adjacent, opposite)
aiming_down = release_y < click_y
self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down, self.is_moving_right)
length = self.get_line_length(adjacent, opposite)
self.release_speed = self.get_release_speed(length)
def get_catheuses(self):
click_x, click_y = self.click_position
release_x, release_y = self.release_position
adjacent = abs(release_x - click_x)
opposite = abs(release_y - click_y)
return adjacent, opposite
def get_release_angle(self, adjacent: float, opposite: float) -> Degree:
if adjacent == 0:
return 90
return degrees(atan(opposite / adjacent))
def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down: bool, is_moving_right: bool) -> Radian:
adjustment: Degree = 0
if is_moving_right:
if aiming_down:
adjustment = 180 + 2 * (90 - angle)
else:
if aiming_down:
adjustment = 180
else:
adjustment = 2 * (90 - angle)
return radians(angle + adjustment)
def get_line_length(self, adjacent, opposite) -> float:
return sqrt(adjacent ** 2 + opposite ** 2)
def get_release_speed(self, length) -> float:
if length > 100:
return ARROW_MAX_SPEED
# Ex.: 80 / 100 = 80% * ARROW_MAX_SPEED
return ARROW_MAX_SPEED * (length / 100)
def hit(self):
self.stopped = True
| [
9,
10,
11,
13,
14
] |
344 | 97cc29e0d54e5d5e05dff16c92ecc4046363185f | <mask token>
| <mask token>
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
| <mask token>
urlpatterns = [path('user', include('user.urls')), path('order', include(
'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(
'product', include('product.urls')), path('', include('home.urls')),
path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),
path('ckeditor', include('ckeditor_uploader.urls')), path('about/',
views.about, name='about'), path('contact/', views.contact, name=
'about'), path('search/', views.search, name='search'), path(
'search_auto', views.search_auto, name='search_auto'), path(
'category/<int:id>/<slug:slug>/', views.category_products, name=
'category_products'), path('product/<int:id>/<slug:slug>/', views.
product_detail, name='product_detail'), path('lic/', views.lic, name=
'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',
views.post_detail, name='post_detail'), path('lic/<int:id>/', views.
lic_detail, name='lic_detail')]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
| from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from home import views
from order import views as OV
urlpatterns = [path('user', include('user.urls')), path('order', include(
'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(
'product', include('product.urls')), path('', include('home.urls')),
path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),
path('ckeditor', include('ckeditor_uploader.urls')), path('about/',
views.about, name='about'), path('contact/', views.contact, name=
'about'), path('search/', views.search, name='search'), path(
'search_auto', views.search_auto, name='search_auto'), path(
'category/<int:id>/<slug:slug>/', views.category_products, name=
'category_products'), path('product/<int:id>/<slug:slug>/', views.
product_detail, name='product_detail'), path('lic/', views.lic, name=
'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',
views.post_detail, name='post_detail'), path('lic/<int:id>/', views.
lic_detail, name='lic_detail')]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
| from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from home import views
from order import views as OV
urlpatterns = [
path('user', include('user.urls')),
path('order', include('order.urls')),
path('shopcart/', OV.shopcart, name='shopcart'),
path('product',include('product.urls')),
    path('',include('home.urls')),# '' - this is home
path('faq/', views.faq, name='faq'),
path('admin/', admin.site.urls),
path('ckeditor', include('ckeditor_uploader.urls')),
path('about/', views.about, name='about'),
path('contact/', views.contact, name='about'),
path('search/', views.search,name='search'),
path('search_auto', views.search_auto, name='search_auto'),
path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'),
path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'),
path('lic/',views.lic,name='lic'),
path('post/',views.post,name='post'),
path('post/<int:id>/',views.post_detail, name='post_detail'),
path('lic/<int:id>/',views.lic_detail, name='lic_detail'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
0,
1,
2,
3,
4
] |
345 | 35b24ffa14f8b3c2040d5becc8a35721e86d8b3d | <mask token>
| <mask token>
print('-' * 40)
print('LOJA SUPER BARATÃO')
print('-' * 40)
while True:
produto = str(input('Nome do Produto: '))
preco = float(input('Preço: '))
cont += 1
total += preco
if preco > 1000:
totmil += 1
if cont == 1 or preco < menor:
barato = produto
menor = preco
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print('O total da compra foi R${:.2f}'.format(total))
print('Temos {} produtos custando mais de R$1000,00'.format(totmil))
print('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))
| total = totmil = cont = menor = 0
barato = ' '
print('-' * 40)
print('LOJA SUPER BARATÃO')
print('-' * 40)
while True:
produto = str(input('Nome do Produto: '))
preco = float(input('Preço: '))
cont += 1
total += preco
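    # count items above R$1000 and remember the cheapest product seen so far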
if preco > 1000:
totmil += 1
if cont == 1 or preco < menor:
barato = produto
menor = preco
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print('O total da compra foi R${:.2f}'.format(total))
print('Temos {} produtos custando mais de R$1000,00'.format(totmil))
print('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))
| null | null | [
0,
1,
2
] |
346 | 709271b98fc2b40c763522c54488be36968f02d8 | <mask token>
| <mask token>
try:
from .prod_local import *
except:
pass
<mask token>
| <mask token>
try:
from .prod_local import *
except:
pass
ELEMENTARY_ALLOW_REPO_CREATION = True
| from base import *
try:
from .prod_local import *
except:
pass
ELEMENTARY_ALLOW_REPO_CREATION = True
| from base import *
try:
from .prod_local import *
except:
pass
# we currently don't have an interface that allows an administrator
# to create a repository for another user. Until we have added this
# capability, allow users to create repos.
ELEMENTARY_ALLOW_REPO_CREATION = True
| [
0,
1,
2,
3,
4
] |
347 | edcccc673994a8de281a683b747de52d2115f89e | <mask token>
def test_components_to_conf_and_back():
for Component in comp_list:
x = Component()
y = x.to_conf().make()
assert x == y
<mask token>
class TestEfConf:
def test_conf_export(self):
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = conf.export_to_string()
c1 = EfConf.from_string(s)
assert c1 == conf
def test_conf_repr(self):
from numpy import array
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = repr(conf)
c1 = eval(s)
assert c1 == conf
| <mask token>
def test_components_to_conf_and_back():
for Component in comp_list:
x = Component()
y = x.to_conf().make()
assert x == y
def test_conf_to_configparser_and_back():
confs = [C().to_conf() for C in comp_list]
parser = ConfigParser()
for c in confs:
c.add_section_to_parser(parser)
conf2 = ConfigSection.parser_to_confs(parser)
assert conf2 == confs
<mask token>
class TestEfConf:
def test_conf_export(self):
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = conf.export_to_string()
c1 = EfConf.from_string(s)
assert c1 == conf
def test_conf_repr(self):
from numpy import array
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = repr(conf)
c1 = eval(s)
assert c1 == conf
| <mask token>
def test_components_to_conf_and_back():
for Component in comp_list:
x = Component()
y = x.to_conf().make()
assert x == y
def test_conf_to_configparser_and_back():
confs = [C().to_conf() for C in comp_list]
parser = ConfigParser()
for c in confs:
c.add_section_to_parser(parser)
conf2 = ConfigSection.parser_to_confs(parser)
assert conf2 == confs
def test_minimal_example():
parser = ConfigParser()
parser.read('examples/minimal_working_example/minimal_conf.conf')
components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)
]
assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,
15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),
BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),
ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_',
'.h5')]
class TestEfConf:
def test_conf_export(self):
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = conf.export_to_string()
c1 = EfConf.from_string(s)
assert c1 == conf
def test_conf_repr(self):
from numpy import array
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = repr(conf)
c1 = eval(s)
assert c1 == conf
| from configparser import ConfigParser
from ef.config.components import *
from ef.config.efconf import EfConf
from ef.config.section import ConfigSection
comp_list = [BoundaryConditions, InnerRegion, OutputFile,
ParticleInteractionModel, ParticleSource, SpatialMesh, TimeGrid,
ExternalFieldUniform]
def test_components_to_conf_and_back():
for Component in comp_list:
x = Component()
y = x.to_conf().make()
assert x == y
def test_conf_to_configparser_and_back():
confs = [C().to_conf() for C in comp_list]
parser = ConfigParser()
for c in confs:
c.add_section_to_parser(parser)
conf2 = ConfigSection.parser_to_confs(parser)
assert conf2 == confs
def test_minimal_example():
parser = ConfigParser()
parser.read('examples/minimal_working_example/minimal_conf.conf')
components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)
]
assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,
15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),
BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),
ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_',
'.h5')]
class TestEfConf:
def test_conf_export(self):
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = conf.export_to_string()
c1 = EfConf.from_string(s)
assert c1 == conf
def test_conf_repr(self):
from numpy import array
conf = EfConf(sources=[ParticleSource()], inner_regions=(
InnerRegion(),))
s = repr(conf)
c1 = eval(s)
assert c1 == conf
| from configparser import ConfigParser
from ef.config.components import *
from ef.config.efconf import EfConf
from ef.config.section import ConfigSection
comp_list = [BoundaryConditions, InnerRegion, OutputFile, ParticleInteractionModel,
ParticleSource, SpatialMesh, TimeGrid, ExternalFieldUniform]
def test_components_to_conf_and_back():
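    # each component should survive a round trip through its config representation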
for Component in comp_list:
x = Component()
y = x.to_conf().make()
assert x == y
def test_conf_to_configparser_and_back():
confs = [C().to_conf() for C in comp_list]
parser = ConfigParser()
for c in confs:
c.add_section_to_parser(parser)
conf2 = ConfigSection.parser_to_confs(parser)
assert conf2 == confs
def test_minimal_example():
parser = ConfigParser()
parser.read("examples/minimal_working_example/minimal_conf.conf")
components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)]
assert components == [TimeGrid(1e-7, 1e-9, 1e-9), SpatialMesh((5, 5, 15), (0.5, 0.5, 1.5)),
ParticleInteractionModel('noninteracting'), BoundaryConditions(0),
ExternalFieldUniform('mgn_uni', 'magnetic'),
ExternalFieldUniform('el_uni', 'electric'),
OutputFile('example_', '.h5')]
class TestEfConf:
def test_conf_export(self):
conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),))
s = conf.export_to_string()
c1 = EfConf.from_string(s)
assert c1 == conf
def test_conf_repr(self):
from numpy import array # for use in eval
conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),))
s = repr(conf)
c1 = eval(s)
assert c1 == conf
| [
4,
5,
6,
8,
9
] |
348 | 38e167630519b73bffea4ff527bc7b7272a49f1a | <mask token>
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
<mask token>
| <mask token>
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
generate_bert('data/semeval15/cz.pas.dev.sent.txt',
'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt',
'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt',
'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)
| <mask token>
CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')
bc = BertClient(ip='127.0.0.1')
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
generate_bert('data/semeval15/cz.pas.dev.sent.txt',
'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt',
'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt',
'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)
| import pickle
import numpy as np
from bert_serving.client import BertClient
from pyhanlp import *
CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')
bc = BertClient(ip='127.0.0.1')
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
generate_bert('data/semeval15/cz.pas.dev.sent.txt',
'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt',
'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt',
'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)
| # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-01-13 15:01
import pickle
import numpy as np
from bert_serving.client import BertClient
from pyhanlp import *
CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')
# bc = BertClient(ip='192.168.1.88') # ip address of the server
bc = BertClient(ip='127.0.0.1') # ip address of the GPU machine
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
# print(result)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
# print(len(valid))
# exit()
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
# print(result)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
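                    # a whole word has been re-assembled from sub-tokens: average their vectors into one embedding per word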
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
# print(len(valid))
# exit()
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-')
.replace('‘', '\'')
.replace('…', '.')
.replace('坜', '壢')
.replace('唛', '麦')
.replace('ㄅㄆㄇㄈ', '呀呀')
.replace('’', '\''))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
# generate_bert('data/SemEval-2016/news.test.sent.txt', 'data/SemEval-2016/news.test.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/news.valid.sent.txt', 'data/SemEval-2016/news.valid.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/news.train.sent.txt', 'data/SemEval-2016/news.train.bert', embed_fun=embed_sum)
#
# generate_bert('data/SemEval-2016/text.test.sent.txt', 'data/SemEval-2016/text.test.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/text.valid.sent.txt', 'data/SemEval-2016/text.valid.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/text.train.sent.txt', 'data/SemEval-2016/text.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.dev.sent.txt', 'data/embedding/bert_base_sum/cz.pas.dev.bert',
embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt', 'data/embedding/bert_base_sum/cz.pas.train.bert',
embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt', 'data/embedding/bert_base_sum/cz.id.pas.bert',
embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/dev.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/test.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/train.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.train.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/dev.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/train.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/dev.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/train.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/dev.sent.txt', 'data/embedding/bert_base_sum/ctb.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/test.sent.txt', 'data/embedding/bert_base_sum/ctb.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/train.sent.txt', 'data/embedding/bert_base_sum/ctb.train.bert',
# embed_fun=embed_sum)
| [
3,
4,
5,
6,
7
] |
349 | 816b1a932208a4525230dd886adf8c67dec3af3e | <mask token>
| <mask token>
@pytest.fixture(scope='session', autouse=True)
def set_up(request):
""" conftest.py set_up - the first to start.... """
print('\nSETUP before all tests')
request.addfinalizer(tear_down)
| <mask token>
def tear_down():
""" conftest.py tear_down - the last to go.... """
print('\nTEARDOWN after all tests')
@pytest.fixture(scope='session', autouse=True)
def set_up(request):
""" conftest.py set_up - the first to start.... """
print('\nSETUP before all tests')
request.addfinalizer(tear_down)
| import pytest
import requests
def tear_down():
""" conftest.py tear_down - the last to go.... """
print('\nTEARDOWN after all tests')
@pytest.fixture(scope='session', autouse=True)
def set_up(request):
""" conftest.py set_up - the first to start.... """
print('\nSETUP before all tests')
request.addfinalizer(tear_down)
| # content of conftest.py
# adapted from http://pytest.org/latest/example/special.html
import pytest
import requests
def tear_down():
''' conftest.py tear_down - the last to go.... '''
print("\nTEARDOWN after all tests")
@pytest.fixture(scope="session", autouse=True)
def set_up(request):
''' conftest.py set_up - the first to start.... '''
print("\nSETUP before all tests")
request.addfinalizer(tear_down)
| [
0,
1,
2,
3,
4
] |
350 | 9096ed4b68d2bef92df7db98589e744ddf3efad0 | <mask token>
| <mask token>
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, '-.k')
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
| <mask token>
mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])
p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])
results = mls.intersection(p)
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, '-.k')
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
| import matplotlib.pyplot as plt
from shapely.geometry import MultiLineString, Polygon
mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])
p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])
results = mls.intersection(p)
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, '-.k')
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
| import matplotlib.pyplot as plt
from shapely.geometry import MultiLineString, Polygon
mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])
p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])
results = mls.intersection(p)
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, "-.k")
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
| [
0,
1,
2,
3,
4
] |
351 | c99f1333c5ca3221e9932d9a9ba1d95a77924f0d | <mask token>
def get_max_sum(arr):
max_sum = -math.inf
for i in range(1, 5):
for j in range(1, 5):
temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][
j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]
max_sum = max(max_sum, temp)
return max_sum
<mask token>
| <mask token>
def get_max_sum(arr):
max_sum = -math.inf
for i in range(1, 5):
for j in range(1, 5):
temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][
j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]
max_sum = max(max_sum, temp)
return max_sum
def main():
sys_in = sys.stdin
sys_out = sys.stdout
arr = []
for _ in range(6):
temp = list(map(int, sys.stdin.readline().split()))
arr.append(temp)
print(get_max_sum(arr))
<mask token>
| <mask token>
def get_max_sum(arr):
max_sum = -math.inf
for i in range(1, 5):
for j in range(1, 5):
temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][
j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]
max_sum = max(max_sum, temp)
return max_sum
def main():
sys_in = sys.stdin
sys_out = sys.stdout
arr = []
for _ in range(6):
temp = list(map(int, sys.stdin.readline().split()))
arr.append(temp)
print(get_max_sum(arr))
if __name__ == '__main__':
main()
| import sys
import math
def get_max_sum(arr):
max_sum = -math.inf
for i in range(1, 5):
for j in range(1, 5):
temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][
j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]
max_sum = max(max_sum, temp)
return max_sum
def main():
sys_in = sys.stdin
sys_out = sys.stdout
arr = []
for _ in range(6):
temp = list(map(int, sys.stdin.readline().split()))
arr.append(temp)
print(get_max_sum(arr))
if __name__ == '__main__':
main()
| null | [
1,
2,
3,
4
] |
352 | 78c9f92349ba834bc64dc84f884638c4316a9ea4 | <mask token>
| INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'
INPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'
puSTARTUP_TTBAR = (
'/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root')
relval = {'step1': {'step': 'GEN-HLT', 'timesize': (100, ['MinBias',
'TTbar']), 'igprof': (50, ['TTbar']), 'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'], 'cmsdriver':
'--eventcontent RAWSIM --conditions auto:mc'}, 'step2': {'step':
'RAW2DIGI-RECO', 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (
200, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'],
'pileupInput': puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS,
INPUT_TTBAR], 'cmsdriver':
'--eventcontent RECOSIM --conditions auto:startup'}, 'GENSIMDIGI': {
'step': 'GEN-SIM,DIGI', 'timesize': (100, ['MinBias',
'SingleElectronE1000', 'SingleMuMinusPt10', 'SinglePiMinusE1000',
'TTbar']), 'igprof': (5, ['TTbar']), 'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'], 'fileInput': '', 'cmsdriver':
'--eventcontent FEVTDEBUG --conditions auto:mc'}, 'HLT': {'step': 'HLT',
'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (500, ['TTbar']),
'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'pileupInput':
puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS, INPUT_TTBAR], 'cmsdriver':
'--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW'
}, 'FASTSIM': {'step': 'GEN-FASTSIM', 'timesize': (8000, ['MinBias',
'TTbar']), 'igprof': (500, ['TTbar']), 'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'], 'cmsdriver':
'--eventcontent RECOSIM --conditions auto:mc'}}
| INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'
INPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'
puSTARTUP_TTBAR = '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root'
relval = {
'step1': { 'step': 'GEN-HLT',
'timesize': (100, ['MinBias','TTbar']),
'igprof': (50, ['TTbar']),
'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'],
#??? 'pileupInput': '',
'cmsdriver': '--eventcontent RAWSIM --conditions auto:mc' },
'step2': { 'step': 'RAW2DIGI-RECO',
'timesize': (8000, ['MinBias','TTbar']),
'igprof': (200, ['TTbar']),
'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'],
'pileupInput': puSTARTUP_TTBAR,
'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],
'cmsdriver': '--eventcontent RECOSIM --conditions auto:startup' },
'GENSIMDIGI': { 'step': 'GEN-SIM,DIGI',
'timesize': (100, ['MinBias','SingleElectronE1000','SingleMuMinusPt10','SinglePiMinusE1000','TTbar']),
'igprof': (5, ['TTbar']),
'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'],
#??? 'pileupInput': '',
'fileInput': '',
'cmsdriver': '--eventcontent FEVTDEBUG --conditions auto:mc' },
'HLT': { 'step': 'HLT',
'timesize': (8000, ['MinBias','TTbar']),
'igprof': (500, ['TTbar']),
'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'],
'pileupInput': puSTARTUP_TTBAR,
'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],
'cmsdriver': '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW' },
'FASTSIM': { 'step': 'GEN-FASTSIM',
'timesize': (8000, ['MinBias','TTbar']),
'igprof': (500, ['TTbar']),
'memcheck': (5, ['TTbar']),
'pileup': ['TTbar'],
'cmsdriver': '--eventcontent RECOSIM --conditions auto:mc' }
}
| null | null | [
0,
1,
2
] |
353 | c6b261a09b2982e17704f847586bbf38d27cb786 | <mask token>
| from ._sinAction import *
from ._sinActionFeedback import *
from ._sinActionGoal import *
from ._sinActionResult import *
from ._sinFeedback import *
from ._sinGoal import *
from ._sinResult import *
| null | null | null | [
0,
1
] |
354 | f4306f80330850415b74d729384f360489644e39 | <mask token>
class TestObs(unittest.TestCase):
<mask token>
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def test_development_stage_ontology_term_id_mouse(self):
"""
If organism_ontolology_term_id is "NCBITaxon:10090" for Mus musculus,
this MUST be the most accurate MmusDv term
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:10090'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown."
])
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def test_sex_ontology_term_id(self):
"""
sex_ontology_term_id categorical with str categories.
This MUST be a child of PATOPATO:0001894 for phenotypic sex or "unknown" if unavailable
"""
self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed."
])
<mask token>
class TestVar(unittest.TestCase):
"""
Fail cases in adata.var and adata.raw.var
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_var_and_raw_var_same_index(self):
"""
var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.
"""
var = Validator.getattr_anndata(self.validator.adata, 'var')
new_index = list(var.index)
tmp = new_index[0]
new_index[0] = new_index[1]
new_index[1] = tmp
var.set_index(pd.Index(new_index), inplace=True)
tmp = var.iloc[0, :].copy()
var.iloc[0, :] = var.iloc[1, :].copy()
var.iloc[1, :] = tmp
self.validator.validate_adata()
print('FOO', self.validator.errors)
self.assertEqual(self.validator.errors, [
"ERROR: Index of 'raw.var' is not identical to index of 'var'."])
def test_check_unique_var(self):
"""
var.index MUST contain unique ENSEMBL gene identifiers for features.
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[1] = new_index[0]
component.set_index(pd.Index(new_index), inplace=True)
component.iloc[1, :] = component.iloc[0, :]
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Column 'index' in dataframe '{component_name}' is not unique."
])
def test_column_presence(self):
"""
var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
feature_is_filtered must not be in raw.var, and it's only checked in var
"""
columns = ['feature_is_filtered', 'feature_biotype']
for component_name in ['var', 'raw.var']:
for column in columns:
if (column == 'feature_is_filtered' and component_name ==
'raw.var'):
continue
with self.subTest(component_name=component_name, column=column
):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
component = Validator.getattr_anndata(self.validator.
adata, component_name)
component.drop(column, axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Dataframe '{component_name}' is missing column '{column}'."
])
def test_feature_is_filtered(self):
"""
feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)
but is present in the raw matrix (raw.X). The value for all cells of the given feature in the
final matrix MUST be 0.
Otherwise, this MUST be False.
"""
self.validator.adata.var['feature_is_filtered'][0] = True
for i in range(self.validator.adata.X.shape[0]):
self.validator.adata.X[i, 0] = 0
self.validator.adata.X[0, 0] = 1
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0."
])
def test_columns_not_in_raw_var(self):
"""
Curators MUST annotate the following column only in the var dataframe.
This column MUST NOT be present in raw.var:
feature_is_filtered
"""
self.validator.adata.raw = self.validator.adata
self.validator.adata.uns['X_normalization'] = 'CPM'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."
])
def test_feature_id_wrong_format(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ID with an incorrect format "ENSEBML_NOGENE"
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSEBML_NOGENE'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID."
])
def test_feature_id_non_existent_ensembl(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ENSEMBL ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSG000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'."
])
def test_feature_id_non_existent_ercc(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ERCC ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ERCC-000000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'spike-in'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'."
])
class TestUns(unittest.TestCase):
"""
Fail cases in adata.uns
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_required_fields_schema_version(self):
"""
Curators MUST annotate `schema_version` and values in uns (schema_version)
"""
del self.validator.adata.uns['schema_version']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed."
])
def test_required_fields_title(self):
"""
Curators MUST annotate `schema_version` and values in uns (title)
"""
del self.validator.adata.uns['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'title' in 'uns' is not present."])
def test_required_fields_X_normalization(self):
"""
Curators MUST annotate `schema_version` and values in uns (X_normalization)
"""
del self.validator.adata.uns['X_normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_normalization' in 'uns' is not present."])
def test_leading_trailing_double_spaces_in_strings(self):
"""
The following sequences MUST NOT appear in str types documented in the schema:
Leading control or space separators - ” This is an example”
Trailing control or space separators - “This is an example ”
Multiple (internal) control or space separators - "This is an example"
"""
self.validator.adata.uns['title'] = ' There is a leading space'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces."
])
self.validator.adata.uns['title'] = 'There is a trailing space '
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces."
])
self.validator.adata.uns['title'] = 'There are double spaces'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces."
])
def test_schema_version(self):
"""
Schema_version, This MUST be "2.0.0".
"""
self.validator.adata.uns['schema_version'] = '1.0.0'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed."
])
def test_title(self):
"""
Title MUST be a string
"""
self.validator.adata.uns['title'] = ['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string."
])
def test_X_normalization_is_str(self):
"""
X_normalization str.
"""
self.validator.adata.uns['X_normalization'] = ['normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string."
])
def test_X_normalization_not_raw(self):
"""
X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.
If data in X are raw, this SHOULD be "none".
FAIL CASE for when X_normalization was set to "none" but X may not be raw data
"""
del self.validator.adata.raw
self.validator.adata.uns['X_normalization'] = 'none'
self.validator.validate_adata()
print('FOO', self.validator.warnings)
self.assertEqual(self.validator.warnings, [
"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)"
])
def test_batch_condition_is_list(self):
"""
batch_condition list[str]
"""
self.validator.adata.uns['batch_condition'] = numpy.array(self.
validator.adata.uns['batch_condition'])
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['batch_condition'
] = 'cell_type_ontology_term_id'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array."
])
def test_batch_condition_is_column_from_obs(self):
"""
batch_condition list[str]. str values MUST refer to cell metadata keys in obs.
"""
self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'."
])
def test_default_embedding_is_str(self):
"""
Default_embedding str.
"""
self.validator.adata.uns['default_embedding'] = ['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string."
])
def test_default_embedding_is_key_from_obsm(self):
"""
Default_embedding str. The value MUST match a key to an embedding in obsm
"""
self.validator.adata.uns['default_embedding'] = 'X_other'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'."
])
def test_X_approximate_distribution_is_str(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal".
Note that `normal` is tested in the happy path test case using `good_uns`.
"""
self.validator.adata.uns['X_approximate_distribution'] = 'count'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['X_approximate_distribution'] = ['count']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string."
])
def test_X_approximate_distribution_is_valid(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal"
"""
self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal']."
])
class TestObsm(unittest.TestCase):
"""
Fail cases for adata.obsm
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_obsm_values_ara_numpy(self):
"""
values in obsm MUST be a numpy.ndarray
"""
self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.
adata.obsm['X_umap'], index=self.validator.adata.obs_names)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')."
])
def test_obsm_values_at_least_one_X(self):
"""
At least one key for the embedding MUST be prefixed with "X_"
"""
self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']
self.validator.adata.uns['default_embedding'] = 'umap'
del self.validator.adata.obsm['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix."
])
def test_obsm_shape(self):
"""
Curators MUST annotate one or more two-dimensional (m >= 2) embeddings
"""
self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.
adata.obsm['X_umap'], 0, 1)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'."
])
class TestAddingLabels(unittest.TestCase):
"""
Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually
created dataframes (positive control) against the ones produced by the validator
"""
@classmethod
def setUpClass(cls):
cls.adata_with_labels = examples.adata_with_labels
validator = Validator()
validator.adata = examples.adata.copy()
validator.validate_adata()
cls.label_writer = AnnDataLabelAppender(validator)
cls.label_writer._add_labels()
def test_var_added_labels(self):
"""
When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism
to the var dataframe. Curators MUST NOT annotate the following columns:
- feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene
name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the
ERCC Spike-In identifier appended with " spike-in control".
- feature_reference. This MUST be the reference organism for a feature:
Homo sapiens "NCBITaxon:9606"
Mus musculus "NCBITaxon:10090"
SARS-CoV-2 "NCBITaxon:2697049"
ERCC Spike-Ins "NCBITaxon:32630"
"""
for column in ['feature_name', 'feature_reference']:
expected_column = self.adata_with_labels.var[column]
obtained_column = self.label_writer.adata.var[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
def test_obs_added_labels(self):
"""
When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding ontology term to the obs dataframe.
Curators MUST NOT annotate the following columns.
- assay. categorical with str categories. This MUST be the human-readable name assigned to the value
of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to
assay_ontology_term_id MUST be appended to assay.
- cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value
of cell_type_ontology_term_id.
- development_stage. categorical with str categories. This MUST be "unknown" if set in
development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to
the value of development_stage_ontology_term_id.
- disease. categorical with str categories. This MUST be the human-readable name assigned to
the value of disease_ontology_term_id.
- ethnicity. categorical with str categories. This MUST be "na" or "unknown" if
set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable
name assigned to the value of ethnicity_ontology_term_id.
- organism. categorical with str categories. This MUST be the human-readable name assigned
to the value of organism_ontology_term_id.
- sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id;
otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.
- tissue. categorical with str categories. This MUST be the human-readable name assigned to the
value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST
be appended if present in tissue_ontology_term_id.
"""
for column in ['assay', 'cell_type', 'development_stage', 'disease',
'ethnicity', 'organism', 'sex', 'tissue']:
expected_column = self.adata_with_labels.obs[column]
obtained_column = self.label_writer.adata.obs[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
| <mask token>
class TestObs(unittest.TestCase):
<mask token>
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def test_development_stage_ontology_term_id_human(self):
"""
development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be "unknown".
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be the most accurate HsapDv term.
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:9606'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown."
])
def test_development_stage_ontology_term_id_mouse(self):
"""
If organism_ontolology_term_id is "NCBITaxon:10090" for Mus musculus,
this MUST be the most accurate MmusDv term
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:10090'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown."
])
<mask token>
<mask token>
<mask token>
def test_organism_ontology_term_id(self):
"""
organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'unknown'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed."
])
<mask token>
<mask token>
<mask token>
def test_sex_ontology_term_id(self):
"""
sex_ontology_term_id categorical with str categories.
This MUST be a child of PATOPATO:0001894 for phenotypic sex or "unknown" if unavailable
"""
self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed."
])
<mask token>
class TestVar(unittest.TestCase):
"""
Fail cases in adata.var and adata.raw.var
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_var_and_raw_var_same_index(self):
"""
var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.
"""
var = Validator.getattr_anndata(self.validator.adata, 'var')
new_index = list(var.index)
tmp = new_index[0]
new_index[0] = new_index[1]
new_index[1] = tmp
var.set_index(pd.Index(new_index), inplace=True)
tmp = var.iloc[0, :].copy()
var.iloc[0, :] = var.iloc[1, :].copy()
var.iloc[1, :] = tmp
self.validator.validate_adata()
print('FOO', self.validator.errors)
self.assertEqual(self.validator.errors, [
"ERROR: Index of 'raw.var' is not identical to index of 'var'."])
def test_check_unique_var(self):
"""
var.index MUST contain unique ENSEMBL gene identifiers for features.
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[1] = new_index[0]
component.set_index(pd.Index(new_index), inplace=True)
component.iloc[1, :] = component.iloc[0, :]
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Column 'index' in dataframe '{component_name}' is not unique."
])
def test_column_presence(self):
"""
var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
feature_is_filtered must not be in raw.var, and it's only checked in var
"""
columns = ['feature_is_filtered', 'feature_biotype']
for component_name in ['var', 'raw.var']:
for column in columns:
if (column == 'feature_is_filtered' and component_name ==
'raw.var'):
continue
with self.subTest(component_name=component_name, column=column
):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
component = Validator.getattr_anndata(self.validator.
adata, component_name)
component.drop(column, axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Dataframe '{component_name}' is missing column '{column}'."
])
def test_feature_is_filtered(self):
"""
feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)
but is present in the raw matrix (raw.X). The value for all cells of the given feature in the
final matrix MUST be 0.
Otherwise, this MUST be False.
"""
self.validator.adata.var['feature_is_filtered'][0] = True
for i in range(self.validator.adata.X.shape[0]):
self.validator.adata.X[i, 0] = 0
self.validator.adata.X[0, 0] = 1
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0."
])
def test_columns_not_in_raw_var(self):
"""
Curators MUST annotate the following column only in the var dataframe.
This column MUST NOT be present in raw.var:
feature_is_filtered
"""
self.validator.adata.raw = self.validator.adata
self.validator.adata.uns['X_normalization'] = 'CPM'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."
])
def test_feature_id_wrong_format(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ID with an incorrect format "ENSEBML_NOGENE"
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSEBML_NOGENE'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID."
])
def test_feature_id_non_existent_ensembl(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ENSEMBL ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSG000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'."
])
def test_feature_id_non_existent_ercc(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ERCC ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ERCC-000000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'spike-in'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'."
])
class TestUns(unittest.TestCase):
"""
Fail cases in adata.uns
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_required_fields_schema_version(self):
"""
Curators MUST annotate `schema_version` and values in uns (schema_version)
"""
del self.validator.adata.uns['schema_version']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed."
])
def test_required_fields_title(self):
"""
Curators MUST annotate `schema_version` and values in uns (title)
"""
del self.validator.adata.uns['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'title' in 'uns' is not present."])
def test_required_fields_X_normalization(self):
"""
Curators MUST annotate `schema_version` and values in uns (X_normalization)
"""
del self.validator.adata.uns['X_normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_normalization' in 'uns' is not present."])
def test_leading_trailing_double_spaces_in_strings(self):
"""
The following sequences MUST NOT appear in str types documented in the schema:
Leading control or space separators - ” This is an example”
Trailing control or space separators - “This is an example ”
Multiple (internal) control or space separators - "This is an example"
"""
self.validator.adata.uns['title'] = ' There is a leading space'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces."
])
self.validator.adata.uns['title'] = 'There is a trailing space '
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces."
])
self.validator.adata.uns['title'] = 'There are double spaces'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces."
])
def test_schema_version(self):
"""
Schema_version, This MUST be "2.0.0".
"""
self.validator.adata.uns['schema_version'] = '1.0.0'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed."
])
def test_title(self):
"""
Title MUST be a string
"""
self.validator.adata.uns['title'] = ['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string."
])
def test_X_normalization_is_str(self):
"""
X_normalization str.
"""
self.validator.adata.uns['X_normalization'] = ['normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string."
])
def test_X_normalization_not_raw(self):
"""
X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.
If data in X are raw, this SHOULD be "none".
FAIL CASE for when X_normalization was set to "none" but X may not be raw data
"""
del self.validator.adata.raw
self.validator.adata.uns['X_normalization'] = 'none'
self.validator.validate_adata()
print('FOO', self.validator.warnings)
self.assertEqual(self.validator.warnings, [
"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)"
])
def test_batch_condition_is_list(self):
"""
batch_condition list[str]
"""
self.validator.adata.uns['batch_condition'] = numpy.array(self.
validator.adata.uns['batch_condition'])
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['batch_condition'
] = 'cell_type_ontology_term_id'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array."
])
def test_batch_condition_is_column_from_obs(self):
"""
batch_condition list[str]. str values MUST refer to cell metadata keys in obs.
"""
self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'."
])
def test_default_embedding_is_str(self):
"""
Default_embedding str.
"""
self.validator.adata.uns['default_embedding'] = ['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string."
])
def test_default_embedding_is_key_from_obsm(self):
"""
Default_embedding str. The value MUST match a key to an embedding in obsm
"""
self.validator.adata.uns['default_embedding'] = 'X_other'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'."
])
def test_X_approximate_distribution_is_str(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal".
Note that `normal` is tested in the happy path test case using `good_uns`.
"""
self.validator.adata.uns['X_approximate_distribution'] = 'count'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['X_approximate_distribution'] = ['count']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string."
])
def test_X_approximate_distribution_is_valid(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal"
"""
self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal']."
])
class TestObsm(unittest.TestCase):
"""
Fail cases for adata.obsm
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_obsm_values_ara_numpy(self):
"""
values in obsm MUST be a numpy.ndarray
"""
self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.
adata.obsm['X_umap'], index=self.validator.adata.obs_names)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')."
])
def test_obsm_values_at_least_one_X(self):
"""
At least one key for the embedding MUST be prefixed with "X_"
"""
self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']
self.validator.adata.uns['default_embedding'] = 'umap'
del self.validator.adata.obsm['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix."
])
def test_obsm_shape(self):
"""
Curators MUST annotate one or more two-dimensional (m >= 2) embeddings
"""
self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.
adata.obsm['X_umap'], 0, 1)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'."
])
class TestAddingLabels(unittest.TestCase):
"""
Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually
created dataframes (positive control) against the ones produced by the validator
"""
@classmethod
def setUpClass(cls):
cls.adata_with_labels = examples.adata_with_labels
validator = Validator()
validator.adata = examples.adata.copy()
validator.validate_adata()
cls.label_writer = AnnDataLabelAppender(validator)
cls.label_writer._add_labels()
def test_var_added_labels(self):
"""
When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism
to the var dataframe. Curators MUST NOT annotate the following columns:
- feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene
name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the
ERCC Spike-In identifier appended with " spike-in control".
- feature_reference. This MUST be the reference organism for a feature:
Homo sapiens "NCBITaxon:9606"
Mus musculus "NCBITaxon:10090"
SARS-CoV-2 "NCBITaxon:2697049"
ERCC Spike-Ins "NCBITaxon:32630"
"""
for column in ['feature_name', 'feature_reference']:
expected_column = self.adata_with_labels.var[column]
obtained_column = self.label_writer.adata.var[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
def test_obs_added_labels(self):
"""
When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding ontology term to the obs dataframe.
Curators MUST NOT annotate the following columns.
- assay. categorical with str categories. This MUST be the human-readable name assigned to the value
of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to
assay_ontology_term_id MUST be appended to assay.
- cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value
of cell_type_ontology_term_id.
- development_stage. categorical with str categories. This MUST be "unknown" if set in
development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to
the value of development_stage_ontology_term_id.
- disease. categorical with str categories. This MUST be the human-readable name assigned to
the value of disease_ontology_term_id.
- ethnicity. categorical with str categories. This MUST be "na" or "unknown" if
set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable
name assigned to the value of ethnicity_ontology_term_id.
- organism. categorical with str categories. This MUST be the human-readable name assigned
to the value of organism_ontology_term_id.
- sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id;
otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.
- tissue. categorical with str categories. This MUST be the human-readable name assigned to the
value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST
be appended if present in tissue_ontology_term_id.
"""
for column in ['assay', 'cell_type', 'development_stage', 'disease',
'ethnicity', 'organism', 'sex', 'tissue']:
expected_column = self.adata_with_labels.obs[column]
obtained_column = self.label_writer.adata.obs[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
| <mask token>
class TestObs(unittest.TestCase):
<mask token>
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_column_presence(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
"""
columns = ['assay_ontology_term_id',
'development_stage_ontology_term_id',
'disease_ontology_term_id', 'ethnicity_ontology_term_id',
'is_primary_data', 'sex_ontology_term_id',
'tissue_ontology_term_id']
for column in columns:
with self.subTest(column=column):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
self.validator.adata.obs.drop(column, axis=1, inplace=True)
self.validator.adata.uns.pop('batch_condition')
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Dataframe 'obs' is missing column '{column}'."])
<mask token>
<mask token>
def test_assay_ontology_term_id(self):
"""
assay_ontology_term_id categorical with str categories.
This MUST be an EFO term and either child of "EFO:0002772" or "EFO:0010183"
If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to
the most accurate term. For example, the sci-plex assay could be curated as "EFO:0010183 (sci-plex)"
"""
self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'."
,
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'."
])
self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'."
])
self.validator.adata.obs['assay_ontology_term_id'][0
] = 'EFO:0010183 sci-plex'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'."
,
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'."
])
<mask token>
def test_development_stage_ontology_term_id_human(self):
"""
development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be "unknown".
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be the most accurate HsapDv term.
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:9606'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown."
])
def test_development_stage_ontology_term_id_mouse(self):
"""
If organism_ontolology_term_id is "NCBITaxon:10090" for Mus musculus,
this MUST be the most accurate MmusDv term
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:10090'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown."
])
<mask token>
def test_disease_ontology_term_id(self):
"""
disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or
PATO:0000461 for normal or healthy.
"""
self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids."
])
self.validator.errors = []
self.validator.adata.obs['disease_ontology_term_id'][0
] = 'PATO:0001894'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids."
])
<mask token>
def test_organism_ontology_term_id(self):
"""
organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'unknown'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed."
])
def test_tissue_ontology_term_id_base(self):
"""
tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue
that this cell was derived from, depending on the type of biological sample:
"""
self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'."
])
<mask token>
def test_tissue_ontology_term_id_organoid(self):
"""
Organoid - MUST be an UBERON term appended with " (organoid)"
"""
self.validator.adata.obs['tissue_ontology_term_id'][0
] = 'CL:0000057 (ORGANOID)'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'."
])
def test_sex_ontology_term_id(self):
"""
sex_ontology_term_id categorical with str categories.
This MUST be a child of PATOPATO:0001894 for phenotypic sex or "unknown" if unavailable
"""
self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed."
])
<mask token>
class TestVar(unittest.TestCase):
"""
Fail cases in adata.var and adata.raw.var
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_var_and_raw_var_same_index(self):
"""
var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.
"""
var = Validator.getattr_anndata(self.validator.adata, 'var')
new_index = list(var.index)
tmp = new_index[0]
new_index[0] = new_index[1]
new_index[1] = tmp
var.set_index(pd.Index(new_index), inplace=True)
tmp = var.iloc[0, :].copy()
var.iloc[0, :] = var.iloc[1, :].copy()
var.iloc[1, :] = tmp
self.validator.validate_adata()
print('FOO', self.validator.errors)
self.assertEqual(self.validator.errors, [
"ERROR: Index of 'raw.var' is not identical to index of 'var'."])
def test_check_unique_var(self):
"""
var.index MUST contain unique ENSEMBL gene identifiers for features.
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[1] = new_index[0]
component.set_index(pd.Index(new_index), inplace=True)
component.iloc[1, :] = component.iloc[0, :]
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Column 'index' in dataframe '{component_name}' is not unique."
])
def test_column_presence(self):
"""
var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
feature_is_filtered must not be in raw.var, and it's only checked in var
"""
columns = ['feature_is_filtered', 'feature_biotype']
for component_name in ['var', 'raw.var']:
for column in columns:
if (column == 'feature_is_filtered' and component_name ==
'raw.var'):
continue
with self.subTest(component_name=component_name, column=column
):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
component = Validator.getattr_anndata(self.validator.
adata, component_name)
component.drop(column, axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Dataframe '{component_name}' is missing column '{column}'."
])
def test_feature_is_filtered(self):
"""
feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)
but is present in the raw matrix (raw.X). The value for all cells of the given feature in the
final matrix MUST be 0.
Otherwise, this MUST be False.
"""
self.validator.adata.var['feature_is_filtered'][0] = True
for i in range(self.validator.adata.X.shape[0]):
self.validator.adata.X[i, 0] = 0
self.validator.adata.X[0, 0] = 1
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0."
])
def test_columns_not_in_raw_var(self):
"""
Curators MUST annotate the following column only in the var dataframe.
This column MUST NOT be present in raw.var:
feature_is_filtered
"""
self.validator.adata.raw = self.validator.adata
self.validator.adata.uns['X_normalization'] = 'CPM'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."
])
def test_feature_id_wrong_format(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ID with an incorrect format "ENSEBML_NOGENE"
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSEBML_NOGENE'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID."
])
def test_feature_id_non_existent_ensembl(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ENSEMBL ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSG000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'."
])
def test_feature_id_non_existent_ercc(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ERCC ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ERCC-000000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'spike-in'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'."
])
class TestUns(unittest.TestCase):
"""
Fail cases in adata.uns
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_required_fields_schema_version(self):
"""
Curators MUST annotate `schema_version` and values in uns (schema_version)
"""
del self.validator.adata.uns['schema_version']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed."
])
def test_required_fields_title(self):
"""
Curators MUST annotate `schema_version` and values in uns (title)
"""
del self.validator.adata.uns['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'title' in 'uns' is not present."])
def test_required_fields_X_normalization(self):
"""
Curators MUST annotate `schema_version` and values in uns (X_normalization)
"""
del self.validator.adata.uns['X_normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_normalization' in 'uns' is not present."])
def test_leading_trailing_double_spaces_in_strings(self):
"""
The following sequences MUST NOT appear in str types documented in the schema:
Leading control or space separators - ” This is an example”
Trailing control or space separators - “This is an example ”
Multiple (internal) control or space separators - "This is an example"
"""
self.validator.adata.uns['title'] = ' There is a leading space'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces."
])
self.validator.adata.uns['title'] = 'There is a trailing space '
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces."
])
        self.validator.adata.uns['title'] = 'There are  double spaces'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces."
])
def test_schema_version(self):
"""
        schema_version. This MUST be "2.0.0".
"""
self.validator.adata.uns['schema_version'] = '1.0.0'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed."
])
def test_title(self):
"""
Title MUST be a string
"""
self.validator.adata.uns['title'] = ['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string."
])
def test_X_normalization_is_str(self):
"""
X_normalization str.
"""
self.validator.adata.uns['X_normalization'] = ['normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string."
])
def test_X_normalization_not_raw(self):
"""
X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.
If data in X are raw, this SHOULD be "none".
FAIL CASE for when X_normalization was set to "none" but X may not be raw data
"""
del self.validator.adata.raw
self.validator.adata.uns['X_normalization'] = 'none'
self.validator.validate_adata()
print('FOO', self.validator.warnings)
self.assertEqual(self.validator.warnings, [
"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)"
])
def test_batch_condition_is_list(self):
"""
batch_condition list[str]
"""
self.validator.adata.uns['batch_condition'] = numpy.array(self.
validator.adata.uns['batch_condition'])
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['batch_condition'
] = 'cell_type_ontology_term_id'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array."
])
def test_batch_condition_is_column_from_obs(self):
"""
batch_condition list[str]. str values MUST refer to cell metadata keys in obs.
"""
self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'."
])
def test_default_embedding_is_str(self):
"""
Default_embedding str.
"""
self.validator.adata.uns['default_embedding'] = ['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string."
])
def test_default_embedding_is_key_from_obsm(self):
"""
Default_embedding str. The value MUST match a key to an embedding in obsm
"""
self.validator.adata.uns['default_embedding'] = 'X_other'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'."
])
def test_X_approximate_distribution_is_str(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal".
Note that `normal` is tested in the happy path test case using `good_uns`.
"""
self.validator.adata.uns['X_approximate_distribution'] = 'count'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['X_approximate_distribution'] = ['count']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string."
])
def test_X_approximate_distribution_is_valid(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal"
"""
self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal']."
])
class TestObsm(unittest.TestCase):
"""
Fail cases for adata.obsm
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_obsm_values_ara_numpy(self):
"""
values in obsm MUST be a numpy.ndarray
"""
self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.
adata.obsm['X_umap'], index=self.validator.adata.obs_names)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')."
])
def test_obsm_values_at_least_one_X(self):
"""
At least one key for the embedding MUST be prefixed with "X_"
"""
self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']
self.validator.adata.uns['default_embedding'] = 'umap'
del self.validator.adata.obsm['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix."
])
def test_obsm_shape(self):
"""
Curators MUST annotate one or more two-dimensional (m >= 2) embeddings
"""
self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.
adata.obsm['X_umap'], 0, 1)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'."
])
class TestAddingLabels(unittest.TestCase):
"""
Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually
created dataframes (positive control) against the ones produced by the validator
"""
@classmethod
def setUpClass(cls):
cls.adata_with_labels = examples.adata_with_labels
validator = Validator()
validator.adata = examples.adata.copy()
validator.validate_adata()
cls.label_writer = AnnDataLabelAppender(validator)
cls.label_writer._add_labels()
def test_var_added_labels(self):
"""
When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism
to the var dataframe. Curators MUST NOT annotate the following columns:
- feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene
name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the
ERCC Spike-In identifier appended with " spike-in control".
- feature_reference. This MUST be the reference organism for a feature:
Homo sapiens "NCBITaxon:9606"
Mus musculus "NCBITaxon:10090"
SARS-CoV-2 "NCBITaxon:2697049"
ERCC Spike-Ins "NCBITaxon:32630"
"""
for column in ['feature_name', 'feature_reference']:
expected_column = self.adata_with_labels.var[column]
obtained_column = self.label_writer.adata.var[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
def test_obs_added_labels(self):
"""
When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding ontology term to the obs dataframe.
Curators MUST NOT annotate the following columns.
- assay. categorical with str categories. This MUST be the human-readable name assigned to the value
of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to
assay_ontology_term_id MUST be appended to assay.
- cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value
of cell_type_ontology_term_id.
- development_stage. categorical with str categories. This MUST be "unknown" if set in
development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to
the value of development_stage_ontology_term_id.
- disease. categorical with str categories. This MUST be the human-readable name assigned to
the value of disease_ontology_term_id.
- ethnicity. categorical with str categories. This MUST be "na" or "unknown" if
set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable
name assigned to the value of ethnicity_ontology_term_id.
- organism. categorical with str categories. This MUST be the human-readable name assigned
to the value of organism_ontology_term_id.
- sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id;
otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.
- tissue. categorical with str categories. This MUST be the human-readable name assigned to the
value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST
be appended if present in tissue_ontology_term_id.
"""
for column in ['assay', 'cell_type', 'development_stage', 'disease',
'ethnicity', 'organism', 'sex', 'tissue']:
expected_column = self.adata_with_labels.obs[column]
obtained_column = self.label_writer.adata.obs[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
| <mask token>
class TestObs(unittest.TestCase):
<mask token>
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_column_presence(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
"""
columns = ['assay_ontology_term_id',
'development_stage_ontology_term_id',
'disease_ontology_term_id', 'ethnicity_ontology_term_id',
'is_primary_data', 'sex_ontology_term_id',
'tissue_ontology_term_id']
for column in columns:
with self.subTest(column=column):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
self.validator.adata.obs.drop(column, axis=1, inplace=True)
self.validator.adata.uns.pop('batch_condition')
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Dataframe 'obs' is missing column '{column}'."])
def test_column_presence_organism(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
        A separate check is needed for organism_ontology_term_id because removing from anndata results in multiple
errors given that other columns depend on its presence
"""
self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,
inplace=True)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'."
,
"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs."
,
"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs."
])
def test_obsolete_term_id(self):
"""
Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310
for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by
EFO:0009899 for 10x 3' v2.
https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310
"""
self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'."
,
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'."
])
def test_assay_ontology_term_id(self):
"""
assay_ontology_term_id categorical with str categories.
This MUST be an EFO term and either child of "EFO:0002772" or "EFO:0010183"
If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to
the most accurate term. For example, the sci-plex assay could be curated as "EFO:0010183 (sci-plex)"
"""
self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'."
,
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'."
])
self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'."
])
self.validator.adata.obs['assay_ontology_term_id'][0
] = 'EFO:0010183 sci-plex'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'."
,
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'."
])
def test_cell_type_ontology_term_id(self):
"""
cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.
"""
self.validator.adata.obs['cell_type_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'."
])
def test_development_stage_ontology_term_id_human(self):
"""
development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be "unknown".
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be the most accurate HsapDv term.
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:9606'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown."
])
def test_development_stage_ontology_term_id_mouse(self):
"""
If organism_ontolology_term_id is "NCBITaxon:10090" for Mus musculus,
this MUST be the most accurate MmusDv term
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:10090'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown."
])
def test_development_stage_ontology_term_id_all_species(self):
"""
        For all other organisms, this MUST be a child of UBERON:0000105 and not UBERON:0000071
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:10114'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown."
,
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown."
])
self.validator.errors = []
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:10114'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'UBERON:0000071'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown."
])
def test_disease_ontology_term_id(self):
"""
disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or
PATO:0000461 for normal or healthy.
"""
self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids."
])
self.validator.errors = []
self.validator.adata.obs['disease_ontology_term_id'][0
] = 'PATO:0001894'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids."
])
def test_ethnicity_ontology_term_id(self):
"""
ethnicity_ontology_term_id categorical with str categories.
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be either a HANCESTRO term or "unknown" if unavailable.
Otherwise, for all other organisms this MUST be "na".
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:9606'
self.validator.adata.obs['ethnicity_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'."
])
self.validator.errors = []
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'NCBITaxon:10090'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'MmusDv:0000003'
self.validator.adata.obs['ethnicity_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'."
])
def test_organism_ontology_term_id(self):
"""
organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.
"""
self.validator.adata.obs['organism_ontology_term_id'][0
] = 'EFO:0000001'
self.validator.adata.obs['development_stage_ontology_term_id'][0
] = 'unknown'
self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed."
])
def test_tissue_ontology_term_id_base(self):
"""
tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue
that this cell was derived from, depending on the type of biological sample:
"""
self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'."
])
def test_tissue_ontology_term_id_cell_culture(self):
"""
Cell Culture - MUST be a CL term appended with " (cell culture)"
"""
self.validator.adata.obs['tissue_ontology_term_id'][0
] = 'CL:0000057 (CELL culture)'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'."
])
def test_tissue_ontology_term_id_organoid(self):
"""
Organoid - MUST be an UBERON term appended with " (organoid)"
"""
self.validator.adata.obs['tissue_ontology_term_id'][0
] = 'CL:0000057 (ORGANOID)'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'."
])
def test_sex_ontology_term_id(self):
"""
sex_ontology_term_id categorical with str categories.
        This MUST be a child of PATO:0001894 for phenotypic sex or "unknown" if unavailable
"""
self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed."
])
def test_is_primary_data(self):
"""
is_primary_data bool. This MUST be True if this is the canonical instance of this cellular
observation and False if not. This is commonly False
for meta-analyses reusing data or for secondary views of data.
"""
self.validator.adata.obs['is_primary_data'] = 'FALSE'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'."
])
class TestVar(unittest.TestCase):
"""
Fail cases in adata.var and adata.raw.var
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_var_and_raw_var_same_index(self):
"""
var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.
"""
var = Validator.getattr_anndata(self.validator.adata, 'var')
new_index = list(var.index)
tmp = new_index[0]
new_index[0] = new_index[1]
new_index[1] = tmp
var.set_index(pd.Index(new_index), inplace=True)
tmp = var.iloc[0, :].copy()
var.iloc[0, :] = var.iloc[1, :].copy()
var.iloc[1, :] = tmp
self.validator.validate_adata()
print('FOO', self.validator.errors)
self.assertEqual(self.validator.errors, [
"ERROR: Index of 'raw.var' is not identical to index of 'var'."])
def test_check_unique_var(self):
"""
var.index MUST contain unique ENSEMBL gene identifiers for features.
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[1] = new_index[0]
component.set_index(pd.Index(new_index), inplace=True)
component.iloc[1, :] = component.iloc[0, :]
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Column 'index' in dataframe '{component_name}' is not unique."
])
def test_column_presence(self):
"""
        var is a pandas.DataFrame. Curators MUST annotate the following columns in the var dataframe.
feature_is_filtered must not be in raw.var, and it's only checked in var
"""
columns = ['feature_is_filtered', 'feature_biotype']
for component_name in ['var', 'raw.var']:
for column in columns:
if (column == 'feature_is_filtered' and component_name ==
'raw.var'):
continue
with self.subTest(component_name=component_name, column=column
):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
component = Validator.getattr_anndata(self.validator.
adata, component_name)
component.drop(column, axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Dataframe '{component_name}' is missing column '{column}'."
])
def test_feature_is_filtered(self):
"""
feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)
but is present in the raw matrix (raw.X). The value for all cells of the given feature in the
final matrix MUST be 0.
Otherwise, this MUST be False.
"""
self.validator.adata.var['feature_is_filtered'][0] = True
for i in range(self.validator.adata.X.shape[0]):
self.validator.adata.X[i, 0] = 0
self.validator.adata.X[0, 0] = 1
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0."
])
def test_columns_not_in_raw_var(self):
"""
Curators MUST annotate the following column only in the var dataframe.
This column MUST NOT be present in raw.var:
feature_is_filtered
"""
self.validator.adata.raw = self.validator.adata
self.validator.adata.uns['X_normalization'] = 'CPM'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."
])
def test_feature_id_wrong_format(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ID with an incorrect format "ENSEBML_NOGENE"
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSEBML_NOGENE'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID."
])
def test_feature_id_non_existent_ensembl(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ENSEMBL ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ENSG000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'gene'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'."
])
def test_feature_id_non_existent_ercc(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ERCC ID that has the right format but doesn't exist
"""
for component_name in ['var', 'raw.var']:
with self.subTest(component_name=component_name):
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(self.validator.adata,
component_name)
new_index = list(component.index)
new_index[0] = 'ERCC-000000'
component.set_index(pd.Index(new_index), inplace=True)
component['feature_biotype'][0] = 'spike-in'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'."
])
class TestUns(unittest.TestCase):
"""
Fail cases in adata.uns
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_required_fields_schema_version(self):
"""
Curators MUST annotate `schema_version` and values in uns (schema_version)
"""
del self.validator.adata.uns['schema_version']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed."
])
def test_required_fields_title(self):
"""
Curators MUST annotate `schema_version` and values in uns (title)
"""
del self.validator.adata.uns['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'title' in 'uns' is not present."])
def test_required_fields_X_normalization(self):
"""
Curators MUST annotate `schema_version` and values in uns (X_normalization)
"""
del self.validator.adata.uns['X_normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_normalization' in 'uns' is not present."])
def test_leading_trailing_double_spaces_in_strings(self):
"""
The following sequences MUST NOT appear in str types documented in the schema:
Leading control or space separators - ” This is an example”
Trailing control or space separators - “This is an example ”
Multiple (internal) control or space separators - "This is an example"
"""
self.validator.adata.uns['title'] = ' There is a leading space'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces."
])
self.validator.adata.uns['title'] = 'There is a trailing space '
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces."
])
        self.validator.adata.uns['title'] = 'There are  double spaces'
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces."
])
def test_schema_version(self):
"""
        schema_version. This MUST be "2.0.0".
"""
self.validator.adata.uns['schema_version'] = '1.0.0'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed."
])
def test_title(self):
"""
Title MUST be a string
"""
self.validator.adata.uns['title'] = ['title']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string."
])
def test_X_normalization_is_str(self):
"""
X_normalization str.
"""
self.validator.adata.uns['X_normalization'] = ['normalization']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string."
])
def test_X_normalization_not_raw(self):
"""
X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.
If data in X are raw, this SHOULD be "none".
FAIL CASE for when X_normalization was set to "none" but X may not be raw data
"""
del self.validator.adata.raw
self.validator.adata.uns['X_normalization'] = 'none'
self.validator.validate_adata()
print('FOO', self.validator.warnings)
self.assertEqual(self.validator.warnings, [
"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)"
])
def test_batch_condition_is_list(self):
"""
batch_condition list[str]
"""
self.validator.adata.uns['batch_condition'] = numpy.array(self.
validator.adata.uns['batch_condition'])
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['batch_condition'
] = 'cell_type_ontology_term_id'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array."
])
def test_batch_condition_is_column_from_obs(self):
"""
batch_condition list[str]. str values MUST refer to cell metadata keys in obs.
"""
self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'."
])
def test_default_embedding_is_str(self):
"""
Default_embedding str.
"""
self.validator.adata.uns['default_embedding'] = ['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string."
])
def test_default_embedding_is_key_from_obsm(self):
"""
Default_embedding str. The value MUST match a key to an embedding in obsm
"""
self.validator.adata.uns['default_embedding'] = 'X_other'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'."
])
def test_X_approximate_distribution_is_str(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal".
Note that `normal` is tested in the happy path test case using `good_uns`.
"""
self.validator.adata.uns['X_approximate_distribution'] = 'count'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
self.validator.adata.uns['X_approximate_distribution'] = ['count']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string."
])
def test_X_approximate_distribution_is_valid(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal"
"""
self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal']."
])
class TestObsm(unittest.TestCase):
"""
Fail cases for adata.obsm
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_obsm_values_ara_numpy(self):
"""
values in obsm MUST be a numpy.ndarray
"""
self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.
adata.obsm['X_umap'], index=self.validator.adata.obs_names)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')."
])
def test_obsm_values_at_least_one_X(self):
"""
At least one key for the embedding MUST be prefixed with "X_"
"""
self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']
self.validator.adata.uns['default_embedding'] = 'umap'
del self.validator.adata.obsm['X_umap']
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix."
])
def test_obsm_shape(self):
"""
Curators MUST annotate one or more two-dimensional (m >= 2) embeddings
"""
self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.
adata.obsm['X_umap'], 0, 1)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [
"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'."
])
class TestAddingLabels(unittest.TestCase):
"""
Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually
created dataframes (positive control) against the ones produced by the validator
"""
@classmethod
def setUpClass(cls):
cls.adata_with_labels = examples.adata_with_labels
validator = Validator()
validator.adata = examples.adata.copy()
validator.validate_adata()
cls.label_writer = AnnDataLabelAppender(validator)
cls.label_writer._add_labels()
def test_var_added_labels(self):
"""
When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism
to the var dataframe. Curators MUST NOT annotate the following columns:
- feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene
name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the
ERCC Spike-In identifier appended with " spike-in control".
- feature_reference. This MUST be the reference organism for a feature:
Homo sapiens "NCBITaxon:9606"
Mus musculus "NCBITaxon:10090"
SARS-CoV-2 "NCBITaxon:2697049"
ERCC Spike-Ins "NCBITaxon:32630"
"""
for column in ['feature_name', 'feature_reference']:
expected_column = self.adata_with_labels.var[column]
obtained_column = self.label_writer.adata.var[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
def test_obs_added_labels(self):
"""
When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding ontology term to the obs dataframe.
Curators MUST NOT annotate the following columns.
- assay. categorical with str categories. This MUST be the human-readable name assigned to the value
of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to
assay_ontology_term_id MUST be appended to assay.
- cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value
of cell_type_ontology_term_id.
- development_stage. categorical with str categories. This MUST be "unknown" if set in
development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to
the value of development_stage_ontology_term_id.
- disease. categorical with str categories. This MUST be the human-readable name assigned to
the value of disease_ontology_term_id.
- ethnicity. categorical with str categories. This MUST be "na" or "unknown" if
set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable
name assigned to the value of ethnicity_ontology_term_id.
- organism. categorical with str categories. This MUST be the human-readable name assigned
to the value of organism_ontology_term_id.
- sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id;
otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.
- tissue. categorical with str categories. This MUST be the human-readable name assigned to the
value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST
be appended if present in tissue_ontology_term_id.
"""
for column in ['assay', 'cell_type', 'development_stage', 'disease',
'ethnicity', 'organism', 'sex', 'tissue']:
expected_column = self.adata_with_labels.obs[column]
obtained_column = self.label_writer.adata.obs[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()
):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
| import unittest
import numpy
import pandas as pd
import fixtures.examples_validate as examples
from cellxgene_schema.validate import Validator
from cellxgene_schema.write_labels import AnnDataLabelAppender
# Tests for schema compliance of an AnnData object
class TestValidAnndata(unittest.TestCase):
"""
Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.
The valid AnnData object has all valid cases described in the schema.
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_valid_anndata(self):
self.validator.validate_adata()
self.assertFalse(self.validator.errors)
class TestH5adValidation(unittest.TestCase):
"""
    Checks that validation from an h5ad file works; only one invalid example is used here, as extensive
    testing is done in the classes below
"""
def setUp(self):
self.h5ad_valid_file = examples.h5ad_valid
self.h5ad_invalid_file = examples.h5ad_invalid
self.validator = Validator()
def test_validate(self):
# Valid h5ad
self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))
# Invalid h5ads
self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))
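# Documentation-only sketch (not part of the original suite): the Validator
# exercised by these tests can also be run standalone on an h5ad file.
# "my_dataset.h5ad" below is a hypothetical placeholder path, not a fixture
# from this repository.
def _example_standalone_validation(h5ad_path="my_dataset.h5ad"):
    validator = Validator()
    is_valid = validator.validate_adata(h5ad_path)
    return is_valid, validator.errors, validator.warnings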
class TestExpressionMatrix(unittest.TestCase):
"""
Fail cases for expression matrices (anndata.X and anndata.raw.X)
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_shapes(self):
"""
All matrix layers MUST have the same shape, and have the same cell labels and gene labels.
"""
# Creates a raw layer
self.validator.adata.raw = self.validator.adata
self.validator.adata.raw.var.drop("feature_is_filtered", axis=1, inplace=True)
self.validator.adata.X = examples.adata_non_raw.X.copy()
self.validator.adata.uns["X_normalization"] = "CPM"
# remove one gene
self.validator.adata = self.validator.adata[:, 1:]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
["ERROR: Number of genes in X (3) is different than raw.X (4)."],
)
def test_sparsity(self):
"""
In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that
the matrix be encoded as a scipy.sparse.csr_matrix
"""
self.validator.adata.X = self.validator.adata.X.toarray()
self.validator.validate_adata()
self.assertEqual(
self.validator.warnings,
[
"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, "
"and it is not a 'scipy.sparse.csr_matrix'. It is "
"STRONGLY RECOMMENDED to use this type of matrix for "
"the given sparsity."
],
)
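    # Documentation-only sketch (not a test): the recommendation above can be
    # satisfied by re-encoding a mostly-zero dense matrix before writing the
    # h5ad, e.g. (assuming `adata` is any AnnData object):
    #
    #   from scipy import sparse
    #   adata.X = sparse.csr_matrix(adata.X)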
def test_raw_existence(self):
"""
Except for ATAC-seq and methylation data, raw data is REQUIRED
"""
# RNA - raw layer required
del self.validator.adata.raw
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'."
],
)
# ATAC - raw layer not required
        # The assignment above makes X not raw: self.validator.adata.uns["X_normalization"] = "CPM"
# The following line makes it to be scATAC-seq data (EFO:0010891)
# Missing raw data in atac-seq data is allowed, thus the following should not return an error message
self.validator.errors = []
self.validator.adata.obs["assay_ontology_term_id"] = "EFO:0010891"
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
def test_final_strongly_recommended(self):
"""
Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED
"""
        # move raw to X, i.e. there is no final (normalized) matrix
self.validator.adata.X = self.validator.adata.raw.X
del self.validator.adata.raw
self.validator.adata.uns["X_normalization"] = "none"
self.validator.validate_adata()
self.assertEqual(
self.validator.warnings,
[
"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. "
"It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided."
],
)
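# Documentation-only sketch (not part of the original suite): the raw/final
# layout exercised above is typically prepared as follows (assuming `adata.X`
# currently holds raw counts and `normalized` is the processed matrix):
#
#   adata.raw = adata                      # keep raw counts in raw.X
#   adata.X = normalized                   # final (normalized) matrix
#   adata.uns["X_normalization"] = "CPM"   # describe the normalization used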
class TestObs(unittest.TestCase):
"""
    Fail cases in adata.obs
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_column_presence(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
"""
columns = [
"assay_ontology_term_id",
"development_stage_ontology_term_id",
"disease_ontology_term_id",
"ethnicity_ontology_term_id",
"is_primary_data",
"sex_ontology_term_id",
"tissue_ontology_term_id",
]
for column in columns:
with self.subTest(column=column):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
self.validator.adata.obs.drop(column, axis=1, inplace=True)
                # Remove batch_condition because it has a dependency on is_primary_data
self.validator.adata.uns.pop("batch_condition")
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[f"ERROR: Dataframe 'obs' is missing " f"column '{column}'."],
)
def test_column_presence_organism(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
        A separate check is needed for organism_ontology_term_id because removing from anndata results in multiple
errors given that other columns depend on its presence
"""
self.validator.adata.obs.drop("organism_ontology_term_id", axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Dataframe 'obs' is missing column "
"'organism_ontology_term_id'.",
"ERROR: Checking values with dependencies failed for "
"adata.obs['ethnicity_ontology_term_id'], this is likely due "
"to missing dependent column in adata.obs.",
"ERROR: Checking values with dependencies failed for "
"adata.obs['development_stage_ontology_term_id'], this is likely due "
"to missing dependent column in adata.obs.",
],
)
def test_obsolete_term_id(self):
"""
Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310
for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by
EFO:0009899 for 10x 3' v2.
https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310
"""
# Not a valid term
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0009310"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.",
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id "
"of '[['EFO:0002772', 'EFO:0010183']]'.",
],
)
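    # Documentation-only sketch (not a test): per the docstring above, the
    # obsolete term is replaced by its current counterpart, e.g.
    #
    #   adata.obs["assay_ontology_term_id"] = "EFO:0009899"  # 10x 3' v2
    #
    # rather than the deprecated "EFO:0009310".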
def test_assay_ontology_term_id(self):
"""
assay_ontology_term_id categorical with str categories.
This MUST be an EFO term and either child of "EFO:0002772" or "EFO:0010183"
If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to
the most accurate term. For example, the sci-plex assay could be curated as "EFO:0010183 (sci-plex)"
"""
# Not a valid term
self.validator.adata.obs["assay_ontology_term_id"][0] = "CL:000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid "
"ontology term id of 'EFO'.",
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child "
"term id of '[['EFO:0002772', 'EFO:0010183']]'.",
],
)
# Not a valid child
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0000001"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a "
"child term id of '[['EFO:0002772', 'EFO:0010183']]'."
],
)
# Not a clarifying text
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0010183 sci-plex"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.",
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of "
"'[['EFO:0002772', 'EFO:0010183']]'.",
],
)
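    # Documentation-only sketch (not a test): per the docstring above,
    # clarifying text is only accepted when it is wrapped in parentheses:
    #
    #   adata.obs["assay_ontology_term_id"] = "EFO:0010183 (sci-plex)"  # accepted
    #   adata.obs["assay_ontology_term_id"] = "EFO:0010183 sci-plex"    # rejected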
def test_cell_type_ontology_term_id(self):
"""
cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.
"""
# Not a valid term
self.validator.adata.obs["cell_type_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid "
"ontology term id of 'CL'."
],
)
def test_development_stage_ontology_term_id_human(self):
"""
development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be "unknown".
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be the most accurate HsapDv term.
"""
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' "
"(Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown."
],
)
def test_development_stage_ontology_term_id_mouse(self):
"""
If organism_ontolology_term_id is "NCBITaxon:10090" for Mus musculus,
this MUST be the most accurate MmusDv term
"""
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' "
"(Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown."
],
)
def test_development_stage_ontology_term_id_all_species(self):
"""
        For all other organisms, this MUST be a child of UBERON:0000105 and not UBERON:0000071
"""
# Fail case not an UBERON term
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not "
"a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
],
)
        # For all other organisms, this MUST be a child of UBERON:0000105 and not UBERON:0000071
# Fail case UBERON:0000071
self.validator.errors = []
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "UBERON:0000071"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When "
"'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
],
)
def test_disease_ontology_term_id(self):
"""
disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or
PATO:0000461 for normal or healthy.
"""
# Invalid ontology
self.validator.adata.obs["disease_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a "
"valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids."
],
)
# Invalid PATO term id
self.validator.errors = []
self.validator.adata.obs["disease_ontology_term_id"][0] = "PATO:0001894"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. "
"Only 'PATO:0000461' is allowed for 'PATO' term ids."
],
)
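    # Documentation-only sketch (not a test): per the docstring above, healthy
    # samples are curated with the single allowed PATO term:
    #
    #   adata.obs["disease_ontology_term_id"] = "PATO:0000461"  # normal/healthy
    #
    # while diseased samples use an appropriate MONDO term id.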
def test_ethnicity_ontology_term_id(self):
"""
ethnicity_ontology_term_id categorical with str categories.
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be either a HANCESTRO term or "unknown" if unavailable.
Otherwise, for all other organisms this MUST be "na".
"""
# If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
# this MUST be either a HANCESTRO term or "unknown" if unavailable.
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is "
"not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' "
"(Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'."
],
)
# Otherwise, for all other organisms this MUST be "na". Below is the test case for mouse data.
# development_stage_ontology_term_id has to be set to an appropriate mouse term id, otherwise there
# will be an error in that field.
self.validator.errors = []
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "MmusDv:0000003"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a "
"valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' "
"(Homo sapiens), ethnicity_ontology_term_id MUST be 'na'."
],
)
def test_organism_ontology_term_id(self):
"""
organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.
"""
# Setting "organism_ontology_term_id" to "EFO:0000001" is the fail case. However since this represents neither
# human nor mouse, then two other columns that are dependent on it need to be set appropriately to avoid
# other error messages: "development_stage_ontology_term_id" and "ethnicity_ontology_term_id"
self.validator.adata.obs["organism_ontology_term_id"][0] = "EFO:0000001"
self.validator.adata.obs["development_stage_ontology_term_id"][0] = "unknown"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid "
"ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed."
],
)
def test_tissue_ontology_term_id_base(self):
"""
tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue
that this cell was derived from, depending on the type of biological sample:
"""
self.validator.adata.obs["tissue_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a "
"valid ontology term id of 'UBERON, CL'."
],
)
def test_tissue_ontology_term_id_cell_culture(self):
"""
Cell Culture - MUST be a CL term appended with " (cell culture)"
"""
self.validator.adata.obs["tissue_ontology_term_id"][
0
] = "CL:0000057 (CELL culture)"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is "
"not a valid ontology term id of 'UBERON, CL'."
],
)
def test_tissue_ontology_term_id_organoid(self):
"""
Organoid - MUST be an UBERON term appended with " (organoid)"
"""
self.validator.adata.obs["tissue_ontology_term_id"][0] = "CL:0000057 (ORGANOID)"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is "
"not a valid ontology term id of 'UBERON, CL'."
],
)
def test_sex_ontology_term_id(self):
"""
sex_ontology_term_id categorical with str categories.
        This MUST be a child of PATO:0001894 for phenotypic sex or "unknown" if unavailable
"""
self.validator.adata.obs["sex_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is "
"not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', "
"or 'unknown' are allowed."
],
)
def test_is_primary_data(self):
"""
is_primary_data bool. This MUST be True if this is the canonical instance of this cellular
observation and False if not. This is commonly False
for meta-analyses reusing data or for secondary views of data.
"""
self.validator.adata.obs["is_primary_data"] = "FALSE"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Column 'is_primary_data' in dataframe 'obs' "
"must be boolean, not 'object'."
],
)
class TestVar(unittest.TestCase):
"""
Fail cases in adata.var and adata.raw.var
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_var_and_raw_var_same_index(self):
"""
var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.
"""
# Swap first row for second one
var = Validator.getattr_anndata(self.validator.adata, "var")
# First swap the index
new_index = list(var.index)
tmp = new_index[0]
new_index[0] = new_index[1]
new_index[1] = tmp
var.set_index(pd.Index(new_index), inplace=True)
# Then swap the actual rows
tmp = var.iloc[0, :].copy()
var.iloc[0, :] = var.iloc[1, :].copy()
var.iloc[1, :] = tmp
self.validator.validate_adata()
print("FOO", self.validator.errors)
self.assertEqual(
self.validator.errors,
["ERROR: Index of 'raw.var' is not identical to index of 'var'."],
)
def test_check_unique_var(self):
"""
var.index MUST contain unique ENSEMBL gene identifiers for features.
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
# Duplicate 1st row in var and assign it to 2nd
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[1] = new_index[0]
component.set_index(pd.Index(new_index), inplace=True)
component.iloc[1, :] = component.iloc[0, :]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Column 'index' in dataframe '{component_name}' is not unique."
],
)
def test_column_presence(self):
"""
        var is a pandas.DataFrame. Curators MUST annotate the following columns in the var dataframe.
feature_is_filtered must not be in raw.var, and it's only checked in var
"""
columns = ["feature_is_filtered", "feature_biotype"]
for component_name in ["var", "raw.var"]:
for column in columns:
if column == "feature_is_filtered" and component_name == "raw.var":
continue
with self.subTest(component_name=component_name, column=column):
# Resetting validator
self.validator.errors = []
self.validator.adata = examples.adata.copy()
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
component.drop(column, axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Dataframe '{component_name}' is missing "
f"column '{column}'."
],
)
def test_feature_is_filtered(self):
"""
feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)
but is present in the raw matrix (raw.X). The value for all cells of the given feature in the
final matrix MUST be 0.
Otherwise, this MUST be False.
"""
        # Flag the first feature as filtered, then leave one non-zero value in its column of X
self.validator.adata.var["feature_is_filtered"][0] = True
for i in range(self.validator.adata.X.shape[0]):
self.validator.adata.X[i, 0] = 0
self.validator.adata.X[0, 0] = 1
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', "
"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. "
"All values for these features must be 0."
],
)
def test_columns_not_in_raw_var(self):
"""
Curators MUST annotate the following column only in the var dataframe.
This column MUST NOT be present in raw.var:
feature_is_filtered
"""
self.validator.adata.raw = self.validator.adata
self.validator.adata.uns["X_normalization"] = "CPM"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
["ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."],
)
def test_feature_id_wrong_format(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ID with an incorrect format "ENSEBML_NOGENE"
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ENSEBML_NOGENE"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "gene"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' "
f"in '{component_name}', make sure it is a valid ID."
],
)
def test_feature_id_non_existent_ensembl(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ENSEMBL ID that has the right format but doesn't exist
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ENSG000"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "gene"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'."
],
)
def test_feature_id_non_existent_ercc(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ERCC ID that has the right format but doesn't exist
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ERCC-000000"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "spike-in"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'."
],
)
class TestUns(unittest.TestCase):
"""
Fail cases in adata.uns
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_required_fields_schema_version(self):
"""
Curators MUST annotate `schema_version` and values in uns (schema_version)
"""
del self.validator.adata.uns["schema_version"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: adata has no schema definition in 'adata.uns'. "
"Validation cannot be performed."
],
)
def test_required_fields_title(self):
"""
Curators MUST annotate `schema_version` and values in uns (title)
"""
del self.validator.adata.uns["title"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors, ["ERROR: 'title' in 'uns' is not present."]
)
def test_required_fields_X_normalization(self):
"""
Curators MUST annotate `schema_version` and values in uns (X_normalization)
"""
del self.validator.adata.uns["X_normalization"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors, ["ERROR: 'X_normalization' in 'uns' is not present."]
)
def test_leading_trailing_double_spaces_in_strings(self):
"""
The following sequences MUST NOT appear in str types documented in the schema:
Leading control or space separators - ” This is an example”
Trailing control or space separators - “This is an example ”
Multiple (internal) control or space separators - "This is an example"
"""
self.validator.adata.uns["title"] = " There is a leading space"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces."
],
)
self.validator.adata.uns["title"] = "There is a trailing space "
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces."
],
)
self.validator.adata.uns["title"] = "There are double spaces"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces."
],
)
def test_schema_version(self):
"""
        schema_version: this MUST be "2.0.0".
"""
self.validator.adata.uns["schema_version"] = "1.0.0"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. "
"Validation cannot be performed."
],
)
def test_title(self):
"""
Title MUST be a string
"""
# list instead of string
self.validator.adata.uns["title"] = ["title"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['title']' in 'uns['title']' is not valid, "
"it must be a string."
],
)
def test_X_normalization_is_str(self):
"""
X_normalization str.
"""
# list instead of string
self.validator.adata.uns["X_normalization"] = ["normalization"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['normalization']' in 'uns['X_normalization']' is "
"not valid, it must be a string."
],
)
def test_X_normalization_not_raw(self):
"""
X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.
If data in X are raw, this SHOULD be "none".
FAIL CASE for when X_normalization was set to "none" but X may not be raw data
"""
# Assign a real value to X while X_normalization is 'none'
del self.validator.adata.raw
self.validator.adata.uns["X_normalization"] = "none"
self.validator.validate_adata()
print("FOO", self.validator.warnings)
self.assertEqual(
self.validator.warnings,
[
"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear "
"to have raw counts (integers)"
],
)
def test_batch_condition_is_list(self):
"""
batch_condition list[str]
"""
# Check valid case of numpy array which is interchangeable with lists
self.validator.adata.uns["batch_condition"] = numpy.array(
self.validator.adata.uns["batch_condition"]
)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
# Check fail case: not a list nor numpy array
self.validator.adata.uns["batch_condition"] = "cell_type_ontology_term_id"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' "
"is not valid, it must be a list or numpy array."
],
)
def test_batch_condition_is_column_from_obs(self):
"""
batch_condition list[str]. str values MUST refer to cell metadata keys in obs.
"""
self.validator.adata.uns["batch_condition"] = ["NO_COLUMN"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a "
"column in 'adata.obs'."
],
)
def test_default_embedding_is_str(self):
"""
Default_embedding str.
"""
self.validator.adata.uns["default_embedding"] = ["X_umap"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, "
"it must be a string."
],
)
def test_default_embedding_is_key_from_obsm(self):
"""
Default_embedding str. The value MUST match a key to an embedding in obsm
"""
self.validator.adata.uns["default_embedding"] = "X_other"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, "
"it must be a key of 'adata.obsm'."
],
)
def test_X_approximate_distribution_is_str(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal".
Note that `normal` is tested in the happy path test case using `good_uns`.
"""
# Check valid case of "count" which is not included in valid object
self.validator.adata.uns["X_approximate_distribution"] = "count"
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
# Invalid type: list
self.validator.adata.uns["X_approximate_distribution"] = ["count"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['count']' in 'uns['X_approximate_distribution']' "
"is not valid, it must be a string."
],
)
def test_X_approximate_distribution_is_valid(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal"
"""
self.validator.adata.uns["X_approximate_distribution"] = "COUNT"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is "
"not valid. Allowed terms: ['count', 'normal']."
],
)
class TestObsm(unittest.TestCase):
"""
Fail cases for adata.obsm
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_obsm_values_ara_numpy(self):
"""
        Each value in obsm MUST be a numpy.ndarray
"""
self.validator.adata.obsm["X_tsne"] = pd.DataFrame(
self.validator.adata.obsm["X_umap"], index=self.validator.adata.obs_names
)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: All embeddings have to be of 'numpy.ndarray' type, "
"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')."
],
)
def test_obsm_values_at_least_one_X(self):
"""
At least one key for the embedding MUST be prefixed with "X_"
"""
self.validator.adata.obsm["umap"] = self.validator.adata.obsm["X_umap"]
self.validator.adata.uns["default_embedding"] = "umap"
del self.validator.adata.obsm["X_umap"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: At least one embedding in 'obsm' has to have a "
"key with an 'X_' prefix."
],
)
def test_obsm_shape(self):
"""
Curators MUST annotate one or more two-dimensional (m >= 2) embeddings
"""
# Makes 1 column array
self.validator.adata.obsm["X_umap"] = numpy.delete(
self.validator.adata.obsm["X_umap"], 0, 1
)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: All embeddings must have as many rows as cells, and "
"at least two columns.'adata.obsm['X_umap']' has shape "
"of '(2, 1)'."
],
)
class TestAddingLabels(unittest.TestCase):
"""
Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually
created dataframes (positive control) against the ones produced by the validator
"""
@classmethod
def setUpClass(cls):
# Manually created data (positive control)
cls.adata_with_labels = examples.adata_with_labels
# Validate test data
validator = Validator()
validator.adata = examples.adata.copy()
validator.validate_adata()
# Add labels through validator
cls.label_writer = AnnDataLabelAppender(validator)
cls.label_writer._add_labels()
def test_var_added_labels(self):
"""
When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism
to the var dataframe. Curators MUST NOT annotate the following columns:
- feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene
name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the
ERCC Spike-In identifier appended with " spike-in control".
- feature_reference. This MUST be the reference organism for a feature:
Homo sapiens "NCBITaxon:9606"
Mus musculus "NCBITaxon:10090"
SARS-CoV-2 "NCBITaxon:2697049"
ERCC Spike-Ins "NCBITaxon:32630"
"""
for column in ["feature_name", "feature_reference"]:
expected_column = self.adata_with_labels.var[column]
obtained_column = self.label_writer.adata.var[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
def test_obs_added_labels(self):
"""
When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding ontology term to the obs dataframe.
Curators MUST NOT annotate the following columns.
- assay. categorical with str categories. This MUST be the human-readable name assigned to the value
of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to
assay_ontology_term_id MUST be appended to assay.
- cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value
of cell_type_ontology_term_id.
- development_stage. categorical with str categories. This MUST be "unknown" if set in
development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to
the value of development_stage_ontology_term_id.
- disease. categorical with str categories. This MUST be the human-readable name assigned to
the value of disease_ontology_term_id.
- ethnicity. categorical with str categories. This MUST be "na" or "unknown" if
set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable
name assigned to the value of ethnicity_ontology_term_id.
- organism. categorical with str categories. This MUST be the human-readable name assigned
to the value of organism_ontology_term_id.
- sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id;
otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.
- tissue. categorical with str categories. This MUST be the human-readable name assigned to the
value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST
be appended if present in tissue_ontology_term_id.
"""
for column in [
"assay",
"cell_type",
"development_stage",
"disease",
"ethnicity",
"organism",
"sex",
"tissue",
]:
expected_column = self.adata_with_labels.obs[column]
obtained_column = self.label_writer.adata.obs[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
| [
43,
45,
50,
57,
75
] |
355 | ad3a7221883a847fc9d26097c3801973cbbda38e | <mask token>
| <mask token>
urlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),
path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'
), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=
'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.
as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.
as_view(), name='Income')]
| from django.urls import path, include
from Income import views
urlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),
path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'
), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=
'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.
as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.
as_view(), name='Income')]
|
from django.urls import path,include
from Income import views
urlpatterns = [
path('IncomeHome/',views.IncomeHome,name='IncomeHome'),
path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),
path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),
path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),
path('Income/',views.IncomeView.as_view(),name='Income'),
]
| null | [
0,
1,
2,
3
] |
356 | 9e7dee9c0fd4cd290f4710649ffc4a94fedf0358 | import os
pil = 'y'
while(pil=='y'):
os.system("cls")
print("===============================")
print("== KALKULATOR SEDERHANA ==")
print("===============================")
print("MENU-UTAMA : ")
print("1 Penjumlahan")
print("2 Pengurangan")
print("3 Perkalian")
print("4 Pembagian")
def penjumlahan ():
print("PENJUMLAHAN DUA BUAH BILANGAN")
print("=============================")
x = float(input ("Bilangan pertama: "))
y = float(input ("Bilangan kedua : "))
print("-----------------------------")
print "Jumlah = ", x+y
def pengurangan ():
print("PENGURANGAN DUA BUAH BILANGAN")
print("=============================")
x = float(input("Bilangan pertama: "))
y = float(input("Bilangan kedua : "))
print("-----------------------------")
print "Jumlah = ", x-y
def perkalian ():
print("PERKALIAN DUA BUAH BILANGAN")
print("===========================")
x = float(input("Bilangan pertama: "))
y = float(input("Bilangan kedua : "))
print("---------------------------")
print "Jumlah = ", x*y
def pembagian ():
print("PEMBAGIAN DUA BUAH BILANGAN")
print("===========================")
x = float(input("Bilangan pertama: "))
y = float(input("Bilangan kedua : "))
print("---------------------------")
print "Jumlah = ", x/y
pilihan = int(input("Masukkan pilihan Anda(1,2,3, dan 4): "))
if (pilihan==1):
penjumlahan ()
elif (pilihan==2):
pengurangan ()
elif (pilihan==3):
perkalian ()
elif (pilihan==4):
pembagian ()
else:
print("Pilihan Anda salah")
pil = raw_input("ulang KALKULATOR lagi? (y): ")
| null | null | null | null | [
0
] |
357 | 180f7f0ade9770c6669680bd13ac8f2fd55cc8c7 | <mask token>
| def raizCubica(numero):
r = pow(numero, 1 / 3)
return r
<mask token>
| def raizCubica(numero):
r = pow(numero, 1 / 3)
return r
<mask token>
for x in range(5):
numeros.insert(x, float(input('Ingrese Numero: ')))
raices.insert(x, round(raizCubica(numeros[x]), 3))
print('Numeros: ', numeros)
print('Raices: ', raices)
| def raizCubica(numero):
r = pow(numero, 1 / 3)
return r
numeros = []
raices = []
for x in range(5):
numeros.insert(x, float(input('Ingrese Numero: ')))
raices.insert(x, round(raizCubica(numeros[x]), 3))
print('Numeros: ', numeros)
print('Raices: ', raices)
| def raizCubica(numero):
r = pow(numero,(1/3))
return r
numeros = []
raices = []
for x in range(5):
numeros.insert(x, float(input("Ingrese Numero: ")))
raices.insert(x, round(raizCubica(numeros[x]),3))
print("Numeros: ", numeros)
print("Raices: ", raices) | [
0,
1,
2,
3,
4
] |
358 | 97ebdeada3d797a971b5c3851b75f9754595f67c | <mask token>
| <mask token>
setup(name='TF_Speech', version='0.2.0', extras_require={'tensorflow': [
'tensorflow'], 'tensorflow with gpu': ['tensorflow-gpu']})
| <mask token>
from setuptools import setup
setup(name='TF_Speech', version='0.2.0', extras_require={'tensorflow': [
'tensorflow'], 'tensorflow with gpu': ['tensorflow-gpu']})
| """
Python package setup file.
"""
from setuptools import setup
setup(
name="TF_Speech",
version="0.2.0",
extras_require={'tensorflow': ['tensorflow'],
'tensorflow with gpu': ['tensorflow-gpu']},
)
| null | [
0,
1,
2,
3
] |
359 | 9f34bf3a0bb24db428b7af1a354aec1d3a72df98 | <mask token>
class CreatePasswordEmailValidationSerializer(serializers.Serializer):
<mask token>
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class CreateEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[user_with_email_not_existing])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class EmailSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class EmailValidationSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email', 'validation_code']
class EmailValidationPasswordSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
password = serializers.CharField(max_length=200)
class Meta:
model = EmailValidation
fields = ['email', 'validation_code', 'password']
class NewUserSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
data['user'] = FullUserSerializer(self.user).data
return data
| <mask token>
class CreatePasswordEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[email_does_exist])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class CreateEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[user_with_email_not_existing])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class EmailSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class EmailValidationSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email', 'validation_code']
class EmailValidationPasswordSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
password = serializers.CharField(max_length=200)
class Meta:
model = EmailValidation
fields = ['email', 'validation_code', 'password']
class NewUserSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
data['user'] = FullUserSerializer(self.user).data
return data
| <mask token>
def user_with_email_not_existing(email):
try:
User.objects.get(email=email)
raise ValidationError(message='This email is taken')
except User.DoesNotExist:
return email
def email_does_exist(email):
try:
User.objects.get(email=email)
return email
except User.DoesNotExist:
raise ValidationError(message='User does not exist!')
class CreatePasswordEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[email_does_exist])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class CreateEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[user_with_email_not_existing])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class EmailSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class EmailValidationSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email', 'validation_code']
class EmailValidationPasswordSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
password = serializers.CharField(max_length=200)
class Meta:
model = EmailValidation
fields = ['email', 'validation_code', 'password']
class NewUserSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
data['user'] = FullUserSerializer(self.user).data
return data
| <mask token>
User = get_user_model()
def user_with_email_not_existing(email):
try:
User.objects.get(email=email)
raise ValidationError(message='This email is taken')
except User.DoesNotExist:
return email
def email_does_exist(email):
try:
User.objects.get(email=email)
return email
except User.DoesNotExist:
raise ValidationError(message='User does not exist!')
class CreatePasswordEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[email_does_exist])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class CreateEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[user_with_email_not_existing])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(validation_code=validation_code, to=
self.validated_data.get('email'), type=self.validated_data.get(
'type'))
new_validation = EmailValidation.objects.create(validation_code=
validation_code, email=email, type=self.validated_data.get('type'))
return new_validation
class EmailSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class EmailValidationSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email', 'validation_code']
class EmailValidationPasswordSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
password = serializers.CharField(max_length=200)
class Meta:
model = EmailValidation
fields = ['email', 'validation_code', 'password']
class NewUserSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
data['user'] = FullUserSerializer(self.user).data
return data
| from random import randrange
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from .models import EmailValidation
from ..emails.models import Email
from ..users.serializers import FullUserSerializer
User = get_user_model()
def user_with_email_not_existing(email):
try:
User.objects.get(email=email)
raise ValidationError(message='This email is taken')
except User.DoesNotExist:
return email
def email_does_exist(email):
try:
User.objects.get(email=email)
return email
except User.DoesNotExist:
raise ValidationError(message='User does not exist!')
class CreatePasswordEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[email_does_exist])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(
validation_code=validation_code,
to=self.validated_data.get('email'),
type=self.validated_data.get('type')
)
new_validation = EmailValidation.objects.create(
validation_code=validation_code,
email=email,
type=self.validated_data.get('type'))
return new_validation
class CreateEmailValidationSerializer(serializers.Serializer):
email = serializers.EmailField(validators=[user_with_email_not_existing])
def save(self):
validation_code = randrange(10000000, 100000000)
email = Email.objects.create(
validation_code=validation_code,
to=self.validated_data.get('email'),
type=self.validated_data.get('type')
)
new_validation = EmailValidation.objects.create(
validation_code=validation_code,
email=email,
type=self.validated_data.get('type'))
return new_validation
class EmailSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class EmailValidationSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email', 'validation_code']
class EmailValidationPasswordSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
password = serializers.CharField(max_length=200)
class Meta:
model = EmailValidation
fields = ['email', 'validation_code', 'password']
class NewUserSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = EmailValidation
fields = ['email']
class TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
data['user'] = FullUserSerializer(self.user).data
return data
| [
15,
16,
18,
19,
21
] |
360 | 2ca91c410b8c8d6306d5ed918783a4d77a091ba8 | <mask token>
class RepeatWorkBreak(rumps.App):
<mask token>
def set_up_menu(self):
self.timer.stop()
self.timer.count = 0
self.app.title = self.config['app_title']
def convert_seconds_to_time_string(self, seconds) ->str:
seconds = seconds % (24 * 3600)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return '%d:%02d:%02d' % (hours, minutes, seconds)
def on_tick(self, sender):
time_left_in_seconds = sender.end - sender.count
time_left_in_string = self.convert_seconds_to_time_string(
time_left_in_seconds)
if sender.count != 0 and sender.count % 3600 == 0:
self.elapsed_shift_time_in_hours += 1
self.update_progress_box()
if time_left_in_seconds == 0:
rumps.notification(title=self.config['app_title'], subtitle=
self.config['timeout_message'], message='')
self.stop_timer()
self.stop_button.set_callback(None)
else:
self.stop_button.set_callback(self.stop_timer)
self.app.title = self.progress_box + ' | ' + time_left_in_string
sender.count += 1
def update_progress_box(self):
self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self
.shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours
) * '◻︎'
<mask token>
<mask token>
<mask token>
def handle_break_setting_button(self, sender):
self.break_setting_button_group.toggle(sender)
selected_minutes = int(match('^\\d+\\s{1}', sender.title)[0])
self.break_time_in_seconds = selected_minutes * 60
def run(self):
self.app.run()
<mask token>
| <mask token>
class RepeatWorkBreak(rumps.App):
def __init__(self):
rumps.debug_mode(True)
self.config = {'app_title': 'Repeat Work and Break', 'start':
'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',
'stop': 'Stop Timer', 'timeout_message':
'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *
1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{
'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],
'break_setting_buttons': [{'title': '5 minutes'}, {'title':
'10 minutes'}, {'title': '15 minutes'}]}
self.app = rumps.App(self.config['app_title'])
self.timer = rumps.Timer(self.on_tick, 1)
self.shift_setting_button_group = ButtonGroup(self.config[
'shift_setting_buttons'], callback=self.handle_shift_setting_button
)
self.break_setting_button_group = ButtonGroup(self.config[
'break_setting_buttons'], callback=self.handle_shift_setting_button
)
self.shift_time_in_seconds = self.config['shift_time_in_seconds']
self.break_time_in_seconds = self.config['break_time_in_seconds']
self.elapsed_shift_time_in_hours = 0
self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)
self.start_pause_button = rumps.MenuItem(title=self.config['start'],
callback=self.start_timer)
self.stop_button = rumps.MenuItem(title=self.config['stop'],
callback=None)
self.app.menu = [{'Preferences': {'Setting Shift': self.
shift_setting_button_group.buttons, 'Setting Break / hr': self.
break_setting_button_group.buttons}}, None, self.
start_pause_button, self.stop_button]
def set_up_menu(self):
self.timer.stop()
self.timer.count = 0
self.app.title = self.config['app_title']
def convert_seconds_to_time_string(self, seconds) ->str:
seconds = seconds % (24 * 3600)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return '%d:%02d:%02d' % (hours, minutes, seconds)
def on_tick(self, sender):
time_left_in_seconds = sender.end - sender.count
time_left_in_string = self.convert_seconds_to_time_string(
time_left_in_seconds)
if sender.count != 0 and sender.count % 3600 == 0:
self.elapsed_shift_time_in_hours += 1
self.update_progress_box()
if time_left_in_seconds == 0:
rumps.notification(title=self.config['app_title'], subtitle=
self.config['timeout_message'], message='')
self.stop_timer()
self.stop_button.set_callback(None)
else:
self.stop_button.set_callback(self.stop_timer)
self.app.title = self.progress_box + ' | ' + time_left_in_string
sender.count += 1
def update_progress_box(self):
self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self
.shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours
) * '◻︎'
def start_timer(self, sender):
if sender.title.lower().startswith(('start', 'continue')):
if sender.title == self.config['start']:
self.timer.count = 0
self.timer.end = self.shift_time_in_seconds
sender.title = self.config['pause']
self.timer.start()
else:
sender.title = self.config['continue']
self.timer.stop()
<mask token>
def handle_shift_setting_button(self, sender):
self.shift_setting_button_group.toggle(sender)
selected_hours = int(match('^\\d+\\s{1}', sender.title)[0])
self.progress_box = '◻︎' * selected_hours
self.shift_time_in_seconds = selected_hours * 3600
def handle_break_setting_button(self, sender):
self.break_setting_button_group.toggle(sender)
selected_minutes = int(match('^\\d+\\s{1}', sender.title)[0])
self.break_time_in_seconds = selected_minutes * 60
def run(self):
self.app.run()
<mask token>
| <mask token>
class RepeatWorkBreak(rumps.App):
def __init__(self):
rumps.debug_mode(True)
self.config = {'app_title': 'Repeat Work and Break', 'start':
'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',
'stop': 'Stop Timer', 'timeout_message':
'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *
1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{
'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],
'break_setting_buttons': [{'title': '5 minutes'}, {'title':
'10 minutes'}, {'title': '15 minutes'}]}
self.app = rumps.App(self.config['app_title'])
self.timer = rumps.Timer(self.on_tick, 1)
self.shift_setting_button_group = ButtonGroup(self.config[
'shift_setting_buttons'], callback=self.handle_shift_setting_button
)
self.break_setting_button_group = ButtonGroup(self.config[
'break_setting_buttons'], callback=self.handle_shift_setting_button
)
self.shift_time_in_seconds = self.config['shift_time_in_seconds']
self.break_time_in_seconds = self.config['break_time_in_seconds']
self.elapsed_shift_time_in_hours = 0
self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)
self.start_pause_button = rumps.MenuItem(title=self.config['start'],
callback=self.start_timer)
self.stop_button = rumps.MenuItem(title=self.config['stop'],
callback=None)
self.app.menu = [{'Preferences': {'Setting Shift': self.
shift_setting_button_group.buttons, 'Setting Break / hr': self.
break_setting_button_group.buttons}}, None, self.
start_pause_button, self.stop_button]
def set_up_menu(self):
self.timer.stop()
self.timer.count = 0
self.app.title = self.config['app_title']
def convert_seconds_to_time_string(self, seconds) ->str:
seconds = seconds % (24 * 3600)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return '%d:%02d:%02d' % (hours, minutes, seconds)
def on_tick(self, sender):
time_left_in_seconds = sender.end - sender.count
time_left_in_string = self.convert_seconds_to_time_string(
time_left_in_seconds)
if sender.count != 0 and sender.count % 3600 == 0:
self.elapsed_shift_time_in_hours += 1
self.update_progress_box()
if time_left_in_seconds == 0:
rumps.notification(title=self.config['app_title'], subtitle=
self.config['timeout_message'], message='')
self.stop_timer()
self.stop_button.set_callback(None)
else:
self.stop_button.set_callback(self.stop_timer)
self.app.title = self.progress_box + ' | ' + time_left_in_string
sender.count += 1
def update_progress_box(self):
self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self
.shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours
) * '◻︎'
def start_timer(self, sender):
if sender.title.lower().startswith(('start', 'continue')):
if sender.title == self.config['start']:
self.timer.count = 0
self.timer.end = self.shift_time_in_seconds
sender.title = self.config['pause']
self.timer.start()
else:
sender.title = self.config['continue']
self.timer.stop()
def stop_timer(self, sender=None):
self.set_up_menu()
self.stop_button.set_callback(None)
self.start_pause_button.title = self.config['start']
def handle_shift_setting_button(self, sender):
self.shift_setting_button_group.toggle(sender)
selected_hours = int(match('^\\d+\\s{1}', sender.title)[0])
self.progress_box = '◻︎' * selected_hours
self.shift_time_in_seconds = selected_hours * 3600
def handle_break_setting_button(self, sender):
self.break_setting_button_group.toggle(sender)
selected_minutes = int(match('^\\d+\\s{1}', sender.title)[0])
self.break_time_in_seconds = selected_minutes * 60
def run(self):
self.app.run()
<mask token>
| <mask token>
class RepeatWorkBreak(rumps.App):
def __init__(self):
rumps.debug_mode(True)
self.config = {'app_title': 'Repeat Work and Break', 'start':
'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',
'stop': 'Stop Timer', 'timeout_message':
'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *
1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{
'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],
'break_setting_buttons': [{'title': '5 minutes'}, {'title':
'10 minutes'}, {'title': '15 minutes'}]}
self.app = rumps.App(self.config['app_title'])
self.timer = rumps.Timer(self.on_tick, 1)
self.shift_setting_button_group = ButtonGroup(self.config[
'shift_setting_buttons'], callback=self.handle_shift_setting_button
)
self.break_setting_button_group = ButtonGroup(self.config[
'break_setting_buttons'], callback=self.handle_shift_setting_button
)
self.shift_time_in_seconds = self.config['shift_time_in_seconds']
self.break_time_in_seconds = self.config['break_time_in_seconds']
self.elapsed_shift_time_in_hours = 0
self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)
self.start_pause_button = rumps.MenuItem(title=self.config['start'],
callback=self.start_timer)
self.stop_button = rumps.MenuItem(title=self.config['stop'],
callback=None)
self.app.menu = [{'Preferences': {'Setting Shift': self.
shift_setting_button_group.buttons, 'Setting Break / hr': self.
break_setting_button_group.buttons}}, None, self.
start_pause_button, self.stop_button]
def set_up_menu(self):
self.timer.stop()
self.timer.count = 0
self.app.title = self.config['app_title']
def convert_seconds_to_time_string(self, seconds) ->str:
seconds = seconds % (24 * 3600)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return '%d:%02d:%02d' % (hours, minutes, seconds)
def on_tick(self, sender):
time_left_in_seconds = sender.end - sender.count
time_left_in_string = self.convert_seconds_to_time_string(
time_left_in_seconds)
if sender.count != 0 and sender.count % 3600 == 0:
self.elapsed_shift_time_in_hours += 1
self.update_progress_box()
if time_left_in_seconds == 0:
rumps.notification(title=self.config['app_title'], subtitle=
self.config['timeout_message'], message='')
self.stop_timer()
self.stop_button.set_callback(None)
else:
self.stop_button.set_callback(self.stop_timer)
self.app.title = self.progress_box + ' | ' + time_left_in_string
sender.count += 1
def update_progress_box(self):
self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self
.shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours
) * '◻︎'
def start_timer(self, sender):
if sender.title.lower().startswith(('start', 'continue')):
if sender.title == self.config['start']:
self.timer.count = 0
self.timer.end = self.shift_time_in_seconds
sender.title = self.config['pause']
self.timer.start()
else:
sender.title = self.config['continue']
self.timer.stop()
def stop_timer(self, sender=None):
self.set_up_menu()
self.stop_button.set_callback(None)
self.start_pause_button.title = self.config['start']
def handle_shift_setting_button(self, sender):
self.shift_setting_button_group.toggle(sender)
selected_hours = int(match('^\\d+\\s{1}', sender.title)[0])
self.progress_box = '◻︎' * selected_hours
self.shift_time_in_seconds = selected_hours * 3600
def handle_break_setting_button(self, sender):
self.break_setting_button_group.toggle(sender)
selected_minutes = int(match('^\\d+\\s{1}', sender.title)[0])
self.break_time_in_seconds = selected_minutes * 60
def run(self):
self.app.run()
if __name__ == '__main__':
app = RepeatWorkBreak()
app.run()
| from typing import List
from re import match
from utility import ButtonGroup
import rumps
class RepeatWorkBreak(rumps.App):
def __init__(self):
rumps.debug_mode(True)
self.config = {
"app_title": "Repeat Work and Break",
"start": "Start",
"pause": "Pause Timer",
"continue": "Continue Timer",
"stop": "Stop Timer",
"timeout_message": "Time is up! Take a break :)",
"shift_time_in_seconds": 60 * 60 * 1, # 60 seconds * 60 = 1 hour
"break_time_in_seconds": 60 * 5,
'shift_setting_buttons': [
{
'title': '1 hour',
},
{
'title': '4 hour',
},
{
'title': '8 hour',
}
],
'break_setting_buttons': [
{
'title': '5 minutes',
},
{
'title': '10 minutes',
},
{
'title': '15 minutes',
}
],
}
self.app = rumps.App(self.config['app_title'])
self.timer = rumps.Timer(self.on_tick, 1)
self.shift_setting_button_group = ButtonGroup(
self.config['shift_setting_buttons'], callback=self.handle_shift_setting_button)
self.break_setting_button_group = ButtonGroup(
self.config['break_setting_buttons'], callback=self.handle_shift_setting_button)
self.shift_time_in_seconds = self.config["shift_time_in_seconds"]
self.break_time_in_seconds = self.config["break_time_in_seconds"]
self.elapsed_shift_time_in_hours = 0
self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)
self.start_pause_button = rumps.MenuItem(
title=self.config["start"], callback=self.start_timer)
self.stop_button = rumps.MenuItem(
title=self.config["stop"], callback=None)
self.app.menu = [
{
'Preferences':
{
"Setting Shift": self.shift_setting_button_group.buttons,
"Setting Break / hr": self.break_setting_button_group.buttons,
}
},
None,
self.start_pause_button,
self.stop_button,
]
def set_up_menu(self):
self.timer.stop()
self.timer.count = 0
self.app.title = self.config['app_title']
def convert_seconds_to_time_string(self, seconds) -> str:
seconds = seconds % (24 * 3600)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return "%d:%02d:%02d" % (hours, minutes, seconds)
def on_tick(self, sender):
time_left_in_seconds = sender.end - sender.count
time_left_in_string = self.convert_seconds_to_time_string(
time_left_in_seconds)
if sender.count != 0 and sender.count % 3600 == 0:
self.elapsed_shift_time_in_hours += 1
self.update_progress_box()
if time_left_in_seconds == 0:
rumps.notification(
title=self.config["app_title"], subtitle=self.config["timeout_message"], message='')
self.stop_timer()
self.stop_button.set_callback(None)
else:
self.stop_button.set_callback(self.stop_timer)
self.app.title = self.progress_box + ' | ' + time_left_in_string
sender.count += 1
def update_progress_box(self):
self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self.shift_time_in_seconds // 3600 -
self.elapsed_shift_time_in_hours) * '◻︎'
def start_timer(self, sender):
if sender.title.lower().startswith(("start", "continue")):
if sender.title == self.config["start"]:
self.timer.count = 0
self.timer.end = self.shift_time_in_seconds
sender.title = self.config["pause"]
self.timer.start()
else:
sender.title = self.config["continue"]
self.timer.stop()
def stop_timer(self, sender=None):
self.set_up_menu()
self.stop_button.set_callback(None)
self.start_pause_button.title = self.config["start"]
def handle_shift_setting_button(self, sender):
self.shift_setting_button_group.toggle(sender)
selected_hours = int(match(r'^\d+\s{1}', sender.title)[0])
self.progress_box = "◻︎" * selected_hours # update empty progress box
self.shift_time_in_seconds = selected_hours * 3600 # hours in seconds
def handle_break_setting_button(self, sender):
self.break_setting_button_group.toggle(sender)
selected_minutes = int(match(r'^\d+\s{1}', sender.title)[0])
self.break_time_in_seconds = selected_minutes * 60
def run(self):
self.app.run()
if __name__ == "__main__":
app = RepeatWorkBreak()
app.run()
| [
7,
10,
11,
12,
14
] |
361 | 8bc40ed4fe1091ecdb40cd55ff9cf53010078823 | <mask token>
| <mask token>
for one in data:
print(one)
r = requests.post('http://localhost:8080/sumari', json=one)
print(r.text)
| <mask token>
data = json.load(open('dummy_data/data.json'))
for one in data:
print(one)
r = requests.post('http://localhost:8080/sumari', json=one)
print(r.text)
| import requests
import json
data = json.load(open('dummy_data/data.json'))
for one in data:
print(one)
r = requests.post('http://localhost:8080/sumari', json=one)
print(r.text)
| import requests
import json
data = json.load(open("dummy_data/data.json"))
for one in data:
print(one)
r = requests.post("http://localhost:8080/sumari", json=one)
print(r.text)
| [
0,
1,
2,
3,
4
] |
362 | 4e9a968842c2b3eca79690f0b56c8e176b203138 | <mask token>
| print(9 * int(input()) / 5 + 32)
| print((9*int(input())/5)+32) | null | null | [
0,
1,
2
] |
363 | 7a1bd2b4734527a414c6173ea8edb150221f8042 | <mask token>
| <mask token>
def getData():
weather_data = pd.read_csv('data/weather_data.csv')
currentMonth = datetime.now().month
currentHour = datetime.now().hour
currentMonthGroup = currentMonth // 2
hoep_data = []
temp = weather_data.iloc[:, 2]
for i in range(len(temp)):
weather_data.iloc[i, 1] = (currentHour + i) % 24
if currentMonthGroup == 0:
hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)
elif currentMonthGroup == 1:
hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)
elif currentMonthGroup == 2:
hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)
elif currentMonthGroup == 3:
hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)
elif currentMonthGroup == 4:
hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)
else:
hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)
load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=
'openpyxl')
load_sched = np.arange(48)
for i in range(len(temp)):
load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],
currentMonthGroup]
WMST = 0.003499
start_time = time.time()
def constraint1(x):
for i in range(48):
if i == 0:
x[48] = 0
else:
x[48 + i] = x[48 + i - 1] + x[i]
return x[0:48] + x[48:96]
def constraint2(x):
for i in range(48):
if i == 0:
x[48] = 0
else:
x[48 + i] = x[48 + i - 1] + x[i]
return 10000 - (x[0:48] + x[48:96])
power = ((-5000, 5000),) * 48
storage = ((0, 10000),) * 48
def MEC(x):
return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(
hoep_data) + WMST)))
x0 = np.array([np.ones(48), np.ones(48)])
bounds = power + storage
cons1 = {'type': 'ineq', 'fun': constraint1}
cons2 = {'type': 'ineq', 'fun': constraint2}
cons = [cons1, cons2]
sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,
options={'maxiter': 150, 'disp': True})
input_var = {'EA_w_bill': round(sol.fun, 2)}
return input_var
| import numpy as np
import pandas as pd
from scipy.optimize import minimize
from datetime import datetime
import time
from functions import weather_scraper
def getData():
weather_data = pd.read_csv('data/weather_data.csv')
currentMonth = datetime.now().month
currentHour = datetime.now().hour
currentMonthGroup = currentMonth // 2
hoep_data = []
temp = weather_data.iloc[:, 2]
for i in range(len(temp)):
weather_data.iloc[i, 1] = (currentHour + i) % 24
if currentMonthGroup == 0:
hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)
elif currentMonthGroup == 1:
hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)
elif currentMonthGroup == 2:
hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)
elif currentMonthGroup == 3:
hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)
elif currentMonthGroup == 4:
hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)
else:
hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)
load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=
'openpyxl')
load_sched = np.arange(48)
for i in range(len(temp)):
load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],
currentMonthGroup]
WMST = 0.003499
start_time = time.time()
def constraint1(x):
for i in range(48):
if i == 0:
x[48] = 0
else:
x[48 + i] = x[48 + i - 1] + x[i]
return x[0:48] + x[48:96]
def constraint2(x):
for i in range(48):
if i == 0:
x[48] = 0
else:
x[48 + i] = x[48 + i - 1] + x[i]
return 10000 - (x[0:48] + x[48:96])
power = ((-5000, 5000),) * 48
storage = ((0, 10000),) * 48
def MEC(x):
return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(
hoep_data) + WMST)))
x0 = np.array([np.ones(48), np.ones(48)])
bounds = power + storage
cons1 = {'type': 'ineq', 'fun': constraint1}
cons2 = {'type': 'ineq', 'fun': constraint2}
cons = [cons1, cons2]
sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,
options={'maxiter': 150, 'disp': True})
input_var = {'EA_w_bill': round(sol.fun, 2)}
return input_var
| import numpy as np
import pandas as pd
from scipy.optimize import minimize
from datetime import datetime
import time
from functions import weather_scraper
def getData():
# # run weather_scraper.py to fetch new weather data
# weather_scraper.getData()
## Read in csv file "weather_data.csv"
weather_data = pd.read_csv("data/weather_data.csv")
# Grab the current month & hour
currentMonth = datetime.now().month
currentHour = datetime.now().hour
# Determine which month group the current month is [0,5]
currentMonthGroup = currentMonth // 2
hoep_data = []
temp = weather_data.iloc[:,2]
# Change hour string to number from 0-23
for i in range(len(temp)):
weather_data.iloc[i,1] = (currentHour + i) % 24
# Convert temperature data to HOEP data
if (currentMonthGroup == 0) :
hoep_data = temp.apply(lambda x: (2.02887*x + 39.633)/100)
elif (currentMonthGroup == 1):
hoep_data = temp.apply(lambda x: (0.453122*x + 19.8276)/100)
elif (currentMonthGroup == 2):
hoep_data = temp.apply(lambda x: (1.13665*x - 11.0085)/100)
elif (currentMonthGroup == 3):
hoep_data = temp.apply(lambda x: (1.90245*x - 23.2826)/100)
elif (currentMonthGroup == 4):
hoep_data = temp.apply(lambda x: (1.39145*x - 8.97971)/100)
else:
hoep_data = temp.apply(lambda x: (1.72767*x + 21.3536)/100)
# Load in the load_data
load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine = 'openpyxl')
# Create loading schedule based on current time of day and month
load_sched = np.arange(48)
for i in range(len(temp)):
load_sched[i] = load_data.iloc[ weather_data.iloc[i,1] , currentMonthGroup]
WMST = 0.003499
## x[0:48] = PCEA
## x[48:96] = ESB
start_time = time.time()
# Constraints to ensure that ESB falls within limits
def constraint1(x):
for i in range(48):
if (i == 0):
x[48] = 0
else:
x[48+i] = x[48+i-1] + x[i]
return x[0:48] + x[48:96]
def constraint2(x):
for i in range(48):
if (i == 0):
x[48] = 0
else:
x[48+i] = x[48+i-1] + x[i]
return 10000 - (x[0:48]+ x[48:96])
power = ((-5000, 5000),) * 48
storage = ((0, 10000),) * 48
#Objective
def MEC(x): # ( PDLL + PCEA ) x HOEP
return sum(sum( (load_sched + np.array([x[0:48]])) * (np.array(hoep_data)+WMST) ))
x0 = np.array([np.ones(48), np.ones(48)])
bounds = (power + storage)
cons1 = {'type': 'ineq', 'fun': constraint1}
cons2 = {'type': 'ineq', 'fun': constraint2}
cons = ([cons1, cons2])
sol = minimize(MEC, x0, method='SLSQP',bounds=bounds,constraints=cons,options= {'maxiter':150,'disp':True})
input_var = {"EA_w_bill": round(sol.fun,2)}
return input_var
| null | [
0,
1,
2,
3
] |
364 | 1bdb19373960e4f63d80d6ab73ec3c0939e40b7f | <mask token>
@contextlib.contextmanager
def dummy_context(*args, **kwargs):
yield
<mask token>
| <mask token>
@contextlib.contextmanager
def dummy_context(*args, **kwargs):
yield
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
if DASK_VERSION < packaging.version.parse('1.1.0'):
blockwise = da.atop
else:
blockwise = da.blockwise
| <mask token>
SK_VERSION = packaging.version.parse(sklearn.__version__)
DASK_VERSION = packaging.version.parse(dask.__version__)
PANDAS_VERSION = packaging.version.parse(pandas.__version__)
@contextlib.contextmanager
def dummy_context(*args, **kwargs):
yield
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
if DASK_VERSION < packaging.version.parse('1.1.0'):
blockwise = da.atop
else:
blockwise = da.blockwise
| import contextlib
import dask
import dask.array as da
import packaging.version
import pandas
import six
import sklearn
SK_VERSION = packaging.version.parse(sklearn.__version__)
DASK_VERSION = packaging.version.parse(dask.__version__)
PANDAS_VERSION = packaging.version.parse(pandas.__version__)
@contextlib.contextmanager
def dummy_context(*args, **kwargs):
yield
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
if DASK_VERSION < packaging.version.parse('1.1.0'):
blockwise = da.atop
else:
blockwise = da.blockwise
| import contextlib
import dask
import dask.array as da
import packaging.version
import pandas
import six
import sklearn
SK_VERSION = packaging.version.parse(sklearn.__version__)
DASK_VERSION = packaging.version.parse(dask.__version__)
PANDAS_VERSION = packaging.version.parse(pandas.__version__)
@contextlib.contextmanager
def dummy_context(*args, **kwargs):
yield
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping # noqa
if DASK_VERSION < packaging.version.parse("1.1.0"):
blockwise = da.atop
else:
blockwise = da.blockwise
| [
1,
2,
3,
4,
5
] |
365 | a4f932a8566afe0265dc1057d0f6534a608697f7 | <mask token>
| <mask token>
class Solution(object):
<mask token>
| <mask token>
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
length1 = len(s)
length2 = len(t)
if length1 != length2:
return False
s = sorted(s)
t = sorted(t)
for i in range(0, length1):
if s[i] != t[i]:
return False
return True
| """
LeetCode Problem: 242. Valid Anagram
Link: https://leetcode.com/problems/valid-anagram/
Written by: Mostofa Adib Shakib
Language: Python
"""
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
length1 = len(s)
length2 = len(t)
if length1 != length2:
return False
s = sorted(s) #sorted the string in alphanumeric order
t = sorted(t) #sorted the string in alphanumeric order
for i in range(0, length1):
if s[i] != t[i]:
return False # return false if the two sorted strings are not the same.
return True # if the sorted strings are same return True | null | [
0,
1,
2,
3
] |
366 | 6dd11f71e514a46462bf0b97ddac9ea474e86ad0 | <mask token>
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
<mask token>
| <mask token>
def test_get_entity_sizes():
bytes_per_voxel = 1
R = 10, 9, 10
cs = 5, 3, 2
partition = 2, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5 * 3 * 2
assert brs == 5 * 3 * 2 * 5
assert bss == 5 * 3 * 2 * 5 * 3
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel
test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *
3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,
(5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *
5 * 3 * 7): 1}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,
cs, bs, brs, bss, partition, R, bytes_per_voxel)
nb_buffers = len(buffers.values())
assert nb_buffers == expected
<mask token>
| <mask token>
def test_get_entity_sizes():
bytes_per_voxel = 1
R = 10, 9, 10
cs = 5, 3, 2
partition = 2, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5 * 3 * 2
assert brs == 5 * 3 * 2 * 5
assert bss == 5 * 3 * 2 * 5 * 3
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel
test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *
3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,
(5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *
5 * 3 * 7): 1}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,
cs, bs, brs, bss, partition, R, bytes_per_voxel)
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = 20, 9, 10
cs = 5, 3, 2
ff = 'HDF5'
outdir_path = './outdir'
test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,
5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *
3 * 2 * 5 * 3 * 7]
nb_chunks = 4 * 3 * 5
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob('*.hdf5'):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
| import os, glob
import numpy as np
from ..algorithms.utils import get_file_manager
from ..algorithms.clustered_writes import *
from ..exp_utils import create_empty_dir
def test_get_entity_sizes():
bytes_per_voxel = 1
R = 10, 9, 10
cs = 5, 3, 2
partition = 2, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5 * 3 * 2
assert brs == 5 * 3 * 2 * 5
assert bss == 5 * 3 * 2 * 5 * 3
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel
test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *
3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,
(5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *
5 * 3 * 7): 1}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,
cs, bs, brs, bss, partition, R, bytes_per_voxel)
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = 20, 9, 10
cs = 5, 3, 2
ff = 'HDF5'
outdir_path = './outdir'
test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,
5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *
3 * 2 * 5 * 3 * 7]
nb_chunks = 4 * 3 * 5
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob('*.hdf5'):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
| import os, glob
import numpy as np
from ..algorithms.utils import get_file_manager
from ..algorithms.clustered_writes import *
from ..exp_utils import create_empty_dir
def test_get_entity_sizes():
# in C order
bytes_per_voxel = 1
R = (10,9,10)
cs = (5,3,2)
partition = (2,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5*3*2
assert brs == 5*3*2*5
assert bss == 5*3*2*5*3
def test_get_strategy():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {
5*2*3: 0, # 1 block
5*2*3*4: 0, # 4 blocks
5*2*3*5: 1, # 1 row
5*2*3*5*2: 1, # 2 rows
5*2*3*5*3: 2, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 2, # whole img
5*2*3*5*3*7: 2, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel
test_case = {
5*2*3: 4*3*5, # 1 block
5*2*3*4: 4*3*2, # 4 blocks
5*2*3*5: 4*3, # 1 row
5*2*3*5*2: 4*2, # 2 rows
5*2*3*5*3: 4, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 1, # whole img
5*2*3*5*3*7: 1, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)
# test number of buffers
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = (20,9,10)
cs = (5,3,2)
ff = 'HDF5'
outdir_path = './outdir'
test_case = [
5*3*2, # 1 block
5*3*2*4, # 4 blocks
5*3*2*5, # 1 row
5*3*2*5*2, # 2 rows
5*3*2*5*3, # 1 slice
5*3*2*5*3*3, # 3 slices
5*3*2*5*3*4, # whole img
5*3*2*5*3*7, # whole img (more mem than necessary)
]
nb_chunks = 4*3*5
# create input array
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob("*.hdf5"):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
| [
1,
3,
4,
5,
6
] |
367 | c09c02a36a64e9522cfc8c0951bd6c98f404f09c | <mask token>
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
<mask token>
| <mask token>
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
main()
| <mask token>
randomNumber = random.randint(1, 100)
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
main()
| import random
randomNumber = random.randint(1, 100)
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
main()
| # Random number guessing game.
# 10 July 20
# CTI-110 P5HW1 - Random Number
# Thelma Majette
import random
randomNumber = random.randint (1,100)
# main function
def main():
# Create a variable to control the loop.
keep_going = 'y'
while keep_going == 'y':
# Ask user for a number ()
guess = int(input('\nGuess a number between 1 and 100: '))
# Perform the selected action.
if guess > randomNumber:
print ('\nToo high, try again.' )
elif guess < randomNumber:
print ('\nToo low, try again' )
else:
print ('\nCongratulations, you guessed the correct number!')
keep_going ='n'
main ()
| [
1,
2,
3,
4,
5
] |
368 | 798ddd4a6e4febb4664bf1c973877628d1a45c71 | <mask token>
| <mask token>
urlpatterns = patterns('accounts.views', url('^$', 'home', name='home'),
url('^login/$', 'login', name='login'), url('^logout/$', 'logout', name
='logout'), url('^register/$', 'register', name='register'), url(
'^dashboard/', 'dashboard', name='dashboard'), url('^rewards/',
'rewards', name='rewards'), url('get_all_data/', 'get_all_data', name=
'get_all_data'))
| from django.conf.urls import patterns, include, url
urlpatterns = patterns('accounts.views', url('^$', 'home', name='home'),
url('^login/$', 'login', name='login'), url('^logout/$', 'logout', name
='logout'), url('^register/$', 'register', name='register'), url(
'^dashboard/', 'dashboard', name='dashboard'), url('^rewards/',
'rewards', name='rewards'), url('get_all_data/', 'get_all_data', name=
'get_all_data'))
| from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('accounts.views',
url(r'^$', 'home', name='home'),
url(r'^login/$', 'login', name='login'),
url(r'^logout/$', 'logout', name='logout'),
url(r'^register/$', 'register', name='register'),
url(r'^dashboard/', 'dashboard', name='dashboard'),
url(r'^rewards/', 'rewards', name='rewards'),
url(r'get_all_data/', 'get_all_data', name='get_all_data'),
)
| null | [
0,
1,
2,
3
] |
369 | 3340277df91f1421dab8d204eddce65b4604432b | <mask token>
class CourceCreateView(CreateView):
template_name = 'cources/create_cource.html'
form_class = CourceCreateForm
success_url = reverse_lazy('cources:cource_list')
class CourceUpdateView(UpdateView):
model = Courses
form_class = CourceCreateForm
template_name = 'cources/course_update.html'
success_url = reverse_lazy('cources:cource_list')
<mask token>
| <mask token>
class CourceListView(ListView):
model = Courses
template_name = 'cources/cource_list.html'
context_object_name = 'cources'
class CourceCreateView(CreateView):
template_name = 'cources/create_cource.html'
form_class = CourceCreateForm
success_url = reverse_lazy('cources:cource_list')
class CourceUpdateView(UpdateView):
model = Courses
form_class = CourceCreateForm
template_name = 'cources/course_update.html'
success_url = reverse_lazy('cources:cource_list')
<mask token>
| <mask token>
class CourceListView(ListView):
model = Courses
template_name = 'cources/cource_list.html'
context_object_name = 'cources'
class CourceCreateView(CreateView):
template_name = 'cources/create_cource.html'
form_class = CourceCreateForm
success_url = reverse_lazy('cources:cource_list')
class CourceUpdateView(UpdateView):
model = Courses
form_class = CourceCreateForm
template_name = 'cources/course_update.html'
success_url = reverse_lazy('cources:cource_list')
def DeleteView(request, pk):
cource = Courses.objects.filter(pk=pk)
cource.delete()
return redirect(reverse('cources:cource_list'))
| from django.shortcuts import render, redirect
from .models import Courses
from django.views.generic import CreateView, ListView, UpdateView, DeleteView
from .forms import CourceCreateForm
from django.urls import reverse_lazy
from django.urls import reverse
class CourceListView(ListView):
model = Courses
template_name = 'cources/cource_list.html'
context_object_name = 'cources'
class CourceCreateView(CreateView):
template_name = 'cources/create_cource.html'
form_class = CourceCreateForm
success_url = reverse_lazy('cources:cource_list')
class CourceUpdateView(UpdateView):
model = Courses
form_class = CourceCreateForm
template_name = 'cources/course_update.html'
success_url = reverse_lazy('cources:cource_list')
def DeleteView(request, pk):
cource = Courses.objects.filter(pk=pk)
cource.delete()
return redirect(reverse('cources:cource_list'))
| null | [
4,
6,
7,
8
] |
370 | 66444047f9e5eea845c8ac2dbaaf16fc2914d6ec | <mask token>
| if answ[1] == 'дата':
apisay(datetime.date.today(), toho, torep)
| null | null | null | [
0,
1
] |
371 | d2f6d7c779d3d6e61d9da7af01a2931fdabec828 | <mask token>
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
<mask token>
| <mask token>
try:
player_sym = input("Choose 'X' or 'O' : ")
if player_sym != 'X' and player_sym != 'O':
raise Exception('Symbol not found')
except Exception as e:
print(e.args)
else:
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym: 'Player', comp_sym: 'Computer'}
<mask token>
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
if __name__ == '__main__':
while not gameEnd:
try:
player_pos = int(input('\n\nWhere would you mark? '))
if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:
raise Exception('Position out of Board')
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
if gameOver(board, player_sym):
displayBoard()
print('\n\nPlayer Won!!!')
break
comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]
mark(comp_pos, comp_sym)
if gameOver(board, comp_sym):
displayBoard()
print('\n\nComputer WON!!!')
break
displayBoard()
print('GAME OVER')
| <mask token>
choices = ['X', 'O']
try:
player_sym = input("Choose 'X' or 'O' : ")
if player_sym != 'X' and player_sym != 'O':
raise Exception('Symbol not found')
except Exception as e:
print(e.args)
else:
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym: 'Player', comp_sym: 'Computer'}
board = [' '] * 9
gameEnd = False
unmarked = [i for i in range(9)]
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
if __name__ == '__main__':
while not gameEnd:
try:
player_pos = int(input('\n\nWhere would you mark? '))
if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:
raise Exception('Position out of Board')
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
if gameOver(board, player_sym):
displayBoard()
print('\n\nPlayer Won!!!')
break
comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]
mark(comp_pos, comp_sym)
if gameOver(board, comp_sym):
displayBoard()
print('\n\nComputer WON!!!')
break
displayBoard()
print('GAME OVER')
| import random
choices = ['X', 'O']
try:
player_sym = input("Choose 'X' or 'O' : ")
if player_sym != 'X' and player_sym != 'O':
raise Exception('Symbol not found')
except Exception as e:
print(e.args)
else:
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym: 'Player', comp_sym: 'Computer'}
board = [' '] * 9
gameEnd = False
unmarked = [i for i in range(9)]
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
if __name__ == '__main__':
while not gameEnd:
try:
player_pos = int(input('\n\nWhere would you mark? '))
if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:
raise Exception('Position out of Board')
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
if gameOver(board, player_sym):
displayBoard()
print('\n\nPlayer Won!!!')
break
comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]
mark(comp_pos, comp_sym)
if gameOver(board, comp_sym):
displayBoard()
print('\n\nComputer WON!!!')
break
displayBoard()
print('GAME OVER')
| import random
choices = ['X', 'O']
try:
# Choice of X-O given to the player
player_sym = input("Choose 'X' or 'O' : ")
# raising an exception if the variable is not X or O
if player_sym!='X' and player_sym!='O':
raise Exception("Symbol not found")
except Exception as e:
print(e.args)
else:
# Allotting the other one as the computer symbol
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym:'Player', comp_sym:'Computer'}
# creating the board
board = [' ']*9
gameEnd = False # to track when the game ends
unmarked = [i for i in range(9)] # to track all the blank boxes left
# gameOver function check if the game already has a winner
def gameOver(board, symbol):
# below is the sequence of all the possible winning combinations
if board[0]==board[3]==board[6]==symbol or board[1]==board[7]==board[4]==symbol or board[2]==board[5]==board[8]==symbol or board[0]==board[1]==board[2]==symbol or board[5]==board[3]==board[4]==symbol or board[6]==board[7]==board[8]==symbol or board[2]==board[4]==board[6]==symbol or board[0]==board[4]==board[8]==symbol:
# if there is a pattern match the game is over hence return True
return True
# function for marking the box with the symbol
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
# Used it for debugging : print(f"Unmarked : {unmarked}")
# function to display the board at a particular time
def displayBoard():
for i in range(len(board)):
# formatting the output for the middle elements
if i==1 or i==4 or i==7:
print(f'|{board[i]}|', end=' ')
elif i==2 or i==5:
print(f'{board[i]}\n--------') # marks the end of a line and hence bifurcates two lines
else:
print(f'{board[i]}', end=' ')
if __name__== "__main__":
# this is where the game starts
while not gameEnd: # loop until game ends
try:
player_pos = int(input("\n\nWhere would you mark? "))
# check if position index is on the board and is available for marking else raise Exception
if player_pos<0 or player_pos>8 or (player_pos not in unmarked):
raise Exception("Position out of Board")
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
# check if the game has already ended and if yes, declare the player as winner
if gameOver(board, player_sym):
displayBoard()
print("\n\nPlayer Won!!!")
break
# computer will mark on some random square that is not marked yet
comp_pos = unmarked[random.randint(0, len(unmarked)-1)]
mark(comp_pos, comp_sym)
# check if the game has already ended and if yes, declare the computer as winner
if gameOver(board, comp_sym):
displayBoard()
print("\n\nComputer WON!!!")
break
# display the board after each iteration
displayBoard()
# marks the end of the game
print("GAME OVER") | [
3,
4,
5,
6,
7
] |
372 | b4c6075aabe833f6fe23471f608d928edd25ef63 | <mask token>
| <mask token>
class warning_test(paw_test):
<mask token>
| <mask token>
class warning_test(paw_test):
def test_warning_badchars(self):
self.paw.cset_lookup(self.badchar)
self.assertEqual(1, self.paw.wcount)
| from .base import paw_test
class warning_test(paw_test):
def test_warning_badchars(self):
self.paw.cset_lookup(self.badchar)
self.assertEqual(1, self.paw.wcount)
| null | [
0,
1,
2,
3
] |
373 | 24c9b562411a63f0d3f2ee509bb60dafe7fbecd1 | <mask token>
| <mask token>
app.config.from_object(__name__)
<mask token>
| <mask token>
app = Flask(__name__)
csrf = CSRFProtect(app)
app.config['SECRET_KEY'] = 'vù÷\x11\x13\x18úMYpí_èÉw\x06\x8eðfÒºý\x8cÚ'
app.config['SQLALCHEMY_DATABASE_URI'] = (
'postgres://vwfeollskqmjyw:1d738da99074015b148d72cfd94ea584dcb39e81c1bb197fb9da65455c756b0f@ec2-50-17-227-28.compute-1.amazonaws.com:5432/dc8lr6j69aeqjt'
)
PRO_PIC_UPLOAD_FOLDER = './app/static/profile_photos'
POSTS_UPLOAD_FOLDER = './app/static/posts_photos'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
app.config.from_object(__name__)
<mask token>
| import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CSRFProtect
app = Flask(__name__)
csrf = CSRFProtect(app)
app.config['SECRET_KEY'] = 'vù÷\x11\x13\x18úMYpí_èÉw\x06\x8eðfÒºý\x8cÚ'
app.config['SQLALCHEMY_DATABASE_URI'] = (
'postgres://vwfeollskqmjyw:1d738da99074015b148d72cfd94ea584dcb39e81c1bb197fb9da65455c756b0f@ec2-50-17-227-28.compute-1.amazonaws.com:5432/dc8lr6j69aeqjt'
)
PRO_PIC_UPLOAD_FOLDER = './app/static/profile_photos'
POSTS_UPLOAD_FOLDER = './app/static/posts_photos'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
app.config.from_object(__name__)
from app import views, models
| import os
from flask import Flask
# from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
# from flask_bcrypt import Bcrypt
from flask_wtf.csrf import CSRFProtect
app = Flask(__name__)
csrf = CSRFProtect(app)
# bcrypt = Bcrypt(app)
app.config['SECRET_KEY'] = 'v\xf9\xf7\x11\x13\x18\xfaMYp\xed_\xe8\xc9w\x06\x8e\xf0f\xd2\xba\xfd\x8c\xda'
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://vwfeollskqmjyw:1d738da99074015b148d72cfd94ea584dcb39e81c1bb197fb9da65455c756b0f@ec2-50-17-227-28.compute-1.amazonaws.com:5432/dc8lr6j69aeqjt'
#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql: //infoproj2:info3180@localhost/infoproj2'
PRO_PIC_UPLOAD_FOLDER = "./app/static/profile_photos"
POSTS_UPLOAD_FOLDER = "./app/static/posts_photos"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning
db = SQLAlchemy(app)
#Flask-Login LoginManager
# login_manager = LoginManager()
# login_manager.init_app(app)
# login_manager.login_view = 'login'
app.config.from_object(__name__)
from app import views,models
#intense-basin-58864 #name of project on heroku | [
0,
1,
2,
3,
4
] |
374 | 14a357f3dfb3d59f1d8cfd566edeaf8b0e5bb56d | <mask token>
def callback(data):
global first_a
global first_d
global oldvar
global base_throttle
global peak_throttle
global base_brake
global peak_brake
global button
axis1 = -data.axes[1]
axis3 = -data.axes[3]
button1 = data.buttons[1]
button4 = data.buttons[4]
button5 = data.buttons[5]
button_ = button1 + button4 + button5
if axis1 > 0.1:
bval = int(axis1 * (peak_brake - base_brake) + base_brake)
print(bval)
ser.write(str(bval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
print('Brake')
elif axis1 < -0.1 and axis3 < 0.1:
tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -
base_throttle) * 0.5 + base_throttle)
if abs(tval - oldvar) > 5:
ser.write(str(tval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Throttle')
oldvar = tval
elif axis1 > -0.1 and axis1 < 0.1:
ser.write('4000a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Zero Throttle')
print(axis1)
print(axis3)
if button1 == 1:
print('Emergency Brake')
ser.write('4600a'.encode('utf-8'))
ser.write('600a'.encode('utf-8'))
if button4 and button5 == 0:
if first_a == 0:
ser.write('1000a'.encode('utf-8'))
print('Joystick button 4 pressed.')
first_a = 1
if button5 and button4 == 0:
if first_d == 0:
ser.write('2000a'.encode('utf-8'))
print('Joystick button 5 pressed.')
first_d = 1
if button - button_ != 0:
if button4 == 0:
first_a = 0
if button5 == 0:
first_d = 0
ser.write('3000a'.encode('utf-8'))
print('Joystick button released.')
button = button_
def start():
rospy.Subscriber('joy', Joy, callback)
rospy.init_node('Joy2Turtle')
rospy.spin()
<mask token>
| <mask token>
if platform == 'linux' or platform == 'linux2':
ser = serial.Serial('/dev/ttyACM0')
elif platform == 'darwin':
pass
elif platform == 'win32':
ser = serial.Serial('COM16')
<mask token>
def callback(data):
global first_a
global first_d
global oldvar
global base_throttle
global peak_throttle
global base_brake
global peak_brake
global button
axis1 = -data.axes[1]
axis3 = -data.axes[3]
button1 = data.buttons[1]
button4 = data.buttons[4]
button5 = data.buttons[5]
button_ = button1 + button4 + button5
if axis1 > 0.1:
bval = int(axis1 * (peak_brake - base_brake) + base_brake)
print(bval)
ser.write(str(bval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
print('Brake')
elif axis1 < -0.1 and axis3 < 0.1:
tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -
base_throttle) * 0.5 + base_throttle)
if abs(tval - oldvar) > 5:
ser.write(str(tval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Throttle')
oldvar = tval
elif axis1 > -0.1 and axis1 < 0.1:
ser.write('4000a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Zero Throttle')
print(axis1)
print(axis3)
if button1 == 1:
print('Emergency Brake')
ser.write('4600a'.encode('utf-8'))
ser.write('600a'.encode('utf-8'))
if button4 and button5 == 0:
if first_a == 0:
ser.write('1000a'.encode('utf-8'))
print('Joystick button 4 pressed.')
first_a = 1
if button5 and button4 == 0:
if first_d == 0:
ser.write('2000a'.encode('utf-8'))
print('Joystick button 5 pressed.')
first_d = 1
if button - button_ != 0:
if button4 == 0:
first_a = 0
if button5 == 0:
first_d = 0
ser.write('3000a'.encode('utf-8'))
print('Joystick button released.')
button = button_
def start():
rospy.Subscriber('joy', Joy, callback)
rospy.init_node('Joy2Turtle')
rospy.spin()
if __name__ == '__main__':
start()
| <mask token>
if platform == 'linux' or platform == 'linux2':
ser = serial.Serial('/dev/ttyACM0')
elif platform == 'darwin':
pass
elif platform == 'win32':
ser = serial.Serial('COM16')
<mask token>
oldvar = 0
first_a = 0
first_d = 0
base_throttle = 5500
peak_throttle = 6500
base_brake = 450
peak_brake = 600
button = 0
def callback(data):
global first_a
global first_d
global oldvar
global base_throttle
global peak_throttle
global base_brake
global peak_brake
global button
axis1 = -data.axes[1]
axis3 = -data.axes[3]
button1 = data.buttons[1]
button4 = data.buttons[4]
button5 = data.buttons[5]
button_ = button1 + button4 + button5
if axis1 > 0.1:
bval = int(axis1 * (peak_brake - base_brake) + base_brake)
print(bval)
ser.write(str(bval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
print('Brake')
elif axis1 < -0.1 and axis3 < 0.1:
tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -
base_throttle) * 0.5 + base_throttle)
if abs(tval - oldvar) > 5:
ser.write(str(tval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Throttle')
oldvar = tval
elif axis1 > -0.1 and axis1 < 0.1:
ser.write('4000a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Zero Throttle')
print(axis1)
print(axis3)
if button1 == 1:
print('Emergency Brake')
ser.write('4600a'.encode('utf-8'))
ser.write('600a'.encode('utf-8'))
if button4 and button5 == 0:
if first_a == 0:
ser.write('1000a'.encode('utf-8'))
print('Joystick button 4 pressed.')
first_a = 1
if button5 and button4 == 0:
if first_d == 0:
ser.write('2000a'.encode('utf-8'))
print('Joystick button 5 pressed.')
first_d = 1
if button - button_ != 0:
if button4 == 0:
first_a = 0
if button5 == 0:
first_d = 0
ser.write('3000a'.encode('utf-8'))
print('Joystick button released.')
button = button_
def start():
rospy.Subscriber('joy', Joy, callback)
rospy.init_node('Joy2Turtle')
rospy.spin()
if __name__ == '__main__':
start()
| import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
import serial
from sys import platform
if platform == 'linux' or platform == 'linux2':
ser = serial.Serial('/dev/ttyACM0')
elif platform == 'darwin':
pass
elif platform == 'win32':
ser = serial.Serial('COM16')
<mask token>
oldvar = 0
first_a = 0
first_d = 0
base_throttle = 5500
peak_throttle = 6500
base_brake = 450
peak_brake = 600
button = 0
def callback(data):
global first_a
global first_d
global oldvar
global base_throttle
global peak_throttle
global base_brake
global peak_brake
global button
axis1 = -data.axes[1]
axis3 = -data.axes[3]
button1 = data.buttons[1]
button4 = data.buttons[4]
button5 = data.buttons[5]
button_ = button1 + button4 + button5
if axis1 > 0.1:
bval = int(axis1 * (peak_brake - base_brake) + base_brake)
print(bval)
ser.write(str(bval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
print('Brake')
elif axis1 < -0.1 and axis3 < 0.1:
tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -
base_throttle) * 0.5 + base_throttle)
if abs(tval - oldvar) > 5:
ser.write(str(tval).encode('utf-8'))
ser.write('a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Throttle')
oldvar = tval
elif axis1 > -0.1 and axis1 < 0.1:
ser.write('4000a'.encode('utf-8'))
ser.write('450a'.encode('utf-8'))
print('Zero Throttle')
print(axis1)
print(axis3)
if button1 == 1:
print('Emergency Brake')
ser.write('4600a'.encode('utf-8'))
ser.write('600a'.encode('utf-8'))
if button4 and button5 == 0:
if first_a == 0:
ser.write('1000a'.encode('utf-8'))
print('Joystick button 4 pressed.')
first_a = 1
if button5 and button4 == 0:
if first_d == 0:
ser.write('2000a'.encode('utf-8'))
print('Joystick button 5 pressed.')
first_d = 1
if button - button_ != 0:
if button4 == 0:
first_a = 0
if button5 == 0:
first_d = 0
ser.write('3000a'.encode('utf-8'))
print('Joystick button released.')
button = button_
def start():
rospy.Subscriber('joy', Joy, callback)
rospy.init_node('Joy2Turtle')
rospy.spin()
if __name__ == '__main__':
start()
| #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
import serial
from sys import platform
if platform == "linux" or platform == "linux2":
ser = serial.Serial('/dev/ttyACM0')
elif platform == "darwin":
pass
elif platform == "win32":
# Windows...
ser = serial.Serial('COM16')
"""
In this test code we are testing basic vehicle control over the network
we use ROS middleware to send the control commands
This script runs at the remote driver end.
Receives joystick messages (subscribed to Joy topic)
then converts the joystick inputs into commands
WE ARE NOT USING THIS METHOD NOW
--- WE HAVE SEPERATED OUT ALL THE STREAMS FROM THE JOYSTICK
"""
oldvar = 0
first_a = 0
first_d = 0
# Configuatrion tuned for CAR in LOW speed
base_throttle = 5500
peak_throttle = 6500
base_brake = 450
peak_brake = 600
button = 0
def callback(data):
global first_a
global first_d
global oldvar
global base_throttle
global peak_throttle
global base_brake
global peak_brake
global button
# print data
axis1 = -data.axes[1]
axis3 = -data.axes[3] # in logitech axis 3 is axis 4 confirm with ashish
button1 = data.buttons[1]
button4 = data.buttons[4]
button5 = data.buttons[5]
button_ = button1+button4+button5
if axis1 > 0.1:
bval = int((axis1) * (peak_brake - base_brake) + base_brake)
print(bval)
ser.write(str(bval).encode('utf-8'))
ser.write("a".encode('utf-8'))
#### ser.write("4000a".encode('utf-8')) #throttle released on braking
print("Brake")
elif (axis1 < -0.1 and axis3 < 0.1):
tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle - base_throttle) * 0.5 + base_throttle)
if (abs(tval - oldvar) > 5):
#print(tval)
ser.write(str(tval).encode('utf-8'))
ser.write("a".encode('utf-8'))
ser.write("450a".encode('utf-8')) # brake released on acceleration
print("Throttle")
oldvar = tval
elif (axis1 > -0.1 and axis1 < 0.1):
ser.write("4000a".encode('utf-8'))
ser.write("450a".encode('utf-8')) # brake released
print("Zero Throttle")
print (axis1)
print (axis3)
if button1 == 1:
print("Emergency Brake")
ser.write("4600a".encode('utf-8')) # throttle released
ser.write("600a".encode('utf-8')) # brake engaged
if (button4 and button5 == 0):
if (first_a == 0):
ser.write("1000a".encode('utf-8'))
print("Joystick button 4 pressed.")
first_a = 1
if (button5 and button4 == 0):
if (first_d == 0):
ser.write("2000a".encode('utf-8'))
print("Joystick button 5 pressed.")
first_d = 1
if(button-button_!= 0):
if(button4 == 0):
first_a = 0
if(button5 == 0):
first_d = 0
ser.write("3000a".encode('utf-8'))
print("Joystick button released.")
button = button_
# Intializes everything
def start():
rospy.Subscriber("joy", Joy, callback)
# starts the node
rospy.init_node('Joy2Turtle')
rospy.spin()
if __name__ == '__main__':
start()
| [
2,
3,
4,
5,
6
] |
375 | 1ef9df43725196904ec6c0c881f4a1204174b176 | <mask token>
| <mask token>
with open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'),
'w') as file:
writer = csv.writer(file, delimiter=',')
headers = [cell.value for cell in sheet.row(0)]
writer.writerow(headers)
for i in range(1, sheet.nrows):
rowvalue_list = [(str(cell.value).strip() if cell.value else None) for
cell in sheet.row(i)]
writer.writerow(rowvalue_list)
| <mask token>
xlsfile = glob.glob(os.path.join(os.path.dirname(__file__),
'storage/robot*.xls'))[0]
wb = open_workbook(xlsfile)
sheet = wb.sheet_by_name('robot_list')
with open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'),
'w') as file:
writer = csv.writer(file, delimiter=',')
headers = [cell.value for cell in sheet.row(0)]
writer.writerow(headers)
for i in range(1, sheet.nrows):
rowvalue_list = [(str(cell.value).strip() if cell.value else None) for
cell in sheet.row(i)]
writer.writerow(rowvalue_list)
| import requests, shutil, os, glob
from zipfile import ZipFile
import pandas as pd
from xlrd import open_workbook
import csv
xlsfile = glob.glob(os.path.join(os.path.dirname(__file__),
'storage/robot*.xls'))[0]
wb = open_workbook(xlsfile)
sheet = wb.sheet_by_name('robot_list')
with open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'),
'w') as file:
writer = csv.writer(file, delimiter=',')
headers = [cell.value for cell in sheet.row(0)]
writer.writerow(headers)
for i in range(1, sheet.nrows):
rowvalue_list = [(str(cell.value).strip() if cell.value else None) for
cell in sheet.row(i)]
writer.writerow(rowvalue_list)
|
import requests, shutil, os, glob
from zipfile import ZipFile
import pandas as pd
from xlrd import open_workbook
import csv
# zipfilename = 'desiya_hotels'
# try:
# # downloading zip file
# r = requests.get('http://staticstore.travelguru.com/testdump/1300001176/Excel.zip', auth=('testdump', 'testdump'), verify=False,stream=True) #Note web_link is https://
# r.raw.decode_content = True
# with open(os.path.join(os.path.dirname(__file__), 'storage/{}.zip'.format(zipfilename)), 'wb') as f:
# shutil.copyfileobj(r.raw, f)
#
# #extracting zip file as xls file
# with ZipFile(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/desiya*.zip'))[0], 'r') as zip:
# zip.extractall(os.path.join(os.path.dirname(__file__), 'storage/'))
# #Rename xls file name as "desiya_hotels"
# if glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[0-9].xls')):
# for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[a-zA-z].xls')):
# os.remove(filename)
# os.rename(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[0-9].xls'))[0], os.path.join(os.path.dirname(__file__),'storage/{}.xls'.format(zipfilename)))
# else:
# print('unzipped xls file is not found in storage folder')
# except Exception as e:
# print("Error while downloading zip file")
#read xls file
# xls = pd.ExcelFile(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/desiya*.xls'))[0])
# df = pd.read_excel(xls, sheet_name=0, index_col=None)
# print(df['Name'])
# print(df.head(5))
# for index, row in df.iterrows():
# print(index, row[3])
#convert xls to csv
# df.to_csv(os.path.join(os.path.dirname(__file__),'storage/{}'.format('robot.csv')), encoding='utf-8', index=False)
#convert xls file to csv using xlrd module
xlsfile = glob.glob(os.path.join(os.path.dirname(__file__), 'storage/robot*.xls'))[0]
wb = open_workbook(xlsfile)
sheet = wb.sheet_by_name('robot_list')
with open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'), "w") as file:
writer = csv.writer(file, delimiter=",")
headers = [cell.value for cell in sheet.row(0)]
writer.writerow(headers)
for i in range(1, sheet.nrows):
rowvalue_list = [str(cell.value).strip() if cell.value else None for cell in sheet.row(i)]
writer.writerow(rowvalue_list)
| [
0,
1,
2,
3,
4
] |
376 | 7a59c8c883a9aaa723175783e01aa62e23503fde | <mask token>
| from urllib.request import urlopen
from bs4 import BeautifulSoup
| #!/C:\Program Files (x86)\Python35-32
#importar librarias necesarias
from urllib.request import urlopen
from bs4 import BeautifulSoup
| null | null | [
0,
1,
2
] |
377 | 48d0bfdc607a4605ef82f5c7dc7fd6fc85c4255f | <mask token>
| <mask token>
if wht == 0:
print('wht is', wht)
else:
print('whtsdsb')
<mask token>
print('BMI=', bmi)
if bmi < 18.5:
print('too light')
elif bmi < 25:
print('normal')
elif bmi < 30:
print('over')
else:
print('wanghantangshidssb')
| <mask token>
wht = 2
if wht == 0:
print('wht is', wht)
else:
print('whtsdsb')
wei = float(input('wei='))
hei = float(input('hei='))
bmi = wei * 0.45259227 / (hei * 0.0254) ** 2
print('BMI=', bmi)
if bmi < 18.5:
print('too light')
elif bmi < 25:
print('normal')
elif bmi < 30:
print('over')
else:
print('wanghantangshidssb')
|
'''
BMI=weight*0.45259227/(hei*0.0254)**2
'''
wht=2
if wht==0:
print("wht is",wht)
else:
print("whtsdsb")
# Also completed 100 burpees today
wei=float(input("wei="))
hei=float(input("hei="))
bmi=(wei*0.45259227)/((hei*0.0254)**2)
print("BMI=",bmi)
if bmi<18.5:
print("too light")
elif bmi<25:
print("normal")
elif bmi<30:
print("over")
else:
print("wanghantangshidssb")
| null | [
0,
1,
2,
3
] |
378 | 05e4bcc7323b908a7b45d766ada463ce172e25c4 | <mask token>
| <mask token>
class Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.
constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.
schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.
schema.Query, f1hub.constructorstandings.schema.Query, f1hub.
driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.
pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.
schema.Query, graphene.ObjectType):
pass
<mask token>
| <mask token>
class Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.
constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.
schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.
schema.Query, f1hub.constructorstandings.schema.Query, f1hub.
driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.
pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.
schema.Query, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query)
| import graphene
import f1hub.drivers.schema
import f1hub.results.schema
import f1hub.constructors.schema
import f1hub.races.schema
import f1hub.status.schema
import f1hub.circuits.schema
import f1hub.constructorresults.schema
import f1hub.constructorstandings.schema
import f1hub.driverstandings.schema
import f1hub.laptimes.schema
import f1hub.pitstops.schema
import f1hub.qualifying.schema
import f1hub.seasons.schema
class Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.
constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.
schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.
schema.Query, f1hub.constructorstandings.schema.Query, f1hub.
driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.
pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.
schema.Query, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query)
| import graphene
import f1hub.drivers.schema
import f1hub.results.schema
import f1hub.constructors.schema
import f1hub.races.schema
import f1hub.status.schema
import f1hub.circuits.schema
import f1hub.constructorresults.schema
import f1hub.constructorstandings.schema
import f1hub.driverstandings.schema
import f1hub.laptimes.schema
import f1hub.pitstops.schema
import f1hub.qualifying.schema
import f1hub.seasons.schema
class Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.schema.Query, f1hub.circuits.schema.Query,\
f1hub.constructorresults.schema.Query, f1hub.constructorstandings.schema.Query, f1hub.driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.pitstops.schema.Query,\
f1hub.qualifying.schema.Query, f1hub.seasons.schema.Query, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query)
| [
0,
1,
2,
3,
4
] |
379 | 7530c2c85f83d1714840ba97c1ec702f063658c5 | <mask token>
class VertexArrayObject:
def __init__(self, primitive):
self._primitive = primitive
self._buffers: List[pxng.BufferObject] = []
self._indices = pxng.BufferObject(data_type=self.index_data_type,
array_type=gl.GL_ELEMENT_ARRAY_BUFFER)
self._vao = gl.glGenVertexArrays(1)
<mask token>
def add_quad(self, p1, p2, p3, p4):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._buffers[0].set_value(p4)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))
self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))
def add_triangle(self, p1, p2, p3):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))
def add_line(self, p1, p2):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._indices.set_value(glm.u16vec2(i, i + 1))
<mask token>
def set_colors(self, *args: glm.vec4, target=1):
for c in args:
self._buffers[target].set_value(c)
def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):
for c in args:
self._buffers[target].set_value(c)
<mask token>
<mask token>
def draw(self):
index_count = len(self._indices) * self.primitive_component_count
gl.glDrawElements(self._primitive, index_count, gl.
GL_UNSIGNED_SHORT, None)
<mask token>
@property
def primitive_component_count(self):
if self._primitive == gl.GL_TRIANGLES:
return 3
elif self._primitive == gl.GL_LINES:
return 2
elif self._primitive == gl.GL_POINTS:
return 1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
<mask token>
| <mask token>
class VertexArrayObject:
def __init__(self, primitive):
self._primitive = primitive
self._buffers: List[pxng.BufferObject] = []
self._indices = pxng.BufferObject(data_type=self.index_data_type,
array_type=gl.GL_ELEMENT_ARRAY_BUFFER)
self._vao = gl.glGenVertexArrays(1)
<mask token>
def add_quad(self, p1, p2, p3, p4):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._buffers[0].set_value(p4)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))
self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))
def add_triangle(self, p1, p2, p3):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))
def add_line(self, p1, p2):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._indices.set_value(glm.u16vec2(i, i + 1))
<mask token>
def set_colors(self, *args: glm.vec4, target=1):
for c in args:
self._buffers[target].set_value(c)
def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):
for c in args:
self._buffers[target].set_value(c)
def create(self):
gl.glBindVertexArray(self._vao)
for index, vbo in enumerate(self._buffers):
vbo.bind(index)
self._indices.bind(None)
<mask token>
def draw(self):
index_count = len(self._indices) * self.primitive_component_count
gl.glDrawElements(self._primitive, index_count, gl.
GL_UNSIGNED_SHORT, None)
@property
def index_data_type(self):
if self._primitive == gl.GL_TRIANGLES:
return glm.u16vec3
elif self._primitive == gl.GL_LINES:
return glm.u16vec2
elif self._primitive == gl.GL_POINTS:
return glm.u16vec1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
@property
def primitive_component_count(self):
if self._primitive == gl.GL_TRIANGLES:
return 3
elif self._primitive == gl.GL_LINES:
return 2
elif self._primitive == gl.GL_POINTS:
return 1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
<mask token>
| <mask token>
class VertexArrayObject:
def __init__(self, primitive):
self._primitive = primitive
self._buffers: List[pxng.BufferObject] = []
self._indices = pxng.BufferObject(data_type=self.index_data_type,
array_type=gl.GL_ELEMENT_ARRAY_BUFFER)
self._vao = gl.glGenVertexArrays(1)
<mask token>
def add_quad(self, p1, p2, p3, p4):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._buffers[0].set_value(p4)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))
self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))
def add_triangle(self, p1, p2, p3):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))
def add_line(self, p1, p2):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._indices.set_value(glm.u16vec2(i, i + 1))
<mask token>
def set_colors(self, *args: glm.vec4, target=1):
for c in args:
self._buffers[target].set_value(c)
def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):
for c in args:
self._buffers[target].set_value(c)
def create(self):
gl.glBindVertexArray(self._vao)
for index, vbo in enumerate(self._buffers):
vbo.bind(index)
self._indices.bind(None)
<mask token>
def draw(self):
index_count = len(self._indices) * self.primitive_component_count
gl.glDrawElements(self._primitive, index_count, gl.
GL_UNSIGNED_SHORT, None)
@property
def index_data_type(self):
if self._primitive == gl.GL_TRIANGLES:
return glm.u16vec3
elif self._primitive == gl.GL_LINES:
return glm.u16vec2
elif self._primitive == gl.GL_POINTS:
return glm.u16vec1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
@property
def primitive_component_count(self):
if self._primitive == gl.GL_TRIANGLES:
return 3
elif self._primitive == gl.GL_LINES:
return 2
elif self._primitive == gl.GL_POINTS:
return 1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
def bind(self):
gl.glBindVertexArray(self._vao)
if self._indices.bind(None):
if any(vbo.changed for vbo in self._buffers):
self.create()
return True
gl.glBindVertexArray(0)
return False
| <mask token>
class VertexArrayObject:
def __init__(self, primitive):
self._primitive = primitive
self._buffers: List[pxng.BufferObject] = []
self._indices = pxng.BufferObject(data_type=self.index_data_type,
array_type=gl.GL_ELEMENT_ARRAY_BUFFER)
self._vao = gl.glGenVertexArrays(1)
<mask token>
def add_quad(self, p1, p2, p3, p4):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._buffers[0].set_value(p4)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))
self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))
def add_triangle(self, p1, p2, p3):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))
def add_line(self, p1, p2):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._indices.set_value(glm.u16vec2(i, i + 1))
<mask token>
def set_colors(self, *args: glm.vec4, target=1):
for c in args:
self._buffers[target].set_value(c)
def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):
for c in args:
self._buffers[target].set_value(c)
def create(self):
gl.glBindVertexArray(self._vao)
for index, vbo in enumerate(self._buffers):
vbo.bind(index)
self._indices.bind(None)
def reset(self):
self._indices.reset()
for vbo in self._buffers:
vbo.reset()
def draw(self):
index_count = len(self._indices) * self.primitive_component_count
gl.glDrawElements(self._primitive, index_count, gl.
GL_UNSIGNED_SHORT, None)
@property
def index_data_type(self):
if self._primitive == gl.GL_TRIANGLES:
return glm.u16vec3
elif self._primitive == gl.GL_LINES:
return glm.u16vec2
elif self._primitive == gl.GL_POINTS:
return glm.u16vec1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
@property
def primitive_component_count(self):
if self._primitive == gl.GL_TRIANGLES:
return 3
elif self._primitive == gl.GL_LINES:
return 2
elif self._primitive == gl.GL_POINTS:
return 1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
def bind(self):
gl.glBindVertexArray(self._vao)
if self._indices.bind(None):
if any(vbo.changed for vbo in self._buffers):
self.create()
return True
gl.glBindVertexArray(0)
return False
| from typing import List
import glm
import pxng
import OpenGL.GL as gl
class VertexArrayObject:
def __init__(self, primitive):
self._primitive = primitive
self._buffers: List[pxng.BufferObject] = []
self._indices = pxng.BufferObject(data_type=self.index_data_type,
array_type=gl.GL_ELEMENT_ARRAY_BUFFER)
self._vao = gl.glGenVertexArrays(1)
def attach_buffer(self, vbo: pxng.BufferObject):
self._buffers.append(vbo)
return len(self._buffers) - 1
def add_quad(self, p1, p2, p3, p4):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._buffers[0].set_value(p4)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))
self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))
def add_triangle(self, p1, p2, p3):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._buffers[0].set_value(p3)
self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))
def add_line(self, p1, p2):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._buffers[0].set_value(p2)
self._indices.set_value(glm.u16vec2(i, i + 1))
def add_point(self, p1):
i = self._buffers[0].index
self._buffers[0].set_value(p1)
self._indices.set_value(glm.u16vec1(i))
def set_colors(self, *args: glm.vec4, target=1):
for c in args:
self._buffers[target].set_value(c)
def set_texture(self, *args: glm.vec2 or glm.uvec2, target=1):
for c in args:
self._buffers[target].set_value(c)
def create(self):
gl.glBindVertexArray(self._vao)
for index, vbo in enumerate(self._buffers):
vbo.bind(index)
self._indices.bind(None)
def reset(self):
self._indices.reset()
for vbo in self._buffers:
vbo.reset()
def draw(self):
index_count = len(self._indices) * self.primitive_component_count
gl.glDrawElements(self._primitive, index_count, gl.GL_UNSIGNED_SHORT, None)
@property
def index_data_type(self):
if self._primitive == gl.GL_TRIANGLES:
return glm.u16vec3
elif self._primitive == gl.GL_LINES:
return glm.u16vec2
elif self._primitive == gl.GL_POINTS:
return glm.u16vec1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
@property
def primitive_component_count(self):
if self._primitive == gl.GL_TRIANGLES:
return 3
elif self._primitive == gl.GL_LINES:
return 2
elif self._primitive == gl.GL_POINTS:
return 1
else:
raise UserWarning(f'Unknown primitive type {self._primitive}')
def bind(self):
gl.glBindVertexArray(self._vao)
if self._indices.bind(None):
if any(vbo.changed for vbo in self._buffers):
self.create()
return True
gl.glBindVertexArray(0)
return False
| [
9,
11,
12,
13,
17
] |
380 | 39fdb9c586c3cf92d493269ceac419e0058a763a | import pandas as pd
import numpy as np
import pyten.tenclass
import pyten.method
import pyten.tools
def scalable(file_name=None, function_name=None, recover=None, omega=None, r=2, tol=1e-8, maxiter=100, init='random',
printitn=0):
"""
Helios1 API returns CP_ALS, TUCKER_ALS, or NNCP decomposition or Recovery Result
arg can be list, tuple, set, and array with numerical values.
-----------
:param file_name: {Default: None}
:param function_name: Tensor-based Method
:param recover: Input '1' to recover other to decompose.{Default: None}
:param omega: Index Tensor of Obseved Entries
:param r: The rank of the Tensor you want to use for approximation (recover or decompose).{Default: 2}
:param tol: Tolerance on difference in fit.(Convergence tolerance for both cp(als) or tucker(als).){Default: 1.0e-4}
:param maxiter: Maximum number of iterations {Default: 50}
:param init: Initial guess 'random'|'nvecs'|'eigs'. {Default 'random'}
:param printitn: Print fit every n iterations; 0 for no printing.
-----------
:return Ori: Original Tensor
:return full: Full Tensor reconstructed by decomposed matrices
:return Final: Decomposition Results e.g. Ttensor or Ktensor
:return Rec: Recovered Tensor (Completed Tensor)
-----------
"""
# User Interface
if file_name is None:
file_name = raw_input("Please input the file_name of the data: \n")
print("\n")
if function_name is None:
function_name = raw_input("Please choose the method you want to use to recover data(Input one number):\n"
" 1. Distributed CP(ALS) 2.Distributed CP(ADMM) 3. DisTenC 0.Exit \n")
print("\n")
#if recover is None:
# recover = raw_input("If there are missing values in the file? (Input one number)\n"
# "1.Yes, recover it 2.No, just decompose (Missing entries in the original tensor will be replaced by 0) 0.Exit\n")
# Use pandas package to load data
## if file_name[-3:] == 'csv':
# dat1 = pd.read_csv(file_name, delimiter=';')
# Data preprocessing
# First: create Sptensor
# dat = dat1.values
# sha = dat.shape
# subs = dat[:, range(sha[1] - 1)]
# subs = subs - 1
# vals = dat[:, sha[1] - 1]
# vals = vals.reshape(len(vals), 1)
# siz = np.max(subs, 0)
# siz = np.int32(siz + 1)
# X1 = pyten.tenclass.Sptensor(subs, vals, siz)
# Second: create Tensor object and find missing data
# X = X1.totensor()
# Ori = X.data
# lstnan = np.isnan(X.data)
# X.data = np.nan_to_num(X.data)
# Construct omega
#output = 1 # An output indicate flag. (Decompose: 1, Recover:2)
Ori = None
#if type(omega) != np.ndarray:
# # if True in lstnan:
# omega = X.data * 0 + 1
# omega[lstnan] = 0
# if recover == '1':
# output = 2
# Choose method to recover or decompose
if type(function_name) == str:
if function_name == '1' or function_name == 'D_cp_als':
Dals = pyten.method.TensorDecompositionALS()
Dals.dir_data = file_name
Dals.rank = r
Dals.run()
Dals.maxIter = maxiter
Dals.tol = tol
######
Final = Dals.ktensor
Rec = None
full = Final.totensor()
######
elif function_name == '2' or function_name == 'D_ADMM':
Dadmm = pyten.method.DistTensorADMM()
Dadmm.dir_data = file_name
Dadmm.rank = r
Dadmm.run()
Dadmm.maxIter = maxiter
Dadmm.tol = tol
######
Final = Dadmm.ktensor
Rec = None
full = Final.totensor()
######
elif function_name == '3' or function_name == 'D_ADMM_C':
DadmmC = pyten.method.DistTensorCompletionADMM()
DadmmC.dir_data = file_name
DadmmC.rank = r
DadmmC.run()
DadmmC.maxIter = maxiter
DadmmC.tol = tol
######
Final = DadmmC.ktensor
#Rec = Final.totensor().data * omega + X.data * (1 - omega)
full = Final.totensor()
Rec = full
######
elif function_name == '0':
print 'Successfully Exit'
return None, None, None, None
else:
raise ValueError('No Such Method')
else:
raise TypeError('No Such Method')
# Output Result
# [nv, nd] = subs.shape
if function_name == 1 or function_name == 2:
newsubs = full.tosptensor().subs
tempvals = full.tosptensor().vals
newfilename = file_name[:-4] + '_Decomposite' + file_name[-4:]
#print "\n" + "The original Tensor is: "
#print X1
print "\n" + "The Decomposed Result is: "
print Final
else:
newsubs = Rec.tosptensor().subs
tempvals = Rec.tosptensor().vals
newfilename = file_name[:-4] + '_Recover' + file_name[-4:]
#print "\n" + "The original Tensor is: "
#print Ori
print "\n" + "The Recovered Tensor is: "
print Rec.data
# Return result
return Ori, full, Final, Rec
| null | null | null | null | [
0
] |
381 | 1f7d770106ea8e7d1c0bb90e1fc576b7ee2f0220 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('shop', '0003_auto_20200828_1836')]
operations = [migrations.AddField(model_name='order', name='total',
field=models.CharField(default=0, max_length=200), preserve_default
=False), migrations.AlterField(model_name='order', name='items',
field=models.CharField(max_length=300))]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('shop', '0003_auto_20200828_1836')]
operations = [migrations.AddField(model_name='order', name='total',
field=models.CharField(default=0, max_length=200), preserve_default
=False), migrations.AlterField(model_name='order', name='items',
field=models.CharField(max_length=300))]
| # Generated by Django 3.0.8 on 2020-08-28 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_auto_20200828_1836'),
]
operations = [
migrations.AddField(
model_name='order',
name='total',
field=models.CharField(default=0, max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='order',
name='items',
field=models.CharField(max_length=300),
),
]
| [
0,
1,
2,
3,
4
] |
382 | 01847c9e601eae6775cd4324483740c30e344557 | <mask token>
| <mask token>
class CfCoreConfig(AppConfig):
<mask token>
| <mask token>
class CfCoreConfig(AppConfig):
name = 'cf_core'
| from django.apps import AppConfig
class CfCoreConfig(AppConfig):
name = 'cf_core'
| null | [
0,
1,
2,
3
] |
383 | ac2d4372f8913ea9ae1066833cca09985e521f99 | #!/usr/bin/env python
"""\
Simple g-code streaming script for grbl
"""
import serial
import time
import csv
import json
import RPi.GPIO as GPIO
from multiprocessing import Process, Queue
class motion():
def __init__(self):
# Open grbl serial port
#self.s = serial.Serial("/dev/ttyUSB0",baudrate=115200,xonxoff=True,timeout=1)
self.s = serial.Serial("/dev/ttyUSB0",
baudrate=115200,
timeout=0.1,
rtscts=True,
xonxoff=False)
self.rsp=''
self.posx=0.0
self.posy=0.0
self.positions_file = '/home/pi/Work/Wall2.0/system/positions.csv'
self.home_position_file = '/home/pi/Work/Wall2.0/system/home.csv'
self.mode = 'delay'
self.sensor_pin = 3
self.interval = 1
GPIO.setmode(GPIO.BOARD)
# GPIO.setup(self.sensor_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.sensor_pin, GPIO.IN)
# Wake up grbl
self.s.write("\r\n\r\n")
time.sleep(2) # Wait for grbl to initialize
self.s.flushInput() # Flush startup text in serial input
self.feedrate = 100
self.update_feedrate(0)
with open(self.positions_file,'w') as f:
f.write('posx,posy\n')
self.homex=None
self.homey=None
with open(self.home_position_file,'r') as f:
lines = csv.DictReader(f)
for l in lines:
print 'x_home: '+l['homex']
print 'y_home: '+l['homey']
self.homex = float(l['homex'])
self.homey = float(l['homey'])
# set origin offset
#self.send("g92 x0 y0")
self.set_relative_position()
self.pos_queue = Queue()
self.serial_proc = Process(target=self.get_response,
args=(self.pos_queue,))
self.serial_proc.start()
def update_feedrate(self, feedrate):
tmp = self.feedrate + feedrate
if(tmp >= 100) and (tmp <= 800):
self.feedrate = tmp
# feedrate speed
self.send("f"+str(self.feedrate))
def update_interval(self, interval):
if(self.interval >= 1) and (self.interval <= 10):
self.interval += interval
def send(self, cmd):
print 'Sending: ' + cmd
self.s.write(cmd + '\n') # Send g-code block to grbl
def move(self,sign_x, sign_y):
x = "x"+str(sign_x*10)
y = "y"+str(sign_y*10)
#self.send("%")
self.send(" ".join(["g1",x,y]))
def move_to_position(self,x,y):
x = "x"+str(x)
y = "y"+str(y)
self.send(" ".join(["g1",x,y]))
def stop(self):
self.send("!")
self.send("%")
if (self.homex!=None) and (self.homey!=None):
time.sleep(0.5)
self.set_absolute_position()
self.update_current_position()
self.move_to_position(self.homex,self.homey)
self.set_relative_position()
def disconnect(self):
# Close file and serial port
self.s.close()
def get_response(self, q):
while(1):
tmp = self.s.readline()
tmp = tmp.strip()
if tmp is not '':
try:
tmp = json.loads(tmp)
print tmp
if 'r' in tmp.keys():
if 'sr' in tmp['r'].keys():
tmp = tmp['r']
if 'sr' in tmp.keys():
if 'posx' in tmp['sr'].keys():
self.posx=tmp['sr']['posx']
if 'posy' in tmp['sr'].keys():
self.posy=tmp['sr']['posy']
q.put((self.posx, self.posy))
print 'pos1: '+str((self.posx, self.posy))
except ValueError:
print "get_response chocked"
self.stop()
time.sleep(1)
else:
time.sleep(.2)
def record_current_position(self):
self.send('{"sr":null}')
print "Saving"
# TODO: Check if serial_proc is running?
self.update_current_position()
with open(self.positions_file,'a') as f:
f.write(str(self.posx)+','+str(self.posy)+'\n')
def record_home_position(self):
self.send('{"sr":null}')
print "Saving home"
# TODO: Check if serial_proc is running?
self.update_current_position()
self.homex = self.posx
self.homey = self.posy
with open(self.home_position_file,'w') as f:
f.write('homex,homey\n')
f.write(str(self.posx)+','+str(self.posy)+'\n')
def delete_home_position(self):
print "Deleting home"
with open(self.home_position_file,'w') as f:
f.write('homex,homey\n')
self.homex = None
self.homey = None
def update_current_position(self):
while not self.pos_queue.empty():
self.posx, self.posy = self.pos_queue.get()
def getTrigger(self):
return GPIO.input(self.sensor_pin)
def changeMode(self):
if self.mode == 'delay':
self.mode = 'sensor'
elif self.mode == 'sensor':
self.mode = 'delay'
def set_absolute_position(self):
# absolute mode
self.send("g90")
def set_relative_position(self):
# relative mode
self.send("g91")
def playback_saved_positions(self):
self.set_absolute_position()
self.update_current_position()
with open(self.positions_file) as f:
lines = csv.DictReader(f)
for l in lines:
print 'x_dst: '+l['posx']+' - '+str(self.posx)
print 'y_dst: '+l['posy']+' - '+str(self.posy)
x_dst = float(l['posx'])#-self.posx
y_dst = float(l['posy'])#-self.posy
x = ' x'+str((x_dst))
y = ' y'+str((y_dst))
print(x,y)
self.send('g1'+x+y)
while(1):
self.update_current_position()
if (self.posx != float(l['posx'])) or \
(self.posy != float(l['posy'])):
time.sleep(.1)
else:
break
if(self.mode == 'delay'):
time.sleep(self.interval)
elif(self.mode == 'sensor'):
num_strikes = 0
while num_strikes < self.interval:
while(not self.getTrigger()):
time.sleep(.01)
num_strikes += 1
# relative mode
self.send("g91")
| null | null | null | null | [
0
] |
384 | ca11e9cf0bcfcbd714c45b5c95bd2c2044b65909 | <mask token>
class Client(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
| <mask token>
class Client(object):
<mask token>
def __init__(self, app):
self.app = app
<mask token>
def get(self, path=None):
return self.request(path, 'GET')
<mask token>
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
| <mask token>
class Client(object):
<mask token>
def __init__(self, app):
self.app = app
def request(self, path, method, body=None):
path = path or '/'
request = BaseRequest.blank(path)
request.method = method
request.text = body or ''
return request.get_response(self.app)
def get(self, path=None):
return self.request(path, 'GET')
<mask token>
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
| <mask token>
class Client(object):
"""Make requests to a wsgi app and return the response."""
def __init__(self, app):
self.app = app
def request(self, path, method, body=None):
path = path or '/'
request = BaseRequest.blank(path)
request.method = method
request.text = body or ''
return request.get_response(self.app)
def get(self, path=None):
return self.request(path, 'GET')
def post(self, path=None, body=None):
return self.request(path, 'POST', body)
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
| """Woma objects for dealing with HTTP.
Request and Response inherit from webob's Request and Response objects, so see
http://docs.webob.org/en/latest/ for full documentation. The only things
documented here are the customizations.
"""
from webob import Request as BaseRequest
from webob import Response as BaseResponse
class Client(object):
"""Make requests to a wsgi app and return the response."""
def __init__(self, app):
self.app = app
def request(self, path, method, body=None):
path = path or '/'
request = BaseRequest.blank(path)
request.method = method
request.text = body or ''
return request.get_response(self.app)
def get(self, path=None):
return self.request(path, 'GET')
def post(self, path=None, body=None):
return self.request(path, 'POST', body)
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(
status_code=200,
content_type=request.content_type or 'text/plain',
charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
| [
8,
11,
12,
14,
16
] |
385 | 2c22f891f30825bcb97987c78a98988ad2a92210 | <mask token>
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
| <mask token>
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
| <mask token>
BRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}
FORMAT = '[%(asctime)s][%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
| import os
import sys
import json
import logging
import argparse
from glob import glob
from pricewatcher.tools import ensure_mkdir
from pricewatcher.parser.f21 import ForeverParser
from pricewatcher.parser.jcrew import JcrewParser
from pricewatcher.utils.load_es import bulk_load_es
BRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}
FORMAT = '[%(asctime)s][%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
| import os
import sys
import json
import logging
import argparse
from glob import glob
from pricewatcher.tools import ensure_mkdir
from pricewatcher.parser.f21 import ForeverParser
from pricewatcher.parser.jcrew import JcrewParser
from pricewatcher.utils.load_es import bulk_load_es
BRAND_PARSERS={
'forever21': ForeverParser,
'jcrew': JcrewParser
}
# Set up logging
FORMAT = '[%(asctime)s][%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help='default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help='remove index before loading new data')
args = parser.parse_args()
# Argument parsing
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
# ES arguments
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
# Parsing Raw Pages
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str, '*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = file_path.split('/')[-6:]
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list), file_path))
if not load_es:
# Output Result
output_dir = os.path.join(output_base, os.path.join(dt_str, hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
# Dump Product List
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
#es_index, es_doctype = br, category
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({ "index" : { "_index" : br, "_type" : category, "_id" : doc['product_id'] } })
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list, opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
| [
2,
3,
4,
5,
6
] |
386 | 3cd7abf9659fe1db0ef3aa58df8dd7fd959e10a6 | <mask token>
| <mask token>
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split('\n\n')
for sentence in paragraph:
sentWithPunctuation = sentence
sentNoPunctuation = re.sub('[^\\w\\s]', '', sentence)
words = sentNoPunctuation.split(' ')
for word in words:
wordLen = wordLen + len(word)
totWords = totWords + len(words)
avgSentLen_Words = round(totWords / len(paragraph), 2)
avgLetterCount = round(wordLen / totWords, 2)
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)
print(f"""
Paragraph Analysis of '{sourceFile}' file""")
print(f'---------------------------------------------------------')
print(f' Approximate Word Count: {totWords} ')
print(f' Approximate Sentence Count: {len(paragraph)} ')
print(f' Average Letter Count: {avgLetterCount} ')
print(f' Average Sentence Length (words): {avgSentLen_Words} ')
print(f' Average Sentence Length (chars): {avgSentLen_chars} ')
| <mask token>
totWords = 0
wordLen = 0
totSentWithPunctuation = 0
sourceFile = os.path.join('Resources', 'paragraph_2.txt')
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split('\n\n')
for sentence in paragraph:
sentWithPunctuation = sentence
sentNoPunctuation = re.sub('[^\\w\\s]', '', sentence)
words = sentNoPunctuation.split(' ')
for word in words:
wordLen = wordLen + len(word)
totWords = totWords + len(words)
avgSentLen_Words = round(totWords / len(paragraph), 2)
avgLetterCount = round(wordLen / totWords, 2)
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)
print(f"""
Paragraph Analysis of '{sourceFile}' file""")
print(f'---------------------------------------------------------')
print(f' Approximate Word Count: {totWords} ')
print(f' Approximate Sentence Count: {len(paragraph)} ')
print(f' Average Letter Count: {avgLetterCount} ')
print(f' Average Sentence Length (words): {avgSentLen_Words} ')
print(f' Average Sentence Length (chars): {avgSentLen_chars} ')
| import os
import csv
import re
totWords = 0
wordLen = 0
totSentWithPunctuation = 0
sourceFile = os.path.join('Resources', 'paragraph_2.txt')
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split('\n\n')
for sentence in paragraph:
sentWithPunctuation = sentence
sentNoPunctuation = re.sub('[^\\w\\s]', '', sentence)
words = sentNoPunctuation.split(' ')
for word in words:
wordLen = wordLen + len(word)
totWords = totWords + len(words)
avgSentLen_Words = round(totWords / len(paragraph), 2)
avgLetterCount = round(wordLen / totWords, 2)
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)
print(f"""
Paragraph Analysis of '{sourceFile}' file""")
print(f'---------------------------------------------------------')
print(f' Approximate Word Count: {totWords} ')
print(f' Approximate Sentence Count: {len(paragraph)} ')
print(f' Average Letter Count: {avgLetterCount} ')
print(f' Average Sentence Length (words): {avgSentLen_Words} ')
print(f' Average Sentence Length (chars): {avgSentLen_chars} ')
| import os
import csv
import re
totWords = 0
wordLen = 0
totSentWithPunctuation = 0
sourceFile = os.path.join('Resources', 'paragraph_2.txt')
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split("\n\n")
for sentence in paragraph:
# Remove punctuation from sentences
sentWithPunctuation = sentence
sentNoPunctuation = re.sub(r'[^\w\s]','',sentence)
#Split sentence with no punctuation by words using spaces
words = sentNoPunctuation.split(" ")
for word in words:
wordLen = wordLen + len(word)
# Compute totals for output message
totWords = totWords + len(words) # Total words for all sentences
avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences
avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2)
#Validate output by printing a test line
# print(f"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}")
print(f"\n\nParagraph Analysis of '{sourceFile}' file")
print(f"---------------------------------------------------------")
print(f" Approximate Word Count: {totWords} ")
print(f" Approximate Sentence Count: {len(paragraph)} ")
print(f" Average Letter Count: {avgLetterCount} ")
print(f" Average Sentence Length (words): {avgSentLen_Words} ")
print(f" Average Sentence Length (chars): {avgSentLen_chars} ")
| [
0,
1,
2,
3,
4
] |
387 | b95619f3f52ff3747e38ecc153123962d0122a4d | <mask token>
| {'name': 'ldap_user', 'summary': '', 'description': '域账号用户管理,登录及查询用户信息',
'author': '', 'website': '', 'source': {'git':
'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'},
'category': '', 'version': '0.1', 'api': {'/user/token':
'user_api.gen_token', '/user/captcha': 'user_api.gen_captcha',
'/user/login': {'POST': 'user_api.login'}, '/user/search':
'user_api.search_users'}, 'depends': ['base', 'base_api_wrapper',
'redis_client', 'i18n']}
| # noinspection PyStatementEffect
{
'name': 'ldap_user',
'summary': '',
'description': '域账号用户管理,登录及查询用户信息',
'author': '',
'website': '',
'source': {'git': 'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'},
'category': '',
'version': '0.1',
'api': {
'/user/token': 'user_api.gen_token',
'/user/captcha': 'user_api.gen_captcha',
'/user/login': {
'POST': 'user_api.login'
},
'/user/search': 'user_api.search_users'
},
# any plugin necessary for this one to work correctly
'depends': ['base', 'base_api_wrapper', 'redis_client', 'i18n']
}
| null | null | [
0,
1,
2
] |
388 | c3527363cfc29ab7d598fe232d784b05ec2ef069 | import models
import json
import reports.models
import common.ot_utils
def analyze_raw_reports(clean=True):
if clean:
delete_all_reports()
COUNT = 100
offset = 0
while True:
cont = analyze_raw_reports_subset(offset,COUNT)
offset += COUNT
if not cont:
return
def analyze_raw_reports_subset(offset,count):
items = _collect_items(offset,count)
if items:
dump_items(items)
return True
return False
def dump_items(items):
wifis = []
locs = []
for (idx,item) in enumerate(items):
if idx % 100 == 0:
print '%d/%d' % (idx,len(items))
if 'wifi' in item.keys():
report_dt = common.ot_utils.get_utc_time_from_timestamp(float(item['time'])/1000)
m = models.Report(device_id=item['device_id'],timestamp=report_dt)
m.save()
item_loc = item.get('location_api')
if item_loc:
loc = models.LocationInfo(report=m,
lat=item_loc['lat'],
lon=item_loc['long'],
provider=item_loc['provider'],
timestamp = common.ot_utils.get_utc_time_from_timestamp(float(item_loc['time'])/1000),
accuracy = item_loc['accuracy'])
locs.append(loc)
for wifi in item['wifi']:
wifis.append(models.SingleWifiReport(SSID=wifi['SSID'],
signal=wifi['signal'],
frequency=wifi['frequency'],
key=wifi['key'],
report=m))
print 'Saving all dependant objects'
models.SingleWifiReport.objects.bulk_create(wifis)
models.LocationInfo.objects.bulk_create(locs)
def delete_all_reports():
common.ot_utils.delete_from_model(models.SingleWifiReport)
common.ot_utils.delete_from_model(models.LocationInfo)
common.ot_utils.delete_from_model(models.Report)
def _collect_items(offset,count):
all_reports_count = reports.models.RawReport.objects.count()
print '*** offset = %d count = %d all_reports_count = %d' % (offset,count,all_reports_count)
all_reports = reports.models.RawReport.objects.all()[offset:offset+count]
result = []
for rj in all_reports:
items = json.loads(rj.text)['items']
result.extend(items)
return result
| null | null | null | null | [
0
] |
389 | 484d104a8481a707a187d0bcb30898c3459a88be | <mask token>
| <mask token>
urlpatterns = patterns('', url('^$', home.HomeView.as_view(), name='home'),
url('^cluster/status/$', cluster.ClusterStatusView.as_view(), name=
'cluster_status'), url('^node/list/$', node.NodeListView.as_view(),
name='node_list'), url('^node/add/$', node.NodeCreateView.as_view(),
name='node_add'), url('^node/(?P<pk>\\d+)/libvirt/$', node.
NodeLibvirtView.as_view(), name='node_libvirt'), url(
'^node/(?P<pk>\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.
as_view(), name='node_libvirt_update'), url(
'^node/(?P<pk>\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.
as_view(), name='node_libvirt_updatedomains'), url(
'^node/(?P<pk>\\d+)/libvirt/create/domains/$', node.
CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'), url
('^node/(?P<pk>\\d+)/edit/$', node.NodeUpdateView.as_view(), name=
'node_edit'), url('^node/(?P<pk>\\d+)/delete/$', node.NodeDeleteView.
as_view(), name='node_delete'), url('^domain/list/$', domain.
DomainListView.as_view(), name='domain_list'), url('^domain/add/$',
domain.DomainCreateView.as_view(), name='domain_add'), url(
'^domain/(?P<pk>\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(),
name='domain_libvirt'), url('^domain/(?P<pk>\\d+)/edit/$', domain.
DomainUpdateView.as_view(), name='domain_edit'), url(
'^domain/(?P<pk>\\d+)/delete/$', domain.DomainDeleteView.as_view(),
name='domain_delete'), url('^domain/(?P<pk>\\d+)/libvirt/create/$',
domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'), url(
'^domain/(?P<pk>\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.
as_view(), name='domain_libvirt_reboot'), url(
'^domain/(?P<pk>\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.
as_view(), name='domain_libvirt_shutdown'), url(
'^domain/(?P<pk>\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.
as_view(), name='domain_libvirt_destroy'), url(
'^domain/(?P<pk>\\d+)/libvirt/migrate/(?P<node_pk>\\d+)/$', domain.
LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'), url(
'^domain/(?P<pk>\\d+)/libvirt/resume/$', domain.LibvirtResumeView.
as_view(), name='domain_libvirt_resume'), url(
'^domain/(?P<pk>\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.
as_view(), name='domain_libvirt_suspend'), url(
'^domain/(?P<pk>\\d+)/device/(?P<type>\\w+)/add/$', device.
DeviceCreateView.as_view(), name='device_add'), url(
'^device/(?P<pk>\\d+)/$', device.DeviceUpdateView.as_view(), name=
'device_edit'), url('^device/(?P<pk>\\d+)/attach/$', device.
DeviceAttachView.as_view(), name='device_attach'), url(
'^device/(?P<pk>\\d+)/detach/$', device.DeviceDetachView.as_view(),
name='device_detach'), url('^device/(?P<pk>\\d+)/delete/$', device.
DeviceDeleteView.as_view(), name='device_delete'))
| from django.conf.urls import patterns, include, url
from apps.virt.views import node, domain, device, cluster, home
urlpatterns = patterns('', url('^$', home.HomeView.as_view(), name='home'),
url('^cluster/status/$', cluster.ClusterStatusView.as_view(), name=
'cluster_status'), url('^node/list/$', node.NodeListView.as_view(),
name='node_list'), url('^node/add/$', node.NodeCreateView.as_view(),
name='node_add'), url('^node/(?P<pk>\\d+)/libvirt/$', node.
NodeLibvirtView.as_view(), name='node_libvirt'), url(
'^node/(?P<pk>\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.
as_view(), name='node_libvirt_update'), url(
'^node/(?P<pk>\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.
as_view(), name='node_libvirt_updatedomains'), url(
'^node/(?P<pk>\\d+)/libvirt/create/domains/$', node.
CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'), url
('^node/(?P<pk>\\d+)/edit/$', node.NodeUpdateView.as_view(), name=
'node_edit'), url('^node/(?P<pk>\\d+)/delete/$', node.NodeDeleteView.
as_view(), name='node_delete'), url('^domain/list/$', domain.
DomainListView.as_view(), name='domain_list'), url('^domain/add/$',
domain.DomainCreateView.as_view(), name='domain_add'), url(
'^domain/(?P<pk>\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(),
name='domain_libvirt'), url('^domain/(?P<pk>\\d+)/edit/$', domain.
DomainUpdateView.as_view(), name='domain_edit'), url(
'^domain/(?P<pk>\\d+)/delete/$', domain.DomainDeleteView.as_view(),
name='domain_delete'), url('^domain/(?P<pk>\\d+)/libvirt/create/$',
domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'), url(
'^domain/(?P<pk>\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.
as_view(), name='domain_libvirt_reboot'), url(
'^domain/(?P<pk>\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.
as_view(), name='domain_libvirt_shutdown'), url(
'^domain/(?P<pk>\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.
as_view(), name='domain_libvirt_destroy'), url(
'^domain/(?P<pk>\\d+)/libvirt/migrate/(?P<node_pk>\\d+)/$', domain.
LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'), url(
'^domain/(?P<pk>\\d+)/libvirt/resume/$', domain.LibvirtResumeView.
as_view(), name='domain_libvirt_resume'), url(
'^domain/(?P<pk>\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.
as_view(), name='domain_libvirt_suspend'), url(
'^domain/(?P<pk>\\d+)/device/(?P<type>\\w+)/add/$', device.
DeviceCreateView.as_view(), name='device_add'), url(
'^device/(?P<pk>\\d+)/$', device.DeviceUpdateView.as_view(), name=
'device_edit'), url('^device/(?P<pk>\\d+)/attach/$', device.
DeviceAttachView.as_view(), name='device_attach'), url(
'^device/(?P<pk>\\d+)/detach/$', device.DeviceDetachView.as_view(),
name='device_detach'), url('^device/(?P<pk>\\d+)/delete/$', device.
DeviceDeleteView.as_view(), name='device_delete'))
| # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from apps.virt.views import node, domain,device,cluster,home
urlpatterns = patterns('',
# Home
url(r'^$', home.HomeView.as_view(), name='home'),
# Cluster
url(r'^cluster/status/$', cluster.ClusterStatusView.as_view(), name='cluster_status'),
# Node
url(r'^node/list/$', node.NodeListView.as_view(), name='node_list'),
url(r'^node/add/$', node.NodeCreateView.as_view(), name='node_add'),
url(r'^node/(?P<pk>\d+)/libvirt/$', node.NodeLibvirtView.as_view(), name='node_libvirt'),
url(r'^node/(?P<pk>\d+)/libvirt/update/$', node.UpdateCapabilitiesView.as_view(), name='node_libvirt_update'),
url(r'^node/(?P<pk>\d+)/libvirt/update/domains/$', node.UpdateDomainsView.as_view(), name='node_libvirt_updatedomains'),
url(r'^node/(?P<pk>\d+)/libvirt/create/domains/$', node.CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'),
url(r'^node/(?P<pk>\d+)/edit/$', node.NodeUpdateView.as_view(), name='node_edit'),
url(r'^node/(?P<pk>\d+)/delete/$', node.NodeDeleteView.as_view(), name='node_delete'),
# Domain
url(r'^domain/list/$', domain.DomainListView.as_view(), name='domain_list'),
url(r'^domain/add/$', domain.DomainCreateView.as_view(), name='domain_add'),
url(r'^domain/(?P<pk>\d+)/libvirt/$', domain.DomainLibvirtView.as_view(), name='domain_libvirt'),
url(r'^domain/(?P<pk>\d+)/edit/$', domain.DomainUpdateView.as_view(), name='domain_edit'),
url(r'^domain/(?P<pk>\d+)/delete/$', domain.DomainDeleteView.as_view(), name='domain_delete'),
url(r'^domain/(?P<pk>\d+)/libvirt/create/$', domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'),
url(r'^domain/(?P<pk>\d+)/libvirt/reboot/$', domain.LibvirtRebootView.as_view(), name='domain_libvirt_reboot'),
url(r'^domain/(?P<pk>\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.as_view(), name='domain_libvirt_shutdown'),
url(r'^domain/(?P<pk>\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.as_view(), name='domain_libvirt_destroy'),
url(r'^domain/(?P<pk>\d+)/libvirt/migrate/(?P<node_pk>\d+)/$', domain.LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'),
url(r'^domain/(?P<pk>\d+)/libvirt/resume/$', domain.LibvirtResumeView.as_view(), name='domain_libvirt_resume'),
url(r'^domain/(?P<pk>\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.as_view(), name='domain_libvirt_suspend'),
# Device
url(r'^domain/(?P<pk>\d+)/device/(?P<type>\w+)/add/$', device.DeviceCreateView.as_view(), name="device_add"),
url(r'^device/(?P<pk>\d+)/$', device.DeviceUpdateView.as_view(), name="device_edit"),
url(r'^device/(?P<pk>\d+)/attach/$', device.DeviceAttachView.as_view(), name="device_attach"),
url(r'^device/(?P<pk>\d+)/detach/$', device.DeviceDetachView.as_view(), name="device_detach"),
url(r'^device/(?P<pk>\d+)/delete/$', device.DeviceDeleteView.as_view(), name="device_delete")
)
| null | [
0,
1,
2,
3
] |
390 | 545794cf4f0b2ab63b6a90951a78f8bdaca3c9e6 | <mask token>
| <mask token>
def grouping(w):
d = dd(list)
for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,
key=str.casefold)):
d[k].append(v)
return dict(sorted(d.items()))
| from collections import defaultdict as dd
def grouping(w):
d = dd(list)
for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,
key=str.casefold)):
d[k].append(v)
return dict(sorted(d.items()))
| null | null | [
0,
1,
2
] |
391 | ab844143ceddf32982682f5092762af0c97db577 | <mask token>
| from ..translators.translator import Translator
| null | null | null | [
0,
1
] |
392 | 0762c5bec2d796bb7888e3de45e29fb20f88f491 | <mask token>
def GE_pearson(this_looper, core_list=None):
if core_list is None:
core_list = np.unique(this_looper.tr.core_ids)
name = this_looper.sim_name
thtr = this_looper.tr
mask = movie_frames.quantized_mask(this_looper).flatten()
times = thtr.times[mask] + 0
times.shape = times.size, 1
times = times / colors.tff
G = colors.G
PearsonR = np.zeros([len(core_list), len(times)])
PearsonP = np.zeros([len(core_list), len(times)])
PearsonRho = np.zeros([len(core_list), len(times)])
PeakRho = np.zeros([len(core_list), len(times)])
for nc, core_id in enumerate(core_list):
print('GE pearson %s %d' % (name, core_id))
ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
if ms.nparticles < 1000:
sl = slice(None)
c = [0.5] * 4
else:
sl = slice(None, None, 10)
c = [0.1] * 4
rho = ms.density[sl]
rho = rho[:, mask]
PeakRho[nc, :] = rho.max(axis=0)
gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
RRR = ms.r[sl][:, mask]
for n in range(GE2.shape[1]):
the_x = np.log(RRR[:, n])
the_y = np.log(GE2[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonR[nc, n] = r
PearsonP[nc, n] = p
the_y = np.log(rho[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonRho[nc, n] = r
if 0:
fig, ax = plt.subplots(1, 2)
ax[0].plot(times, PearsonR)
fig.savefig('plots_to_sort/phi_box_%s.png' % name)
return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
'PeakRho': PeakRho}
if 0:
fig, ax = plt.subplots(1, 1)
ax.plot(times, GE2, c=c, linewidth=0.1)
axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\nabla \\phi)^2/8 pi G$',
yscale='log', ylim=[ge_min, ge_max])
ax2 = ax.twinx()
c = [1.0, 0.1, 0.1, 0.1]
ax2.plot(times, rho, c=c, linewidth=0.1)
axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\rho$', yscale='log')
outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,
core_id)
fig.savefig(outname)
print(outname)
<mask token>
| <mask token>
reload(hair_dryer)
<mask token>
def GE_pearson(this_looper, core_list=None):
if core_list is None:
core_list = np.unique(this_looper.tr.core_ids)
name = this_looper.sim_name
thtr = this_looper.tr
mask = movie_frames.quantized_mask(this_looper).flatten()
times = thtr.times[mask] + 0
times.shape = times.size, 1
times = times / colors.tff
G = colors.G
PearsonR = np.zeros([len(core_list), len(times)])
PearsonP = np.zeros([len(core_list), len(times)])
PearsonRho = np.zeros([len(core_list), len(times)])
PeakRho = np.zeros([len(core_list), len(times)])
for nc, core_id in enumerate(core_list):
print('GE pearson %s %d' % (name, core_id))
ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
if ms.nparticles < 1000:
sl = slice(None)
c = [0.5] * 4
else:
sl = slice(None, None, 10)
c = [0.1] * 4
rho = ms.density[sl]
rho = rho[:, mask]
PeakRho[nc, :] = rho.max(axis=0)
gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
RRR = ms.r[sl][:, mask]
for n in range(GE2.shape[1]):
the_x = np.log(RRR[:, n])
the_y = np.log(GE2[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonR[nc, n] = r
PearsonP[nc, n] = p
the_y = np.log(rho[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonRho[nc, n] = r
if 0:
fig, ax = plt.subplots(1, 2)
ax[0].plot(times, PearsonR)
fig.savefig('plots_to_sort/phi_box_%s.png' % name)
return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
'PeakRho': PeakRho}
if 0:
fig, ax = plt.subplots(1, 1)
ax.plot(times, GE2, c=c, linewidth=0.1)
axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\nabla \\phi)^2/8 pi G$',
yscale='log', ylim=[ge_min, ge_max])
ax2 = ax.twinx()
c = [1.0, 0.1, 0.1, 0.1]
ax2.plot(times, rho, c=c, linewidth=0.1)
axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\rho$', yscale='log')
outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,
core_id)
fig.savefig(outname)
print(outname)
<mask token>
if 'stuff' not in dir():
stuff = {}
for sim in sims:
core_list = np.unique(TL.loops[sim].tr.core_ids)
stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
T = stuff[sim]['T']
rho = stuff[sim]['PeakRho']
Rphi = stuff[sim]['PR']
ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)
axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
T = stuff[sim]['T']
rho = stuff[sim]['PeakRho']
ax.plot(T, rho.transpose(), c=[0.1] * 4)
axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)
if 0:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
c = [0.1] * 4
XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
ok = ~np.isnan(XX) * ~np.isnan(YY)
XX = XX[ok]
YY = YY[ok]
xbins = np.linspace(XX.min(), XX.max(), 64)
ybins = np.linspace(YY.min(), YY.max(), 64)
hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])
import pcolormesh_helper as pch
pch.helper(hist, xb, yb, ax=ax)
fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 2)
Rphi = stuff[sim]['PR']
ax[0].boxplot(Rphi)
ax[0].plot(Rphi.mean(axis=0))
ax[1].boxplot(stuff[sim]['Prho'])
axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')
axbonk(ax[1], xlabel='frame', ylabel='R rho')
fig.savefig('plots_to_sort/Boxes_%s.png' % sim)
if 0:
from scipy.ndimage import gaussian_filter
fig, ax = plt.subplots()
for sim in stuff:
Rphi = stuff[sim]['PR']
Rrho = stuff[sim]['Prho']
ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'
)
ax.plot(Rrho.mean(axis=0), colors.color[sim])
axbonk(ax, xlabel='frame', ylabel='Rgrad phi')
fig.savefig('plots_to_sort/MeanR_%s.png' % sim)
| <mask token>
reload(hair_dryer)
<mask token>
def GE_pearson(this_looper, core_list=None):
if core_list is None:
core_list = np.unique(this_looper.tr.core_ids)
name = this_looper.sim_name
thtr = this_looper.tr
mask = movie_frames.quantized_mask(this_looper).flatten()
times = thtr.times[mask] + 0
times.shape = times.size, 1
times = times / colors.tff
G = colors.G
PearsonR = np.zeros([len(core_list), len(times)])
PearsonP = np.zeros([len(core_list), len(times)])
PearsonRho = np.zeros([len(core_list), len(times)])
PeakRho = np.zeros([len(core_list), len(times)])
for nc, core_id in enumerate(core_list):
print('GE pearson %s %d' % (name, core_id))
ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
if ms.nparticles < 1000:
sl = slice(None)
c = [0.5] * 4
else:
sl = slice(None, None, 10)
c = [0.1] * 4
rho = ms.density[sl]
rho = rho[:, mask]
PeakRho[nc, :] = rho.max(axis=0)
gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
RRR = ms.r[sl][:, mask]
for n in range(GE2.shape[1]):
the_x = np.log(RRR[:, n])
the_y = np.log(GE2[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonR[nc, n] = r
PearsonP[nc, n] = p
the_y = np.log(rho[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonRho[nc, n] = r
if 0:
fig, ax = plt.subplots(1, 2)
ax[0].plot(times, PearsonR)
fig.savefig('plots_to_sort/phi_box_%s.png' % name)
return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
'PeakRho': PeakRho}
if 0:
fig, ax = plt.subplots(1, 1)
ax.plot(times, GE2, c=c, linewidth=0.1)
axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\nabla \\phi)^2/8 pi G$',
yscale='log', ylim=[ge_min, ge_max])
ax2 = ax.twinx()
c = [1.0, 0.1, 0.1, 0.1]
ax2.plot(times, rho, c=c, linewidth=0.1)
axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\rho$', yscale='log')
outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,
core_id)
fig.savefig(outname)
print(outname)
sims = ['u501', 'u502', 'u503']
if 'stuff' not in dir():
stuff = {}
for sim in sims:
core_list = np.unique(TL.loops[sim].tr.core_ids)
stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
T = stuff[sim]['T']
rho = stuff[sim]['PeakRho']
Rphi = stuff[sim]['PR']
ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)
axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
T = stuff[sim]['T']
rho = stuff[sim]['PeakRho']
ax.plot(T, rho.transpose(), c=[0.1] * 4)
axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)
if 0:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
c = [0.1] * 4
XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
ok = ~np.isnan(XX) * ~np.isnan(YY)
XX = XX[ok]
YY = YY[ok]
xbins = np.linspace(XX.min(), XX.max(), 64)
ybins = np.linspace(YY.min(), YY.max(), 64)
hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])
import pcolormesh_helper as pch
pch.helper(hist, xb, yb, ax=ax)
fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 2)
Rphi = stuff[sim]['PR']
ax[0].boxplot(Rphi)
ax[0].plot(Rphi.mean(axis=0))
ax[1].boxplot(stuff[sim]['Prho'])
axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')
axbonk(ax[1], xlabel='frame', ylabel='R rho')
fig.savefig('plots_to_sort/Boxes_%s.png' % sim)
if 0:
from scipy.ndimage import gaussian_filter
fig, ax = plt.subplots()
for sim in stuff:
Rphi = stuff[sim]['PR']
Rrho = stuff[sim]['Prho']
ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'
)
ax.plot(Rrho.mean(axis=0), colors.color[sim])
axbonk(ax, xlabel='frame', ylabel='Rgrad phi')
fig.savefig('plots_to_sort/MeanR_%s.png' % sim)
| from starter2 import *
from collections import defaultdict
import scipy
import colors
import hair_dryer
reload(hair_dryer)
import three_loopers_u500 as TL
import movie_frames
def GE_pearson(this_looper, core_list=None):
if core_list is None:
core_list = np.unique(this_looper.tr.core_ids)
name = this_looper.sim_name
thtr = this_looper.tr
mask = movie_frames.quantized_mask(this_looper).flatten()
times = thtr.times[mask] + 0
times.shape = times.size, 1
times = times / colors.tff
G = colors.G
PearsonR = np.zeros([len(core_list), len(times)])
PearsonP = np.zeros([len(core_list), len(times)])
PearsonRho = np.zeros([len(core_list), len(times)])
PeakRho = np.zeros([len(core_list), len(times)])
for nc, core_id in enumerate(core_list):
print('GE pearson %s %d' % (name, core_id))
ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
if ms.nparticles < 1000:
sl = slice(None)
c = [0.5] * 4
else:
sl = slice(None, None, 10)
c = [0.1] * 4
rho = ms.density[sl]
rho = rho[:, mask]
PeakRho[nc, :] = rho.max(axis=0)
gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
RRR = ms.r[sl][:, mask]
for n in range(GE2.shape[1]):
the_x = np.log(RRR[:, n])
the_y = np.log(GE2[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonR[nc, n] = r
PearsonP[nc, n] = p
the_y = np.log(rho[:, n])
r, p = scipy.stats.pearsonr(the_x, the_y)
PearsonRho[nc, n] = r
if 0:
fig, ax = plt.subplots(1, 2)
ax[0].plot(times, PearsonR)
fig.savefig('plots_to_sort/phi_box_%s.png' % name)
return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
'PeakRho': PeakRho}
if 0:
fig, ax = plt.subplots(1, 1)
ax.plot(times, GE2, c=c, linewidth=0.1)
axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\nabla \\phi)^2/8 pi G$',
yscale='log', ylim=[ge_min, ge_max])
ax2 = ax.twinx()
c = [1.0, 0.1, 0.1, 0.1]
ax2.plot(times, rho, c=c, linewidth=0.1)
axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\rho$', yscale='log')
outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,
core_id)
fig.savefig(outname)
print(outname)
sims = ['u501', 'u502', 'u503']
if 'stuff' not in dir():
stuff = {}
for sim in sims:
core_list = np.unique(TL.loops[sim].tr.core_ids)
stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
T = stuff[sim]['T']
rho = stuff[sim]['PeakRho']
Rphi = stuff[sim]['PR']
ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)
axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
T = stuff[sim]['T']
rho = stuff[sim]['PeakRho']
ax.plot(T, rho.transpose(), c=[0.1] * 4)
axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)
if 0:
for sim in stuff:
fig, ax = plt.subplots(1, 1)
c = [0.1] * 4
XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
ok = ~np.isnan(XX) * ~np.isnan(YY)
XX = XX[ok]
YY = YY[ok]
xbins = np.linspace(XX.min(), XX.max(), 64)
ybins = np.linspace(YY.min(), YY.max(), 64)
hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])
import pcolormesh_helper as pch
pch.helper(hist, xb, yb, ax=ax)
fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)
if 1:
for sim in stuff:
fig, ax = plt.subplots(1, 2)
Rphi = stuff[sim]['PR']
ax[0].boxplot(Rphi)
ax[0].plot(Rphi.mean(axis=0))
ax[1].boxplot(stuff[sim]['Prho'])
axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')
axbonk(ax[1], xlabel='frame', ylabel='R rho')
fig.savefig('plots_to_sort/Boxes_%s.png' % sim)
if 0:
from scipy.ndimage import gaussian_filter
fig, ax = plt.subplots()
for sim in stuff:
Rphi = stuff[sim]['PR']
Rrho = stuff[sim]['Prho']
ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'
)
ax.plot(Rrho.mean(axis=0), colors.color[sim])
axbonk(ax, xlabel='frame', ylabel='Rgrad phi')
fig.savefig('plots_to_sort/MeanR_%s.png' % sim)
|
from starter2 import *
from collections import defaultdict
import scipy
import colors
import hair_dryer
reload(hair_dryer)
import three_loopers_u500 as TL
import movie_frames
def GE_pearson(this_looper,core_list=None):
if core_list is None:
core_list = np.unique(this_looper.tr.core_ids)
name = this_looper.sim_name
thtr=this_looper.tr
mask = movie_frames.quantized_mask(this_looper).flatten()
times=thtr.times[mask]+0 #the zero makes a copy
times.shape=times.size,1
times=times/colors.tff
G = colors.G
#gx = thtr.track_dict['grav_x']
#gy = thtr.track_dict['grav_y']
#gz = thtr.track_dict['grav_z']
#GE2 = -1/(8*np.pi)*(gx*gx+gy*gy+gz*gz)
#ge_min=GE2.min()
#ge_max=GE2.max()
PearsonR = np.zeros([len(core_list), len(times)])
PearsonP = np.zeros([len(core_list), len(times)])
PearsonRho = np.zeros([len(core_list), len(times)])
PeakRho = np.zeros([len(core_list), len(times)])
for nc, core_id in enumerate(core_list):
print('GE pearson %s %d'%(name,core_id))
ms = trackage.mini_scrubber(thtr,core_id, do_velocity=False)
#ms.particle_pos(core_id)
if ms.nparticles < 1000:
sl=slice(None)
c=[0.5]*4
else:
sl = slice(None,None,10)
#c=[0,0,0,0.1]
c=[0.1]*4
rho = ms.density[sl]
rho = rho[:,mask]
PeakRho[nc,:]=rho.max(axis=0)
gx = thtr.c([core_id],'grav_x')[sl][:,mask]
gy = thtr.c([core_id],'grav_y')[sl][:,mask]
gz = thtr.c([core_id],'grav_z')[sl][:,mask]
GE2 = 1/(8*np.pi*G)*(gx*gx+gy*gy+gz*gz)
RRR = ms.r[sl][:,mask]
for n in range(GE2.shape[1]):
the_x=np.log(RRR[:,n])
the_y=np.log(GE2[:,n])
#the_y=rho[:,n]
r,p=scipy.stats.pearsonr(the_x,the_y)
PearsonR[nc,n]=r
PearsonP[nc,n]=p
the_y=np.log(rho[:,n])
r,p=scipy.stats.pearsonr(the_x,the_y)
PearsonRho[nc,n]=r
if 0:
fig,ax=plt.subplots(1,2)
ax[0].plot(times,PearsonR)
#ax[0].boxplot(PearsonR)
#ax[1].boxplot(PearsonRho)
fig.savefig('plots_to_sort/phi_box_%s.png'%name)
return {'PR':PearsonR, 'PP':PearsonP, 'Prho':PearsonRho, 'T':times, 'PeakRho':PeakRho}
if 0:
fig,ax=plt.subplots(1,1)
ax.plot(times , GE2, c=c, linewidth=0.1)
axbonk(ax,xlabel=r'$t/t_{ff}$', ylabel=r'$(\nabla \phi)^2/8 pi G$',yscale='log', ylim=[ge_min,ge_max])
ax2=ax.twinx()
c=[1.0,0.1,0.1,0.1]
ax2.plot(times , rho, c=c, linewidth=0.1)
axbonk(ax2,xlabel=r'$t/t_{ff}$', ylabel=r'$\rho$',yscale='log')
outname='plots_to_sort/%s_GE_t_c%04d.png'%(this_looper.sim_name,core_id)
fig.savefig(outname)
print(outname)
sims=['u501', 'u502','u503']
if 'stuff' not in dir():
stuff={}
for sim in sims:
core_list = np.unique(TL.loops[sim].tr.core_ids)
#core_list=core_list[:10]
stuff[sim] = GE_pearson(TL.loops[sim],core_list=core_list)
if 1:
for sim in stuff:
fig,ax=plt.subplots(1,1)
T = stuff[sim]['T']
rho=stuff[sim]['PeakRho']
Rphi=stuff[sim]['PR']
ax.plot(Rphi.transpose() ,rho.transpose(),c=[0.1]*4)
axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png'%sim)
if 1:
for sim in stuff:
fig,ax=plt.subplots(1,1)
T = stuff[sim]['T']
rho=stuff[sim]['PeakRho']
ax.plot(T,rho.transpose(),c=[0.1]*4)
axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_%s.png'%sim)
if 0:
for sim in stuff:
fig,ax=plt.subplots(1,1)
c=[0.1]*4
#ax.plot( stuff[sim]['T'], stuff[sim]['PR'].transpose(),c=c)
#ax.scatter( stuff[sim]['Prho'].transpose(), stuff[sim]['PR'].transpose(),c=c)
XX,YY= stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
ok = (~np.isnan(XX))*(~np.isnan(YY))
XX=XX[ok]
YY=YY[ok]
xbins = np.linspace( XX.min(), XX.max(), 64)
ybins = np.linspace( YY.min(), YY.max(), 64)
hist, xb, yb = np.histogram2d(XX,YY, bins=[xbins,ybins])
import pcolormesh_helper as pch
pch.helper(hist,xb,yb,ax=ax)
fig.savefig('plots_to_sort/RGE_Rrho_%s.png'%sim)
if 1:
for sim in stuff:
fig,ax=plt.subplots(1,2)
Rphi = stuff[sim]['PR']
ax[0].boxplot( Rphi )
ax[0].plot( Rphi.mean(axis=0))
ax[1].boxplot( stuff[sim]['Prho'])
axbonk(ax[0],xlabel='frame',ylabel='Rgrad phi')
axbonk(ax[1],xlabel='frame',ylabel='R rho')
fig.savefig('plots_to_sort/Boxes_%s.png'%(sim))
if 0:
from scipy.ndimage import gaussian_filter
fig,ax=plt.subplots()
for sim in stuff:
Rphi = stuff[sim]['PR']
Rrho = stuff[sim]['Prho']
ax.plot( gaussian_filter(Rphi.mean(axis=0),1), colors.color[sim] +'--')
ax.plot( Rrho.mean(axis=0), colors.color[sim])
axbonk(ax,xlabel='frame',ylabel='Rgrad phi')
fig.savefig('plots_to_sort/MeanR_%s.png'%(sim))
| [
1,
2,
3,
4,
5
] |
393 | 7bbbd30ba1578c1165ccf5c2fff22609c16dfd64 | <mask token>
| <mask token>
print('Os valores são \x1b[32m{}\x1b[m e \x1b[31m{}\x1b[m !!!'.format(a, b))
<mask token>
print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[
'amarelo']))
| <mask token>
a = 3
b = 5
print('Os valores são \x1b[32m{}\x1b[m e \x1b[31m{}\x1b[m !!!'.format(a, b))
nome = 'Kátia'
cores = {'limpa': '\x1b]m', 'azul': '\x1b[34m', 'amarelo': '\x1b[33m',
'pretoebranco': '\x1b[7;30m'}
print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[
'amarelo']))
| """
Cores no terminal
"""
a = 3
b = 5
print('Os valores são \033[32m{}\033[m e \033[31m{}\033[m !!!'.format(a, b))
# Dicionário de cores:
nome = 'Kátia'
cores = {'limpa':'\033]m',
'azul':'\033[34m',
'amarelo':'\033[33m',
'pretoebranco':'\033[7;30m'}
print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores['amarelo']))
# dá pra colocar as cores dentro das chaves tb.
| null | [
0,
1,
2,
3
] |
394 | 8ca77ed608108a9aa693acb686156e661794d7ab | # A perfect number is a number for which the sum of its proper divisors is exactly equal to the number.
# For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28,
# which means that 28 is a perfect number.
#
# A number whose proper divisors are less than the number is called deficient and
# a number whose proper divisors exceed the number is called abundant.
#
# As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16,
# the smallest number that can be written as the sum of two abundant numbers is 24.
# By mathematical analysis, it can be shown that all integers greater than 28123
# can be written as the sum of two abundant numbers.
# However, this upper limit cannot be reduced any further by analysis even though
# it is known that the greatest number that cannot be expressed as the sum of two abundant numbers
# is less than this limit.
#
# Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.
UPPER_LIMIT = 28124
import math
import cProfile
from bisect import bisect
def sum_divisors(N):
total = 1
for i in xrange(2, math.sqrt(N)+1):
if (N % i == 0):
total += i
if ((i * i) != N):
total += (N / i)
return total
abundant = []
for i in xrange(11, UPPER_LIMIT):
if (sum_divisors(i) > i):
abundant.append(i)
print "found: ", len(abundant), " abundant numbers less than ", UPPER_LIMIT
print "highest abundant number: ", abundant[-1]
# Smart: compute all the sums of the abundant numbers we have. Store everything in an array.
def AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers():
# Create an array that is zero everywhere, then punch out the number
# that are expressible as the sum of two abundant numbers
integers = [0] * UPPER_LIMIT
for i in xrange(0, len(abundant)):
for j in xrange(i, len(abundant)):
addend = abundant[i] + abundant[j]
if (addend < UPPER_LIMIT):
integers[addend] = 1
else:
break; #don't bother going this high
# We've filled in the array. Now do the sum
return sum(i for i in xrange(0, UPPER_LIMIT) if integers[i] == 0)
#cProfile.run('AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()')
print AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()
# Somebody else (norvig) did this, which is really slick!
def norvig():
abundants = set(i for i in range(1,28124) if sum_divisors(i) > i)
def abundantsum(i):
return any(i-a in abundants for a in abundants)
return sum(i for i in range(1,28124) if not abundantsum(i))
| null | null | null | null | [
0
] |
395 | b5ac3695a224d531f5baa53a07d3c894d44e8c4c | <mask token>
def AtoD(vin):
code = [(0) for i in range(12)]
code[0] = 1 if vin > 0 else 0
for i in range(6):
vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2
code[i + 1] = 1 if vin > 0 else 0
for i in range(5):
vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2
code[i + 7] = 1 if vin > 0 else 0
dec_num = 0
for b in code:
dec_num = dec_num * 2 + b
return dec_num
<mask token>
def DtoA_ideal(code):
v = -1.0
for i in range(12):
v += 2 ** (11 - i) * code[i] / 2048
return v
<mask token>
| <mask token>
print(CP_LSB)
<mask token>
print(Wi_MSB)
print(Wi_LSB)
def AtoD(vin):
code = [(0) for i in range(12)]
code[0] = 1 if vin > 0 else 0
for i in range(6):
vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2
code[i + 1] = 1 if vin > 0 else 0
for i in range(5):
vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2
code[i + 7] = 1 if vin > 0 else 0
dec_num = 0
for b in code:
dec_num = dec_num * 2 + b
return dec_num
print(AtoD(0.5))
def DtoA_ideal(code):
v = -1.0
for i in range(12):
v += 2 ** (11 - i) * code[i] / 2048
return v
print(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))
<mask token>
for i in range(2 * n):
if y[i + 1] != y[i]:
bin_size[y[i]] = x[i + 1] - left
left = x[i + 1]
<mask token>
plt.plot(bin_num[1:4094], DNL[1:4094])
plt.show()
| <mask token>
Ci_MSB = [32, 16, 8, 4, 2, 1]
Ci_LSB = [16, 8, 4, 2, 1]
CB = 1
CP_B = 0
CP_LSB = (32 - 1) * (CB + CP_B - 1) + 10
print(CP_LSB)
CP_MSB = 0
Csum_LSB = sum(Ci_LSB) + CP_LSB
Csum_MSB = sum(Ci_MSB) + CP_MSB
Cx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB
Wi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]
Wi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]
print(Wi_MSB)
print(Wi_LSB)
def AtoD(vin):
code = [(0) for i in range(12)]
code[0] = 1 if vin > 0 else 0
for i in range(6):
vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2
code[i + 1] = 1 if vin > 0 else 0
for i in range(5):
vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2
code[i + 7] = 1 if vin > 0 else 0
dec_num = 0
for b in code:
dec_num = dec_num * 2 + b
return dec_num
print(AtoD(0.5))
def DtoA_ideal(code):
v = -1.0
for i in range(12):
v += 2 ** (11 - i) * code[i] / 2048
return v
print(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))
n = 1000000
x = [(-1 + i / n) for i in range(2 * n + 1)]
y = [AtoD(v) for v in x]
bin_num = [i for i in range(4096)]
bin_size = [(0) for i in range(4096)]
left = x[0]
for i in range(2 * n):
if y[i + 1] != y[i]:
bin_size[y[i]] = x[i + 1] - left
left = x[i + 1]
DNL = [(data * 2047 - 1) for data in bin_size]
plt.plot(bin_num[1:4094], DNL[1:4094])
plt.show()
| import matplotlib.pyplot as plt
Ci_MSB = [32, 16, 8, 4, 2, 1]
Ci_LSB = [16, 8, 4, 2, 1]
CB = 1
CP_B = 0
CP_LSB = (32 - 1) * (CB + CP_B - 1) + 10
print(CP_LSB)
CP_MSB = 0
Csum_LSB = sum(Ci_LSB) + CP_LSB
Csum_MSB = sum(Ci_MSB) + CP_MSB
Cx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB
Wi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]
Wi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]
print(Wi_MSB)
print(Wi_LSB)
def AtoD(vin):
code = [(0) for i in range(12)]
code[0] = 1 if vin > 0 else 0
for i in range(6):
vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2
code[i + 1] = 1 if vin > 0 else 0
for i in range(5):
vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2
code[i + 7] = 1 if vin > 0 else 0
dec_num = 0
for b in code:
dec_num = dec_num * 2 + b
return dec_num
print(AtoD(0.5))
def DtoA_ideal(code):
v = -1.0
for i in range(12):
v += 2 ** (11 - i) * code[i] / 2048
return v
print(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))
n = 1000000
x = [(-1 + i / n) for i in range(2 * n + 1)]
y = [AtoD(v) for v in x]
bin_num = [i for i in range(4096)]
bin_size = [(0) for i in range(4096)]
left = x[0]
for i in range(2 * n):
if y[i + 1] != y[i]:
bin_size[y[i]] = x[i + 1] - left
left = x[i + 1]
DNL = [(data * 2047 - 1) for data in bin_size]
plt.plot(bin_num[1:4094], DNL[1:4094])
plt.show()
| import matplotlib.pyplot as plt
Ci_MSB = [32,16,8,4,2,1]
Ci_LSB = [16,8,4,2,1]
CB = 1
CP_B = 0
CP_LSB = (32-1)*(CB+CP_B-1)+10
print(CP_LSB)
CP_MSB = 0
Csum_LSB = sum(Ci_LSB)+CP_LSB
Csum_MSB = sum(Ci_MSB)+CP_MSB
Cx = Csum_LSB*Csum_MSB+(CB+CP_B)*Csum_LSB+(CB+CP_B)*Csum_MSB
Wi_MSB = [Ci_MSB[i]*(CB+CP_B+Csum_LSB)/Cx for i in range (6)]
Wi_LSB = [Ci_LSB[i]*(CB+CP_B)/Cx for i in range (5)]
print(Wi_MSB)
print(Wi_LSB)
def AtoD(vin):
code = [0 for i in range(12)]
code[0] = 1 if vin > 0 else 0
for i in range(6):
vin = vin - Wi_MSB[i] * (code[i]-0.5)*2
code[i+1] = 1 if vin > 0 else 0
for i in range(5):
vin = vin - Wi_LSB[i] * (code[i+6]-0.5)*2
code[i + 7] = 1 if vin > 0 else 0
dec_num = 0
for b in code:
dec_num = dec_num * 2 + b
return dec_num
print(AtoD(0.50))
def DtoA_ideal(code):
v = -1.0
for i in range(12):
v += 2**(11-i)*code[i]/2048
return v
print(DtoA_ideal([1,1,1,1,1,1,1,1,1,1,1,1]))
n=1000000
x = [-1+i/n for i in range(2*n+1)]
y = [AtoD(v) for v in x]
# print(y[int(n/6):int(n/6)+100])
bin_num = [i for i in range(4096)]
bin_size = [0 for i in range(4096)]
left = x[0]
for i in range(2*n):
if y[i+1]!=y[i]:
bin_size[y[i]] = x[i+1] - left
left = x[i+1]
# print(bin_size)
DNL = [data*2047 -1 for data in bin_size]
plt.plot(bin_num[1:4094],DNL[1:4094])
# plt.xlim(1000,1005)
plt.show()
# y = [DtoA_ideal(AtoD(v)) for v in x]
# plt.plot(x,y)
# plt.xlim(-0.01,0)
# plt.ylim(-0.01,0)
# plt.show()
# def Vout(index):
# V = 0.0
# for i in range(6):
# V = V + Wi_MSB[i] * int(format(index,'b').zfill(11)[i])*1
# for i in range(5):
# V = V + Wi_LSB[i] * int(format(index,'b').zfill(11)[i+6])*1
# return V
# print(Vout(2047))
#
# x = [i for i in range(2048)]
# y = [Vout(i) for i in range(2048)]
# DNL = [0]+[y[i+1]-y[i]-Vout(2047)/2047 for i in range(2047)]
# DNL = [data*2048 for data in DNL]
# INL = [y[i] -i*Vout(2047)/2047 for i in range (2048)]
# INL = [data*2048 for data in INL]
#
# plt.plot(x,DNL)
# plt.show()
| [
2,
3,
4,
5,
6
] |
396 | c9d12f14fa0e46e4590746d45862fe255b415a1d | <mask token>
| <mask token>
@register.simple_tag
def gender(gender, masculine, feminine, neuter, plurale):
if gender == Obligee.GENDERS.MASCULINE:
return masculine
elif gender == Obligee.GENDERS.FEMININE:
return feminine
elif gender == Obligee.GENDERS.NEUTER:
return neuter
elif gender == Obligee.GENDERS.PLURALE:
return plurale
else:
return u''
| <mask token>
register = Library()
@register.simple_tag
def gender(gender, masculine, feminine, neuter, plurale):
if gender == Obligee.GENDERS.MASCULINE:
return masculine
elif gender == Obligee.GENDERS.FEMININE:
return feminine
elif gender == Obligee.GENDERS.NEUTER:
return neuter
elif gender == Obligee.GENDERS.PLURALE:
return plurale
else:
return u''
| from poleno.utils.template import Library
from chcemvediet.apps.obligees.models import Obligee
register = Library()
@register.simple_tag
def gender(gender, masculine, feminine, neuter, plurale):
if gender == Obligee.GENDERS.MASCULINE:
return masculine
elif gender == Obligee.GENDERS.FEMININE:
return feminine
elif gender == Obligee.GENDERS.NEUTER:
return neuter
elif gender == Obligee.GENDERS.PLURALE:
return plurale
else:
return u''
| # vim: expandtab
# -*- coding: utf-8 -*-
from poleno.utils.template import Library
from chcemvediet.apps.obligees.models import Obligee
register = Library()
@register.simple_tag
def gender(gender, masculine, feminine, neuter, plurale):
if gender == Obligee.GENDERS.MASCULINE:
return masculine
elif gender == Obligee.GENDERS.FEMININE:
return feminine
elif gender == Obligee.GENDERS.NEUTER:
return neuter
elif gender == Obligee.GENDERS.PLURALE:
return plurale
else:
return u''
| [
0,
1,
2,
3,
4
] |
397 | 58e023c3c453d1e190fdb5bc457358f42d1bd93f | class BruteForceSolution:
<mask token>
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
<mask token>
| class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
<mask token>
| class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
<mask token>
print(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
print(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
| class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
example = BruteForceSolution()
exampleTwo = Solution()
print(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
print(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))
| # https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/
# BruteForce
class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
example = BruteForceSolution()
exampleTwo = Solution()
print(example.smallerNumbersThanCurrent([8,1,2,2,3]))
print(exampleTwo.smallerNumbersThanCurrent([8,1,2,2,3]))
| [
3,
4,
5,
6,
7
] |
398 | b4b7e20c9558bd1b29a1c1fa24bfca8a2d292b27 | <mask token>
| <mask token>
for root in rootsToAdd:
for elem in root:
root1.append(elem)
rutas0k_10k.write('rutas/rutas0k-110k.xml')
| <mask token>
rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')
rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')
rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')
rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')
rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')
rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')
root1 = rutas0k_10k.getroot()
root2 = rutas10k_30k.getroot()
root3 = rutas30k_50k.getroot()
root4 = rutas50k_70k.getroot()
root5 = rutas70k_90k.getroot()
root6 = rutas90k_110k.getroot()
rootsToAdd = [root2, root3, root4, root5, root6]
for root in rootsToAdd:
for elem in root:
root1.append(elem)
rutas0k_10k.write('rutas/rutas0k-110k.xml')
| import xml.etree.ElementTree as ET
rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')
rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')
rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')
rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')
rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')
rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')
root1 = rutas0k_10k.getroot()
root2 = rutas10k_30k.getroot()
root3 = rutas30k_50k.getroot()
root4 = rutas50k_70k.getroot()
root5 = rutas70k_90k.getroot()
root6 = rutas90k_110k.getroot()
rootsToAdd = [root2, root3, root4, root5, root6]
for root in rootsToAdd:
for elem in root:
root1.append(elem)
rutas0k_10k.write('rutas/rutas0k-110k.xml')
| import xml.etree.ElementTree as ET
#tree = ET.parse('rutas/rutas_prueba.xml')
#treeToAdd = ET.parse('rutas/rutas_prueba_agregar.xml')
#root = tree.getroot()
#git rootToAdd = treeToAdd.getroot()
#for child in root:
# for test in child:
# print(test.tag, test.attrib)
#for elem in root.iter():
# print(elem.tag)
#prueba = [elem.tag for elem in root.iter()]
#print(prueba)
#print(ET.tostring(root, encoding='utf8').decode('utf8'))
# for elem in rootToAdd:
# root.append(elem)
#
# tree.write('rutas/probando_agregados.xml')
#get the tree for each routes file
rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')
rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')
rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')
rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')
rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')
rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')
#root for each routes tree
root1 = rutas0k_10k.getroot()
root2 = rutas10k_30k.getroot()
root3 = rutas30k_50k.getroot()
root4 = rutas50k_70k.getroot()
root5 = rutas70k_90k.getroot()
root6 = rutas90k_110k.getroot()
#each root except first root
rootsToAdd = [root2,root3,root4,root5,root6]
#add each element to the first tree
for root in rootsToAdd:
for elem in root:
root1.append(elem)
#write the tree to a new file
rutas0k_10k.write('rutas/rutas0k-110k.xml')
| [
0,
1,
2,
3,
4
] |
399 | 97bbb181cbc0f5bfbf0b2298133fc226b6217d91 | <mask token>
| <mask token>
if not os.path.exists(pb_path):
os.makedirs(pb_path)
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
<mask token>
if __name__ == '__main__':
first_shape = None
anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='anchor')
similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='similar')
dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='dissimilar')
labels_placeholder = tf.placeholder(tf.float32, shape=[None if
first_shape is None else first_shape * 3], name='labels')
is_training_placeholder = tf.placeholder_with_default(False, shape=(),
name='is_training')
siamese_net = siameseNet.siameseNet()
anchor = siamese_net.inference(anchor_placeholder, reuse=False,
is_training=is_training_placeholder)
similar = siamese_net.inference(similar_placeholder, reuse=True,
is_training=is_training_placeholder)
dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,
is_training=is_training_placeholder)
loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,
labels_placeholder, margin)
flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')
flatten_out_similar = tf.identity(similar, name='flatten_similar')
flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
global_step = tf.Variable(0, trainable=False)
with tf.control_dependencies([tf.group(*update_ops)]):
train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,
global_step=global_step)
var_list = tf.trainable_variables()
if global_step is not None:
var_list.append(global_step)
g_list = tf.global_variables()
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list += bn_moving_vars
ckpt_saver = tf.train.Saver()
train_dataset = dataset.dataset(train_image_root, batch_size,
support_image_extensions, input_height, input_width, channals)
test_dataset = dataset.dataset(test_image_root, batch_size,
support_image_extensions, input_height, input_width, channals)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
total_iters_num = 0
for epoch_num in range(total_epoch_num):
train_images_num = train_dataset.sample_len
cur_epoch_iters_num = train_images_num // batch_size
for iters_num in range(cur_epoch_iters_num):
(train_anchor, train_similar, train_dissimilar, train_labels
) = train_dataset.next_triplet_batch()
test_anchor, test_similar, test_dissimilar, test_labels = (
test_dataset.next_triplet_batch())
if train_anchor is None or test_anchor is None:
continue
train_dict = {anchor_placeholder: train_anchor,
similar_placeholder: train_similar,
dissimilar_placeholder: train_dissimilar,
labels_placeholder: train_labels,
is_training_placeholder: True}
test_dict = {anchor_placeholder: test_anchor,
similar_placeholder: test_similar,
dissimilar_placeholder: test_dissimilar,
labels_placeholder: test_labels,
is_training_placeholder: False}
_, _global_step = sess.run([train_step, global_step],
feed_dict=train_dict)
anchor_out, similar_out, dissimilar_out = sess.run([
flatten_out_anchor, flatten_out_similar,
flatten_out_dissimilar], feed_dict=train_dict)
_train_loss, _train_pos_dist, _train_neg_dist = sess.run([
loss, pos_dist, neg_dist], feed_dict=train_dict)
_test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,
pos_dist, neg_dist], feed_dict=test_dict)
print('distance:', list(zip(_train_pos_dist.flatten(),
_train_neg_dist.flatten()))[:5])
one_moving_meaning_show = 'No mean or variance'
if len(bn_moving_vars) > 0:
one_moving_meaning = sess.graph.get_tensor_by_name(
bn_moving_vars[0].name)
one_moving_meaning_show = '{}={}'.format(bn_moving_vars
[0].name, np.mean(one_moving_meaning.eval()))
print(one_moving_meaning_show)
show_text = (
'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'
.format(epoch_num, iters_num + 1, _global_step,
_train_loss, '0.99', _test_loss))
print(show_text)
if _global_step % snapshot == 0:
constant_graph = graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['flatten_anchor'])
save_model_name = model_name + '-' + str(_global_step
) + '.pb'
with tf.gfile.FastGFile(pb_path + save_model_name, mode
='wb') as fw:
fw.write(constant_graph.SerializeToString())
ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',
global_step=total_iters_num)
print('Successfully saved model {}'.format(save_model_name)
)
| <mask token>
batch_size = 64
input_height = 32
input_width = 32
total_epoch_num = 50
snapshot = 100
support_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']
margin = 1.0
channals = 3
train_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'
test_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/test'
model_path = 'models/'
pb_path = os.path.join(model_path, 'pb/')
ckpt_path = os.path.join(model_path, 'ckpt/')
if not os.path.exists(pb_path):
os.makedirs(pb_path)
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
model_name = 'siamese_triplet_28out_allloss_bn'
if __name__ == '__main__':
first_shape = None
anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='anchor')
similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='similar')
dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='dissimilar')
labels_placeholder = tf.placeholder(tf.float32, shape=[None if
first_shape is None else first_shape * 3], name='labels')
is_training_placeholder = tf.placeholder_with_default(False, shape=(),
name='is_training')
siamese_net = siameseNet.siameseNet()
anchor = siamese_net.inference(anchor_placeholder, reuse=False,
is_training=is_training_placeholder)
similar = siamese_net.inference(similar_placeholder, reuse=True,
is_training=is_training_placeholder)
dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,
is_training=is_training_placeholder)
loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,
labels_placeholder, margin)
flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')
flatten_out_similar = tf.identity(similar, name='flatten_similar')
flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
global_step = tf.Variable(0, trainable=False)
with tf.control_dependencies([tf.group(*update_ops)]):
train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,
global_step=global_step)
var_list = tf.trainable_variables()
if global_step is not None:
var_list.append(global_step)
g_list = tf.global_variables()
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list += bn_moving_vars
ckpt_saver = tf.train.Saver()
train_dataset = dataset.dataset(train_image_root, batch_size,
support_image_extensions, input_height, input_width, channals)
test_dataset = dataset.dataset(test_image_root, batch_size,
support_image_extensions, input_height, input_width, channals)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
total_iters_num = 0
for epoch_num in range(total_epoch_num):
train_images_num = train_dataset.sample_len
cur_epoch_iters_num = train_images_num // batch_size
for iters_num in range(cur_epoch_iters_num):
(train_anchor, train_similar, train_dissimilar, train_labels
) = train_dataset.next_triplet_batch()
test_anchor, test_similar, test_dissimilar, test_labels = (
test_dataset.next_triplet_batch())
if train_anchor is None or test_anchor is None:
continue
train_dict = {anchor_placeholder: train_anchor,
similar_placeholder: train_similar,
dissimilar_placeholder: train_dissimilar,
labels_placeholder: train_labels,
is_training_placeholder: True}
test_dict = {anchor_placeholder: test_anchor,
similar_placeholder: test_similar,
dissimilar_placeholder: test_dissimilar,
labels_placeholder: test_labels,
is_training_placeholder: False}
_, _global_step = sess.run([train_step, global_step],
feed_dict=train_dict)
anchor_out, similar_out, dissimilar_out = sess.run([
flatten_out_anchor, flatten_out_similar,
flatten_out_dissimilar], feed_dict=train_dict)
_train_loss, _train_pos_dist, _train_neg_dist = sess.run([
loss, pos_dist, neg_dist], feed_dict=train_dict)
_test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,
pos_dist, neg_dist], feed_dict=test_dict)
print('distance:', list(zip(_train_pos_dist.flatten(),
_train_neg_dist.flatten()))[:5])
one_moving_meaning_show = 'No mean or variance'
if len(bn_moving_vars) > 0:
one_moving_meaning = sess.graph.get_tensor_by_name(
bn_moving_vars[0].name)
one_moving_meaning_show = '{}={}'.format(bn_moving_vars
[0].name, np.mean(one_moving_meaning.eval()))
print(one_moving_meaning_show)
show_text = (
'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'
.format(epoch_num, iters_num + 1, _global_step,
_train_loss, '0.99', _test_loss))
print(show_text)
if _global_step % snapshot == 0:
constant_graph = graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['flatten_anchor'])
save_model_name = model_name + '-' + str(_global_step
) + '.pb'
with tf.gfile.FastGFile(pb_path + save_model_name, mode
='wb') as fw:
fw.write(constant_graph.SerializeToString())
ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',
global_step=total_iters_num)
print('Successfully saved model {}'.format(save_model_name)
)
| import tensorflow as tf
from tensorflow.python.framework import graph_util
from net import siameseNet_batchnorm as siameseNet
import dataset
import numpy as np
import cv2
import os
batch_size = 64
input_height = 32
input_width = 32
total_epoch_num = 50
snapshot = 100
support_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']
margin = 1.0
channals = 3
train_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'
test_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/test'
model_path = 'models/'
pb_path = os.path.join(model_path, 'pb/')
ckpt_path = os.path.join(model_path, 'ckpt/')
if not os.path.exists(pb_path):
os.makedirs(pb_path)
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
model_name = 'siamese_triplet_28out_allloss_bn'
if __name__ == '__main__':
first_shape = None
anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='anchor')
similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='similar')
dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,
input_height, input_width, channals], name='dissimilar')
labels_placeholder = tf.placeholder(tf.float32, shape=[None if
first_shape is None else first_shape * 3], name='labels')
is_training_placeholder = tf.placeholder_with_default(False, shape=(),
name='is_training')
siamese_net = siameseNet.siameseNet()
anchor = siamese_net.inference(anchor_placeholder, reuse=False,
is_training=is_training_placeholder)
similar = siamese_net.inference(similar_placeholder, reuse=True,
is_training=is_training_placeholder)
dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,
is_training=is_training_placeholder)
loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,
labels_placeholder, margin)
flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')
flatten_out_similar = tf.identity(similar, name='flatten_similar')
flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
global_step = tf.Variable(0, trainable=False)
with tf.control_dependencies([tf.group(*update_ops)]):
train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,
global_step=global_step)
var_list = tf.trainable_variables()
if global_step is not None:
var_list.append(global_step)
g_list = tf.global_variables()
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list += bn_moving_vars
ckpt_saver = tf.train.Saver()
train_dataset = dataset.dataset(train_image_root, batch_size,
support_image_extensions, input_height, input_width, channals)
test_dataset = dataset.dataset(test_image_root, batch_size,
support_image_extensions, input_height, input_width, channals)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
total_iters_num = 0
for epoch_num in range(total_epoch_num):
train_images_num = train_dataset.sample_len
cur_epoch_iters_num = train_images_num // batch_size
for iters_num in range(cur_epoch_iters_num):
(train_anchor, train_similar, train_dissimilar, train_labels
) = train_dataset.next_triplet_batch()
test_anchor, test_similar, test_dissimilar, test_labels = (
test_dataset.next_triplet_batch())
if train_anchor is None or test_anchor is None:
continue
train_dict = {anchor_placeholder: train_anchor,
similar_placeholder: train_similar,
dissimilar_placeholder: train_dissimilar,
labels_placeholder: train_labels,
is_training_placeholder: True}
test_dict = {anchor_placeholder: test_anchor,
similar_placeholder: test_similar,
dissimilar_placeholder: test_dissimilar,
labels_placeholder: test_labels,
is_training_placeholder: False}
_, _global_step = sess.run([train_step, global_step],
feed_dict=train_dict)
anchor_out, similar_out, dissimilar_out = sess.run([
flatten_out_anchor, flatten_out_similar,
flatten_out_dissimilar], feed_dict=train_dict)
_train_loss, _train_pos_dist, _train_neg_dist = sess.run([
loss, pos_dist, neg_dist], feed_dict=train_dict)
_test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,
pos_dist, neg_dist], feed_dict=test_dict)
print('distance:', list(zip(_train_pos_dist.flatten(),
_train_neg_dist.flatten()))[:5])
one_moving_meaning_show = 'No mean or variance'
if len(bn_moving_vars) > 0:
one_moving_meaning = sess.graph.get_tensor_by_name(
bn_moving_vars[0].name)
one_moving_meaning_show = '{}={}'.format(bn_moving_vars
[0].name, np.mean(one_moving_meaning.eval()))
print(one_moving_meaning_show)
show_text = (
'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'
.format(epoch_num, iters_num + 1, _global_step,
_train_loss, '0.99', _test_loss))
print(show_text)
if _global_step % snapshot == 0:
constant_graph = graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['flatten_anchor'])
save_model_name = model_name + '-' + str(_global_step
) + '.pb'
with tf.gfile.FastGFile(pb_path + save_model_name, mode
='wb') as fw:
fw.write(constant_graph.SerializeToString())
ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',
global_step=total_iters_num)
print('Successfully saved model {}'.format(save_model_name)
)
| import tensorflow as tf
from tensorflow.python.framework import graph_util
from net import siameseNet_batchnorm as siameseNet
import dataset
import numpy as np
import cv2
import os
batch_size=64
input_height=32
input_width=32
total_epoch_num=50
snapshot=100
support_image_extensions=[".jpg",".png",".jpeg",".bmp"]
margin=1.0
channals=3
train_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/train"
test_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/test"
model_path="models/"
pb_path=os.path.join(model_path,"pb/")
ckpt_path=os.path.join(model_path,"ckpt/")
if not os.path.exists(pb_path):
os.makedirs(pb_path)
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
model_name="siamese_triplet_28out_allloss_bn"
if __name__ == '__main__':
# image_paths,labels=get_images_path(test_image_root)
# data=next_batch(True,None,image_paths,labels)
# for left,right,label in zip(*data):
# cv2.imshow("left",left)
# cv2.imshow("right", right)
# print(label)
# cv2.waitKey(0)
first_shape=None
anchor_placeholder = tf.placeholder(tf.float32,shape=[first_shape,input_height,input_width,channals],name="anchor")
similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="similar")
dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="dissimilar")
labels_placeholder = tf.placeholder(tf.float32, shape=
[None if first_shape is None else first_shape * 3, ], name="labels")
is_training_placeholder = tf.placeholder_with_default(False, shape=(), name="is_training")
siamese_net=siameseNet.siameseNet()
anchor = siamese_net.inference(anchor_placeholder,reuse=False,is_training=is_training_placeholder)
similar = siamese_net.inference(similar_placeholder,reuse=True,is_training=is_training_placeholder)
dissimilar = siamese_net.inference(dissimilar_placeholder,reuse=True,is_training=is_training_placeholder)
loss,pos_dist,neg_dist = siamese_net.loss(anchor,similar,dissimilar,labels_placeholder,margin)
flatten_out_anchor = tf.identity(anchor, name="flatten_anchor")
flatten_out_similar = tf.identity(similar, name="flatten_similar")
flatten_out_dissimilar = tf.identity(dissimilar, name="flatten_dissimilar")
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
global_step = tf.Variable(0, trainable=False)
# learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.9)
# optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
with tf.control_dependencies([tf.group(*update_ops)]):
# train_step = optimizer.minimize(loss, global_step)
train_step = tf.train.MomentumOptimizer(0.01, 0.90).\
minimize(loss, global_step=global_step)
var_list = tf.trainable_variables()
if global_step is not None:
var_list.append(global_step)
g_list = tf.global_variables() # 从全局变量中获得batch norm的缩放和偏差
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list += bn_moving_vars
ckpt_saver = tf.train.Saver()
train_dataset = dataset.dataset(train_image_root,batch_size,support_image_extensions,
input_height,input_width,channals)
test_dataset = dataset.dataset(test_image_root, batch_size, support_image_extensions,
input_height, input_width, channals)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# if os.path.exists(os.path.join(ckpt_path, "checkpoint")):
# ckpt_saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))
total_iters_num = 0
for epoch_num in range(total_epoch_num):
train_images_num = train_dataset.sample_len
cur_epoch_iters_num = train_images_num // batch_size
for iters_num in range(cur_epoch_iters_num):
train_anchor, train_similar, train_dissimilar,train_labels = \
train_dataset.next_triplet_batch()
test_anchor, test_similar, test_dissimilar,test_labels = \
test_dataset.next_triplet_batch()
if train_anchor is None or test_anchor is None:
continue
train_dict = {anchor_placeholder: train_anchor,
similar_placeholder: train_similar,
dissimilar_placeholder: train_dissimilar,
labels_placeholder:train_labels,
is_training_placeholder:True}
test_dict = {anchor_placeholder: test_anchor,
similar_placeholder: test_similar,
dissimilar_placeholder: test_dissimilar,
labels_placeholder:test_labels,
is_training_placeholder: False}
_,_global_step=sess.run([train_step,global_step], feed_dict=train_dict)
anchor_out,similar_out,dissimilar_out = sess.run([
flatten_out_anchor,flatten_out_similar,flatten_out_dissimilar],
feed_dict=train_dict)
_train_loss,_train_pos_dist,_train_neg_dist = \
sess.run([loss,pos_dist,neg_dist], feed_dict=train_dict)
_test_loss,_test_pos_dist,_test_neg_dist =\
sess.run([loss,pos_dist,neg_dist], feed_dict=test_dict)
print("distance:",list(zip(_train_pos_dist.flatten(),_train_neg_dist.flatten()))[:5])
one_moving_meaning_show = "No mean or variance"
if len(bn_moving_vars) > 0:
one_moving_meaning = sess.graph.get_tensor_by_name(bn_moving_vars[0].name)
one_moving_meaning_show = "{}={}".\
format(bn_moving_vars[0].name,np.mean(one_moving_meaning.eval()))
print(one_moving_meaning_show)
show_text = "epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}".format \
(epoch_num, iters_num + 1, _global_step, _train_loss, "0.99", _test_loss)
print(show_text)
if _global_step % snapshot == 0:
# 保存PB
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["flatten_anchor"])
save_model_name=model_name + "-" + str(_global_step) + ".pb"
with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw:
fw.write(constant_graph.SerializeToString())
# 保存CKPT
ckpt_saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=total_iters_num)
print("Successfully saved model {}".format(save_model_name))
| [
0,
1,
2,
3,
4
] |