index (int64, 0-10k) | blob_id (string, 40 chars) | step-1 (string, 13-984k chars) | step-2 (string, 6-1.23M chars, nullable) | step-3 (string, 15-1.34M chars, nullable) | step-4 (string, 30-1.34M chars, nullable) | step-5 (string, 64-1.2M chars, nullable) | step-ids (sequence, length 1-5) |
---|---|---|---|---|---|---|---|
700 | 360e661d8538a8f40b7546a54e9a9582fa64bd67 | <mask token>
| <mask token>
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent, child)
| <mask token>
def mimic_binary(max_iter=100, fitness_func=None, space=None):
assert fitness_func is not None
assert space is not None
idx = np.random.permutation(np.arange(len(space)))
pool = space[idx[:int(len(space) / 2)]]
new_pool = []
for i in range(max_iter):
print('mimic: {}|{}'.format(i + 1, max_iter))
theta += delta
for j, parent in enumerate(pool):
if j in new_pool or fitness_func(parent) < theta:
continue
best_score = 0
best_child = parent
for k, child in enumerate(pool):
if k <= j or child in new_pool:
continue
score = mutual_info(parent, child)
if score > best_score and fitness_func(child) >= theta:
best_score = score
new_pool.append(parent)
new_pool.append(child)
return None
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent, child)
| import numpy as np
from sklearn.metrics import mutual_info_score
def mimic_binary(max_iter=100, fitness_func=None, space=None):
assert fitness_func is not None
assert space is not None
idx = np.random.permutation(np.arange(len(space)))
pool = space[idx[:int(len(space) / 2)]]
new_pool = []
for i in range(max_iter):
print('mimic: {}|{}'.format(i + 1, max_iter))
theta += delta
for j, parent in enumerate(pool):
if j in new_pool or fitness_func(parent) < theta:
continue
best_score = 0
best_child = parent
for k, child in enumerate(pool):
if k <= j or child in new_pool:
continue
score = mutual_info(parent, child)
if score > best_score and fitness_func(child) >= theta:
best_score = score
new_pool.append(parent)
new_pool.append(child)
return None
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent, child)
| import numpy as np
from sklearn.metrics import mutual_info_score
def mimic_binary(max_iter=100, fitness_func=None, space=None):
assert fitness_func is not None
assert space is not None
idx = np.random.permutation(np.arange(len(space)))
pool = space[idx[:int(len(space)/2)]] # randomly sample 50% of the original space
new_pool = []
for i in range(max_iter):
print("mimic: {}|{}".format(i+1, max_iter))
theta += delta
for j, parent in enumerate(pool):
if j in new_pool or fitness_func(parent)<theta: continue
best_score = 0
best_child = parent
for k, child in enumerate(pool):
if k<=j or child in new_pool: continue
score = mutual_info(parent, child)
if score > best_score and fitness_func(child)>=theta:
best_score = score
new_pool.append(parent)
new_pool.append(child)
return None
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent,child) | [
0,
1,
2,
3,
4
] |
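A note on row 700: in every step column, theta += delta reads variables that are never defined, best_child is assigned but never updated with the winning child, and the function returns None. The sketch below is a hedged reconstruction, not the dataset's code: theta and delta are assumed parameters (a fitness threshold that rises each iteration), and the selected pairs are returned instead of discarded.

import numpy as np
from sklearn.metrics import mutual_info_score

def mutual_info(parent, child):
    # Same helper as in the row: treat each bitstring as a label sequence.
    return mutual_info_score([int(x) for x in parent], [int(x) for x in child])

def mimic_binary_sketch(space, fitness_func, max_iter=10, theta=0.0, delta=0.1):
    # Randomly sample half of the original space, as the row's code does.
    idx = np.random.permutation(len(space))
    pool = [space[i] for i in idx[:len(space) // 2]]
    pairs = []
    for _ in range(max_iter):
        theta += delta  # assumed: tighten the fitness threshold each round
        for j, parent in enumerate(pool):
            if fitness_func(parent) < theta:
                continue
            for child in pool[j + 1:]:
                if fitness_func(child) >= theta:
                    pairs.append((parent, child, mutual_info(parent, child)))
    return pairs

# Toy run: fitness is the fraction of ones in the bitstring.
space = ['0000', '0011', '0111', '1111', '1010', '1100']
print(mimic_binary_sketch(space, lambda s: s.count('1') / len(s), max_iter=3))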
701 | f489058c922d405754ad32a737f67bc03c08772b | <mask token>
def blackbox(name, backend, targets, params, target='target', path='/probe',
labels=None):
labels = {} if labels is None else labels
banned_oses = ['debian']
filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]
return {'job_name': name, 'metrics_path': path, 'params': params,
'static_configs': [{'targets': sorted(filtered_targets), 'labels':
labels}], 'relabel_configs': [{'source_labels': ['__address__'],
'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,
'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],
'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},
{'source_labels': [], 'regex': '.*', 'target_label': '__address__',
'replacement': backend}]}
def generate_backend(host, local_services):
scrape_configs = []
scrape_configs.extend(local_services)
domain = lib.get_domain(host)
basic_auth = lib.read_secret('services/monitoring:login')
manifest = yaml.load(file(MANIFEST_PATH).read())
for package, spec in manifest['packages'].iteritems():
if spec is None or 'monitor' not in spec:
continue
urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],
dict) else {None: spec['monitor']['url']}
for url_id, url_str in urls.iteritems():
url = urlparse.urlparse(url_str)
targets = []
for target in sorted(lib.get_nodes_with_package(package, domain
).keys()):
targets.append(target if url.port is None else '%s:%d' % (
target, url.port))
scrape_config = {'job_name': package + ('-%s' % url_id if
url_id else ''), 'metrics_path': url.path, 'scheme': url.
scheme, 'static_configs': [{'targets': sorted(targets)}]}
if 'interval' in spec['monitor']:
scrape_config['scrape_interval'] = spec['monitor']['interval']
if 'labels' in spec['monitor']:
scrape_config['static_configs'][0]['labels'] = spec['monitor'][
'labels']
if spec['monitor'].get('auth', False) and url.scheme == 'https':
scrape_config['basic_auth'] = basic_auth
scrape_configs.append(scrape_config)
layers = lib.get_layers(domain)
snmp_nodes = {}
ssh_nodes = {}
for layer in layers:
hosts = lib.get_nodes_with_layer(layer, domain)
snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')
ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')
snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))
ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]
for layer in layers:
if layer == 'access':
snmp_host = 'snmp2.event.dreamhack.se'
else:
snmp_host = 'snmp1.event.dreamhack.se'
snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {
'layer': [layer]}, labels={'layer': layer})
snmp['scrape_interval'] = '30s'
snmp['scrape_timeout'] = '30s'
scrape_configs.append(snmp)
for layer in layers:
for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:
fqdn = ssh_host + '.event.dreamhack.se:9115'
ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes
[layer], {'module': ['ssh_banner']}, labels={'layer': layer})
ssh['scrape_interval'] = '30s'
ssh['scrape_timeout'] = '30s'
scrape_configs.append(ssh)
external = {'job_name': 'external', 'file_sd_configs': [{'files': [
'/etc/prometheus/external/*.yaml']}]}
scrape_configs.append(external)
if host.endswith('.event.dreamhack.se'):
puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s',
'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}
scrape_configs.append(puppet)
vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':
[{'targets': ['provision.event.dreamhack.se:9272']}]}
scrape_configs.append(vcenter)
relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':
'${1}', 'source_labels': ['host', 'instance'], 'target_label': 'host'}
mrc = 'metric_relabel_configs'
for scrape in scrape_configs:
if mrc in scrape:
scrape[mrc].append(relabel)
else:
scrape[mrc] = [relabel]
return {'scrape_configs': scrape_configs}
<mask token>
def generate(host, *args):
info = {}
local_targets = []
local_targets.append({'job_name': 'prometheus', 'scheme': 'http',
'static_configs': [{'targets': ['localhost:9090']}]})
info['prometheus'] = generate_backend(host, local_targets)
info['prometheus']['current_event'] = lib.get_current_event()
return info
| <mask token>
def blackbox(name, backend, targets, params, target='target', path='/probe',
labels=None):
labels = {} if labels is None else labels
banned_oses = ['debian']
filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]
return {'job_name': name, 'metrics_path': path, 'params': params,
'static_configs': [{'targets': sorted(filtered_targets), 'labels':
labels}], 'relabel_configs': [{'source_labels': ['__address__'],
'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,
'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],
'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},
{'source_labels': [], 'regex': '.*', 'target_label': '__address__',
'replacement': backend}]}
def generate_backend(host, local_services):
scrape_configs = []
scrape_configs.extend(local_services)
domain = lib.get_domain(host)
basic_auth = lib.read_secret('services/monitoring:login')
manifest = yaml.load(file(MANIFEST_PATH).read())
for package, spec in manifest['packages'].iteritems():
if spec is None or 'monitor' not in spec:
continue
urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],
dict) else {None: spec['monitor']['url']}
for url_id, url_str in urls.iteritems():
url = urlparse.urlparse(url_str)
targets = []
for target in sorted(lib.get_nodes_with_package(package, domain
).keys()):
targets.append(target if url.port is None else '%s:%d' % (
target, url.port))
scrape_config = {'job_name': package + ('-%s' % url_id if
url_id else ''), 'metrics_path': url.path, 'scheme': url.
scheme, 'static_configs': [{'targets': sorted(targets)}]}
if 'interval' in spec['monitor']:
scrape_config['scrape_interval'] = spec['monitor']['interval']
if 'labels' in spec['monitor']:
scrape_config['static_configs'][0]['labels'] = spec['monitor'][
'labels']
if spec['monitor'].get('auth', False) and url.scheme == 'https':
scrape_config['basic_auth'] = basic_auth
scrape_configs.append(scrape_config)
layers = lib.get_layers(domain)
snmp_nodes = {}
ssh_nodes = {}
for layer in layers:
hosts = lib.get_nodes_with_layer(layer, domain)
snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')
ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')
snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))
ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]
for layer in layers:
if layer == 'access':
snmp_host = 'snmp2.event.dreamhack.se'
else:
snmp_host = 'snmp1.event.dreamhack.se'
snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {
'layer': [layer]}, labels={'layer': layer})
snmp['scrape_interval'] = '30s'
snmp['scrape_timeout'] = '30s'
scrape_configs.append(snmp)
for layer in layers:
for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:
fqdn = ssh_host + '.event.dreamhack.se:9115'
ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes
[layer], {'module': ['ssh_banner']}, labels={'layer': layer})
ssh['scrape_interval'] = '30s'
ssh['scrape_timeout'] = '30s'
scrape_configs.append(ssh)
external = {'job_name': 'external', 'file_sd_configs': [{'files': [
'/etc/prometheus/external/*.yaml']}]}
scrape_configs.append(external)
if host.endswith('.event.dreamhack.se'):
puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s',
'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}
scrape_configs.append(puppet)
vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':
[{'targets': ['provision.event.dreamhack.se:9272']}]}
scrape_configs.append(vcenter)
relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':
'${1}', 'source_labels': ['host', 'instance'], 'target_label': 'host'}
mrc = 'metric_relabel_configs'
for scrape in scrape_configs:
if mrc in scrape:
scrape[mrc].append(relabel)
else:
scrape[mrc] = [relabel]
return {'scrape_configs': scrape_configs}
def requires(host, *args):
return ['apache(ldap)']
def generate(host, *args):
info = {}
local_targets = []
local_targets.append({'job_name': 'prometheus', 'scheme': 'http',
'static_configs': [{'targets': ['localhost:9090']}]})
info['prometheus'] = generate_backend(host, local_targets)
info['prometheus']['current_event'] = lib.get_current_event()
return info
| <mask token>
MANIFEST_PATH = '/etc/manifest'
HTTP_BASIC_AUTH = None
def blackbox(name, backend, targets, params, target='target', path='/probe',
labels=None):
labels = {} if labels is None else labels
banned_oses = ['debian']
filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]
return {'job_name': name, 'metrics_path': path, 'params': params,
'static_configs': [{'targets': sorted(filtered_targets), 'labels':
labels}], 'relabel_configs': [{'source_labels': ['__address__'],
'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,
'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],
'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},
{'source_labels': [], 'regex': '.*', 'target_label': '__address__',
'replacement': backend}]}
def generate_backend(host, local_services):
scrape_configs = []
scrape_configs.extend(local_services)
domain = lib.get_domain(host)
basic_auth = lib.read_secret('services/monitoring:login')
manifest = yaml.load(file(MANIFEST_PATH).read())
for package, spec in manifest['packages'].iteritems():
if spec is None or 'monitor' not in spec:
continue
urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],
dict) else {None: spec['monitor']['url']}
for url_id, url_str in urls.iteritems():
url = urlparse.urlparse(url_str)
targets = []
for target in sorted(lib.get_nodes_with_package(package, domain
).keys()):
targets.append(target if url.port is None else '%s:%d' % (
target, url.port))
scrape_config = {'job_name': package + ('-%s' % url_id if
url_id else ''), 'metrics_path': url.path, 'scheme': url.
scheme, 'static_configs': [{'targets': sorted(targets)}]}
if 'interval' in spec['monitor']:
scrape_config['scrape_interval'] = spec['monitor']['interval']
if 'labels' in spec['monitor']:
scrape_config['static_configs'][0]['labels'] = spec['monitor'][
'labels']
if spec['monitor'].get('auth', False) and url.scheme == 'https':
scrape_config['basic_auth'] = basic_auth
scrape_configs.append(scrape_config)
layers = lib.get_layers(domain)
snmp_nodes = {}
ssh_nodes = {}
for layer in layers:
hosts = lib.get_nodes_with_layer(layer, domain)
snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')
ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')
snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))
ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]
for layer in layers:
if layer == 'access':
snmp_host = 'snmp2.event.dreamhack.se'
else:
snmp_host = 'snmp1.event.dreamhack.se'
snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {
'layer': [layer]}, labels={'layer': layer})
snmp['scrape_interval'] = '30s'
snmp['scrape_timeout'] = '30s'
scrape_configs.append(snmp)
for layer in layers:
for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:
fqdn = ssh_host + '.event.dreamhack.se:9115'
ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes
[layer], {'module': ['ssh_banner']}, labels={'layer': layer})
ssh['scrape_interval'] = '30s'
ssh['scrape_timeout'] = '30s'
scrape_configs.append(ssh)
external = {'job_name': 'external', 'file_sd_configs': [{'files': [
'/etc/prometheus/external/*.yaml']}]}
scrape_configs.append(external)
if host.endswith('.event.dreamhack.se'):
puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s',
'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}
scrape_configs.append(puppet)
vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':
[{'targets': ['provision.event.dreamhack.se:9272']}]}
scrape_configs.append(vcenter)
relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':
'${1}', 'source_labels': ['host', 'instance'], 'target_label': 'host'}
mrc = 'metric_relabel_configs'
for scrape in scrape_configs:
if mrc in scrape:
scrape[mrc].append(relabel)
else:
scrape[mrc] = [relabel]
return {'scrape_configs': scrape_configs}
def requires(host, *args):
return ['apache(ldap)']
def generate(host, *args):
info = {}
local_targets = []
local_targets.append({'job_name': 'prometheus', 'scheme': 'http',
'static_configs': [{'targets': ['localhost:9090']}]})
info['prometheus'] = generate_backend(host, local_targets)
info['prometheus']['current_event'] = lib.get_current_event()
return info
| import lib
import urlparse
import yaml
MANIFEST_PATH = '/etc/manifest'
HTTP_BASIC_AUTH = None
def blackbox(name, backend, targets, params, target='target', path='/probe',
labels=None):
labels = {} if labels is None else labels
banned_oses = ['debian']
filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]
return {'job_name': name, 'metrics_path': path, 'params': params,
'static_configs': [{'targets': sorted(filtered_targets), 'labels':
labels}], 'relabel_configs': [{'source_labels': ['__address__'],
'regex': '(.*)(:80)?', 'target_label': '__param_%s' % target,
'replacement': '${1}'}, {'source_labels': ['__param_%s' % target],
'regex': '(.*)', 'target_label': 'instance', 'replacement': '${1}'},
{'source_labels': [], 'regex': '.*', 'target_label': '__address__',
'replacement': backend}]}
def generate_backend(host, local_services):
scrape_configs = []
scrape_configs.extend(local_services)
domain = lib.get_domain(host)
basic_auth = lib.read_secret('services/monitoring:login')
manifest = yaml.load(file(MANIFEST_PATH).read())
for package, spec in manifest['packages'].iteritems():
if spec is None or 'monitor' not in spec:
continue
urls = spec['monitor']['url'] if isinstance(spec['monitor']['url'],
dict) else {None: spec['monitor']['url']}
for url_id, url_str in urls.iteritems():
url = urlparse.urlparse(url_str)
targets = []
for target in sorted(lib.get_nodes_with_package(package, domain
).keys()):
targets.append(target if url.port is None else '%s:%d' % (
target, url.port))
scrape_config = {'job_name': package + ('-%s' % url_id if
url_id else ''), 'metrics_path': url.path, 'scheme': url.
scheme, 'static_configs': [{'targets': sorted(targets)}]}
if 'interval' in spec['monitor']:
scrape_config['scrape_interval'] = spec['monitor']['interval']
if 'labels' in spec['monitor']:
scrape_config['static_configs'][0]['labels'] = spec['monitor'][
'labels']
if spec['monitor'].get('auth', False) and url.scheme == 'https':
scrape_config['basic_auth'] = basic_auth
scrape_configs.append(scrape_config)
layers = lib.get_layers(domain)
snmp_nodes = {}
ssh_nodes = {}
for layer in layers:
hosts = lib.get_nodes_with_layer(layer, domain)
snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')
ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')
snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))
ssh_nodes[layer] = [(x + ':22') for x in set(hosts) - set(ssh_mute)]
for layer in layers:
if layer == 'access':
snmp_host = 'snmp2.event.dreamhack.se'
else:
snmp_host = 'snmp1.event.dreamhack.se'
snmp = blackbox('snmp_%s' % layer, snmp_host, snmp_nodes[layer], {
'layer': [layer]}, labels={'layer': layer})
snmp['scrape_interval'] = '30s'
snmp['scrape_timeout'] = '30s'
scrape_configs.append(snmp)
for layer in layers:
for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:
fqdn = ssh_host + '.event.dreamhack.se:9115'
ssh = blackbox('ssh_%s_%s' % (layer, ssh_host), fqdn, ssh_nodes
[layer], {'module': ['ssh_banner']}, labels={'layer': layer})
ssh['scrape_interval'] = '30s'
ssh['scrape_timeout'] = '30s'
scrape_configs.append(ssh)
external = {'job_name': 'external', 'file_sd_configs': [{'files': [
'/etc/prometheus/external/*.yaml']}]}
scrape_configs.append(external)
if host.endswith('.event.dreamhack.se'):
puppet = {'job_name': 'puppet_runs', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s',
'static_configs': [{'targets': ['puppet.tech.dreamhack.se:9100']}]}
scrape_configs.append(puppet)
vcenter = {'job_name': 'vmware_vcenter', 'metrics_path': '/metrics',
'scrape_interval': '60s', 'scrape_timeout': '55s', 'static_configs':
[{'targets': ['provision.event.dreamhack.se:9272']}]}
scrape_configs.append(vcenter)
relabel = {'regex': ':?([^:]*):?.*', 'separator': ':', 'replacement':
'${1}', 'source_labels': ['host', 'instance'], 'target_label': 'host'}
mrc = 'metric_relabel_configs'
for scrape in scrape_configs:
if mrc in scrape:
scrape[mrc].append(relabel)
else:
scrape[mrc] = [relabel]
return {'scrape_configs': scrape_configs}
def requires(host, *args):
return ['apache(ldap)']
def generate(host, *args):
info = {}
local_targets = []
local_targets.append({'job_name': 'prometheus', 'scheme': 'http',
'static_configs': [{'targets': ['localhost:9090']}]})
info['prometheus'] = generate_backend(host, local_targets)
info['prometheus']['current_event'] = lib.get_current_event()
return info
| # Copyright 2018 dhtech
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file
import lib
import urlparse
import yaml
MANIFEST_PATH = '/etc/manifest'
HTTP_BASIC_AUTH = None
def blackbox(name, backend, targets, params,
target='target', path='/probe', labels=None):
labels = {} if labels is None else labels
# Strip banned OSes
banned_oses = ['debian']
filtered_targets = [x for x in targets if lib.get_os(x) not in banned_oses]
return {
'job_name': name,
'metrics_path': path,
'params': params,
'static_configs': [{
'targets': sorted(filtered_targets),
'labels': labels
}],
'relabel_configs': [{
'source_labels': ['__address__'],
'regex': '(.*)(:80)?',
'target_label': '__param_%s' % target,
'replacement': '${1}',
}, {
'source_labels': ['__param_%s' % target],
'regex': '(.*)',
'target_label': 'instance',
'replacement': '${1}',
}, {
'source_labels': [],
'regex': '.*',
'target_label': '__address__',
'replacement': backend,
}]
}
def generate_backend(host, local_services):
scrape_configs = []
scrape_configs.extend(local_services)
domain = lib.get_domain(host)
basic_auth = lib.read_secret('services/monitoring:login')
# Find services that wants to be monitored
manifest = yaml.load(file(MANIFEST_PATH).read())
for package, spec in manifest['packages'].iteritems():
if spec is None or 'monitor' not in spec:
continue
urls = (spec['monitor']['url']
if isinstance(spec['monitor']['url'], dict) else
{None: spec['monitor']['url']})
for url_id, url_str in urls.iteritems():
url = urlparse.urlparse(url_str)
targets = []
for target in sorted(
lib.get_nodes_with_package(package, domain).keys()):
targets.append(target if url.port is None else '%s:%d' % (
target, url.port))
scrape_config = {
'job_name': package + ('-%s' % url_id if url_id else ''),
'metrics_path': url.path,
'scheme': url.scheme,
'static_configs': [
{'targets': sorted(targets)}
],
}
if 'interval' in spec['monitor']:
scrape_config['scrape_interval'] = spec['monitor']['interval']
if 'labels' in spec['monitor']:
scrape_config['static_configs'][0]['labels'] = spec['monitor']['labels']
# Only allow authentication over https
if spec['monitor'].get('auth', False) and url.scheme == 'https':
scrape_config['basic_auth'] = basic_auth
scrape_configs.append(scrape_config)
# Layer specific monitoring
layers = lib.get_layers(domain)
snmp_nodes = {}
ssh_nodes = {}
for layer in layers:
hosts = lib.get_nodes_with_layer(layer, domain)
snmp_mute = lib.get_nodes_with_layer(layer, domain, 'no-snmp')
ssh_mute = lib.get_nodes_with_layer(layer, domain, 'no-ssh')
snmp_nodes[layer] = list(set(hosts) - set(snmp_mute))
ssh_nodes[layer] = [x+':22' for x in set(hosts) - set(ssh_mute)]
# SNMP
for layer in layers:
# TODO(bluecmd): Use options for this
if layer == 'access':
snmp_host = 'snmp2.event.dreamhack.se'
else:
snmp_host = 'snmp1.event.dreamhack.se'
snmp = blackbox(
'snmp_%s' % layer, snmp_host,
snmp_nodes[layer], {'layer': [layer]}, labels={
'layer': layer})
snmp['scrape_interval'] = '30s'
snmp['scrape_timeout'] = '30s'
scrape_configs.append(snmp)
# SSH
for layer in layers:
for ssh_host in ['jumpgate1', 'jumpgate2', 'rancid']:
fqdn = ssh_host + '.event.dreamhack.se:9115'
ssh = blackbox(
'ssh_%s_%s' % (layer, ssh_host), fqdn,
ssh_nodes[layer], {'module': ['ssh_banner']}, labels={'layer': layer})
ssh['scrape_interval'] = '30s'
ssh['scrape_timeout'] = '30s'
scrape_configs.append(ssh)
# Add external service-discovery
external = {
'job_name': 'external',
'file_sd_configs': [{
'files': ['/etc/prometheus/external/*.yaml'],
}],
}
scrape_configs.append(external)
if host.endswith('.event.dreamhack.se'):
# Event should scrape puppet.tech.dreamhack.se to get information about
# puppet runs
puppet = {
'job_name': 'puppet_runs',
'metrics_path': '/metrics',
'scrape_interval': '60s',
'scrape_timeout': '55s',
'static_configs': [{
'targets': ['puppet.tech.dreamhack.se:9100'],
}],
}
scrape_configs.append(puppet)
vcenter = {
'job_name': 'vmware_vcenter',
'metrics_path': '/metrics',
'scrape_interval': '60s',
'scrape_timeout': '55s',
'static_configs': [{
'targets': ['provision.event.dreamhack.se:9272'],
}],
}
scrape_configs.append(vcenter)
# Make sure that all metrics have a host label.
# This rule uses the existing host label if there is one,
# stripping off the port (which shouldn't be part of the host label anyway)
# *or* if that label does not exist it uses the instance label
# (again stripping off the port)
relabel = {
'regex': r':?([^:]*):?.*',
'separator': ':',
'replacement': '${1}',
'source_labels': ['host', 'instance'],
'target_label': 'host',
}
mrc = 'metric_relabel_configs'
for scrape in scrape_configs:
if mrc in scrape:
scrape[mrc].append(relabel)
else:
scrape[mrc] = [relabel]
return {'scrape_configs': scrape_configs}
def requires(host, *args):
return ['apache(ldap)']
def generate(host, *args):
info = {}
local_targets = []
local_targets.append({
'job_name': 'prometheus',
'scheme': 'http',
'static_configs': [{'targets': ['localhost:9090']}]})
info['prometheus'] = generate_backend(host, local_targets)
# Get current event
info['prometheus']['current_event'] = lib.get_current_event()
return info
# vim: ts=4: sts=4: sw=4: expandtab
| [
3,
4,
5,
6,
7
] |
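A note on row 701: the final metric_relabel rule joins the 'host' and 'instance' labels with ':' and keeps the first colon-free token, so an existing host label wins and an instance such as 'node1:9100' is stripped to 'node1'. A minimal check of that regex (hypothetical label values, not taken from the config):

import re

# The separator ':' joins source_labels ['host', 'instance'] before matching.
pattern = re.compile(r':?([^:]*):?.*')
for joined in ['myhost:node1.example:9100', ':node1.example:9100', 'plain']:
    print(joined, '->', pattern.match(joined).group(1))
# -> myhost, node1.example, plain: an empty host falls through to the instance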
702 | 5299f2c66fd287be667ecbe11b8470263eafab5c | <mask token>
class ConsoleLogger:
<mask token>
def set_level(self, level):
self.logger.setLevel(level)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
| <mask token>
class ConsoleLogger:
<mask token>
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
<mask token>
<mask token>
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
| <mask token>
class ConsoleLogger:
<mask token>
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
<mask token>
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
| <mask token>
class ConsoleLogger:
<mask token>
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
def error(self, message):
self.logger.error(message)
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
| import logging
class ConsoleLogger:
handlers = [
(logging.StreamHandler,
dict(),
"[%(name)s]\t %(asctime)s [%(levelname)s] %(message)s ",
logging.DEBUG)
]
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
def error(self, message):
self.logger.error(message)
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
| [
3,
6,
7,
8,
11
] |
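A usage sketch for row 702's ConsoleLogger (assumes the class as defined above; since the handler level is pinned to default_level, set_level only raises the filter at the logger itself):

import logging

log = ConsoleLogger(name='demo')   # attaches one StreamHandler at DEBUG
log.info('service started')        # printed
log.set_level(logging.WARNING)     # raise the logger threshold
log.debug('now filtered out')      # suppressed by the logger level
log.warning('low disk space')      # printed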
703 | e73e40a63b67ee1a6cca53a328af05e3eb3d8519 | <mask token>
def test_negativity():
assert make_it_negative(8) == -8
assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'
<mask token>
| <mask token>
def test_negativity():
assert make_it_negative(8) == -8
assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'
<mask token>
def test_cleverness():
assert make_it_negative(-3) == 3
| <mask token>
def test_negativity():
assert make_it_negative(8) == -8
assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'
def test_easy():
assert 1 == 1
def test_cleverness():
assert make_it_negative(-3) == 3
| import pytest
from debbiedowner import make_it_negative, complain_about
def test_negativity():
assert make_it_negative(8) == -8
assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'
def test_easy():
assert 1 == 1
def test_cleverness():
assert make_it_negative(-3) == 3
| import pytest
from debbiedowner import make_it_negative, complain_about
def test_negativity():
assert make_it_negative(8) == -8
assert complain_about('enthusiasm') == "I hate enthusiasm. Totally boring."
def test_easy():
assert 1 == 1
def test_cleverness():
assert make_it_negative(-3) == 3 | [
1,
2,
3,
4,
5
] |
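Row 703's tests import make_it_negative and complain_about from a debbiedowner module that is not part of the row; a hypothetical implementation that would make all three tests pass:

def make_it_negative(n):
    # Negation also maps -3 to 3, which is what test_cleverness expects.
    return -n

def complain_about(topic):
    return 'I hate %s. Totally boring.' % topic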
704 | 01b615f8282d4d42c5e83181fffc2d7cb612c096 | import sys
def saludar(saludo):
print saludo
def iniciales(nombre,ape1,ape2):
iniciales=nombre[0]+'.'+ape1[0]+'.'+ape2[0]+'.'
return "Tus iniciales son:"+iniciales.upper()
def iniciales1(nombre,ape1,*apellidos):
iniciales=nombre[0]+'.'+ape1[0]
for ape in apellidos:
iniciales=iniciales+'.'+ape[0]
return iniciales.upper()
| null | null | null | null | [
0
] |
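Row 704 is Python 2 (print statement syntax); a Python 3 port sketch of the same helpers:

def saludar(saludo):
    print(saludo)

def iniciales1(nombre, ape1, *apellidos):
    # First letter of the given name plus each surname, dot-separated.
    iniciales = nombre[0] + '.' + ape1[0]
    for ape in apellidos:
        iniciales = iniciales + '.' + ape[0]
    return iniciales.upper()

print(iniciales1('ada', 'lovelace', 'byron'))  # -> A.L.B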
705 | eeb87891d1a02484a61537745ec6f13387017929 | <mask token>
def logined(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
return render(request, 'login.html')
else:
return func(request, *args, **kwargs)
return wrapper
def api_check(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
res = dict(state_code=-3, error_msg='登陆过期')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
else:
return func(request, *args, **kwargs)
return wrapper
def login(request):
if request.method == 'GET':
if request.session.get('user'):
return render(request, 'index.html')
return render(request, 'login.html')
else:
req = json.loads(request.body)
user = req.get('username')
pwd = req.get('pwd')
obj_user = models.Users.objects.filter(user_name=user).all()
if not obj_user:
res = dict(state_code=1, error_msg='用户不存在')
else:
password = obj_user.first().password
if str(pwd) != str(password):
res = dict(state_code=2, error_msg='密码错误')
else:
request.session['user'] = user
request.session.set_expiry(60 * 60 * 4)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
def logout(request):
if request.session.get('user'):
del request.session['user']
return render(request, 'login.html')
@logined
def index(request):
return render(request, 'index.html')
@api_check
def get_dir_list(request):
user = request.session.get('user')
obj_dir = models.Dirs.objects.filter(user_name=user).all()
dir_list = []
for dirs in obj_dir:
user_dir = dirs.dir
dir_list.append(user_dir)
res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
<mask token>
@api_check
def upload_file(request):
dir_name = request.POST.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(state_code=1, error_msg='目录不存在')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
File = request.FILES.get('file', None)
if File is None:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
file_name = File.name
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'wb+') as f:
for chunk in File.chunks():
f.write(chunk)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res), content_type='application/json')
@api_check
def query_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
dir_path = os.path.join(files_folder, dir_name)
cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()
file_list = cmd_info.split('\n')[1:-1]
file_list_data = []
for file_info_cmd in file_list:
file_info_list = file_info_cmd.split(' ')
file_info = list(filter(None, file_info_list))
file = file_info[-1]
file_size = file_info[4]
name_type = file.rsplit('.', 1)
if len(name_type) < 2:
name_type.append('未知')
file_name, file_type = name_type
file_list_data.append({'file_name': file_name, 'file_type':
file_type, 'file_size': file_size})
res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def del_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder, dir_name), file)
if not os.path.exists(file_path):
res = dict(state_code=1, error_msg='文件不存在')
else:
os.remove(file_path)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
<mask token>
| <mask token>
def logined(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
return render(request, 'login.html')
else:
return func(request, *args, **kwargs)
return wrapper
def api_check(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
res = dict(state_code=-3, error_msg='登陆过期')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
else:
return func(request, *args, **kwargs)
return wrapper
def login(request):
if request.method == 'GET':
if request.session.get('user'):
return render(request, 'index.html')
return render(request, 'login.html')
else:
req = json.loads(request.body)
user = req.get('username')
pwd = req.get('pwd')
obj_user = models.Users.objects.filter(user_name=user).all()
if not obj_user:
res = dict(state_code=1, error_msg='用户不存在')
else:
password = obj_user.first().password
if str(pwd) != str(password):
res = dict(state_code=2, error_msg='密码错误')
else:
request.session['user'] = user
request.session.set_expiry(60 * 60 * 4)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
def logout(request):
if request.session.get('user'):
del request.session['user']
return render(request, 'login.html')
@logined
def index(request):
return render(request, 'index.html')
@api_check
def get_dir_list(request):
user = request.session.get('user')
obj_dir = models.Dirs.objects.filter(user_name=user).all()
dir_list = []
for dirs in obj_dir:
user_dir = dirs.dir
dir_list.append(user_dir)
res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
<mask token>
@api_check
def upload_file(request):
dir_name = request.POST.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(state_code=1, error_msg='目录不存在')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
File = request.FILES.get('file', None)
if File is None:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
file_name = File.name
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'wb+') as f:
for chunk in File.chunks():
f.write(chunk)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res), content_type='application/json')
@api_check
def query_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
dir_path = os.path.join(files_folder, dir_name)
cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()
file_list = cmd_info.split('\n')[1:-1]
file_list_data = []
for file_info_cmd in file_list:
file_info_list = file_info_cmd.split(' ')
file_info = list(filter(None, file_info_list))
file = file_info[-1]
file_size = file_info[4]
name_type = file.rsplit('.', 1)
if len(name_type) < 2:
name_type.append('未知')
file_name, file_type = name_type
file_list_data.append({'file_name': file_name, 'file_type':
file_type, 'file_size': file_size})
res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def del_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder, dir_name), file)
if not os.path.exists(file_path):
res = dict(state_code=1, error_msg='文件不存在')
else:
os.remove(file_path)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def download_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder, dir_name), file)
if not os.path.exists(file_path):
res = dict(state_code=1, error_msg='文件不存在')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
from django.http import StreamingHttpResponse
file_size = os.path.getsize(file_path)
def file_iterator(file_name, chunk_size=512):
with open(file_name, 'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
the_file_name = file_path
res = file_iterator(the_file_name)
response = StreamingHttpResponse(res)
response['Content-Type'] = 'application/octet-stream; charset=UTF-8'
response['Content-Length'] = file_size
response['Content-Disposition'] = 'attachment;filename="{}"'.format(
escape_uri_path(file))
return response
| <mask token>
def logined(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
return render(request, 'login.html')
else:
return func(request, *args, **kwargs)
return wrapper
def api_check(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
res = dict(state_code=-3, error_msg='登陆过期')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
else:
return func(request, *args, **kwargs)
return wrapper
def login(request):
if request.method == 'GET':
if request.session.get('user'):
return render(request, 'index.html')
return render(request, 'login.html')
else:
req = json.loads(request.body)
user = req.get('username')
pwd = req.get('pwd')
obj_user = models.Users.objects.filter(user_name=user).all()
if not obj_user:
res = dict(state_code=1, error_msg='用户不存在')
else:
password = obj_user.first().password
if str(pwd) != str(password):
res = dict(state_code=2, error_msg='密码错误')
else:
request.session['user'] = user
request.session.set_expiry(60 * 60 * 4)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
def logout(request):
if request.session.get('user'):
del request.session['user']
return render(request, 'login.html')
@logined
def index(request):
return render(request, 'index.html')
@api_check
def get_dir_list(request):
user = request.session.get('user')
obj_dir = models.Dirs.objects.filter(user_name=user).all()
dir_list = []
for dirs in obj_dir:
user_dir = dirs.dir
dir_list.append(user_dir)
res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def user_mkdir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if os.path.exists(dir_path):
res = dict(state_code=1, error_msg='该目录已被使用')
else:
user = request.session.get('user')
if user:
models.Dirs.objects.create(user_name=user, dir=dir_name)
os.mkdir(dir_path)
res = dict(state_code=0, error_msg='ok')
else:
res = dict(state_code=-3, error_msg='登陆过期')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def del_dir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(state_code=1, error_msg='目录不存在')
else:
with transaction.atomic():
obj_dir = models.Dirs.objects.filter(dir=dir_name).all()
if obj_dir:
obj_dir.delete()
shutil.rmtree(dir_path)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def upload_file(request):
dir_name = request.POST.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(state_code=1, error_msg='目录不存在')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
File = request.FILES.get('file', None)
if File is None:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
file_name = File.name
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'wb+') as f:
for chunk in File.chunks():
f.write(chunk)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res), content_type='application/json')
@api_check
def query_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
dir_path = os.path.join(files_folder, dir_name)
cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()
file_list = cmd_info.split('\n')[1:-1]
file_list_data = []
for file_info_cmd in file_list:
file_info_list = file_info_cmd.split(' ')
file_info = list(filter(None, file_info_list))
file = file_info[-1]
file_size = file_info[4]
name_type = file.rsplit('.', 1)
if len(name_type) < 2:
name_type.append('未知')
file_name, file_type = name_type
file_list_data.append({'file_name': file_name, 'file_type':
file_type, 'file_size': file_size})
res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def del_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder, dir_name), file)
if not os.path.exists(file_path):
res = dict(state_code=1, error_msg='文件不存在')
else:
os.remove(file_path)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def download_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder, dir_name), file)
if not os.path.exists(file_path):
res = dict(state_code=1, error_msg='文件不存在')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
from django.http import StreamingHttpResponse
file_size = os.path.getsize(file_path)
def file_iterator(file_name, chunk_size=512):
with open(file_name, 'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
the_file_name = file_path
res = file_iterator(the_file_name)
response = StreamingHttpResponse(res)
response['Content-Type'] = 'application/octet-stream; charset=UTF-8'
response['Content-Length'] = file_size
response['Content-Disposition'] = 'attachment;filename="{}"'.format(
escape_uri_path(file))
return response
| import os
import shutil
import json
from django.shortcuts import render, HttpResponse
from django.utils.encoding import escape_uri_path
from django.db import transaction
from web_pan.settings import files_folder
from disk import models
def logined(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
return render(request, 'login.html')
else:
return func(request, *args, **kwargs)
return wrapper
def api_check(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
res = dict(state_code=-3, error_msg='登陆过期')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
else:
return func(request, *args, **kwargs)
return wrapper
def login(request):
if request.method == 'GET':
if request.session.get('user'):
return render(request, 'index.html')
return render(request, 'login.html')
else:
req = json.loads(request.body)
user = req.get('username')
pwd = req.get('pwd')
obj_user = models.Users.objects.filter(user_name=user).all()
if not obj_user:
res = dict(state_code=1, error_msg='用户不存在')
else:
password = obj_user.first().password
if str(pwd) != str(password):
res = dict(state_code=2, error_msg='密码错误')
else:
request.session['user'] = user
request.session.set_expiry(60 * 60 * 4)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
def logout(request):
if request.session.get('user'):
del request.session['user']
return render(request, 'login.html')
@logined
def index(request):
return render(request, 'index.html')
@api_check
def get_dir_list(request):
user = request.session.get('user')
obj_dir = models.Dirs.objects.filter(user_name=user).all()
dir_list = []
for dirs in obj_dir:
user_dir = dirs.dir
dir_list.append(user_dir)
res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def user_mkdir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if os.path.exists(dir_path):
res = dict(state_code=1, error_msg='该目录已被使用')
else:
user = request.session.get('user')
if user:
models.Dirs.objects.create(user_name=user, dir=dir_name)
os.mkdir(dir_path)
res = dict(state_code=0, error_msg='ok')
else:
res = dict(state_code=-3, error_msg='登陆过期')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def del_dir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(state_code=1, error_msg='目录不存在')
else:
with transaction.atomic():
obj_dir = models.Dirs.objects.filter(dir=dir_name).all()
if obj_dir:
obj_dir.delete()
shutil.rmtree(dir_path)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def upload_file(request):
dir_name = request.POST.get('dir_name')
if not dir_name:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(state_code=1, error_msg='目录不存在')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
File = request.FILES.get('file', None)
if File is None:
res = dict(state_code=-2, error_msg='参数错误')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
file_name = File.name
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'wb+') as f:
for chunk in File.chunks():
f.write(chunk)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res), content_type='application/json')
@api_check
def query_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
dir_path = os.path.join(files_folder, dir_name)
cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()
file_list = cmd_info.split('\n')[1:-1]
file_list_data = []
for file_info_cmd in file_list:
file_info_list = file_info_cmd.split(' ')
file_info = list(filter(None, file_info_list))
file = file_info[-1]
file_size = file_info[4]
name_type = file.rsplit('.', 1)
if len(name_type) < 2:
name_type.append('未知')
file_name, file_type = name_type
file_list_data.append({'file_name': file_name, 'file_type':
file_type, 'file_size': file_size})
res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def del_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder, dir_name), file)
if not os.path.exists(file_path):
res = dict(state_code=1, error_msg='文件不存在')
else:
os.remove(file_path)
res = dict(state_code=0, error_msg='ok')
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=
'application/json')
@api_check
def download_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder, dir_name), file)
if not os.path.exists(file_path):
res = dict(state_code=1, error_msg='文件不存在')
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type='application/json')
from django.http import StreamingHttpResponse
file_size = os.path.getsize(file_path)
def file_iterator(file_name, chunk_size=512):
with open(file_name, 'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
the_file_name = file_path
res = file_iterator(the_file_name)
response = StreamingHttpResponse(res)
response['Content-Type'] = 'application/octet-stream; charset=UTF-8'
response['Content-Length'] = file_size
response['Content-Disposition'] = 'attachment;filename="{}"'.format(
escape_uri_path(file))
return response
| import os
import shutil
import json
from django.shortcuts import render, HttpResponse
from django.utils.encoding import escape_uri_path
from django.db import transaction
from web_pan.settings import files_folder
from disk import models
# Create your views here.
def logined(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
return render(request, 'login.html')
else:
return func(request, *args, **kwargs)
return wrapper
def api_check(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
res = dict(
state_code=-3,
error_msg="登陆过期"
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
else:
return func(request, *args, **kwargs)
return wrapper
def login(request):
if request.method == 'GET':
if request.session.get('user'):
return render(request, 'index.html')
return render(request, 'login.html')
else:
req = json.loads(request.body)
user = req.get('username')
pwd = req.get('pwd')
obj_user = models.Users.objects.filter(user_name=user).all()
if not obj_user:
res = dict(
state_code=1,
error_msg="用户不存在"
)
else:
password = obj_user.first().password
if str(pwd) != str(password):
res = dict(
state_code=2,
error_msg="密码错误"
)
else:
request.session['user'] = user
request.session.set_expiry(60*60*4)
res = dict(
state_code=0,
error_msg="密码错误"
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
def logout(request):
if request.session.get('user'):
del request.session['user']
return render(request, 'login.html')
@logined
def index(request):
return render(request, 'index.html')
@api_check
def get_dir_list(request):
user = request.session.get('user')
obj_dir = models.Dirs.objects.filter(user_name=user).all()
dir_list = []
for dirs in obj_dir:
user_dir = dirs.dir
dir_list.append(user_dir)
res = dict(
state_code=0,
error_msg='ok',
data={
"dir_list": dir_list
}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def user_mkdir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if os.path.exists(dir_path):
res = dict(
state_code=1,
error_msg="该目录已被使用"
)
else:
user = request.session.get('user')
if user:
models.Dirs.objects.create(
user_name=user,
dir=dir_name
)
os.mkdir(dir_path)
res = dict(
state_code=0,
error_msg='ok'
)
else:
res = dict(
state_code=-3,
error_msg="登陆过期"
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def del_dir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(
state_code=1,
error_msg='目录不存在'
)
else:
with transaction.atomic():
obj_dir = models.Dirs.objects.filter(dir=dir_name).all()
if obj_dir:
obj_dir.delete()
shutil.rmtree(dir_path)
res = dict(
state_code=0,
error_msg='ok'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def upload_file(request):
dir_name = request.POST.get('dir_name')
if not dir_name:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(
state_code=1,
error_msg='目录不存在'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
# 获取上传的文件,如果没有文件,则默认为None;
File = request.FILES.get("file", None)
if File is None:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
file_name = File.name
file_path = os.path.join(dir_path, file_name)
# 打开特定的文件进行二进制的写操作;
with open(file_path, 'wb+') as f:
# 分块写入文件;
for chunk in File.chunks():
f.write(chunk)
res = dict(
state_code=0,
error_msg='ok',
)
return HttpResponse(json.dumps(res), content_type='application/json')
@api_check
def query_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
dir_path = os.path.join(files_folder, dir_name)
cmd_info = os.popen("ls -l -h {}".format(dir_path)).read()
file_list = cmd_info.split('\n')[1:-1]
file_list_data = []
for file_info_cmd in file_list:
file_info_list = file_info_cmd.split(' ')
file_info = list(filter(None, file_info_list))
file = file_info[-1]
file_size = file_info[4]
name_type = file.rsplit('.', 1)
if len(name_type) < 2:
name_type.append('未知')
file_name, file_type = name_type
file_list_data.append({
'file_name': file_name,
'file_type': file_type,
'file_size': file_size
})
res = dict(
state_code=0,
error_msg='ok',
data={
'file_list': file_list_data
}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def del_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder,dir_name),file)
if not os.path.exists(file_path):
res = dict(
state_code=1,
error_msg='文件不存在'
)
else:
os.remove(file_path)
res = dict(
state_code=0,
error_msg='ok'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def download_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name+'.'+file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder,dir_name),file)
if not os.path.exists(file_path):
res = dict(
state_code=1,
error_msg='文件不存在'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
from django.http import StreamingHttpResponse
file_size = os.path.getsize(file_path)
def file_iterator(file_name, chunk_size=512): # 用于形成二进制数据
with open(file_name, 'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
the_file_name = file_path # 要下载的文件路径
res = file_iterator(the_file_name)
response = StreamingHttpResponse(res) # 这里创建返回
response['Content-Type'] = 'application/octet-stream; charset=UTF-8' # 注意格式
response['Content-Length'] = file_size
response['Content-Disposition'] = 'attachment;filename="{}"'.format(escape_uri_path(file)) # 注意filename 这个是下载后的名字
return response
| [
9,
10,
12,
13,
14
] |
706 | 1b529d8bafc81ef4dd9ff355de6abbd6f4ebddf1 | <mask token>
def lazy(func):
class Lazy:
def __init__(self, original) ->None:
self._value_computed = False
self._value = None
self._original = [original]
def get_value(self, *args, **kwargs):
if self._value_computed:
return self._value
else:
self._value = func(*args, **kwargs)
self._value_computed = True
return self._value
_lazy = Lazy(func)
def replacement(*args, **kwargs):
return _lazy.get_value(*args, **kwargs)
return replacement
| <mask token>
def asynchronous(func):
def task(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception('Exception during asynchronous execution: ' +
str(e))
raise e
def replacement(*args, **kwargs):
return xdcs().executor().submit(lambda : task(*args, **kwargs))
return replacement
def lazy(func):
class Lazy:
def __init__(self, original) ->None:
self._value_computed = False
self._value = None
self._original = [original]
def get_value(self, *args, **kwargs):
if self._value_computed:
return self._value
else:
self._value = func(*args, **kwargs)
self._value_computed = True
return self._value
_lazy = Lazy(func)
def replacement(*args, **kwargs):
return _lazy.get_value(*args, **kwargs)
return replacement
| <mask token>
logger = logging.getLogger(__name__)
def asynchronous(func):
def task(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception('Exception during asynchronous execution: ' +
str(e))
raise e
def replacement(*args, **kwargs):
return xdcs().executor().submit(lambda : task(*args, **kwargs))
return replacement
def lazy(func):
class Lazy:
def __init__(self, original) ->None:
self._value_computed = False
self._value = None
self._original = [original]
def get_value(self, *args, **kwargs):
if self._value_computed:
return self._value
else:
self._value = func(*args, **kwargs)
self._value_computed = True
return self._value
_lazy = Lazy(func)
def replacement(*args, **kwargs):
return _lazy.get_value(*args, **kwargs)
return replacement
| import logging
from xdcs.app import xdcs
logger = logging.getLogger(__name__)
def asynchronous(func):
def task(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception('Exception during asynchronous execution: ' +
str(e))
raise e
def replacement(*args, **kwargs):
return xdcs().executor().submit(lambda : task(*args, **kwargs))
return replacement
def lazy(func):
class Lazy:
def __init__(self, original) ->None:
self._value_computed = False
self._value = None
self._original = [original]
def get_value(self, *args, **kwargs):
if self._value_computed:
return self._value
else:
self._value = func(*args, **kwargs)
self._value_computed = True
return self._value
_lazy = Lazy(func)
def replacement(*args, **kwargs):
return _lazy.get_value(*args, **kwargs)
return replacement
| import logging
from xdcs.app import xdcs
logger = logging.getLogger(__name__)
def asynchronous(func):
def task(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception('Exception during asynchronous execution: ' + str(e))
raise e
def replacement(*args, **kwargs):
return xdcs().executor() \
.submit(lambda: task(*args, **kwargs))
return replacement
def lazy(func):
class Lazy:
def __init__(self, original) -> None:
self._value_computed = False
self._value = None
self._original = [original]
def get_value(self, *args, **kwargs):
if self._value_computed:
return self._value
else:
self._value = func(*args, **kwargs)
self._value_computed = True
return self._value
_lazy = Lazy(func)
def replacement(*args, **kwargs):
return _lazy.get_value(*args, **kwargs)
return replacement
| [
1,
2,
3,
4,
5
] |
707 | cb48a1601798f72f9cf3759d3c13969bc824a0f6 | <mask token>
def gen_windows(plan_grid, n, m, window_model):
return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,
window_model)(i, j, n, m)) for i in range(n) for j in range(m) if
plan_grid[i][j]])
<mask token>
def gen_body(plan_grid, n, m):
c = CUBE(1)
return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if
plan_grid[i][j]])
<mask token>
def q_shaped_house(box):
grid = [[True, True, True], [True, True, True], [True, False, False]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [
0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5,
1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,
9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def rectangular_house(box):
grid = [[True, True], [True, True], [True, True]]
roof = MKPOL([[[0, 0, 0], [1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [
0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def squared_house(box):
grid = [[True, True, True], [True, True, True], [True, True, True]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1
]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
<mask token>
| <mask token>
def gen_windows(plan_grid, n, m, window_model):
return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,
window_model)(i, j, n, m)) for i in range(n) for j in range(m) if
plan_grid[i][j]])
<mask token>
def gen_body(plan_grid, n, m):
c = CUBE(1)
return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if
plan_grid[i][j]])
def gen_house(box, plan_grid, door_model, window_model, roof_model):
n = len(plan_grid)
m = len(plan_grid[0])
body = STRUCT([gen_body(plan_grid, n, m), T(3)(1), roof_model])
l2s_scale = map(lambda x, y: x / y, SIZE([1, 2, 3])(body), box)
s2l_scale = [(1 / elem) for elem in l2s_scale]
scaled_win = S([1, 2, 3])(l2s_scale)(window_model)
windows = gen_windows(plan_grid, n, m, scaled_win)
house = STRUCT([body, windows])
return TEXTURE(['wood.jpg', True, True, 300, 300, r.random() * 3.1415,
0.1, 0.1, 0, 0])(S([1, 2, 3])(s2l_scale)(house))
def l_shaped_house(box):
grid = [[False, False, True], [True, True, True]]
roof = MKPOL([[[2, 0, 0], [2.5, 0, 0.5], [3, 0, 0], [3, 2, 0], [0, 2, 0
], [0, 1.5, 0.5], [0, 1, 0], [2, 1, 0], [2.5, 1.5, 0.5]], [[3, 2, 1
], [9, 2, 3, 4], [5, 6, 9, 4], [7, 6, 5], [7, 8, 9, 6], [9, 8, 1, 2
]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def q_shaped_house(box):
grid = [[True, True, True], [True, True, True], [True, False, False]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [
0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5,
1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,
9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def rectangular_house(box):
grid = [[True, True], [True, True], [True, True]]
roof = MKPOL([[[0, 0, 0], [1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [
0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def squared_house(box):
grid = [[True, True, True], [True, True, True], [True, True, True]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1
]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
<mask token>
| <mask token>
def gen_windows(plan_grid, n, m, window_model):
return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,
window_model)(i, j, n, m)) for i in range(n) for j in range(m) if
plan_grid[i][j]])
def gen_cube_windows(plan_grid, window_model):
w = window_model
hpcs = [CUBE(1e-05)]
def gen_cube0(i, j, n, m):
if j + 1 == m or not plan_grid[i][j + 1]:
hpcs.append(T([1, 2])([1, 0.5])(MAP([S2, S1, S3])(w)))
if j - 1 < 0 or not plan_grid[i][j - 1]:
hpcs.append(T(2)(0.5)(MAP([S2, S1, S3])(w)))
if i + 1 == n or not plan_grid[i + 1][j]:
hpcs.append(T([1, 2])([0.5, 1])(w))
if i - 1 < 0 or not plan_grid[i - 1][j]:
hpcs.append(T(1)(0.5)(w))
return STRUCT(hpcs)
return gen_cube0
def gen_body(plan_grid, n, m):
c = CUBE(1)
return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if
plan_grid[i][j]])
def gen_house(box, plan_grid, door_model, window_model, roof_model):
n = len(plan_grid)
m = len(plan_grid[0])
body = STRUCT([gen_body(plan_grid, n, m), T(3)(1), roof_model])
l2s_scale = map(lambda x, y: x / y, SIZE([1, 2, 3])(body), box)
s2l_scale = [(1 / elem) for elem in l2s_scale]
scaled_win = S([1, 2, 3])(l2s_scale)(window_model)
windows = gen_windows(plan_grid, n, m, scaled_win)
house = STRUCT([body, windows])
return TEXTURE(['wood.jpg', True, True, 300, 300, r.random() * 3.1415,
0.1, 0.1, 0, 0])(S([1, 2, 3])(s2l_scale)(house))
def l_shaped_house(box):
grid = [[False, False, True], [True, True, True]]
roof = MKPOL([[[2, 0, 0], [2.5, 0, 0.5], [3, 0, 0], [3, 2, 0], [0, 2, 0
], [0, 1.5, 0.5], [0, 1, 0], [2, 1, 0], [2.5, 1.5, 0.5]], [[3, 2, 1
], [9, 2, 3, 4], [5, 6, 9, 4], [7, 6, 5], [7, 8, 9, 6], [9, 8, 1, 2
]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def q_shaped_house(box):
grid = [[True, True, True], [True, True, True], [True, False, False]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [
0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5,
1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,
9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def rectangular_house(box):
grid = [[True, True], [True, True], [True, True]]
roof = MKPOL([[[0, 0, 0], [1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [
0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def squared_house(box):
grid = [[True, True, True], [True, True, True], [True, True, True]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1
]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
<mask token>
| <mask token>
def gen_windows(plan_grid, n, m, window_model):
return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,
window_model)(i, j, n, m)) for i in range(n) for j in range(m) if
plan_grid[i][j]])
def gen_cube_windows(plan_grid, window_model):
w = window_model
hpcs = [CUBE(1e-05)]
def gen_cube0(i, j, n, m):
if j + 1 == m or not plan_grid[i][j + 1]:
hpcs.append(T([1, 2])([1, 0.5])(MAP([S2, S1, S3])(w)))
if j - 1 < 0 or not plan_grid[i][j - 1]:
hpcs.append(T(2)(0.5)(MAP([S2, S1, S3])(w)))
if i + 1 == n or not plan_grid[i + 1][j]:
hpcs.append(T([1, 2])([0.5, 1])(w))
if i - 1 < 0 or not plan_grid[i - 1][j]:
hpcs.append(T(1)(0.5)(w))
return STRUCT(hpcs)
return gen_cube0
def gen_body(plan_grid, n, m):
c = CUBE(1)
return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if
plan_grid[i][j]])
def gen_house(box, plan_grid, door_model, window_model, roof_model):
n = len(plan_grid)
m = len(plan_grid[0])
body = STRUCT([gen_body(plan_grid, n, m), T(3)(1), roof_model])
l2s_scale = map(lambda x, y: x / y, SIZE([1, 2, 3])(body), box)
s2l_scale = [(1 / elem) for elem in l2s_scale]
scaled_win = S([1, 2, 3])(l2s_scale)(window_model)
windows = gen_windows(plan_grid, n, m, scaled_win)
house = STRUCT([body, windows])
return TEXTURE(['wood.jpg', True, True, 300, 300, r.random() * 3.1415,
0.1, 0.1, 0, 0])(S([1, 2, 3])(s2l_scale)(house))
def l_shaped_house(box):
grid = [[False, False, True], [True, True, True]]
roof = MKPOL([[[2, 0, 0], [2.5, 0, 0.5], [3, 0, 0], [3, 2, 0], [0, 2, 0
], [0, 1.5, 0.5], [0, 1, 0], [2, 1, 0], [2.5, 1.5, 0.5]], [[3, 2, 1
], [9, 2, 3, 4], [5, 6, 9, 4], [7, 6, 5], [7, 8, 9, 6], [9, 8, 1, 2
]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def q_shaped_house(box):
grid = [[True, True, True], [True, True, True], [True, False, False]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [
0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5,
1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,
9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def rectangular_house(box):
grid = [[True, True], [True, True], [True, True]]
roof = MKPOL([[[0, 0, 0], [1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [
0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
def squared_house(box):
grid = [[True, True, True], [True, True, True], [True, True, True]]
roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1
]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])
window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
return gen_house(box, grid, None, window, roof)
if __name__ == '__main__':
VIEW(squared_house([15, 15, 8]))
| from pyplasm import *
import random as r
def gen_windows(plan_grid, n, m, window_model):
return STRUCT([
T([1,2])([j,i])(
gen_cube_windows(plan_grid, window_model)(i, j, n, m))
for i in range(n)
for j in range(m)
if plan_grid[i][j]])
def gen_cube_windows(plan_grid, window_model):
w = window_model
hpcs = [CUBE(0.00001)]
def gen_cube0(i, j, n, m):
if j+1 == m or not plan_grid[i][j+1]:
hpcs.append(T([1, 2])([1, .5])(MAP([S2, S1, S3])(w)))
if j-1 < 0 or not plan_grid[i][j-1]:
hpcs.append(T(2)(.5)(MAP([S2, S1, S3])(w)))
if i+1 == n or not plan_grid[i+1][j]:
hpcs.append(T([1, 2])([.5, 1])(w))
if i-1 < 0 or not plan_grid[i-1][j]:
hpcs.append(T(1)(.5)(w))
return STRUCT(hpcs)
return gen_cube0
def gen_body(plan_grid, n, m):
c = CUBE(1)
return STRUCT([
T([1,2])([j,i])(c)
for i in range(n)
for j in range(m)
if plan_grid[i][j]])
def gen_house(
box,
plan_grid,
door_model,
window_model,
roof_model):
n = len(plan_grid)
m = len(plan_grid[0])
body = STRUCT([
gen_body(plan_grid, n, m),
T(3)(1),
roof_model])
l2s_scale = map(lambda x,y: x/y, SIZE([1,2,3])(body), box)
s2l_scale = [1/elem for elem in l2s_scale]
scaled_win = S([1,2,3])(l2s_scale)(window_model)
windows = gen_windows(plan_grid, n, m, scaled_win)
house = STRUCT([body, windows])
return TEXTURE(['wood.jpg',True, True, 300,300, r.random()*3.1415, .1,.1, 0,0])(
S([1,2,3])(s2l_scale)(house))
def l_shaped_house(box):
grid = [
[False, False, True],
[True, True, True]]
roof = MKPOL([
[
[ 2, 0, 0],
[2.5, 0, .5],
[ 3, 0, 0],
[ 3, 2, 0],
[ 0, 2, 0],
[ 0, 1.5, .5],
[ 0, 1, 0],
[ 2, 1, 0],
[2.5, 1.5, .5]
],
[
[3,2,1],
[9,2,3,4],
[5,6,9,4],
[7,6,5],
[7,8,9,6],
[9,8,1,2]
],
[1]])
window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))
return gen_house(box, grid, None, window, roof)
def q_shaped_house(box):
grid = [
[True, True, True],
[True, True, True],
[True, False, False]]
roof = MKPOL([
[
[0,0,0], #1
[3,0,0], #2
[3,2,0], #3
[1,2,0], #4
[1,3,0], #5
[.5,3,.5], #6
[0,3,0], #7
[.5,.5,.5], #8
[2.5,.5,.5], #9
[2.5,1.5,.5], #10
[.5,1.5,.5] #11
],
[
[1,8,6,7],
[1,2,9,8],
[2,3,10,9],
[10,3,4,11],
[4,5,6,11],
[6,5,7],
[8,9,10,11]
],
[1]])
window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))
return gen_house(box, grid, None, window, roof)
def rectangular_house(box):
grid = [
[True, True],
[True, True],
[True, True]]
roof = MKPOL([
[
[0,0,0], #1
[1,0,1], #2
[2,0,0], #3
[2,3,0], #4
[1,3,1], #5
[0,3,0] #6
],
[
[1,2,5,6],
[2,3,4,5],
[1,3,2],
[5,4,6]
],
[1]])
window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))
return gen_house(box, grid, None, window, roof)
def squared_house(box):
grid = [
[True, True, True],
[True, True, True],
[True, True, True]]
roof = MKPOL([
[
[0,0,0], #1
[3,0,0], #2
[3,3,0], #3
[0,3,0], #4
[1.5,1.5,1] #5
],
[
[5,1,2],
[5,2,3],
[5,3,4],
[5,4,1]
],
[1]])
window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))
return gen_house(box, grid, None, window, roof)
if __name__=='__main__':
VIEW(squared_house([15, 15, 8]))
| [
5,
7,
8,
9,
11
] |
708 | 7d3355ee775f759412308ab68a7aa409b9c74b20 | <mask token>
| <mask token>
print(num1)
| <mask token>
num1 = random.randint(50, 151)
print(num1)
| <mask token>
import random
num1 = random.randint(50, 151)
print(num1)
| '''
使用random模块,如何产生 50~150之间的数?
'''
import random
num1 = random.randint(50,151)
print(num1) | [
0,
1,
2,
3,
4
] |
709 | 7554b00f8c4d40f1d3ee2341f118048ca7ad10ea | <mask token>
class Event(object):
<mask token>
<mask token>
| <mask token>
class Event(object):
<mask token>
def to_dict(self):
d = {}
for item in self.__dict__:
val = getattr(self, item)
if val != None:
d[item] = val
return d
| <mask token>
class Event(object):
def __init__(self):
self.id = None
self.raw = None
self.create_dt = datetime.datetime.now()
self.device_id = None
self.collector_id = None
self.device_hostname = None
self.device_domain_name = None
self.device_ip_address = None
self.types = []
def to_dict(self):
d = {}
for item in self.__dict__:
val = getattr(self, item)
if val != None:
d[item] = val
return d
| import datetime
class Event(object):
def __init__(self):
self.id = None
self.raw = None
self.create_dt = datetime.datetime.now()
self.device_id = None
self.collector_id = None
self.device_hostname = None
self.device_domain_name = None
self.device_ip_address = None
self.types = []
def to_dict(self):
d = {}
for item in self.__dict__:
val = getattr(self, item)
if val != None:
d[item] = val
return d
| null | [
1,
2,
3,
4
] |
710 | f9db3c96bc3fd4911640d0428672c87072564b0d | <mask token>
| <mask token>
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream', f)
if cv2.waitKey(1) & 255 == ord('q'):
break
cv2.destroyAllWindows()
| <mask token>
stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream', f)
if cv2.waitKey(1) & 255 == ord('q'):
break
cv2.destroyAllWindows()
| <mask token>
import cv2
stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream', f)
if cv2.waitKey(1) & 255 == ord('q'):
break
cv2.destroyAllWindows()
| """Access IP Camera in Python OpenCV"""
import cv2
#stream = cv2.VideoCapture('protocol://IP:port/1')
# Use the next line if your camera has a username and password
stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream',f)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows() | [
0,
1,
2,
3,
4
] |
711 | 7da2be1b530faa8ce9a8570247887e8e0d74c310 | <mask token>
| <mask token>
print(data.head(6))
<mask token>
print(data)
<mask token>
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
<mask token>
model.fit(X_train, y_train)
<mask token>
print("""Predictions of test set:
%s""" % y_pred)
print(model.feature_importances_)
print(model.score(X_test, y_test))
<mask token>
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
<mask token>
print(clf.score(X_test, y_test))
<mask token>
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
| <mask token>
data = pd.read_csv('wheat.csv', index_col=0)
print(data.head(6))
data = data.fillna(data.mean())
print(data)
<mask token>
X = data.iloc[:, :7]
y = data.iloc[:, 7]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=300)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
<mask token>
model = RandomForestClassifier(n_estimators=10)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("""Predictions of test set:
%s""" % y_pred)
print(model.feature_importances_)
print(model.score(X_test, y_test))
model = RandomForestClassifier(n_estimators=30)
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
<mask token>
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
model = RandomForestClassifier(n_estimators=1)
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
| import pandas as pd
data = pd.read_csv('wheat.csv', index_col=0)
print(data.head(6))
data = data.fillna(data.mean())
print(data)
from sklearn.model_selection import train_test_split
X = data.iloc[:, :7]
y = data.iloc[:, 7]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=300)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=10)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("""Predictions of test set:
%s""" % y_pred)
print(model.feature_importances_)
print(model.score(X_test, y_test))
model = RandomForestClassifier(n_estimators=30)
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
model = RandomForestClassifier(n_estimators=1)
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
| import pandas as pd
#1. 读入数据
#从本地读入“wheat.csv”文件,指定index_col参数为00,即将第一列作为每行的索引。用head()函数查看前几行数据。
data = pd.read_csv("wheat.csv",index_col=0)
print(data.head(6))
#2. 缺失值处理
#该数据集中包含部分缺失值,在模型训练时会遇到特征值为空的问题,故对缺失值进行处理,
## 用DataFrame的fillna方法进行缺失值填充,填充值为用mean方法得到的该列平均值。
data = data.fillna(data.mean())
print(data)
#3. 划分数据集从sklearn.model_selection模块导入train_test_split函数,
# 并将返回值放入变量X_train、X_test、y_train和y_test之中,指定参数test_size=0.3,
# 即将70%的数据样本作为训练集,将30%的数据样本作为测试集。输出训练集和测试集大小。
from sklearn.model_selection import train_test_split
X = data.iloc[:,:7]
y = data.iloc[:,7]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=300)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
#4.构建随机森林模型并训练
#从sklearn.ensemble模块中导入RandomForestClassifier函数,
## 并用其构建随机森林分类模型,指定n_estimators参数为1010,
# 即使用1010棵决策树构建模型。将训练集传入模型进行模型训练。
from sklearn.ensemble import RandomForestClassifier
model= RandomForestClassifier(n_estimators=10)
model.fit(X_train, y_train)
#5.利用随机森林模型预测分类
#运用predict方法预测测试集中样本的分类,该方法返回一个预测结果数组,输出预测的分类结果。
y_pred = model.predict(X_test)
print("Predictions of test set:\n%s"%y_pred)
#6. 查看各特征重要性
#用feature_importances_属性查看每个特征的重要性,相对而言第11、22、55、77个特征在随机森林分类中的重要性强一些。
print(model.feature_importances_)
#7. 评估模型准确率
#利用score方法计算模型在测试集上的预测准确率。
print(model.score(X_test,y_test))
#8. 调整随机森林中的树木数量
#随机森林中的数目数量是模型中较为重要的参数,
#通过指定n_estimators参数进行设置,设置为30时模型的性能较10时有所提升,
#但设置为100时,其准确度不但没有提升已不明显,甚至可能下降,可能已经过拟合。
model= RandomForestClassifier(n_estimators=30)
model.fit(X_train, y_train)
print(model.score(X_test,y_test))
#9. 与决策树分类进行比较
#决策树与随机森林在分类效果上进行比较,
# 决策树模型的分类准确率与仅含单棵决策树的随机森林类似,
# 但是总体上随机森林的准确度要高于决策树,但其模型的解释性较差,无法轻易得到树的基本结构。
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
print(clf.score(X_test,y_test))
model= RandomForestClassifier(n_estimators=1)
model.fit(X_train, y_train)
print(model.score(X_test,y_test)) | [
0,
1,
2,
3,
4
] |
712 | 7036ae5f74e6cb04518c20bb52122a1dfae76f23 | <mask token>
def read_summary(summary_file):
return json.loads(open(summary_file, 'r').read())
def get_descriptions(summary):
d = {}
for o in summary['ontology_events']:
print(o)
d[o] = summary['ontology_events'][o].get('description', summary[
'ontology_events'][o]['method']) + '_' + str(o)
return d
def plot_totals(summary):
descriptions = get_descriptions(summary)
totals = {}
for event in summary['ontology_events'].keys():
totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}
for gene in summary['genes']:
for term in summary['genes'][gene]['terms']:
for event in summary['genes'][gene]['terms'][term]:
totals[str(event)]['genes'].append(gene)
for term in summary['terms']:
for event in summary['terms'][term]:
totals[str(event)]['terms'].append(term)
for rxn in summary['rxns']:
for event in summary['rxns'][rxn]:
totals[str(event)]['rxns'].append(rxn)
events = []
types = ['genes', 'terms', 'rxns']
gene_counts = []
rxn_counts = []
term_counts = []
for event in totals:
events.append(descriptions[event])
gene_counts.append(len(set(totals[event]['genes'])))
rxn_counts.append(len(set(totals[event]['rxns'])))
term_counts.append(len(set(totals[event]['terms'])))
data = {'events': events, 'genes': gene_counts, 'terms': term_counts,
'rxns': rxn_counts}
x = [(event, type) for event in events for type in types]
counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,
title='Unique Counts per Annotation Event', tools=
'wheel_zoom,box_zoom,reset,save')
p.hbar(y='x', right='counts', height=0.9, source=source, line_color=
'black', fill_color=factor_cmap('x', palette=inferno(len(types)),
factors=types, start=1, end=2))
p.x_range.start = 0
p.y_range.range_padding = 0.1
p.yaxis.major_label_orientation = 'horizontal'
p.yaxis.subgroup_label_orientation = 'horizontal'
p.yaxis.group_label_orientation = 'horizontal'
p.ygrid.grid_line_color = None
p.title.text_font_size = '12pt'
p.xaxis.major_label_text_font_size = '12pt'
p.yaxis.major_label_text_font_size = '12pt'
p.yaxis.group_text_font_size = '12pt'
p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))
return p
<mask token>
| <mask token>
def read_summary(summary_file):
return json.loads(open(summary_file, 'r').read())
def get_descriptions(summary):
d = {}
for o in summary['ontology_events']:
print(o)
d[o] = summary['ontology_events'][o].get('description', summary[
'ontology_events'][o]['method']) + '_' + str(o)
return d
def plot_totals(summary):
descriptions = get_descriptions(summary)
totals = {}
for event in summary['ontology_events'].keys():
totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}
for gene in summary['genes']:
for term in summary['genes'][gene]['terms']:
for event in summary['genes'][gene]['terms'][term]:
totals[str(event)]['genes'].append(gene)
for term in summary['terms']:
for event in summary['terms'][term]:
totals[str(event)]['terms'].append(term)
for rxn in summary['rxns']:
for event in summary['rxns'][rxn]:
totals[str(event)]['rxns'].append(rxn)
events = []
types = ['genes', 'terms', 'rxns']
gene_counts = []
rxn_counts = []
term_counts = []
for event in totals:
events.append(descriptions[event])
gene_counts.append(len(set(totals[event]['genes'])))
rxn_counts.append(len(set(totals[event]['rxns'])))
term_counts.append(len(set(totals[event]['terms'])))
data = {'events': events, 'genes': gene_counts, 'terms': term_counts,
'rxns': rxn_counts}
x = [(event, type) for event in events for type in types]
counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,
title='Unique Counts per Annotation Event', tools=
'wheel_zoom,box_zoom,reset,save')
p.hbar(y='x', right='counts', height=0.9, source=source, line_color=
'black', fill_color=factor_cmap('x', palette=inferno(len(types)),
factors=types, start=1, end=2))
p.x_range.start = 0
p.y_range.range_padding = 0.1
p.yaxis.major_label_orientation = 'horizontal'
p.yaxis.subgroup_label_orientation = 'horizontal'
p.yaxis.group_label_orientation = 'horizontal'
p.ygrid.grid_line_color = None
p.title.text_font_size = '12pt'
p.xaxis.major_label_text_font_size = '12pt'
p.yaxis.major_label_text_font_size = '12pt'
p.yaxis.group_text_font_size = '12pt'
p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))
return p
<mask token>
output_file('totals.html', title='Totals')
<mask token>
show(totals)
| <mask token>
def read_summary(summary_file):
return json.loads(open(summary_file, 'r').read())
def get_descriptions(summary):
d = {}
for o in summary['ontology_events']:
print(o)
d[o] = summary['ontology_events'][o].get('description', summary[
'ontology_events'][o]['method']) + '_' + str(o)
return d
def plot_totals(summary):
descriptions = get_descriptions(summary)
totals = {}
for event in summary['ontology_events'].keys():
totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}
for gene in summary['genes']:
for term in summary['genes'][gene]['terms']:
for event in summary['genes'][gene]['terms'][term]:
totals[str(event)]['genes'].append(gene)
for term in summary['terms']:
for event in summary['terms'][term]:
totals[str(event)]['terms'].append(term)
for rxn in summary['rxns']:
for event in summary['rxns'][rxn]:
totals[str(event)]['rxns'].append(rxn)
events = []
types = ['genes', 'terms', 'rxns']
gene_counts = []
rxn_counts = []
term_counts = []
for event in totals:
events.append(descriptions[event])
gene_counts.append(len(set(totals[event]['genes'])))
rxn_counts.append(len(set(totals[event]['rxns'])))
term_counts.append(len(set(totals[event]['terms'])))
data = {'events': events, 'genes': gene_counts, 'terms': term_counts,
'rxns': rxn_counts}
x = [(event, type) for event in events for type in types]
counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,
title='Unique Counts per Annotation Event', tools=
'wheel_zoom,box_zoom,reset,save')
p.hbar(y='x', right='counts', height=0.9, source=source, line_color=
'black', fill_color=factor_cmap('x', palette=inferno(len(types)),
factors=types, start=1, end=2))
p.x_range.start = 0
p.y_range.range_padding = 0.1
p.yaxis.major_label_orientation = 'horizontal'
p.yaxis.subgroup_label_orientation = 'horizontal'
p.yaxis.group_label_orientation = 'horizontal'
p.ygrid.grid_line_color = None
p.title.text_font_size = '12pt'
p.xaxis.major_label_text_font_size = '12pt'
p.yaxis.major_label_text_font_size = '12pt'
p.yaxis.group_text_font_size = '12pt'
p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))
return p
summary = read_summary('PT19DW.7.json')
output_file('totals.html', title='Totals')
totals = plot_totals(summary)
show(totals)
| import json
from bokeh.plotting import figure, output_file
from bokeh.io import show
from bokeh.palettes import inferno
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.transform import factor_cmap
from bokeh.models import HoverTool
def read_summary(summary_file):
return json.loads(open(summary_file, 'r').read())
def get_descriptions(summary):
d = {}
for o in summary['ontology_events']:
print(o)
d[o] = summary['ontology_events'][o].get('description', summary[
'ontology_events'][o]['method']) + '_' + str(o)
return d
def plot_totals(summary):
descriptions = get_descriptions(summary)
totals = {}
for event in summary['ontology_events'].keys():
totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}
for gene in summary['genes']:
for term in summary['genes'][gene]['terms']:
for event in summary['genes'][gene]['terms'][term]:
totals[str(event)]['genes'].append(gene)
for term in summary['terms']:
for event in summary['terms'][term]:
totals[str(event)]['terms'].append(term)
for rxn in summary['rxns']:
for event in summary['rxns'][rxn]:
totals[str(event)]['rxns'].append(rxn)
events = []
types = ['genes', 'terms', 'rxns']
gene_counts = []
rxn_counts = []
term_counts = []
for event in totals:
events.append(descriptions[event])
gene_counts.append(len(set(totals[event]['genes'])))
rxn_counts.append(len(set(totals[event]['rxns'])))
term_counts.append(len(set(totals[event]['terms'])))
data = {'events': events, 'genes': gene_counts, 'terms': term_counts,
'rxns': rxn_counts}
x = [(event, type) for event in events for type in types]
counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,
title='Unique Counts per Annotation Event', tools=
'wheel_zoom,box_zoom,reset,save')
p.hbar(y='x', right='counts', height=0.9, source=source, line_color=
'black', fill_color=factor_cmap('x', palette=inferno(len(types)),
factors=types, start=1, end=2))
p.x_range.start = 0
p.y_range.range_padding = 0.1
p.yaxis.major_label_orientation = 'horizontal'
p.yaxis.subgroup_label_orientation = 'horizontal'
p.yaxis.group_label_orientation = 'horizontal'
p.ygrid.grid_line_color = None
p.title.text_font_size = '12pt'
p.xaxis.major_label_text_font_size = '12pt'
p.yaxis.major_label_text_font_size = '12pt'
p.yaxis.group_text_font_size = '12pt'
p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))
return p
summary = read_summary('PT19DW.7.json')
output_file('totals.html', title='Totals')
totals = plot_totals(summary)
show(totals)
| import json
from bokeh.plotting import figure, output_file
from bokeh.io import show
from bokeh.palettes import inferno
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.transform import factor_cmap
from bokeh.models import HoverTool
# from bokeh.io import export_svgs
def read_summary(summary_file):
return json.loads(open(summary_file, "r").read())
def get_descriptions(summary):
d = {}
for o in summary["ontology_events"]:
print(o)
d[o] = summary["ontology_events"][o].get(
'description', summary["ontology_events"][o]['method']) + '_' + str(o)
return(d)
def plot_totals(summary):
descriptions = get_descriptions(summary)
totals = {}
for event in summary['ontology_events'].keys():
totals[str(event)] = {'genes': [],
'rxns': [],
'terms': []}
# genes
for gene in summary['genes']:
for term in summary['genes'][gene]['terms']:
for event in summary['genes'][gene]['terms'][term]:
totals[str(event)]['genes'].append(gene)
# terms
for term in summary['terms']:
for event in summary['terms'][term]:
totals[str(event)]['terms'].append(term)
# rxns
for rxn in summary['rxns']:
for event in summary['rxns'][rxn]:
totals[str(event)]['rxns'].append(rxn)
# sums
events = []
types = ['genes', 'terms', 'rxns']
gene_counts = []
rxn_counts = []
term_counts = []
for event in totals:
events.append(descriptions[event])
gene_counts.append(len(set(totals[event]['genes'])))
rxn_counts.append(len(set(totals[event]['rxns'])))
term_counts.append(len(set(totals[event]['terms'])))
data = {'events': events,
'genes': gene_counts,
'terms': term_counts,
'rxns': rxn_counts
}
x = [(event, type) for event in events for type in types]
counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(y_range=FactorRange(*x),
plot_height=400,
plot_width=1000,
title="Unique Counts per Annotation Event",
tools="wheel_zoom,box_zoom,reset,save")
p.hbar(y='x',
right='counts',
height=0.9,
source=source,
line_color="black",
fill_color=factor_cmap('x',
palette=inferno(len(types)),
factors=types,
start=1,
end=2))
p.x_range.start = 0
p.y_range.range_padding = 0.1
p.yaxis.major_label_orientation = "horizontal"
p.yaxis.subgroup_label_orientation = "horizontal"
p.yaxis.group_label_orientation = "horizontal"
p.ygrid.grid_line_color = None
p.title.text_font_size = '12pt'
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
p.yaxis.group_text_font_size = "12pt"
p.add_tools(HoverTool(tooltips=[("Type", "@x"), ("Count", "@counts")]))
return(p)
#summary = read_summary("PT19DW.5.json")
summary = read_summary("PT19DW.7.json")
output_file("totals.html", title="Totals")
totals = plot_totals(summary)
show(totals)
| [
3,
4,
5,
6,
7
] |
713 | 5eb5388ffe7a7c880d8fcfaa137c2c9a133a0636 | <mask token>
| <mask token>
print(wikipedia.summary(input_))
| <mask token>
input_ = input('Type in your question ')
print(wikipedia.summary(input_))
| import wikipedia
input_ = input('Type in your question ')
print(wikipedia.summary(input_))
| import wikipedia
input_ = input("Type in your question ")
print(wikipedia.summary(input_))
| [
0,
1,
2,
3,
4
] |
714 | 51540a80c7b29dc0bbb6342ee45008108d54b6f2 | <mask token>
| <mask token>
def gauss_seidel(relax, est, stop):
"""
Método iterativo de Gauss-Seidel para o sistema linear do trabalho.
Onde relax é o fator de relaxação, est é o valor inicial, stop é o
critério de parada, n é a quantidade de linhas do sistema e k é o
número de iterações.
"""
k = 0
dif = 10000
n = len(est)
diff = np.zeros(n)
while dif > stop:
k += 1
est[0] = (1 - relax) * est[0] + relax * (1.5 - est[1])
for i in range(1, int(n / 2)):
est[i] = (1 - relax) * est[i] + relax * ((1.0 - est[i - 1] -
est[i + 1] - est[i + 25]) / 4)
for j in range(int(n / 2), n - 1):
est[j] = (1 - relax) * est[j] + relax * ((2.0 - est[j - 25] -
est[j - 1] - est[j + 1]) / 5)
est[n - 1] = (1 - relax) * est[n - 1] + relax * (3.0 - est[n - 2])
dif = max(abs(np.subtract(est, diff)))
diff = np.copy(est)
return [est, k]
| import numpy as np
def gauss_seidel(relax, est, stop):
"""
Método iterativo de Gauss-Seidel para o sistema linear do trabalho.
Onde relax é o fator de relaxação, est é o valor inicial, stop é o
critério de parada, n é a quantidade de linhas do sistema e k é o
número de iterações.
"""
k = 0
dif = 10000
n = len(est)
diff = np.zeros(n)
while dif > stop:
k += 1
est[0] = (1 - relax) * est[0] + relax * (1.5 - est[1])
for i in range(1, int(n / 2)):
est[i] = (1 - relax) * est[i] + relax * ((1.0 - est[i - 1] -
est[i + 1] - est[i + 25]) / 4)
for j in range(int(n / 2), n - 1):
est[j] = (1 - relax) * est[j] + relax * ((2.0 - est[j - 25] -
est[j - 1] - est[j + 1]) / 5)
est[n - 1] = (1 - relax) * est[n - 1] + relax * (3.0 - est[n - 2])
dif = max(abs(np.subtract(est, diff)))
diff = np.copy(est)
return [est, k]
| # -*- coding: utf-8 -*-
import numpy as np
def gauss_seidel(relax, est, stop):
"""
Método iterativo de Gauss-Seidel para o sistema linear do trabalho.
Onde relax é o fator de relaxação, est é o valor inicial, stop é o
critério de parada, n é a quantidade de linhas do sistema e k é o
número de iterações.
"""
k = 0
dif = 10000
n = len(est)
diff = np.zeros(n)
while dif > stop:
k += 1
est[0] = ((1 - relax) * est[0]) + relax * (1.50 - est[1])
for i in range(1, int(n/2)):
est[i] = ((1 - relax) * est[i]) + relax * \
((1.0 - est[i-1] - est[i+1] - est[i+25])/4)
for j in range(int(n/2), n-1):
est[j] = ((1 - relax) * est[j]) + relax * \
((2.0 - est[j-25] - est[j-1] - est[j+1])/5)
est[n-1] = ((1 - relax) * est[n-1]) + relax * (3.00 - est[n-2])
dif = max(abs(np.subtract(est, diff)))
diff = np.copy(est)
return [est, k]
| null | [
0,
1,
2,
3
] |
715 | eb981a2d7f0ff5e6cc4a4a76f269c93c547965ba | from typing import Any, Dict, List
import numpy as np
from kedro.io import AbstractDataSet
from msrest.exceptions import HttpOperationError
from azureml.core import Workspace, Datastore
from azureml.data.data_reference import DataReference
class AZblob_datastore_data(AbstractDataSet):
"""``ImageDataSet`` loads / save image data from a given filepath as `numpy` array using Pillow.
Example:
::
>>> ImageDataSet(filepath='/img/file/path.png')
"""
def __init__(self,
container_path: str,
local_path : str,
credentials: Dict[str, Any] = None):
"""Creates a new instance of ImageDataSet to load / save image data at the given filepath.
Args:
filepath: The location of the image file to load / save data.
"""
self._container_path = container_path
self._local_path = local_path
self._credentials = credentials
def _load(self) -> np.ndarray:
"""Loads data from the image file.
Returns:
Data from the image file as a numpy array.
"""
# Initialis Workspace
ws = Workspace.from_config()
blob_datastore_name = self._credentials['storage_name']
account_name = self._credentials['storage_name'] # Storage account name
container_name = self._credentials['container_name'] # Name of Azure blob container
account_key = self._credentials['key'] # Storage account key
# Register a new datastore
try:
blob_datastore = blob_datastore = Datastore.get(ws, blob_datastore_name)
print("Found Blob Datastore with name: %s" % blob_datastore_name)
except HttpOperationError:
blob_datastore = Datastore.register_azure_blob_container(workspace = ws,
datastore_name = blob_datastore_name,
container_name = container_name,
account_name = account_name,
blob_datastore.download(target_path=self._local_path,
prefix=self._container_path,
show_progress=False)
...
def _save(self, data: np.ndarray) -> None:
"""Saves image data to the specified filepath"""
...
def _describe(self) -> Dict[str, Any]:
"""Returns a dict that describes the attributes of the dataset""" | null | null | null | null | [
0
] |
716 | 0beb5c5c5db9247d66a5a49cfff7282ead52a9b7 | <mask token>
class HDF5_Parser(object):
<mask token>
<mask token>
<mask token>
<mask token>
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
| <mask token>
class HDF5_Parser(object):
<mask token>
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
| <mask token>
class HDF5_Parser(object):
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
| import h5py
class HDF5_Parser(object):
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
| #!/usr/bin/env python
import h5py
class HDF5_Parser(object): # noqa: N801
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
| [
2,
3,
4,
5,
6
] |
717 | 93d4c6b6aef827d6746afc684c32a9cf1d0229e4 | <mask token>
| <mask token>
while person_win_count < 4 or person_lose_count < 4:
player = input('가위, 바위, 보 중 어떤 것을 낼래요? ')
if player != '가위' and player != '바위' and player != '보':
player = input('다시 입력해 주세요.(예: 가위, 바위, 보)')
computer = random.choice(list_b)
print('컴퓨터:', computer)
if player == computer:
print('비겼습니다.')
elif player == '가위':
if computer == '바위':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
if computer == '보':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
elif player == '바위':
if computer == '가위':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
if computer == '보':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
elif player == '보':
if computer == '바위':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
if computer == '가위':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
if person_win_count == 3:
print('당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.')
break
elif person_lose_count == 3:
print('당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.')
break
| <mask token>
list_b = ['가위', '바위', '보']
person_win_count = 0
person_lose_count = 0
while person_win_count < 4 or person_lose_count < 4:
player = input('가위, 바위, 보 중 어떤 것을 낼래요? ')
if player != '가위' and player != '바위' and player != '보':
player = input('다시 입력해 주세요.(예: 가위, 바위, 보)')
computer = random.choice(list_b)
print('컴퓨터:', computer)
if player == computer:
print('비겼습니다.')
elif player == '가위':
if computer == '바위':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
if computer == '보':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
elif player == '바위':
if computer == '가위':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
if computer == '보':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
elif player == '보':
if computer == '바위':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
if computer == '가위':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
if person_win_count == 3:
print('당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.')
break
elif person_lose_count == 3:
print('당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.')
break
| import random
list_b = ['가위', '바위', '보']
person_win_count = 0
person_lose_count = 0
while person_win_count < 4 or person_lose_count < 4:
player = input('가위, 바위, 보 중 어떤 것을 낼래요? ')
if player != '가위' and player != '바위' and player != '보':
player = input('다시 입력해 주세요.(예: 가위, 바위, 보)')
computer = random.choice(list_b)
print('컴퓨터:', computer)
if player == computer:
print('비겼습니다.')
elif player == '가위':
if computer == '바위':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
if computer == '보':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
elif player == '바위':
if computer == '가위':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
if computer == '보':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
elif player == '보':
if computer == '바위':
person_win_count = person_win_count + 1
print('당신이 이겼습니다.')
if computer == '가위':
person_lose_count = person_lose_count + 1
print('컴퓨터가 이겼습니다.')
if person_win_count == 3:
print('당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.')
break
elif person_lose_count == 3:
print('당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.')
break
| # 가위, 바위, 보 게임
# 컴퓨터 가위, 바위, 보 리스트에서 랜덤하게 뽑기 위해 random 함수 호출
import random
# 컴퓨터 가위, 바위, 보 리스트
list_b = ["가위", "바위", "보"]
# 이긴횟수, 진 횟수 카운팅 하기 위한 변수
person_win_count = 0
person_lose_count = 0
while person_win_count < 4 or person_lose_count < 4:
# 가위, 바위, 보 입력 받기
player = input("가위, 바위, 보 중 어떤 것을 낼래요? ")
if player != "가위" and player != "바위" and player != "보":
player = input("다시 입력해 주세요.(예: 가위, 바위, 보)")
# 컴퓨터 가위, 바위, 보 임의 추출
computer = random.choice(list_b)
print("컴퓨터:", computer)
# 사람과 컴퓨터간 가위, 바위, 보 비교 및 카운팅
if player == computer:
print("비겼습니다.")
elif player == "가위":
if computer == "바위":
person_lose_count = person_lose_count + 1
print("컴퓨터가 이겼습니다.")
if computer == "보":
person_win_count = person_win_count + 1
print("당신이 이겼습니다.")
elif player == "바위":
if computer == "가위":
person_win_count = person_win_count + 1
print("당신이 이겼습니다.")
if computer == "보":
person_lose_count = person_lose_count + 1
print("컴퓨터가 이겼습니다.")
elif player == "보":
if computer == "바위":
person_win_count = person_win_count + 1
print("당신이 이겼습니다.")
if computer == "가위":
person_lose_count = person_lose_count + 1
print("컴퓨터가 이겼습니다.")
# 3번 이겼는지, 3번 졌는지 조건비교, 최종결과, 게임종료
if person_win_count == 3:
print("당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.")
break
elif person_lose_count == 3:
print("당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.")
break
| [
0,
1,
2,
3,
4
] |
718 | d7d94cfed0b819297069c3434c70359a327403cd | <mask token>
| <mask token>
admin.site.register(models.Comentario)
| from django.contrib import admin
from . import models
admin.site.register(models.Comentario)
| from django.contrib import admin
from . import models
admin.site.register(models.Comentario)
# Register your models here.
| null | [
0,
1,
2,
3
] |
719 | 32f9b5c32acbb6411fe6ab99616d8459acfd7c74 | <mask token>
| <mask token>
articlesFileNameList(reverse=True)
<mask token>
for fileName in articlesFileNameList:
print(fileName)
dictOut = pp.parse_medline_xml(articlesFolderPath + '/' + fileName)
for item in dictOut:
resultFile.write(item['abstract'] + '\n')
| <mask token>
nlpPath = '/Users/kapmayn/Desktop/nlp'
articlesFolderPath = nlpPath + '/articles'
abstractsFilePath = nlpPath + '/abstracts.txt'
articlesFileNameList = os.listdir(articlesFolderPath)
articlesFileNameList(reverse=True)
resultFile = open(abstractsFilePath, 'w')
for fileName in articlesFileNameList:
print(fileName)
dictOut = pp.parse_medline_xml(articlesFolderPath + '/' + fileName)
for item in dictOut:
resultFile.write(item['abstract'] + '\n')
| import os
import pubmed_parser as pp
nlpPath = '/Users/kapmayn/Desktop/nlp'
articlesFolderPath = nlpPath + '/articles'
abstractsFilePath = nlpPath + '/abstracts.txt'
articlesFileNameList = os.listdir(articlesFolderPath)
articlesFileNameList(reverse=True)
resultFile = open(abstractsFilePath, 'w')
for fileName in articlesFileNameList:
print(fileName)
dictOut = pp.parse_medline_xml(articlesFolderPath + '/' + fileName)
for item in dictOut:
resultFile.write(item['abstract'] + '\n')
| import os
import pubmed_parser as pp
nlpPath = "/Users/kapmayn/Desktop/nlp"
articlesFolderPath = nlpPath + "/articles"
abstractsFilePath = nlpPath + "/abstracts.txt"
articlesFileNameList = os.listdir(articlesFolderPath)
articlesFileNameList(reverse = True)
resultFile = open(abstractsFilePath, 'w')
for fileName in articlesFileNameList:
print(fileName)
dictOut = pp.parse_medline_xml(articlesFolderPath + "/" + fileName)
for item in dictOut:
resultFile.write((item['abstract'] + '\n')) | [
0,
1,
2,
3,
4
] |
720 | 373c102018fdcc5211263304c368c2e8beef3257 | <mask token>
| <mask token>
urlpatterns = [url('get_img_api$', router.get_img_api), url('add_book$',
views.add_book), url('show_books$', views.show_books), url('add_story$',
story.add_story), url('show_storys$', story.show_storys), url(
'add_comment$', story.add_comment), url('show_comments$', story.
show_comments), url('uploadImg$', img.uploadImg), url('showImg$', img.
showImg), url('uploadImgForUs$', img.uploadImgForUs), url(
'showImgForUs', img.showImgForUs), url('add_user', login.add_user), url
('login', login.login), url('get_username', login.get_username), url(
'send_register_email', login.send_register_email), url('check_username',
login.check_username), url('chat_with_tuling', tuling.chat_with_tuling),
url('utilView_getLive2d', utilView.get_live2d), url(
'utilView_getRandJson', utilView.get_rand_json), url('get_wechat',
wechat_modules.on_get), url('', login.other_request)]
| from django.conf.urls import url
from myapp.view import views
from myapp.view import story
from myapp.view import img
from myapp.view import login
from myapp.view import tuling
from myapp.view import utilView
from myapp.view.wechat import wechat_modules
from myapp.view import router
urlpatterns = [url('get_img_api$', router.get_img_api), url('add_book$',
views.add_book), url('show_books$', views.show_books), url('add_story$',
story.add_story), url('show_storys$', story.show_storys), url(
'add_comment$', story.add_comment), url('show_comments$', story.
show_comments), url('uploadImg$', img.uploadImg), url('showImg$', img.
showImg), url('uploadImgForUs$', img.uploadImgForUs), url(
'showImgForUs', img.showImgForUs), url('add_user', login.add_user), url
('login', login.login), url('get_username', login.get_username), url(
'send_register_email', login.send_register_email), url('check_username',
login.check_username), url('chat_with_tuling', tuling.chat_with_tuling),
url('utilView_getLive2d', utilView.get_live2d), url(
'utilView_getRandJson', utilView.get_rand_json), url('get_wechat',
wechat_modules.on_get), url('', login.other_request)]
| # -- coding: utf-8 --
from django.conf.urls import url
from myapp.view import views
from myapp.view import story
from myapp.view import img # 添加
from myapp.view import login
from myapp.view import tuling
from myapp.view import utilView
from myapp.view.wechat import wechat_modules
from myapp.view import router
urlpatterns = [
url(r'get_img_api$', router.get_img_api),
url(r'add_book$', views.add_book, ),
url(r'show_books$', views.show_books, ),
url(r'add_story$', story.add_story),
url(r'show_storys$', story.show_storys),
url(r'add_comment$', story.add_comment),
url(r'show_comments$', story.show_comments),
url(r'uploadImg$', img.uploadImg),
url(r'showImg$', img.showImg),
url(r'uploadImgForUs$', img.uploadImgForUs),
url(r'showImgForUs', img.showImgForUs),
url(r'add_user', login.add_user),
url(r'login', login.login),
url(r'get_username', login.get_username),
url(r'send_register_email', login.send_register_email),
url(r'check_username', login.check_username),
url(r'chat_with_tuling', tuling.chat_with_tuling),
url(r'utilView_getLive2d', utilView.get_live2d),
url(r'utilView_getRandJson', utilView.get_rand_json),
url(r'get_wechat', wechat_modules.on_get),
url(r'', login.other_request),
]
| null | [
0,
1,
2,
3
] |
721 | 622b388beb56eba85bbb08510c2bcea55f23da9a | <mask token>
| <mask token>
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
<mask token>
print('-' * 20)
<mask token>
for name in names:
name = name.upper()
if 'D' in name:
print(name)
| data = (
' Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu'
)
lst = data.split(',')
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
girlsdata = 'Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya'
print('-' * 20)
names = girlsdata.split(',')
for name in names:
name = name.upper()
if 'D' in name:
print(name)
| data = " Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu"
lst = data.split(",")
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
girlsdata = "Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya"
# get the names that contain "D"
print("-"*20)
names = girlsdata.split(",")
for name in names:
name = name.upper()
if "D" in name:
print(name) | null | [
0,
1,
2,
3
] |
722 | e1787fd4be66d19ab83ece44eacfd96cb488b504 | # with expression [as var]
#...BODY...
# the object produced by the expression must have __enter__ and __exit__ methods
# i.e. the result of the expression must be a context manager - it implements the context management protocol
#https://www.python.org/dev/peps/pep-0343/
# This PEP adds a new statement "with" to the Python language to make
# it possible to factor out standard uses of try/finally statements.
# In this PEP, context managers provide __enter__() and __exit__()
# methods that are invoked on entry to and exit from the body of the
# with statement. | null | null | null | null | [
0
] |
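The notes in row 722 describe the context-management protocol behind the with statement. As a minimal sketch of that protocol (an illustrative example, assuming CPython 3 - any object with __enter__ and __exit__ qualifies):
# minimal context manager implementing the protocol described in the notes above
class ManagedFile:
    def __init__(self, path):
        self.path = path
    def __enter__(self):
        self.f = open(self.path, 'w')  # runs on entry; its return value is bound by "as var"
        return self.f
    def __exit__(self, exc_type, exc_value, tb):
        self.f.close()                 # runs on exit, even if the body raised
        return False                   # False: do not suppress exceptions
with ManagedFile('demo.txt') as f:
    f.write('...BODY...')              # the BODY placeholder from the notes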
723 | f3167d8f1a806c38fb10672605d8e94265d2fc9c | <mask token>
class Event(db.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
<mask token>
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
<mask token>
| <mask token>
class Event(db.Model):
__tablename__ = 'event'
__table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']
), ForeignKeyConstraint(['org_id'], ['org_unit.id']
), ForeignKeyConstraint(['created_by'], ['admin.id'])
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time,
duration, coordinator_name, coordinator_contact, status_id, org_id,
created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
<mask token>
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
<mask token>
| <mask token>
class Event(db.Model):
__tablename__ = 'event'
__table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']
), ForeignKeyConstraint(['org_id'], ['org_unit.id']
), ForeignKeyConstraint(['created_by'], ['admin.id'])
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time,
duration, coordinator_name, coordinator_contact, status_id, org_id,
created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
event_schema = EventSchema()
events_schema = EventSchema(many=True)
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
event_with_full_schema = EventFullInfoSchema()
events_with_full_schema = EventFullInfoSchema(many=True)
| from database import db
from database import ma
from datetime import datetime
from sqlalchemy import ForeignKeyConstraint
from models.admin import Admin, admin_limited_schema
from models.event_status import EventStatus, event_status_schema
from models.org_unit import org_unit_schema
class Event(db.Model):
__tablename__ = 'event'
__table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']
), ForeignKeyConstraint(['org_id'], ['org_unit.id']
), ForeignKeyConstraint(['created_by'], ['admin.id'])
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time,
duration, coordinator_name, coordinator_contact, status_id, org_id,
created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
event_schema = EventSchema()
events_schema = EventSchema(many=True)
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
event_with_full_schema = EventFullInfoSchema()
events_with_full_schema = EventFullInfoSchema(many=True)
| from database import db
from database import ma
from datetime import datetime
from sqlalchemy import ForeignKeyConstraint
from models.admin import Admin, admin_limited_schema
from models.event_status import EventStatus, event_status_schema
from models.org_unit import org_unit_schema
class Event(db.Model):
# class corresponding to the event table in the database
__tablename__ = 'event'
__table_args__ = (
ForeignKeyConstraint(['status_id'], ['event_status.id']),
ForeignKeyConstraint(['org_id'], ['org_unit.id']),
ForeignKeyConstraint(['created_by'], ['admin.id']),
)
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time, duration, coordinator_name, coordinator_contact, status_id, org_id, created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long', 'date_created', 'start_time',
'duration', 'coordinator_name', 'coordinator_contact', 'status_id', 'org_id', 'description')
# init schema
event_schema = EventSchema()
events_schema = EventSchema(many=True)
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
event_with_full_schema = EventFullInfoSchema()
events_with_full_schema = EventFullInfoSchema(many=True) | [
4,
6,
7,
8,
9
] |
724 | a26cab29f0777764f014eeff13745be60e55b62d | <mask token>
| <mask token>
try:
r = requests.get('http://skitter.com')
print(r)
except (requests.ConnectionError, requests.Timeout) as x:
pass
| import requests
try:
r = requests.get('http://skitter.com')
print(r)
except (requests.ConnectionError, requests.Timeout) as x:
pass
| import requests
# try to make the request
try:
r = requests.get('http://skitter.com')
print(r) # see the results
# catch a failure
except (requests.ConnectionError, requests.Timeout) as x:
pass | null | [
0,
1,
2,
3
] |
725 | de77edaccdaada785f41828135ad2da4ae2b403e | <mask token>
class Post(models.Model):
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
user = models.ForeignKey(User, on_delete=models.CASCADE)
header = models.CharField(max_length=50)
text = models.CharField(max_length=2048)
create_date = models.DateTimeField(auto_now=True)
<mask token>
class ReadPost(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
class Subscription(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
| <mask token>
class Blog(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Post(models.Model):
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
user = models.ForeignKey(User, on_delete=models.CASCADE)
header = models.CharField(max_length=50)
text = models.CharField(max_length=2048)
create_date = models.DateTimeField(auto_now=True)
<mask token>
class ReadPost(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
class Subscription(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
| <mask token>
class Blog(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Post(models.Model):
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
user = models.ForeignKey(User, on_delete=models.CASCADE)
header = models.CharField(max_length=50)
text = models.CharField(max_length=2048)
create_date = models.DateTimeField(auto_now=True)
@receiver(post_save, sender=Post)
def send_email(sender, **kwargs):
post = Post.objects.get(id=kwargs.get('instance').id)
template = loader.get_template('post2email.html')
subject = 'Post in blog ' + post.blog.user.username
context = {'header': post.header, 'text': post.text, 'id': post.id,
'host': getattr(settings, 'MY_DJANGO_URL_PATH', '')}
html_content = template.render(context)
msg = EmailMultiAlternatives(subject, '', '', [post.user.email])
msg.attach_alternative(html_content, 'text/html')
msg.send()
class ReadPost(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
class Subscription(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
| from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
class Blog(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Post(models.Model):
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
user = models.ForeignKey(User, on_delete=models.CASCADE)
header = models.CharField(max_length=50)
text = models.CharField(max_length=2048)
create_date = models.DateTimeField(auto_now=True)
@receiver(post_save, sender=Post)
def send_email(sender, **kwargs):
post = Post.objects.get(id=kwargs.get('instance').id)
template = loader.get_template('post2email.html')
subject = 'Post in blog ' + post.blog.user.username
context = {'header': post.header, 'text': post.text, 'id': post.id,
'host': getattr(settings, 'MY_DJANGO_URL_PATH', '')}
html_content = template.render(context)
msg = EmailMultiAlternatives(subject, '', '', [post.user.email])
msg.attach_alternative(html_content, 'text/html')
msg.send()
class ReadPost(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
class Subscription(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
| from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
class Blog(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,)
class Post(models.Model):
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
user = models.ForeignKey(User,on_delete=models.CASCADE,)
header = models.CharField(max_length=50)
text = models.CharField(max_length=2048)
create_date = models.DateTimeField(auto_now=True)
@receiver(post_save, sender=Post)
def send_email(sender, **kwargs):
post = Post.objects.get(id=kwargs.get('instance').id)
template = loader.get_template('post2email.html')
subject = "Post in blog " + post.blog.user.username
context = { "header": post.header,
"text": post.text,
"id": post.id,
"host": getattr(settings, 'MY_DJANGO_URL_PATH', ''),
}
html_content = template.render(context)
msg = EmailMultiAlternatives(subject, "", "", [post.user.email])
msg.attach_alternative(html_content, "text/html")
msg.send()
class ReadPost(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,)
post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
class Subscription(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,)
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING) | [
6,
8,
9,
10,
11
] |
726 | c3efaeab600ec9a7a9fffdfad5c9dc1faad8fee7 | <mask token>
class Plugin(LoggerPlugin):
<mask token>
def __init__(self, *args, **kwargs):
super(Plugin, self).__init__(*args, **kwargs)
self.setDeviceName(devicename)
self.smallGUI = True
self._last_value = 0
self._jump_allowed = True
self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)
def __openPort(self, portname=default_device):
self.portname = portname
try:
self._serial_port = serial.Serial(self.portname, baudrate=
SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,
timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)
self._serial_port.dtr = True
self._serial_port.rts = False
return True
except Exception:
tb = traceback.format_exc()
logging.debug(tb)
return False
def _updateT(self):
valid, value, unit = self._get_data()
if unit == 'V':
datanames = ['Spannung']
elif unit == 'A':
datanames = ['Strom']
elif unit == 'Ohm':
datanames = ['Widerstand']
elif unit == '°C':
datanames = ['Temperatur']
elif unit == 'F':
datanames = ['Kapazität']
elif unit == 'Hz':
datanames = ['Frequenz']
else:
datanames = [unit]
if valid:
if abs(self._last_value - value) >= 2 and not self._jump_allowed:
self._jump_allowed = True
else:
self.stream(y=[value], snames=datanames, unit=unit)
self._jump_allowed = False
self._last_value = value
def loadGUI(self):
self.widget = QtWidgets.QWidget()
packagedir = self.getDir(__file__)
uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self
.widget)
self.widget.pushButton.clicked.connect(self.__openPortCallback)
self.__openPortCallback()
return self.widget
<mask token>
<mask token>
<mask token>
| <mask token>
class Plugin(LoggerPlugin):
"""
    Records the measurement data of a HoldPeak VC820 multimeter
"""
def __init__(self, *args, **kwargs):
super(Plugin, self).__init__(*args, **kwargs)
self.setDeviceName(devicename)
self.smallGUI = True
self._last_value = 0
self._jump_allowed = True
self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)
def __openPort(self, portname=default_device):
self.portname = portname
try:
self._serial_port = serial.Serial(self.portname, baudrate=
SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,
timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)
self._serial_port.dtr = True
self._serial_port.rts = False
return True
except Exception:
tb = traceback.format_exc()
logging.debug(tb)
return False
def _updateT(self):
valid, value, unit = self._get_data()
if unit == 'V':
datanames = ['Spannung']
elif unit == 'A':
datanames = ['Strom']
elif unit == 'Ohm':
datanames = ['Widerstand']
elif unit == '°C':
datanames = ['Temperatur']
elif unit == 'F':
datanames = ['Kapazität']
elif unit == 'Hz':
datanames = ['Frequenz']
else:
datanames = [unit]
if valid:
if abs(self._last_value - value) >= 2 and not self._jump_allowed:
self._jump_allowed = True
else:
self.stream(y=[value], snames=datanames, unit=unit)
self._jump_allowed = False
self._last_value = value
def loadGUI(self):
self.widget = QtWidgets.QWidget()
packagedir = self.getDir(__file__)
uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self
.widget)
self.widget.pushButton.clicked.connect(self.__openPortCallback)
self.__openPortCallback()
return self.widget
def __openPortCallback(self):
if self.run:
self.cancel()
self.widget.pushButton.setText('Verbinden')
else:
port = self.widget.comboBox.currentText()
if self.__openPort(port):
self.start()
self.widget.pushButton.setText('Beenden')
else:
self.cancel()
self.widget.pushButton.setText('Fehler')
def _get_data(self):
test = self._serial_port.read(1)
if len(test) != 1:
            logging.error('received incomplete data, skipping...')
return False, None, None
if MultimeterMessage.check_first_byte(test[0]):
data = test + self._serial_port.read(MultimeterMessage.
MESSAGE_LENGTH - 1)
else:
            logging.error('received incorrect data (%s), skipping...' % test.hex())
return False, None, None
if len(data) != MultimeterMessage.MESSAGE_LENGTH:
            logging.error('received incomplete message (%s), skipping...' % data.hex())
return False, None, None
try:
message = MultimeterMessage(data)
except ValueError as e:
logging.debug(e)
logging.error('Error decoding: %s on message %s' % (str(e),
data.hex()))
return False, None, None
return True, round(message.value * message.multiplier, 10
), message.base_unit
<mask token>
| try:
from LoggerPlugin import LoggerPlugin
except ImportError:
from RTOC.LoggerPlugin import LoggerPlugin
<mask token>
log.basicConfig(level=log.INFO)
logging = log.getLogger(__name__)
devicename = 'HoldPeak'
default_device = 'COM7'
SERIAL_BAUDRATE = 2400
SERIAL_BYTESIZE = 8
SERIAL_TIMEOUT = 1
SAMPLERATE = 1
class Plugin(LoggerPlugin):
"""
    Records the measurement data of a HoldPeak VC820 multimeter
"""
def __init__(self, *args, **kwargs):
super(Plugin, self).__init__(*args, **kwargs)
self.setDeviceName(devicename)
self.smallGUI = True
self._last_value = 0
self._jump_allowed = True
self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)
def __openPort(self, portname=default_device):
self.portname = portname
try:
self._serial_port = serial.Serial(self.portname, baudrate=
SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,
timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)
self._serial_port.dtr = True
self._serial_port.rts = False
return True
except Exception:
tb = traceback.format_exc()
logging.debug(tb)
return False
def _updateT(self):
valid, value, unit = self._get_data()
if unit == 'V':
datanames = ['Spannung']
elif unit == 'A':
datanames = ['Strom']
elif unit == 'Ohm':
datanames = ['Widerstand']
elif unit == '°C':
datanames = ['Temperatur']
elif unit == 'F':
datanames = ['Kapazität']
elif unit == 'Hz':
datanames = ['Frequenz']
else:
datanames = [unit]
if valid:
if abs(self._last_value - value) >= 2 and not self._jump_allowed:
self._jump_allowed = True
else:
self.stream(y=[value], snames=datanames, unit=unit)
self._jump_allowed = False
self._last_value = value
def loadGUI(self):
self.widget = QtWidgets.QWidget()
packagedir = self.getDir(__file__)
uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self
.widget)
self.widget.pushButton.clicked.connect(self.__openPortCallback)
self.__openPortCallback()
return self.widget
def __openPortCallback(self):
if self.run:
self.cancel()
self.widget.pushButton.setText('Verbinden')
else:
port = self.widget.comboBox.currentText()
if self.__openPort(port):
self.start()
self.widget.pushButton.setText('Beenden')
else:
self.cancel()
self.widget.pushButton.setText('Fehler')
def _get_data(self):
test = self._serial_port.read(1)
if len(test) != 1:
            logging.error('received incomplete data, skipping...')
return False, None, None
if MultimeterMessage.check_first_byte(test[0]):
data = test + self._serial_port.read(MultimeterMessage.
MESSAGE_LENGTH - 1)
else:
            logging.error('received incorrect data (%s), skipping...' % test.hex())
return False, None, None
if len(data) != MultimeterMessage.MESSAGE_LENGTH:
            logging.error('received incomplete message (%s), skipping...' % data.hex())
return False, None, None
try:
message = MultimeterMessage(data)
except ValueError as e:
logging.debug(e)
logging.error('Error decoding: %s on message %s' % (str(e),
data.hex()))
return False, None, None
return True, round(message.value * message.multiplier, 10
), message.base_unit
if __name__ == '__main__':
standalone = Plugin()
standalone.setup()
| try:
from LoggerPlugin import LoggerPlugin
except ImportError:
from RTOC.LoggerPlugin import LoggerPlugin
from .holdPeak_VC820.vc820py.vc820 import MultimeterMessage
import serial
import sys
import traceback
from PyQt5 import uic
from PyQt5 import QtWidgets
import logging as log
log.basicConfig(level=log.INFO)
logging = log.getLogger(__name__)
devicename = 'HoldPeak'
default_device = 'COM7'
SERIAL_BAUDRATE = 2400
SERIAL_BYTESIZE = 8
SERIAL_TIMEOUT = 1
SAMPLERATE = 1
class Plugin(LoggerPlugin):
"""
    Records the measurement data of a HoldPeak VC820 multimeter
"""
def __init__(self, *args, **kwargs):
super(Plugin, self).__init__(*args, **kwargs)
self.setDeviceName(devicename)
self.smallGUI = True
self._last_value = 0
self._jump_allowed = True
self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)
def __openPort(self, portname=default_device):
self.portname = portname
try:
self._serial_port = serial.Serial(self.portname, baudrate=
SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,
timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)
self._serial_port.dtr = True
self._serial_port.rts = False
return True
except Exception:
tb = traceback.format_exc()
logging.debug(tb)
return False
def _updateT(self):
valid, value, unit = self._get_data()
if unit == 'V':
datanames = ['Spannung']
elif unit == 'A':
datanames = ['Strom']
elif unit == 'Ohm':
datanames = ['Widerstand']
elif unit == '°C':
datanames = ['Temperatur']
elif unit == 'F':
datanames = ['Kapazität']
elif unit == 'Hz':
datanames = ['Frequenz']
else:
datanames = [unit]
if valid:
if abs(self._last_value - value) >= 2 and not self._jump_allowed:
self._jump_allowed = True
else:
self.stream(y=[value], snames=datanames, unit=unit)
self._jump_allowed = False
self._last_value = value
def loadGUI(self):
self.widget = QtWidgets.QWidget()
packagedir = self.getDir(__file__)
uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self
.widget)
self.widget.pushButton.clicked.connect(self.__openPortCallback)
self.__openPortCallback()
return self.widget
def __openPortCallback(self):
if self.run:
self.cancel()
self.widget.pushButton.setText('Verbinden')
else:
port = self.widget.comboBox.currentText()
if self.__openPort(port):
self.start()
self.widget.pushButton.setText('Beenden')
else:
self.cancel()
self.widget.pushButton.setText('Fehler')
def _get_data(self):
test = self._serial_port.read(1)
if len(test) != 1:
            logging.error('received incomplete data, skipping...')
return False, None, None
if MultimeterMessage.check_first_byte(test[0]):
data = test + self._serial_port.read(MultimeterMessage.
MESSAGE_LENGTH - 1)
else:
            logging.error('received incorrect data (%s), skipping...' % test.hex())
return False, None, None
if len(data) != MultimeterMessage.MESSAGE_LENGTH:
            logging.error('received incomplete message (%s), skipping...' % data.hex())
return False, None, None
try:
message = MultimeterMessage(data)
except ValueError as e:
logging.debug(e)
logging.error('Error decoding: %s on message %s' % (str(e),
data.hex()))
return False, None, None
return True, round(message.value * message.multiplier, 10
), message.base_unit
if __name__ == '__main__':
standalone = Plugin()
standalone.setup()
| try:
from LoggerPlugin import LoggerPlugin
except ImportError:
from RTOC.LoggerPlugin import LoggerPlugin
from .holdPeak_VC820.vc820py.vc820 import MultimeterMessage
import serial
import sys
import traceback
from PyQt5 import uic
from PyQt5 import QtWidgets
import logging as log
log.basicConfig(level=log.INFO)
logging = log.getLogger(__name__)
devicename = "HoldPeak"
default_device = 'COM7'
SERIAL_BAUDRATE = 2400
SERIAL_BYTESIZE = 8
SERIAL_TIMEOUT = 1
SAMPLERATE = 1
class Plugin(LoggerPlugin):
"""
    Records the measurement data of a HoldPeak VC820 multimeter
"""
def __init__(self, *args, **kwargs):
# Plugin setup
super(Plugin, self).__init__(*args, **kwargs)
self.setDeviceName(devicename)
self.smallGUI = True
self._last_value = 0
self._jump_allowed = True
# Data-logger thread
self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)
# self.updater.start()
def __openPort(self, portname=default_device):
# Communication setup
#self.portname = "/dev/ttyUSB0"
#self.portname = "COM7"
self.portname = portname
#################################################################################
# os.system("sudo chmod a+rw /dev/ttyUSB0")
# #######
# uncomment this line if you do not set device rules:
# > sudo nano /etc/udev/rules.d/50-myusb.rules
# > * SUBSYSTEMS=="usb", ATTRS{idVendor}=="067b", ATTRS{idProduct}=="2303", GROUP="users", MODE="0666"
        # > [Ctrl+O, Ctrl+X]
# > sudo udevadm control --reload
# Ref: http://ask.xmodulo.com/change-usb-device-permission-linux.html
#################################################################################
try:
self._serial_port = serial.Serial(
self.portname, baudrate=SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE, timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)
# dtr and rts settings required for adapter
self._serial_port.dtr = True
self._serial_port.rts = False
# -------------
return True
except Exception:
tb = traceback.format_exc()
logging.debug(tb)
return False
# THIS IS YOUR THREAD
def _updateT(self):
valid, value, unit = self._get_data()
if unit == "V":
datanames = ["Spannung"]
elif unit == "A":
datanames = ["Strom"]
elif unit == "Ohm":
datanames = ["Widerstand"]
elif unit == "°C":
datanames = ["Temperatur"]
elif unit == "F":
datanames = ["Kapazität"]
elif unit == "Hz":
datanames = ["Frequenz"]
else:
datanames = [unit]
if valid:
if abs(self._last_value-value) >= 2 and not self._jump_allowed:
self._jump_allowed = True
else:
self.stream(y=[value], snames=datanames, unit=unit)
self._jump_allowed = False
self._last_value = value
def loadGUI(self):
self.widget = QtWidgets.QWidget()
packagedir = self.getDir(__file__)
uic.loadUi(packagedir+"/holdPeak_VC820/portSelectWidget.ui", self.widget)
# self.setCallbacks()
self.widget.pushButton.clicked.connect(self.__openPortCallback)
self.__openPortCallback()
return self.widget
def __openPortCallback(self):
if self.run:
self.cancel()
self.widget.pushButton.setText("Verbinden")
else:
port = self.widget.comboBox.currentText()
if self.__openPort(port):
self.start()
self.widget.pushButton.setText("Beenden")
else:
self.cancel()
self.widget.pushButton.setText("Fehler")
def _get_data(self):
test = self._serial_port.read(1)
if len(test) != 1:
logging.error("recieved incomplete data, skipping...", file=sys.stderr)
return False, None, None
if MultimeterMessage.check_first_byte(test[0]):
data = test + self._serial_port.read(MultimeterMessage.MESSAGE_LENGTH-1)
else:
logging.error("received incorrect data (%s), skipping..." % test.hex(), file=sys.stderr)
return False, None, None
if len(data) != MultimeterMessage.MESSAGE_LENGTH:
logging.error("received incomplete message (%s), skipping..." %
data.hex(), file=sys.stderr)
return False, None, None
try:
message = MultimeterMessage(data)
#message.value = message.get_base_reading()
except ValueError as e:
logging.debug(e)
logging.error("Error decoding: %s on message %s" % (str(e), data.hex()))
return False, None, None
# logging.debug(str(message))
# return True, message.value, message.unit
return True, round(message.value*message.multiplier, 10), message.base_unit
if __name__ == "__main__":
standalone = Plugin()
standalone.setup()
| [
5,
8,
10,
11,
12
] |
727 | 776470546585257bf06073e2d894e8a04cf2376d | """
Duck typing
Ref: http://www.voidspace.org.uk/python/articles/duck_typing.shtml
"""
##########
# mathematical operator (syntactic sugar)
print 3 + 3
# same as >>>
print int.__add__(3, 3)
# <<<
# overload '+' operator
class Klass1(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __add__(self, other):
return self.a - other.b
class Klass2(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __add__(self, other):
return self.b - other.a
obj1 = Klass1(1, 2)
obj2 = Klass2(10, 20)
print obj1 + obj2
# same as >>>
print obj1.__add__(obj2)
# <<<
##########
# data access for sequence type objects (list, tuple) and mapping type objects (dict)
# (syntactic sugar)
a = [0,1,2]
print a[0]
# same as >>>
print list.__getitem__(a, 0)
# <<<
b = {'a':0, 'b':1}
print b['a']
# same as >>>
print dict.__getitem__(b, 'a')
# <<<
##########
# function call
# callable() checks whether a var has a __call__ attr.
def f(arg):
print arg
f(123)
# >>> 123
# same as >>>
f.__call__(123)
# >>> 123
# <<<
# 'Duck typing' happens because when we do var['member'] Python doesn't care what type of object var is.
# All it cares about is whether the call to its __getitem__ method returns anything sensible. If not - an error will be raised. Something like TypeError: Unsubscriptable object..
# This means you can create your own classes that have their own internal data structures - but are accessed using normal Python syntax. This is awfully convenient.
# isinstance(object, dict) returns True if object is a dictionary - or an instance of a subclass of dict.
# Instead of:
#
# if isinstance(object, dict):
# value = object[member]
#
# it is considered more pythonic to do :
#
# try:
# value = object[member]
# except TypeError:
# # do something else
#
# Our example above could become :
#
# if hasattr(object, 'keys'):
# value = object[member]
#
| null | null | null | null | [
0
] |
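The closing comments of row 727 recommend the try/except (EAFP) style over isinstance checks. A small sketch of that duck-typed lookup, ported to Python 3 for illustration (the article's own examples use Python 2 print statements):
# duck-typed member access in the EAFP style recommended above (Python 3)
def get_member(obj, member):
    try:
        return obj[member]            # any object with a sensible __getitem__ will do
    except TypeError:
        return None                   # obj is not subscriptable - fall back
print(get_member({'a': 0}, 'a'))      # 0    - dict
print(get_member([10, 20, 30], 1))    # 20   - list
print(get_member(42, 0))              # None - int has no __getitem__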
728 | 60b5e515c7275bfa0f79e22f54302a578c2f7b79 | <mask token>
| <mask token>
def find_square_sum(num):
_sum = 0
while num > 0:
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
<mask token>
| def find_happy_number(num):
slow, fast = num, num
while True:
slow = find_square_sum(slow)
fast = find_square_sum(find_square_sum(fast))
if slow == fast:
break
return slow == 1
def find_square_sum(num):
_sum = 0
while num > 0:
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
<mask token>
| def find_happy_number(num):
slow, fast = num, num
while True:
slow = find_square_sum(slow)
fast = find_square_sum(find_square_sum(fast))
if slow == fast:
break
return slow == 1
def find_square_sum(num):
_sum = 0
while num > 0:
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
print(find_happy_number(23))
print(find_happy_number(12))
| def find_happy_number(num):
slow, fast = num, num
while True:
slow = find_square_sum(slow) # move one step
fast = find_square_sum(find_square_sum(fast)) # move two steps
if slow == fast: # found the cycle
break
return slow == 1 # see if the cycle is stuck on the number '1'
def find_square_sum(num):
_sum = 0
while (num > 0):
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
print(find_happy_number(23))
print(find_happy_number(12)) | [
0,
1,
2,
3,
4
] |
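Row 728 is Floyd's slow/fast-pointer cycle detection applied to the digit-square chain. As an illustrative worked trace (not part of the dataset row), the chain for 12 re-enters a cycle at 89, which is why find_happy_number(12) prints False:
# trace of the digit-square chain for 12, showing the cycle Floyd's algorithm detects
def find_square_sum(num):
    _sum = 0
    while num > 0:
        _sum += (num % 10) ** 2
        num //= 10
    return _sum
n, seen = 12, []
while n not in seen:
    seen.append(n)
    n = find_square_sum(n)
print(seen)  # [12, 5, 25, 29, 85, 89, 145, 42, 20, 4, 16, 37, 58]
print(n)     # 89 - the chain repeats here, so 12 is not a happy number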
729 | af1a6c6009b21962228fbe737f27c22bf9460762 | <mask token>
| <mask token>
received_event = Event()
leave_rooms_event = Event()
exit_event = Event()
output_message_queue = AGQueue()
input_message_queue = AGQueue()
matrix_to_aio_queue = AGQueue()
aio_to_matrix_queue = AGQueue()
sync_to_matrix_queue = Queue()
SERVER_URL = 'https://transport.transport01.raiden.network'
| from gevent.event import Event
from gevent.queue import Queue
from ping_pong_chat.aio_queue import AGQueue
received_event = Event()
leave_rooms_event = Event()
exit_event = Event()
output_message_queue = AGQueue()
input_message_queue = AGQueue()
matrix_to_aio_queue = AGQueue()
aio_to_matrix_queue = AGQueue()
sync_to_matrix_queue = Queue()
SERVER_URL = 'https://transport.transport01.raiden.network'
| from gevent.event import Event
from gevent.queue import Queue
from ping_pong_chat.aio_queue import AGQueue
received_event = Event()
leave_rooms_event = Event()
exit_event = Event()
output_message_queue = AGQueue()
input_message_queue = AGQueue()
matrix_to_aio_queue = AGQueue()
aio_to_matrix_queue = AGQueue()
sync_to_matrix_queue = Queue()
SERVER_URL = "https://transport.transport01.raiden.network"
| null | [
0,
1,
2,
3
] |
730 | ed80f5f898548ca012779543051ccff5b34e4fcc | <mask token>
def getdata(url):
data = requests.get(url)
return data.text
def get_json():
file_path = staticfiles_storage.path('coordinates.json')
with open(file_path, 'r') as f:
data = json.load(f)
html_doc = getdata('https://www.mygov.in/covid-19')
soup = BeautifulSoup(html_doc, 'html.parser')
k = soup.tbody.get_text()
k = ('\n' + k).split('\n\n')[1:-1]
datarow = {}
for index, item in enumerate(k):
value = item.split('\n')
datarow[value[1].lower()] = {'coordinates': list(data.values())[
index], 'confirmed': value[2], 'active': value[3], 'recovered':
value[4], 'deceased': value[5]}
return datarow
<mask token>
| <mask token>
def getdata(url):
data = requests.get(url)
return data.text
def get_json():
file_path = staticfiles_storage.path('coordinates.json')
with open(file_path, 'r') as f:
data = json.load(f)
html_doc = getdata('https://www.mygov.in/covid-19')
soup = BeautifulSoup(html_doc, 'html.parser')
k = soup.tbody.get_text()
k = ('\n' + k).split('\n\n')[1:-1]
datarow = {}
for index, item in enumerate(k):
value = item.split('\n')
datarow[value[1].lower()] = {'coordinates': list(data.values())[
index], 'confirmed': value[2], 'active': value[3], 'recovered':
value[4], 'deceased': value[5]}
return datarow
<mask token>
@api_view(['GET'])
def coordinates(request):
url = (
'https://raw.githubusercontent.com/namantam1/indian_coordinated/master/india.json'
)
resp = requests.get(url)
data = json.loads(resp.text)
return Response(data, status=status.HTTP_200_OK)
| <mask token>
def getdata(url):
data = requests.get(url)
return data.text
def get_json():
file_path = staticfiles_storage.path('coordinates.json')
with open(file_path, 'r') as f:
data = json.load(f)
html_doc = getdata('https://www.mygov.in/covid-19')
soup = BeautifulSoup(html_doc, 'html.parser')
k = soup.tbody.get_text()
k = ('\n' + k).split('\n\n')[1:-1]
datarow = {}
for index, item in enumerate(k):
value = item.split('\n')
datarow[value[1].lower()] = {'coordinates': list(data.values())[
index], 'confirmed': value[2], 'active': value[3], 'recovered':
value[4], 'deceased': value[5]}
return datarow
@api_view(['GET'])
@authentication_classes([TokenAuthentication])
@permission_classes([IsAuthenticated])
def get_map_josn(request):
"""
    Return the scraped COVID-19 map data as JSON.
"""
if request.method == 'GET':
data = get_json()
        print('Responded')
return Response(data, status=status.HTTP_200_OK)
@api_view(['GET'])
def coordinates(request):
url = (
'https://raw.githubusercontent.com/namantam1/indian_coordinated/master/india.json'
)
resp = requests.get(url)
data = json.loads(resp.text)
return Response(data, status=status.HTTP_200_OK)
| from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
import json
from rest_framework.response import Response
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework import status
from django.contrib.staticfiles.storage import staticfiles_storage
from user.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
def getdata(url):
data = requests.get(url)
return data.text
def get_json():
file_path = staticfiles_storage.path('coordinates.json')
with open(file_path, 'r') as f:
data = json.load(f)
html_doc = getdata('https://www.mygov.in/covid-19')
soup = BeautifulSoup(html_doc, 'html.parser')
k = soup.tbody.get_text()
k = ('\n' + k).split('\n\n')[1:-1]
datarow = {}
for index, item in enumerate(k):
value = item.split('\n')
datarow[value[1].lower()] = {'coordinates': list(data.values())[
index], 'confirmed': value[2], 'active': value[3], 'recovered':
value[4], 'deceased': value[5]}
return datarow
@api_view(['GET'])
@authentication_classes([TokenAuthentication])
@permission_classes([IsAuthenticated])
def get_map_josn(request):
"""
    Return the scraped COVID-19 map data as JSON.
"""
if request.method == 'GET':
data = get_json()
        print('Responded')
return Response(data, status=status.HTTP_200_OK)
@api_view(['GET'])
def coordinates(request):
url = (
'https://raw.githubusercontent.com/namantam1/indian_coordinated/master/india.json'
)
resp = requests.get(url)
data = json.loads(resp.text)
return Response(data, status=status.HTTP_200_OK)
| from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
import json
from rest_framework.response import Response
from rest_framework.decorators import api_view,authentication_classes,permission_classes
from rest_framework import status
from django.contrib.staticfiles.storage import staticfiles_storage
from user.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
# Create your views here.
def getdata(url):
data = requests.get(url)
return data.text
def get_json():
file_path = staticfiles_storage.path('coordinates.json')
with open(file_path,'r') as f:
data = json.load(f)
html_doc = getdata("https://www.mygov.in/covid-19")
soup = BeautifulSoup(html_doc, 'html.parser')
k = soup.tbody.get_text()
k = (("\n"+k).split("\n\n"))[1:-1]
datarow = {}
for index,item in enumerate(k):
value = item.split("\n")
datarow[value[1].lower()] = {
'coordinates':list(data.values())[index],
'confirmed':value[2],
'active':value[3],
'recovered':value[4],
'deceased':value[5]
}
return datarow
@api_view(['GET'])
@authentication_classes([TokenAuthentication])
@permission_classes([IsAuthenticated])
def get_map_josn(request):
"""
    Return the scraped COVID-19 map data as JSON.
"""
if request.method == 'GET':
data = get_json()
        print('Responded')
return Response(data,status=status.HTTP_200_OK)
@api_view(['GET'])
def coordinates(request):
url = 'https://raw.githubusercontent.com/namantam1/indian_coordinated/master/india.json'
resp = requests.get(url)
data = json.loads(resp.text)
return Response(data,status=status.HTTP_200_OK) | [
2,
3,
4,
5,
6
] |
731 | 53fae0103168f4074ba0645c33e4640fcefdfc96 | <mask token>
def decode_page(page_bytes, charsets=('utf-8',)):
"""通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)"""
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
"""获取页面的HTML代码(通过递归实现指定次数的重试操作)"""
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
"""从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)"""
soup = BeautifulSoup(page_html, 'html.parser')
for h1 in soup.find_all('h1'):
return h1.get_text()
def get_link_list(page_html):
soup = BeautifulSoup(page_html, 'html.parser')
list = []
for a_link in soup.find_all('a'):
link = a_link['href']
if 'https://' in link or 'http://' in link:
list.append(link)
return list
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
"""开始执行爬虫程序并对指定的数据进行持久化操作"""
conn = pymysql.connect(host='localhost', port=3306, user='root',
password='Huhaohao@123', charset='utf8')
with conn.cursor() as cursor:
cursor.execute('use crawler')
cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +
'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')
try:
with conn.cursor() as cursor:
url_list = [seed_url]
visited_url_list = {seed_url: 0}
while url_list:
current_url = url_list.pop(0)
depth = visited_url_list[current_url]
if depth != max_depth:
page_html = get_page_html(current_url, charsets=(
'utf-8', 'gbk', 'gb2312'))
links_list = get_link_list(page_html)
param_list = []
for link in links_list:
if link not in visited_url_list:
visited_url_list[link] = depth + 1
page_html = get_page_html(link, charsets=(
'utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html,
'<h1>(.*)<span')
if headings:
param_list.append((headings, link))
cursor.executemany(
'insert into tb_result(title, link) values(%s, %s)',
param_list)
conn.commit()
except Error:
pass
finally:
conn.close()
<mask token>
| <mask token>
def decode_page(page_bytes, charsets=('utf-8',)):
"""通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)"""
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
"""获取页面的HTML代码(通过递归实现指定次数的重试操作)"""
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
"""从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)"""
soup = BeautifulSoup(page_html, 'html.parser')
for h1 in soup.find_all('h1'):
return h1.get_text()
def get_link_list(page_html):
soup = BeautifulSoup(page_html, 'html.parser')
list = []
for a_link in soup.find_all('a'):
link = a_link['href']
if 'https://' in link or 'http://' in link:
list.append(link)
return list
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
"""开始执行爬虫程序并对指定的数据进行持久化操作"""
conn = pymysql.connect(host='localhost', port=3306, user='root',
password='Huhaohao@123', charset='utf8')
with conn.cursor() as cursor:
cursor.execute('use crawler')
cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +
'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')
try:
with conn.cursor() as cursor:
url_list = [seed_url]
visited_url_list = {seed_url: 0}
while url_list:
current_url = url_list.pop(0)
depth = visited_url_list[current_url]
if depth != max_depth:
page_html = get_page_html(current_url, charsets=(
'utf-8', 'gbk', 'gb2312'))
links_list = get_link_list(page_html)
param_list = []
for link in links_list:
if link not in visited_url_list:
visited_url_list[link] = depth + 1
page_html = get_page_html(link, charsets=(
'utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html,
'<h1>(.*)<span')
if headings:
param_list.append((headings, link))
cursor.executemany(
'insert into tb_result(title, link) values(%s, %s)',
param_list)
conn.commit()
except Error:
pass
finally:
conn.close()
def main():
"""主函数"""
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
'<a[^>]*href=["\\\'](.*?)["\\\']', max_depth=2)
<mask token>
| <mask token>
def decode_page(page_bytes, charsets=('utf-8',)):
"""通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)"""
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
"""获取页面的HTML代码(通过递归实现指定次数的重试操作)"""
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
"""从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)"""
soup = BeautifulSoup(page_html, 'html.parser')
for h1 in soup.find_all('h1'):
return h1.get_text()
def get_link_list(page_html):
soup = BeautifulSoup(page_html, 'html.parser')
list = []
for a_link in soup.find_all('a'):
link = a_link['href']
if 'https://' in link or 'http://' in link:
list.append(link)
return list
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
"""开始执行爬虫程序并对指定的数据进行持久化操作"""
conn = pymysql.connect(host='localhost', port=3306, user='root',
password='Huhaohao@123', charset='utf8')
with conn.cursor() as cursor:
cursor.execute('use crawler')
cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +
'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')
try:
with conn.cursor() as cursor:
url_list = [seed_url]
visited_url_list = {seed_url: 0}
while url_list:
current_url = url_list.pop(0)
depth = visited_url_list[current_url]
if depth != max_depth:
page_html = get_page_html(current_url, charsets=(
'utf-8', 'gbk', 'gb2312'))
links_list = get_link_list(page_html)
param_list = []
for link in links_list:
if link not in visited_url_list:
visited_url_list[link] = depth + 1
page_html = get_page_html(link, charsets=(
'utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html,
'<h1>(.*)<span')
if headings:
param_list.append((headings, link))
cursor.executemany(
'insert into tb_result(title, link) values(%s, %s)',
param_list)
conn.commit()
except Error:
pass
finally:
conn.close()
def main():
"""主函数"""
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
'<a[^>]*href=["\\\'](.*?)["\\\']', max_depth=2)
if __name__ == '__main__':
main()
| from urllib.error import URLError
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import pymysql
import ssl
from pymysql import Error
def decode_page(page_bytes, charsets=('utf-8',)):
"""通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)"""
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
"""获取页面的HTML代码(通过递归实现指定次数的重试操作)"""
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
"""从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)"""
soup = BeautifulSoup(page_html, 'html.parser')
for h1 in soup.find_all('h1'):
return h1.get_text()
def get_link_list(page_html):
soup = BeautifulSoup(page_html, 'html.parser')
list = []
for a_link in soup.find_all('a'):
link = a_link['href']
if 'https://' in link or 'http://' in link:
list.append(link)
return list
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
"""开始执行爬虫程序并对指定的数据进行持久化操作"""
conn = pymysql.connect(host='localhost', port=3306, user='root',
password='Huhaohao@123', charset='utf8')
with conn.cursor() as cursor:
cursor.execute('use crawler')
cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +
'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')
try:
with conn.cursor() as cursor:
url_list = [seed_url]
visited_url_list = {seed_url: 0}
while url_list:
current_url = url_list.pop(0)
depth = visited_url_list[current_url]
if depth != max_depth:
page_html = get_page_html(current_url, charsets=(
'utf-8', 'gbk', 'gb2312'))
links_list = get_link_list(page_html)
param_list = []
for link in links_list:
if link not in visited_url_list:
visited_url_list[link] = depth + 1
page_html = get_page_html(link, charsets=(
'utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html,
'<h1>(.*)<span')
if headings:
param_list.append((headings, link))
cursor.executemany(
'insert into tb_result(title, link) values(%s, %s)',
param_list)
conn.commit()
except Error:
pass
finally:
conn.close()
def main():
"""主函数"""
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
'<a[^>]*href=["\\\'](.*?)["\\\']', max_depth=2)
if __name__ == '__main__':
main()
| from urllib.error import URLError
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import pymysql
import ssl
from pymysql import Error
def decode_page(page_bytes, charsets=('utf-8',)):
"""通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)"""
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
# logging.error('Decode:', error)
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
"""获取页面的HTML代码(通过递归实现指定次数的重试操作)"""
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
# logging.error('URL:', error)
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
"""从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)"""
soup = BeautifulSoup(page_html, 'html.parser')
for h1 in soup.find_all('h1'):
return h1.get_text()
def get_link_list(page_html):
soup = BeautifulSoup(page_html, 'html.parser')
list = []
for a_link in soup.find_all('a'):
link = a_link['href']
if ('https://' in link) or ('http://' in link):
list.append(link)
# print(page_html)
#print(list)
return list
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
"""开始执行爬虫程序并对指定的数据进行持久化操作"""
# conn = pymysql.connect(host='localhost', port=3306,
# database='crawler', user='root',
# password='Huhaohao@123', charset='utf8')
conn = pymysql.connect(host='localhost', port=3306,
user='root', password='Huhaohao@123', charset='utf8')
with conn.cursor() as cursor:
#cursor.execute("create database crawler if not exists;")
cursor.execute('use crawler')
cursor.execute(
"CREATE TABLE IF NOT EXISTS tb_result " +
"(" +
"title TEXT NOT NULL," +
"link TEXT NOT NULL" +
")"
)
try:
with conn.cursor() as cursor:
url_list = [seed_url]
            # the dict below avoids re-crawling the same URL and tracks crawl depth
visited_url_list = {seed_url: 0}
while url_list:
current_url = url_list.pop(0)
depth = visited_url_list[current_url]
if depth != max_depth:
                    # try decoding the page with the utf-8/gbk/gb2312 charsets
page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
links_list = get_link_list(page_html)
param_list = []
for link in links_list:
if link not in visited_url_list:
visited_url_list[link] = depth + 1
page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html, r'<h1>(.*)<span')
if headings:
param_list.append((headings, link))
cursor.executemany('insert into tb_result(title, link) values(%s, %s)',
param_list)
conn.commit()
except Error:
pass
# logging.error('SQL:', error)
finally:
conn.close()
def main():
"""主函数"""
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
r'<a[^>]*href=["\'](.*?)["\']',
max_depth=2)
if __name__ == '__main__':
main()
| [
5,
6,
7,
8,
9
] |
732 | 45fcafdd30f890ddf5eaa090152fde2e2da4dbef | <mask token>
| <mask token>
print(t)
print(t[:2])
print(t[1:])
<mask token>
print(t2)
print(t3)
print(t2 + t3)
| t = '코스모스', '민들레', '국화'
print(t)
print(t[:2])
print(t[1:])
t2 = 1, 2, 3
t3 = 4,
print(t2)
print(t3)
print(t2 + t3)
 | # tuple - similar in structure to a list
# elements cannot be changed or deleted.
t = ('코스모스', '민들레', '국화')
print(t)
print(t[:2])
print(t[1:])
#del t[0] - deletion is not allowed
#t[2] ="매화" - assignment is not allowed
t2 = (1, 2, 3)
t3 = (4,) # one-element tuple (attach the comma)
print(t2)
print(t3)
print(t2 + t3) # adding (concatenating) the tuples
| null | [
0,
1,
2,
3
] |
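Row 732's comments state that tuple elements can be neither reassigned nor deleted. A short sketch of the TypeError CPython raises for the operations commented out in that row:
# the errors behind the commented-out lines #del t[0] and #t[2] = ... above
t = ('코스모스', '민들레', '국화')
try:
    t[2] = '매화'
except TypeError as e:
    print(e)  # 'tuple' object does not support item assignment
try:
    del t[0]
except TypeError as e:
    print(e)  # 'tuple' object doesn't support item deletion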
733 | 73e7e43e9cfb3c0884480809bc03ade687d641d6 | <mask token>
def getScale(NumFrame, t_gt, seq_num):
txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.
format(seq_num))
x_prev = float(t_gt[0])
y_prev = float(t_gt[1])
z_prev = float(t_gt[2])
line = txt_file.readlines()
line_sp = line[NumFrame].split(' ')
x = float(line_sp[3])
y = float(line_sp[7])
z = float(line_sp[11])
t_gt[0] = x
t_gt[1] = y
t_gt[2] = z
txt_file.close()
scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2
)
return scale, t_gt
<mask token>
| <mask token>
def getScale(NumFrame, t_gt, seq_num):
txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.
format(seq_num))
x_prev = float(t_gt[0])
y_prev = float(t_gt[1])
z_prev = float(t_gt[2])
line = txt_file.readlines()
line_sp = line[NumFrame].split(' ')
x = float(line_sp[3])
y = float(line_sp[7])
z = float(line_sp[11])
t_gt[0] = x
t_gt[1] = y
t_gt[2] = z
txt_file.close()
scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2
)
return scale, t_gt
if __name__ == '__main__':
MAX_FRAME = 1000
SEQ_NUM = 2
focal = 718.856
pp = 607.1928, 185.2157
textOrg1 = 10, 30
textOrg2 = 10, 80
textOrg3 = 10, 130
img_1_c = cv2.imread(
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'
.format(SEQ_NUM))
img_2_c = cv2.imread(
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'
.format(SEQ_NUM))
img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)
img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)
kp1, des1 = orb.detectAndCompute(img_1, None)
kp2, des2 = orb.detectAndCompute(img_2, None)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp1[i.queryIdx].pt)
pts2.append(kp2[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=
cv2.RANSAC, prob=0.999, threshold=1.0)
_, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)
R_f_seg = R_f
t_f_seg = t_f
t_gt = np.zeros((3, 1), dtype=np.float64)
prevImage = img_2
kp_prev = kp2
des_prev = des2
traj = np.zeros((1000, 2000), dtype=np.uint8)
traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)
rmse_total = 0
for numFrame in range(2, MAX_FRAME):
filename = (
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'
.format(SEQ_NUM, numFrame))
currImage_c = cv2.imread(filename)
currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)
kp_curr, des_curr = orb.detectAndCompute(currImage, None)
matches = bf.match(des_prev, des_curr)
matches = sorted(matches, key=lambda x: x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp_prev[i.queryIdx].pt)
pts2.append(kp_curr[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,
method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)
abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)
t_f = t_f + abs_scale * R_f.dot(t)
R_f = R.dot(R_f)
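        # accumulate the global pose: the unit-norm translation from
        # recoverPose is rescaled by the ground-truth step length, then the
        # frame-to-frame rotation is chained onto the running estimate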
error = map(operator.sub, t_gt, t_f)
error_sum_square = sum(map(lambda x: x * x, error))
rmse = math.sqrt(error_sum_square / 3)
rmse_total = rmse_total + rmse
print('rmse = ', rmse_total / numFrame)
prevImage = currImage
kp_prev = kp_curr
des_prev = des_curr
x_gt = int(t_gt[0]) + 1000
y_gt = int(t_gt[2]) + 100
x = int(t_f[0]) + 1000
y = int(t_f[2]) + 100
cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)
cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)
cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)
text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.
format(float(t_f[0]), float(t_f[1]), float(t_f[2])))
cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,
255, 255), 1, 8)
text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.
format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))
cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,
255, 255), 1, 8)
feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)
cv2.imshow('trajectory', traj)
cv2.imshow('feat_img', feature_img)
cv2.waitKey(1)
cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)
| <mask token>
orb = cv2.cv2.ORB_create(nfeatures=5000, scaleFactor=1.2, nlevels=8,
edgeThreshold=31, firstLevel=0, WTA_K=2, scoreType=cv2.ORB_FAST_SCORE,
patchSize=31, fastThreshold=25)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
def getScale(NumFrame, t_gt, seq_num):
txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.
format(seq_num))
x_prev = float(t_gt[0])
y_prev = float(t_gt[1])
z_prev = float(t_gt[2])
line = txt_file.readlines()
line_sp = line[NumFrame].split(' ')
x = float(line_sp[3])
y = float(line_sp[7])
z = float(line_sp[11])
t_gt[0] = x
t_gt[1] = y
t_gt[2] = z
txt_file.close()
scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2
)
return scale, t_gt
if __name__ == '__main__':
MAX_FRAME = 1000
SEQ_NUM = 2
focal = 718.856
pp = 607.1928, 185.2157
textOrg1 = 10, 30
textOrg2 = 10, 80
textOrg3 = 10, 130
img_1_c = cv2.imread(
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'
.format(SEQ_NUM))
img_2_c = cv2.imread(
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'
.format(SEQ_NUM))
img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)
img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)
kp1, des1 = orb.detectAndCompute(img_1, None)
kp2, des2 = orb.detectAndCompute(img_2, None)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp1[i.queryIdx].pt)
pts2.append(kp2[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=
cv2.RANSAC, prob=0.999, threshold=1.0)
_, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)
R_f_seg = R_f
t_f_seg = t_f
t_gt = np.zeros((3, 1), dtype=np.float64)
prevImage = img_2
kp_prev = kp2
des_prev = des2
traj = np.zeros((1000, 2000), dtype=np.uint8)
traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)
rmse_total = 0
for numFrame in range(2, MAX_FRAME):
filename = (
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'
.format(SEQ_NUM, numFrame))
currImage_c = cv2.imread(filename)
currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)
kp_curr, des_curr = orb.detectAndCompute(currImage, None)
matches = bf.match(des_prev, des_curr)
matches = sorted(matches, key=lambda x: x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp_prev[i.queryIdx].pt)
pts2.append(kp_curr[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,
method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)
abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)
t_f = t_f + abs_scale * R_f.dot(t)
R_f = R.dot(R_f)
error = map(operator.sub, t_gt, t_f)
error_sum_square = sum(map(lambda x: x * x, error))
rmse = math.sqrt(error_sum_square / 3)
rmse_total = rmse_total + rmse
print('rmse = ', rmse_total / numFrame)
prevImage = currImage
kp_prev = kp_curr
des_prev = des_curr
x_gt = int(t_gt[0]) + 1000
y_gt = int(t_gt[2]) + 100
x = int(t_f[0]) + 1000
y = int(t_f[2]) + 100
cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)
cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)
cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)
text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.
format(float(t_f[0]), float(t_f[1]), float(t_f[2])))
cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,
255, 255), 1, 8)
text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.
format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))
cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,
255, 255), 1, 8)
feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)
cv2.imshow('trajectory', traj)
cv2.imshow('feat_img', feature_img)
cv2.waitKey(1)
cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)
| from os import wait
import cv2
import numpy as np
import math
import sys
import types
import operator
orb = cv2.cv2.ORB_create(nfeatures=5000, scaleFactor=1.2, nlevels=8,
edgeThreshold=31, firstLevel=0, WTA_K=2, scoreType=cv2.ORB_FAST_SCORE,
patchSize=31, fastThreshold=25)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
def getScale(NumFrame, t_gt, seq_num):
txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.
format(seq_num))
x_prev = float(t_gt[0])
y_prev = float(t_gt[1])
z_prev = float(t_gt[2])
line = txt_file.readlines()
line_sp = line[NumFrame].split(' ')
x = float(line_sp[3])
y = float(line_sp[7])
z = float(line_sp[11])
t_gt[0] = x
t_gt[1] = y
t_gt[2] = z
txt_file.close()
scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2
)
return scale, t_gt
if __name__ == '__main__':
MAX_FRAME = 1000
SEQ_NUM = 2
focal = 718.856
pp = 607.1928, 185.2157
textOrg1 = 10, 30
textOrg2 = 10, 80
textOrg3 = 10, 130
img_1_c = cv2.imread(
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'
.format(SEQ_NUM))
img_2_c = cv2.imread(
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'
.format(SEQ_NUM))
img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)
img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)
kp1, des1 = orb.detectAndCompute(img_1, None)
kp2, des2 = orb.detectAndCompute(img_2, None)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp1[i.queryIdx].pt)
pts2.append(kp2[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=
cv2.RANSAC, prob=0.999, threshold=1.0)
_, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)
R_f_seg = R_f
t_f_seg = t_f
t_gt = np.zeros((3, 1), dtype=np.float64)
prevImage = img_2
kp_prev = kp2
des_prev = des2
traj = np.zeros((1000, 2000), dtype=np.uint8)
traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)
rmse_total = 0
for numFrame in range(2, MAX_FRAME):
filename = (
'/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'
.format(SEQ_NUM, numFrame))
currImage_c = cv2.imread(filename)
currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)
kp_curr, des_curr = orb.detectAndCompute(currImage, None)
matches = bf.match(des_prev, des_curr)
matches = sorted(matches, key=lambda x: x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp_prev[i.queryIdx].pt)
pts2.append(kp_curr[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,
method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)
abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)
t_f = t_f + abs_scale * R_f.dot(t)
R_f = R.dot(R_f)
error = map(operator.sub, t_gt, t_f)
error_sum_square = sum(map(lambda x: x * x, error))
rmse = math.sqrt(error_sum_square / 3)
rmse_total = rmse_total + rmse
print('rmse = ', rmse_total / numFrame)
prevImage = currImage
kp_prev = kp_curr
des_prev = des_curr
x_gt = int(t_gt[0]) + 1000
y_gt = int(t_gt[2]) + 100
x = int(t_f[0]) + 1000
y = int(t_f[2]) + 100
cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)
cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)
cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)
text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.
format(float(t_f[0]), float(t_f[1]), float(t_f[2])))
cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,
255, 255), 1, 8)
text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.
format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))
cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,
255, 255), 1, 8)
feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)
cv2.imshow('trajectory', traj)
cv2.imshow('feat_img', feature_img)
cv2.waitKey(1)
cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)
| from os import wait
import cv2
import numpy as np
import math
import sys
import types
import operator
## declare the ORB feature detector and brute-force (Hamming) matcher
orb = cv2.cv2.ORB_create(
nfeatures=5000,
scaleFactor=1.2,
nlevels=8,
edgeThreshold=31,
firstLevel=0,
WTA_K=2,
scoreType=cv2.ORB_FAST_SCORE,
patchSize=31,
fastThreshold=25,
)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
def getScale(NumFrame, t_gt, seq_num):
txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.format(seq_num))
x_prev = float(t_gt[0])
y_prev = float(t_gt[1])
z_prev = float(t_gt[2])
line = txt_file.readlines()
line_sp = line[NumFrame].split(' ')
x = float(line_sp[3])
y = float(line_sp[7])
z = float(line_sp[11])
t_gt[0] = x
t_gt[1] = y
t_gt[2] = z
txt_file.close()
scale = math.sqrt((x-x_prev)**2 + (y-y_prev)**2 + (z-z_prev)**2)
return scale, t_gt
if __name__ == "__main__":
MAX_FRAME = 1000
SEQ_NUM = 2
#Camera intrinsic parameter
focal = 718.8560
pp = (607.1928, 185.2157)
textOrg1 = (10,30)
textOrg2 = (10,80)
textOrg3 = (10,130)
img_1_c = cv2.imread("/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png".format(SEQ_NUM))
img_2_c = cv2.imread("/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png".format(SEQ_NUM))
img_1 = cv2.cvtColor(img_1_c,cv2.COLOR_BGR2GRAY)
img_2 = cv2.cvtColor(img_2_c,cv2.COLOR_BGR2GRAY)
kp1, des1 = orb.detectAndCompute(img_1,None)
kp2, des2 = orb.detectAndCompute(img_2,None)
matches = bf.match(des1,des2)
matches = sorted(matches, key = lambda x:x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp1[i.queryIdx].pt)
pts2.append(kp2[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E, mask = cv2.findEssentialMat(pts1,pts2,focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)
_, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal = focal, pp = pp)
R_f_seg = R_f
t_f_seg = t_f
t_gt = np.zeros((3,1),dtype=np.float64)
prevImage = img_2
kp_prev = kp2
des_prev = des2
traj = np.zeros((1000,2000),dtype=np.uint8)
traj = cv2.cvtColor(traj,cv2.COLOR_GRAY2BGR)
rmse_total = 0
for numFrame in range(2, MAX_FRAME):
filename = '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'.format(SEQ_NUM,numFrame)
currImage_c = cv2.imread(filename)
currImage = cv2.cvtColor(currImage_c,cv2.COLOR_BGR2GRAY)
# feature extraction
kp_curr, des_curr = orb.detectAndCompute(currImage,None)
# feature matching
matches = bf.match(des_prev,des_curr)
matches = sorted(matches, key = lambda x:x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp_prev[i.queryIdx].pt)
pts2.append(kp_curr[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
        # calculate R, t
E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)
_, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal = focal, pp = pp)
# get scale
abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)
# update trajectory
t_f = t_f + abs_scale*R_f.dot(t)
R_f = R.dot(R_f)
        # calculate Error
error = map(operator.sub,t_gt,t_f)
error_sum_square = sum(map(lambda x:x*x,error))
rmse = math.sqrt(error_sum_square/3)
rmse_total = rmse_total + rmse
print("rmse = ",rmse_total/numFrame)
prevImage = currImage
kp_prev = kp_curr
des_prev = des_curr
# visualization
x_gt = int(t_gt[0]) + 1000
y_gt = int(t_gt[2]) + 100
x = int(t_f[0]) + 1000
y = int(t_f[2]) + 100
cv2.circle(traj, (x,y), 1 , (0,0,255), 2)
cv2.circle(traj, (x_gt,y_gt), 1 , (0,255,0), 2)
cv2.rectangle(traj, (10,10), (700,150), (0,0,0), -1)
text1 = 'orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_f[0]),float(t_f[1]),float(t_f[2]))
cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)
text3 = 'gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_gt[0]),float(t_gt[1]),float(t_gt[2]))
cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)
feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)
cv2.imshow("trajectory", traj)
cv2.imshow("feat_img", feature_img)
cv2.waitKey(1)
cv2.imwrite("result_{0:02d}.png".format(SEQ_NUM),traj) | [
1,
2,
3,
4,
5
] |
734 | d9538c030c0225c4255100da70d6bf23f550a64f | <mask token>
class SaleOrderLine(osv.osv):
<mask token>
_inherit = 'sale.order.line'
_columns = {'promotion_line': fields.boolean('Promotion Line', help=
'Indicates if the line was created by promotions')}
<mask token>
| <mask token>
class SaleOrder(osv.osv):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class SaleOrderLine(osv.osv):
"""
Sale Order Line
"""
_inherit = 'sale.order.line'
_columns = {'promotion_line': fields.boolean('Promotion Line', help=
'Indicates if the line was created by promotions')}
<mask token>
| <mask token>
class SaleOrder(osv.osv):
<mask token>
_inherit = 'sale.order'
_columns = {'coupon_code': fields.char('Promo Coupon Code', size=20)}
def apply_promotions(self, cursor, user, ids, context=None):
"""
Applies the promotions to the given records
@param cursor: Database Cursor
@param user: ID of User
@param ids: ID of current record.
@param context: Context(no direct use).
"""
promotions_obj = self.pool.get('promos.rules')
for order_id in ids:
promotions_obj.apply_promotions(cursor, user, order_id, context
=None)
return True
<mask token>
class SaleOrderLine(osv.osv):
"""
Sale Order Line
"""
_inherit = 'sale.order.line'
_columns = {'promotion_line': fields.boolean('Promotion Line', help=
'Indicates if the line was created by promotions')}
<mask token>
| from osv import osv, fields
class SaleOrder(osv.osv):
"""
Sale Order
"""
_inherit = 'sale.order'
_columns = {'coupon_code': fields.char('Promo Coupon Code', size=20)}
def apply_promotions(self, cursor, user, ids, context=None):
"""
Applies the promotions to the given records
@param cursor: Database Cursor
@param user: ID of User
@param ids: ID of current record.
@param context: Context(no direct use).
"""
promotions_obj = self.pool.get('promos.rules')
for order_id in ids:
promotions_obj.apply_promotions(cursor, user, order_id, context
=None)
return True
SaleOrder()
class SaleOrderLine(osv.osv):
"""
Sale Order Line
"""
_inherit = 'sale.order.line'
_columns = {'promotion_line': fields.boolean('Promotion Line', help=
'Indicates if the line was created by promotions')}
SaleOrderLine()
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from osv import osv, fields
class SaleOrder(osv.osv):
'''
Sale Order
'''
_inherit = 'sale.order'
_columns = {
'coupon_code':fields.char('Promo Coupon Code', size=20),
}
def apply_promotions(self, cursor, user, ids, context=None):
"""
Applies the promotions to the given records
@param cursor: Database Cursor
@param user: ID of User
@param ids: ID of current record.
@param context: Context(no direct use).
"""
promotions_obj = self.pool.get('promos.rules')
for order_id in ids:
promotions_obj.apply_promotions(cursor, user,
order_id, context=None)
return True
SaleOrder()
class SaleOrderLine(osv.osv):
'''
Sale Order Line
'''
_inherit = "sale.order.line"
_columns = {
'promotion_line':fields.boolean(
"Promotion Line",
help="Indicates if the line was created by promotions"
)
}
SaleOrderLine()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
2,
4,
6,
9,
10
] |
735 | eb558644283d992af2c324d457dbe674b714235f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import JsonResponse
from knowdb.models import Knowledge
import random
# Create your views here.
def answer(request):
ret = {}
data = Knowledge.objects.all()
num = random.choice(range(1,int(data.count())+1))
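    # draw a random 1-based index, then read the entry with 0-based indexing below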
ret['name'] = data[num-1].name
ret['answer'] = data[num-1].answer
print ret
return JsonResponse({'exec':'true','ret':ret})
def edit(request):
name = request.POST.get('name')
answer = request.POST.get('answer')
print name,answer
try:
adddata = Knowledge(name=name,answer=answer)
adddata.save()
        return JsonResponse({'exec':'true','ret':'提交成功'})  # '提交成功' = "submitted successfully"
except Exception as e:
        return JsonResponse({'exec':'false','ret':'提交失败'})  # '提交失败' = "submission failed"
| null | null | null | null | [
0
] |
736 | 40b6d62f1e360c0df19b7e98fcb67dbd578e709f | #!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
from collections import defaultdict
from docopt import docopt
__doc__ = """{f}
Usage:
{f} <used_file>
{f} -h | --help
Options:
-h --help Show this screen and exit.
""".format(f=__file__)
args = docopt(__doc__)
used_file = args['<used_file>']
exceed_list = []
user_limit_dict = defaultdict(float)
user_limit_f = open('/opt/uge/Accounting_Statistics/etc/user_limit_py.csv', 'r')
reader = csv.reader(user_limit_f)
header = next(reader)
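# skip the header row; the remaining rows are assumed to be (user, limit) pairs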
for row in reader:
user_limit_dict[row[0]] = float(row[1])
print user_limit_dict
used_f = open(used_file, 'r')
reader = csv.DictReader(used_f)
for row in reader:
print row
| null | null | null | null | [
0
] |
737 | 4e8a5b0ba13921fb88d5d6371d50e7120ab01265 | <mask token>
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
<mask token>
| <mask token>
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
<mask token>
| <mask token>
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
if __name__ == '__main__':
main()
| from metricsManager import MetricsManager
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
if __name__ == '__main__':
main()
| from metricsManager import MetricsManager
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
if __name__ == "__main__":
main()
| [
1,
2,
3,
4,
5
] |
738 | f1f708f00e05941c9a18a24b9a7556558583c3c7 | <mask token>
| TRAIN_INPUT_PATH = '~/Projects/competitions/titanic/data/train.csv'
TEST_INPUT_PATH = '~/Projects/competitions/titanic/data/test.csv'
OUTPUT_PATH = 'output/'
TRAIN_VAL_SPLIT = 0.75
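# fraction of the labelled data used for training; the rest is held out for validation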
RANDOM_SEED = 42
MODEL = 'LOGISTIC_REGRESSION'
LOG_PATH = 'logs/'
| TRAIN_INPUT_PATH = "~/Projects/competitions/titanic/data/train.csv"
TEST_INPUT_PATH = "~/Projects/competitions/titanic/data/test.csv"
OUTPUT_PATH = "output/"
TRAIN_VAL_SPLIT = 0.75
RANDOM_SEED = 42
MODEL = "LOGISTIC_REGRESSION"
LOG_PATH = "logs/"
| null | null | [
0,
1,
2
] |
739 | 911631e96d21bdf22a219007f1bdc04a5e6965dc | <mask token>
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
        opener.addheaders = list(headers.items())  # addheaders expects (name, value) tuples, not a dict
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
            logging.debug(e)
            continue  # the fetch failed, so `data` is undefined; skip to the next page
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
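# verify_ip routes a request to baidu.com through the candidate proxy; any
# non-empty response within the timeout marks the proxy as usable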
<mask token>
| <mask token>
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
        opener.addheaders = list(headers.items())  # addheaders expects (name, value) tuples, not a dict
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
            logging.debug(e)
            continue  # the fetch failed, so `data` is undefined; skip to the next page
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__ == '__main__':
getProxies()
| __author__ = 'Administrator'
<mask token>
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
        opener.addheaders = list(headers.items())  # addheaders expects (name, value) tuples, not a dict
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
            logging.debug(e)
            continue  # the fetch failed, so `data` is undefined; skip to the next page
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__ == '__main__':
getProxies()
| __author__ = 'Administrator'
from urllib import request
import urllib.parse
import logging
from multiprocessing import pool
from time import sleep
import random
from lxml import etree
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
        opener.addheaders = list(headers.items())  # addheaders expects (name, value) tuples, not a dict
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
            logging.debug(e)
            continue  # the fetch failed, so `data` is undefined; skip to the next page
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__ == '__main__':
getProxies()
| __author__ = 'Administrator'
# Main logic for scraping proxy IPs
from urllib import request
import urllib.parse
import logging
from multiprocessing import pool
from time import sleep
import random
from lxml import etree
def getRandomUserAgnet():
user_agents=[
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S"
]
userAgent=random.choice(user_agents)
return userAgent
def getProxies():
proxies=[]
for i in range(1,10):
url="http://www.xicidaili.com/nn/{0}".format(i)
userAgent=getRandomUserAgnet()
headers={"User-Agent":userAgent}
opener=urllib.request.build_opener()
        opener.addheaders=list(headers.items())  # addheaders expects (name, value) tuples, not a dict
try:
data=opener.open(url,timeout=5).read()
sleep(3)
except Exception as e:
            logging.debug(e)
            continue  # the fetch failed, so `data` is undefined; skip to the next page
selector=etree.HTML(data)
ip_addr=selector.xpath("//tr[@class='odd']/td[2]/text()")
port=selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time=selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time=selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip=ip_addr[j]+":"+port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies=[]
testUrl="http://www.baidu.com"
userAgent=getRandomUserAgnet()
proxy_support=urllib.request.ProxyHandler({"http":currentIp})
opener=urllib.request.build_opener(proxy_support)
opener.addheaders=[("User-Agent",userAgent)]
urllib.request.install_opener(opener)
try:
res=urllib.request.urlopen(testUrl,timeout=5).read()
if len(res)!=0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2,'code'):
logging.debug("unvalid ipaddress"+currentIp+str(er2.code))
if hasattr(er2,"reason"):
logging.debug("reason is the "+currentIp+str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__=="__main__":
getProxies()
| [
3,
4,
5,
6,
7
] |
740 | bc890f0f40a7e9c916628d491e473b5ecfa9bb9b | <mask token>
class TemperatureSensor:
<mask token>
<mask token>
<mask token>
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
<mask token>
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
<mask token>
| <mask token>
class TemperatureSensor:
sensor_type = 'temperature'
unit = 'celsius'
instance_id = '283h62gsj'
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
def sense(self):
self.value = self.complex_random() + self.noise()
return self.value
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
<mask token>
| <mask token>
class TemperatureSensor:
sensor_type = 'temperature'
unit = 'celsius'
instance_id = '283h62gsj'
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
def sense(self):
self.value = self.complex_random() + self.noise()
return self.value
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
ts = TemperatureSensor(25, 10, 16, 35)
| from random import random
import numpy as np
class TemperatureSensor:
sensor_type = 'temperature'
unit = 'celsius'
instance_id = '283h62gsj'
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
def sense(self):
self.value = self.complex_random() + self.noise()
return self.value
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
ts = TemperatureSensor(25, 10, 16, 35)
| from random import random
import numpy as np
class TemperatureSensor:
sensor_type = "temperature"
unit="celsius"
instance_id="283h62gsj"
#initialisation
def __init__(self, average_temperature, temperature_variation, min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature= max_temperature
self.value = 0.0 #initialise current temp value
#sensing
def sense(self):
#self.value = self.value + self.simple_random()
self.value = self.complex_random() + self.noise()
return self.value
#noise
def noise(self):
self.noise_value = np.random.normal(0,1)
return self.noise_value
#helper function for generating values with min temp as its base
def simple_random(self):
value = self.min_temperature + (random() * (self.max_temperature - self.min_temperature)) #so that it is in the range
return value
def complex_random(self):
value = self.average_temperature * (1 + (self.temperature_variation/100) * (1 * random() -1))
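        # note: (1 * random() - 1) lies in [-1, 0), so the variation only ever
        # lowers the temperature; a symmetric spread would use 2 * random() - 1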
value = max(value,self.min_temperature)
value = min(value,self.max_temperature)
return value
#creating instance of sensor
ts = TemperatureSensor(25,10,16,35)
| [
5,
7,
8,
9,
10
] |
741 | fe597ad4462b1af3f3f99346c759c5fa8a7c14f4 | <mask token>
| def check_ip_or_mask(temp_str):
    import re  # needed below; the import was lost when the function was extracted
    IPv4_regex = '(?:[0-9]{1,3}\\.){3}[0-9]{1,3}'
temp_list_ip_mask = re.findall(IPv4_regex, temp_str)
binary_temp_list_ip_mask = []
temp_binary_ip_mask = ''
for x in range(len(temp_list_ip_mask)):
split_ip_address = re.split('\\.', temp_list_ip_mask[x])
for y in range(len(split_ip_address)):
temp_binary_ip_mask += str(bin(int(split_ip_address[y]))[2:].
zfill(8))
binary_temp_list_ip_mask.append(temp_binary_ip_mask)
temp_binary_ip_mask = ''
ip_index_list = []
mask_index_list = []
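    # classify each 32-bit binary string: a valid netmask is a run of 1s
    # followed only by 0s (^1+0*$); anything else is treated as the address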
for x in range(2):
if not re.match('^[1]+[0]*$', binary_temp_list_ip_mask[x]):
ip_index_list.append(x)
if re.match('^[1]+[0]*$', binary_temp_list_ip_mask[x]):
mask_index_list.append(x)
if len(ip_index_list) == 1 and len(mask_index_list) == 1:
ipv4 = temp_list_ip_mask[int(ip_index_list[0])]
net_mask = temp_list_ip_mask[int(mask_index_list[0])]
    elif binary_temp_list_ip_mask[0].count('1') < binary_temp_list_ip_mask[1
        ].count('1'):
ipv4 = temp_list_ip_mask[0]
net_mask = temp_list_ip_mask[1]
else:
ipv4 = temp_list_ip_mask[1]
net_mask = temp_list_ip_mask[0]
return 'IPv4: ' + ipv4 + '\n' + 'Net mask: ' + net_mask
| def check_ip_or_mask(temp_str):
    import re  # needed below; the import was lost when the function was extracted
    IPv4_regex = (r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}')
temp_list_ip_mask = re.findall(IPv4_regex, temp_str)
binary_temp_list_ip_mask = []
temp_binary_ip_mask = ''
for x in range(len(temp_list_ip_mask)):
split_ip_address = re.split(r'\.', temp_list_ip_mask[x])
for y in range(len(split_ip_address)):
temp_binary_ip_mask += str(bin(int(split_ip_address[y]))[2:].zfill(8))
binary_temp_list_ip_mask.append(temp_binary_ip_mask)
temp_binary_ip_mask = ''
ip_index_list = []
mask_index_list = []
for x in range(2):
if not re.match(r'^[1]+[0]*$', binary_temp_list_ip_mask[x]):
ip_index_list.append(x)
if re.match(r'^[1]+[0]*$', binary_temp_list_ip_mask[x]):
mask_index_list.append(x)
if len(ip_index_list) == 1 and len(mask_index_list) == 1:
ipv4 = temp_list_ip_mask[int(ip_index_list[0])]
net_mask = temp_list_ip_mask[int(mask_index_list[0])]
else:
        if (binary_temp_list_ip_mask[0].count('1')) < (binary_temp_list_ip_mask[1].count('1')):
ipv4 = temp_list_ip_mask[0]
net_mask = temp_list_ip_mask[1]
else:
ipv4 = temp_list_ip_mask[1]
net_mask = temp_list_ip_mask[0]
return "IPv4: " + ipv4 + "\n" + "Net mask: " + net_mask
| null | null | [
0,
1,
2
] |
742 | 0528d7761cbbf3dbe881ff05b81060f3d97e7f6c | <mask token>
class MonitorList(tp.Generic[T], collections.UserList, Monitor):
<mask token>
def __init__(self, *args):
collections.UserList.__init__(self, *args)
Monitor.__init__(self)
<mask token>
<mask token>
def __getitem__(self, item: tp.Union[slice, int]) ->T:
return self.data[item]
def __setitem__(self, key: int, value: T) ->None:
self.data[key] = value
def __delitem__(self, key: tp.Union[slice, int]) ->None:
del self.data[key]
class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):
"""
A dict that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args, **kwargs):
collections.UserDict.__init__(self, *args, **kwargs)
Monitor.__init__(self)
def __getitem__(self, item: K) ->V:
return self.data[item]
def __setitem__(self, key: K, value: V) ->None:
self.data[key] = value
def __delitem__(self, key: K) ->None:
del self.data[key]
def __copy__(self) ->'MonitorDict':
return MonitorDict(copy.copy(self.data))
def __deepcopy__(self, memo) ->'MonitorDict':
return MonitorDict(copy.deepcopy(self.data, memo=memo))
class MonitorSet(set, Monitor):
"""
A set that allows atomic insert-if-not-already-there operation
"""
def __init__(self, *args):
super().__init__(*args)
Monitor.__init__(self)
def insert_and_check(self, item) ->bool:
"""
Perform an atomic insert if not already in set
:param item: item to insert
:return: whether the item was successfully inserted
"""
with Monitor.acquire(self):
if item in self:
return False
self.add(item)
return True
| <mask token>
class MonitorList(tp.Generic[T], collections.UserList, Monitor):
"""
A list that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args):
collections.UserList.__init__(self, *args)
Monitor.__init__(self)
def __copy__(self) ->'MonitorList':
return MonitorList(copy.copy(self.data))
def __deepcopy__(self, memo) ->'MonitorList':
return MonitorList(copy.deepcopy(self.data, memo=memo))
def __getitem__(self, item: tp.Union[slice, int]) ->T:
return self.data[item]
def __setitem__(self, key: int, value: T) ->None:
self.data[key] = value
def __delitem__(self, key: tp.Union[slice, int]) ->None:
del self.data[key]
class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):
"""
A dict that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args, **kwargs):
collections.UserDict.__init__(self, *args, **kwargs)
Monitor.__init__(self)
def __getitem__(self, item: K) ->V:
return self.data[item]
def __setitem__(self, key: K, value: V) ->None:
self.data[key] = value
def __delitem__(self, key: K) ->None:
del self.data[key]
def __copy__(self) ->'MonitorDict':
return MonitorDict(copy.copy(self.data))
def __deepcopy__(self, memo) ->'MonitorDict':
return MonitorDict(copy.deepcopy(self.data, memo=memo))
class MonitorSet(set, Monitor):
"""
A set that allows atomic insert-if-not-already-there operation
"""
def __init__(self, *args):
super().__init__(*args)
Monitor.__init__(self)
def insert_and_check(self, item) ->bool:
"""
Perform an atomic insert if not already in set
:param item: item to insert
:return: whether the item was successfully inserted
"""
with Monitor.acquire(self):
if item in self:
return False
self.add(item)
return True
| <mask token>
class RMonitor(Monitor):
<mask token>
def __init__(self):
self._monitor_lock = threading.RLock()
class MonitorList(tp.Generic[T], collections.UserList, Monitor):
"""
A list that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args):
collections.UserList.__init__(self, *args)
Monitor.__init__(self)
def __copy__(self) ->'MonitorList':
return MonitorList(copy.copy(self.data))
def __deepcopy__(self, memo) ->'MonitorList':
return MonitorList(copy.deepcopy(self.data, memo=memo))
def __getitem__(self, item: tp.Union[slice, int]) ->T:
return self.data[item]
def __setitem__(self, key: int, value: T) ->None:
self.data[key] = value
def __delitem__(self, key: tp.Union[slice, int]) ->None:
del self.data[key]
class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):
"""
A dict that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args, **kwargs):
collections.UserDict.__init__(self, *args, **kwargs)
Monitor.__init__(self)
def __getitem__(self, item: K) ->V:
return self.data[item]
def __setitem__(self, key: K, value: V) ->None:
self.data[key] = value
def __delitem__(self, key: K) ->None:
del self.data[key]
def __copy__(self) ->'MonitorDict':
return MonitorDict(copy.copy(self.data))
def __deepcopy__(self, memo) ->'MonitorDict':
return MonitorDict(copy.deepcopy(self.data, memo=memo))
class MonitorSet(set, Monitor):
"""
A set that allows atomic insert-if-not-already-there operation
"""
def __init__(self, *args):
super().__init__(*args)
Monitor.__init__(self)
def insert_and_check(self, item) ->bool:
"""
Perform an atomic insert if not already in set
:param item: item to insert
:return: whether the item was successfully inserted
"""
with Monitor.acquire(self):
if item in self:
return False
self.add(item)
return True
| <mask token>
class RMonitor(Monitor):
"""
    Monitor, but using a reentrant lock instead of a normal one
"""
def __init__(self):
self._monitor_lock = threading.RLock()
class MonitorList(tp.Generic[T], collections.UserList, Monitor):
"""
A list that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args):
collections.UserList.__init__(self, *args)
Monitor.__init__(self)
def __copy__(self) ->'MonitorList':
return MonitorList(copy.copy(self.data))
def __deepcopy__(self, memo) ->'MonitorList':
return MonitorList(copy.deepcopy(self.data, memo=memo))
def __getitem__(self, item: tp.Union[slice, int]) ->T:
return self.data[item]
def __setitem__(self, key: int, value: T) ->None:
self.data[key] = value
def __delitem__(self, key: tp.Union[slice, int]) ->None:
del self.data[key]
class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):
"""
A dict that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args, **kwargs):
collections.UserDict.__init__(self, *args, **kwargs)
Monitor.__init__(self)
def __getitem__(self, item: K) ->V:
return self.data[item]
def __setitem__(self, key: K, value: V) ->None:
self.data[key] = value
def __delitem__(self, key: K) ->None:
del self.data[key]
def __copy__(self) ->'MonitorDict':
return MonitorDict(copy.copy(self.data))
def __deepcopy__(self, memo) ->'MonitorDict':
return MonitorDict(copy.deepcopy(self.data, memo=memo))
class MonitorSet(set, Monitor):
"""
A set that allows atomic insert-if-not-already-there operation
"""
def __init__(self, *args):
super().__init__(*args)
Monitor.__init__(self)
def insert_and_check(self, item) ->bool:
"""
Perform an atomic insert if not already in set
:param item: item to insert
:return: whether the item was successfully inserted
"""
with Monitor.acquire(self):
if item in self:
return False
self.add(item)
return True
| import collections
import copy
import threading
import typing as tp
from ..decorators.decorators import wraps
from ..typing import K, V, T
class Monitor:
"""
Base utility class for creating monitors (the synchronization thingies!)
These are NOT re-entrant!
    Use it like this:
>>> class MyProtectedObject(Monitor):
>>> def __init__(self, *args, **kwargs):
>>> Monitor.__init__(self)
>>> ... do your job ..
>>> @Monitor.synchronized
>>> def function_that_needs_mutual_exclusion(self):
>>> .. do your threadsafe jobs ..
>>> def function_that_partially_needs_protection(self):
>>> .. do your jobs ..
>>> with Monitor.acquire(self):
>>> .. do your threadsafe jobs ..
>>> .. do your jobs ..
>>> with self:
>>> .. do your threadsafe jobs ..
"""
def __enter__(self) -> 'Monitor':
self._monitor_lock.acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> bool:
self._monitor_lock.release()
return False
def __init__(self):
"""You need to invoke this at your constructor
You can also use it to release locks of other objects."""
self._monitor_lock = threading.Lock() # type: threading.Lock
@staticmethod
def synchronize_on_attribute(attr_name: str):
"""
When a Monitor is an attribute of a class, and you have a method instance
that you would like secure by acquiring that monitor, use this.
The first argument taken by that method instance must be self.
:param attr_name: name of the attribute that is the monitor
"""
def outer(fun):
@wraps(fun)
def method(self, *args, **kwargs):
# noinspection PyProtectedMember
with getattr(self, attr_name)._monitor_lock:
return fun(self, *args, **kwargs)
return method
return outer
@staticmethod
def synchronized(fun: tp.Callable) -> tp.Callable:
"""
This is a decorator. Class method decorated with that will lock the
global lock of given instance, making it threadsafe. Depending on
usage pattern of your class and it's data semantics, your performance
may vary
"""
@wraps(fun)
def monitored(*args, **kwargs):
# noinspection PyProtectedMember
with args[0]._monitor_lock:
return fun(*args, **kwargs)
return monitored
class release:
"""
Returns a context manager object that can release another object
as long as that object is a monitor.
Consider foo, which is a monitor. You have a protected function,
but you feel that you can release it for a while as it would
improve parallelism. You can use it as such:
>>> @Monitor.synchronized
>>> def protected_function(self):
>>> .. do some stuff that needs mutual exclusion ..
>>> with Monitor.release(self):
>>> .. do some I/O that does not need mutual exclusion ..
>>> .. back to protected stuff ..
"""
__slots__ = ('foo',)
def __init__(self, foo: 'Monitor'):
self.foo = foo
def __enter__(self) -> None:
# noinspection PyProtectedMember
self.foo._monitor_lock.release()
def __exit__(self, e1, e2, e3) -> bool:
# noinspection PyProtectedMember
self.foo._monitor_lock.acquire()
return False
class acquire:
"""
Returns a context manager object that can lock another object,
as long as that object is a monitor.
Consider foo, which is a monitor. If you needed to lock it from
outside, you would do:
>>> with Monitor.acquire(foo):
>>> .. do operations on foo that need mutual exclusion ..
"""
__slots__ = ('foo',)
def __init__(self, foo: 'Monitor'):
self.foo = foo
def __enter__(self) -> None:
# noinspection PyProtectedMember
self.foo._monitor_lock.acquire()
def __exit__(self, e1, e2, e3) -> bool:
# noinspection PyProtectedMember
self.foo._monitor_lock.release()
return False
@classmethod
def synchronize_on(cls, monitor: 'Monitor') -> tp.Callable[[tp.Callable], tp.Callable]:
"""
A decorator for locking on non-self Monitor objects
Use it like:
>>> class MasterClass(Monitor):
>>> def get_object(self):
>>> class SlaveClass:
>>> @Monitor.synchronize_on(self)
>>> def get_object(self2):
>>> ...
>>> return SlaveClass
"""
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
with cls.acquire(monitor):
return fun(*args, **kwargs)
return inner
return outer
class RMonitor(Monitor):
"""
    Monitor, but using a reentrant lock instead of a normal one
"""
def __init__(self):
self._monitor_lock = threading.RLock() # type: threading.RLock
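    # only the lock type changes: all Monitor decorators and context managers
    # work unchanged, but the owning thread may re-acquire the lock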
class MonitorList(tp.Generic[T], collections.UserList, Monitor):
"""
A list that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args):
collections.UserList.__init__(self, *args)
Monitor.__init__(self)
def __copy__(self) -> 'MonitorList':
return MonitorList(copy.copy(self.data))
def __deepcopy__(self, memo) -> 'MonitorList':
return MonitorList(copy.deepcopy(self.data, memo=memo))
def __getitem__(self, item: tp.Union[slice, int]) -> T:
return self.data[item]
def __setitem__(self, key: int, value: T) -> None:
self.data[key] = value
def __delitem__(self, key: tp.Union[slice, int]) -> None:
del self.data[key]
class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):
"""
A dict that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement an opportunistic locking of your own choice
"""
def __init__(self, *args, **kwargs):
collections.UserDict.__init__(self, *args, **kwargs)
Monitor.__init__(self)
def __getitem__(self, item: K) -> V:
return self.data[item]
def __setitem__(self, key: K, value: V) -> None:
self.data[key] = value
def __delitem__(self, key: K) -> None:
del self.data[key]
def __copy__(self) -> 'MonitorDict':
return MonitorDict(copy.copy(self.data))
def __deepcopy__(self, memo) -> 'MonitorDict':
return MonitorDict(copy.deepcopy(self.data, memo=memo))
class MonitorSet(set, Monitor):
"""
A set that allows atomic insert-if-not-already-there operation
"""
def __init__(self, *args):
super().__init__(*args)
Monitor.__init__(self)
def insert_and_check(self, item) -> bool:
"""
Perform an atomic insert if not already in set
:param item: item to insert
:return: whether the item was successfully inserted
"""
with Monitor.acquire(self):
if item in self:
return False
self.add(item)
return True
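# Example: thread-safe deduplication with MonitorSet (hypothetical usage)
#   seen = MonitorSet()
#   if seen.insert_and_check(task_id):
#       process(task_id)  # only the first thread to insert gets here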
| [
17,
20,
22,
23,
33
] |
743 | 8c336edddadbf4689721b474c254ded061ecf4b5 | <mask token>
| from . import scramsha1, scrammer
| null | null | null | [
0,
1
] |
744 | 0fb288e3ab074e021ec726d71cbd5c8546a8455b | <mask token>
@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')
class NetTest(TestCase, LoaderModuleMockMixin):
<mask token>
<mask token>
def test_interfaces(self):
ret = net.interfaces()
self.assertEqual(None, ret)
def test_findarp(self):
ret = net.findarp()
self.assertEqual(None, ret)
<mask token>
<mask token>
def test_find(self):
ret = net.find('')
self.assertEqual({}, ret)
def test_multi_find(self):
ret = net.multi_find()
self.assertEqual(None, ret)
| <mask token>
@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')
class NetTest(TestCase, LoaderModuleMockMixin):
<mask token>
<mask token>
def test_interfaces(self):
ret = net.interfaces()
self.assertEqual(None, ret)
def test_findarp(self):
ret = net.findarp()
self.assertEqual(None, ret)
def test_findmac(self):
ret = net.findmac()
self.assertEqual(None, ret)
<mask token>
def test_find(self):
ret = net.find('')
self.assertEqual({}, ret)
def test_multi_find(self):
ret = net.multi_find()
self.assertEqual(None, ret)
| <mask token>
@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')
class NetTest(TestCase, LoaderModuleMockMixin):
<mask token>
def setup_loader_modules(self):
mock_get = MagicMock(return_value={})
self.extmods_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, self.extmods_dir, ignore_errors=True)
return {net: {'__opts__': {'optimization_order': [0, 1, 2],
'renderer': 'yaml', 'renderer_blacklist': [],
'renderer_whitelist': [], 'extension_modules': self.extmods_dir
}, '__salt__': {'mine.get': mock_get}}}
def test_interfaces(self):
ret = net.interfaces()
self.assertEqual(None, ret)
def test_findarp(self):
ret = net.findarp()
self.assertEqual(None, ret)
def test_findmac(self):
ret = net.findmac()
self.assertEqual(None, ret)
def test_lldp(self):
ret = net.lldp()
self.assertEqual(None, ret)
def test_find(self):
ret = net.find('')
self.assertEqual({}, ret)
def test_multi_find(self):
ret = net.multi_find()
self.assertEqual(None, ret)
| import shutil
import tempfile
import salt.runners.net as net
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')
class NetTest(TestCase, LoaderModuleMockMixin):
"""
Test the net runner
"""
def setup_loader_modules(self):
mock_get = MagicMock(return_value={})
self.extmods_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, self.extmods_dir, ignore_errors=True)
return {net: {'__opts__': {'optimization_order': [0, 1, 2],
'renderer': 'yaml', 'renderer_blacklist': [],
'renderer_whitelist': [], 'extension_modules': self.extmods_dir
}, '__salt__': {'mine.get': mock_get}}}
def test_interfaces(self):
ret = net.interfaces()
self.assertEqual(None, ret)
def test_findarp(self):
ret = net.findarp()
self.assertEqual(None, ret)
def test_findmac(self):
ret = net.findmac()
self.assertEqual(None, ret)
def test_lldp(self):
ret = net.lldp()
self.assertEqual(None, ret)
def test_find(self):
ret = net.find('')
self.assertEqual({}, ret)
def test_multi_find(self):
ret = net.multi_find()
self.assertEqual(None, ret)
| import shutil
import tempfile
import salt.runners.net as net
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
@skipIf(not net.HAS_NAPALM, "napalm module required for this test")
class NetTest(TestCase, LoaderModuleMockMixin):
"""
Test the net runner
"""
def setup_loader_modules(self):
mock_get = MagicMock(return_value={})
self.extmods_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, self.extmods_dir, ignore_errors=True)
return {
net: {
"__opts__": {
"optimization_order": [0, 1, 2],
"renderer": "yaml",
"renderer_blacklist": [],
"renderer_whitelist": [],
"extension_modules": self.extmods_dir,
},
"__salt__": {"mine.get": mock_get},
}
}
def test_interfaces(self):
ret = net.interfaces()
self.assertEqual(None, ret)
def test_findarp(self):
ret = net.findarp()
self.assertEqual(None, ret)
def test_findmac(self):
ret = net.findmac()
self.assertEqual(None, ret)
def test_lldp(self):
ret = net.lldp()
self.assertEqual(None, ret)
def test_find(self):
ret = net.find("")
self.assertEqual({}, ret)
def test_multi_find(self):
ret = net.multi_find()
self.assertEqual(None, ret)
| [
5,
6,
8,
10,
11
] |
745 | b12c8d0cb1cd1e48df6246fe3f16467b2db296e0 | <mask token>
| <mask token>
def dir_slash():
slash = '/'
    if sys.platform.startswith('win'):  # plain "'win' in sys.platform" also matches 'darwin' on macOS
slash = '\\'
return slash
| import sys
def dir_slash():
slash = '/'
    if sys.platform.startswith('win'):  # plain "'win' in sys.platform" also matches 'darwin' on macOS
slash = '\\'
return slash
| null | null | [
0,
1,
2
] |
746 | a09bc84a14718422894127a519d67dc0c6b13bc9 | <mask token>
| <mask token>
for i in range(n - 1):
if a[i + 1] - a[i] < m:
ans += a[i + 1] - a[i]
else:
ans += m
print(ans)
| n, m = map(int, input().split())
a = [int(input()) for _ in range(n)]
cnt, ans, mx, mn = 0, m, 0, 100000000
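# ans starts at m for the first press; each later press adds the gap when it
# is shorter than m, otherwise a full m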
for i in range(n - 1):
if a[i + 1] - a[i] < m:
ans += a[i + 1] - a[i]
else:
ans += m
print(ans)
| #n = int(input())
#s = input()
n, m = map(int, input().split())
#s, t = input().split()
#n, m, l = map(int, input().split())
#s, t, r = input().split()
#a = map(int, input().split())
#a = input().split()
a = [int(input()) for _ in range(n)]
#a = [input() for _ in range(n)]
#t = input()
#m = int(input())
#p, q = map(int, input().split())
#p, q = input().split()
#p, q, r = map(int, input().split())
#p, q, r = input().split()
#b = map(int, input().split())
#b = input().split()
#b = [int(input()) for _ in range(m)]
#b = [input() for _ in range(m)]
cnt, ans, mx, mn = 0, m, 0, 100000000
for i in range(n - 1):
if a[i + 1] - a[i] < m:
ans += a[i + 1] - a[i]
else:
ans += m
print(ans)
| null | [
0,
1,
2,
3
] |
747 | 330b843501e0fdaff21cc4eff1ef930d54ab6e8d | <mask token>
class FRSHTTHolder:
frshtt_code = ''
star_count_lst = [0, 0, 0, 0, 0, 0]
counter = 0
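    # The six-digit codes handled here ('000000', '010000', ...) look like the
    # NOAA GSOD FRSHTT indicators: Fog, Rain, Snow, Hail, Thunder, Tornado,
    # one 0/1 flag per condition (an assumption based on the field name).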
def __init__(self, in_frshtt_code):
self.frshtt_code = in_frshtt_code
self.counter = 0
self.star_count_lst = [0, 0, 0, 0, 0, 0]
def is_in_code(self, in_frshtt_code):
if self.frshtt_code == in_frshtt_code:
return True
else:
return False
def add_rating(self, rating):
if int(rating) == 0:
self.star_count_lst[0] += 1
if int(rating) == 1:
self.star_count_lst[1] += 1
if int(rating) == 2:
self.star_count_lst[2] += 1
if int(rating) == 3:
self.star_count_lst[3] += 1
if int(rating) == 4:
self.star_count_lst[4] += 1
if int(rating) == 5:
self.star_count_lst[5] += 1
self.counter += 1
    def __str__(self):
        return_str = ''
        return_str += 'Code: ' + str(self.frshtt_code) + '\n'
        return_str += 'Count: ' + str(self.counter) + '\n'
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + ' Stars: 0.00%\n'
            else:
                return_str += str(stars) + ' Stars: ' + str(round(count *
                    100.0 / self.counter, 2)) + '%\n'
        return return_str
class FRSHTTAnalysis:
    def __init__(self):
        # Instance attribute so separate instances do not share one list.
        self.frshtt_holder_lst = []
self.frshtt_holder_lst.append(FRSHTTHolder('000000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010010'))
self.frshtt_holder_lst.append(FRSHTTHolder('100000'))
self.frshtt_holder_lst.append(FRSHTTHolder('001000'))
self.frshtt_holder_lst.append(FRSHTTHolder('011000'))
self.frshtt_holder_lst.append(FRSHTTHolder('000010'))
self.frshtt_holder_lst.append(FRSHTTHolder('000100'))
def add_rating(self, rating, frshtt_code):
for frshtt_holder in self.frshtt_holder_lst:
if frshtt_holder.is_in_code(frshtt_code):
frshtt_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = 'Breakdown by Code:\n'
return_str += '-------------------------\n'
for frshtt_holder in self.frshtt_holder_lst:
return_str += str(frshtt_holder) + '\n'
return return_str
| <mask token>
class TempAnalysis:
<mask token>
<mask token>
<mask token>
def __str__(self):
return_str = 'Breakdown by Temperature:\n'
return_str += '-------------------------\n'
for temp_holder in self.temp_holder_lst:
return_str += str(temp_holder) + '\n'
return return_str
<mask token>
class FRSHTTHolder:
frshtt_code = ''
star_count_lst = [0, 0, 0, 0, 0, 0]
counter = 0
def __init__(self, in_frshtt_code):
self.frshtt_code = in_frshtt_code
self.counter = 0
self.star_count_lst = [0, 0, 0, 0, 0, 0]
def is_in_code(self, in_frshtt_code):
if self.frshtt_code == in_frshtt_code:
return True
else:
return False
def add_rating(self, rating):
if int(rating) == 0:
self.star_count_lst[0] += 1
if int(rating) == 1:
self.star_count_lst[1] += 1
if int(rating) == 2:
self.star_count_lst[2] += 1
if int(rating) == 3:
self.star_count_lst[3] += 1
if int(rating) == 4:
self.star_count_lst[4] += 1
if int(rating) == 5:
self.star_count_lst[5] += 1
self.counter += 1
    def __str__(self):
        return_str = ''
        return_str += 'Code: ' + str(self.frshtt_code) + '\n'
        return_str += 'Count: ' + str(self.counter) + '\n'
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + ' Stars: 0.00%\n'
            else:
                return_str += str(stars) + ' Stars: ' + str(round(count *
                    100.0 / self.counter, 2)) + '%\n'
        return return_str
class FRSHTTAnalysis:
    def __init__(self):
        # Instance attribute so separate instances do not share one list.
        self.frshtt_holder_lst = []
self.frshtt_holder_lst.append(FRSHTTHolder('000000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010010'))
self.frshtt_holder_lst.append(FRSHTTHolder('100000'))
self.frshtt_holder_lst.append(FRSHTTHolder('001000'))
self.frshtt_holder_lst.append(FRSHTTHolder('011000'))
self.frshtt_holder_lst.append(FRSHTTHolder('000010'))
self.frshtt_holder_lst.append(FRSHTTHolder('000100'))
def add_rating(self, rating, frshtt_code):
for frshtt_holder in self.frshtt_holder_lst:
if frshtt_holder.is_in_code(frshtt_code):
frshtt_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = 'Breakdown by Code:\n'
return_str += '-------------------------\n'
for frshtt_holder in self.frshtt_holder_lst:
return_str += str(frshtt_holder) + '\n'
return return_str
| <mask token>
class TempAnalysis:
<mask token>
def __init__(self):
temp_counter = 0
while temp_counter < 110:
self.temp_holder_lst.append(TempHolder(temp_counter,
temp_counter + 10))
temp_counter += 10
def add_rating(self, rating, temp):
for temp_holder in self.temp_holder_lst:
if temp_holder.is_in_temp_range(temp):
temp_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = 'Breakdown by Temperature:\n'
return_str += '-------------------------\n'
for temp_holder in self.temp_holder_lst:
return_str += str(temp_holder) + '\n'
return return_str
<mask token>
class FRSHTTHolder:
frshtt_code = ''
star_count_lst = [0, 0, 0, 0, 0, 0]
counter = 0
def __init__(self, in_frshtt_code):
self.frshtt_code = in_frshtt_code
self.counter = 0
self.star_count_lst = [0, 0, 0, 0, 0, 0]
def is_in_code(self, in_frshtt_code):
if self.frshtt_code == in_frshtt_code:
return True
else:
return False
def add_rating(self, rating):
if int(rating) == 0:
self.star_count_lst[0] += 1
if int(rating) == 1:
self.star_count_lst[1] += 1
if int(rating) == 2:
self.star_count_lst[2] += 1
if int(rating) == 3:
self.star_count_lst[3] += 1
if int(rating) == 4:
self.star_count_lst[4] += 1
if int(rating) == 5:
self.star_count_lst[5] += 1
self.counter += 1
    def __str__(self):
        return_str = ''
        return_str += 'Code: ' + str(self.frshtt_code) + '\n'
        return_str += 'Count: ' + str(self.counter) + '\n'
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + ' Stars: 0.00%\n'
            else:
                return_str += str(stars) + ' Stars: ' + str(round(count *
                    100.0 / self.counter, 2)) + '%\n'
        return return_str
class FRSHTTAnalysis:
    def __init__(self):
        # Instance attribute so separate instances do not share one list.
        self.frshtt_holder_lst = []
self.frshtt_holder_lst.append(FRSHTTHolder('000000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010010'))
self.frshtt_holder_lst.append(FRSHTTHolder('100000'))
self.frshtt_holder_lst.append(FRSHTTHolder('001000'))
self.frshtt_holder_lst.append(FRSHTTHolder('011000'))
self.frshtt_holder_lst.append(FRSHTTHolder('000010'))
self.frshtt_holder_lst.append(FRSHTTHolder('000100'))
def add_rating(self, rating, frshtt_code):
for frshtt_holder in self.frshtt_holder_lst:
if frshtt_holder.is_in_code(frshtt_code):
frshtt_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = 'Breakdown by Code:\n'
return_str += '-------------------------\n'
for frshtt_holder in self.frshtt_holder_lst:
return_str += str(frshtt_holder) + '\n'
return return_str
| <mask token>
class TempHolder:
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, in_range_start, in_range_end):
self.range_start = in_range_start
self.range_end = in_range_end
self.counter = 0
self.star_count_lst = [0, 0, 0, 0, 0, 0]
def is_in_temp_range(self, temp):
if self.range_start <= temp and temp < self.range_end:
return True
else:
return False
<mask token>
<mask token>
class TempAnalysis:
    def __init__(self):
        # Instance attribute so separate instances do not share one list.
        self.temp_holder_lst = []
temp_counter = 0
while temp_counter < 110:
self.temp_holder_lst.append(TempHolder(temp_counter,
temp_counter + 10))
temp_counter += 10
def add_rating(self, rating, temp):
for temp_holder in self.temp_holder_lst:
if temp_holder.is_in_temp_range(temp):
temp_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = 'Breakdown by Temperature:\n'
return_str += '-------------------------\n'
for temp_holder in self.temp_holder_lst:
return_str += str(temp_holder) + '\n'
return return_str
<mask token>
class FRSHTTHolder:
frshtt_code = ''
star_count_lst = [0, 0, 0, 0, 0, 0]
counter = 0
def __init__(self, in_frshtt_code):
self.frshtt_code = in_frshtt_code
self.counter = 0
self.star_count_lst = [0, 0, 0, 0, 0, 0]
def is_in_code(self, in_frshtt_code):
if self.frshtt_code == in_frshtt_code:
return True
else:
return False
def add_rating(self, rating):
if int(rating) == 0:
self.star_count_lst[0] += 1
if int(rating) == 1:
self.star_count_lst[1] += 1
if int(rating) == 2:
self.star_count_lst[2] += 1
if int(rating) == 3:
self.star_count_lst[3] += 1
if int(rating) == 4:
self.star_count_lst[4] += 1
if int(rating) == 5:
self.star_count_lst[5] += 1
self.counter += 1
    def __str__(self):
        return_str = ''
        return_str += 'Code: ' + str(self.frshtt_code) + '\n'
        return_str += 'Count: ' + str(self.counter) + '\n'
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + ' Stars: 0.00%\n'
            else:
                return_str += str(stars) + ' Stars: ' + str(round(count *
                    100.0 / self.counter, 2)) + '%\n'
        return return_str
class FRSHTTAnalysis:
    def __init__(self):
        # Instance attribute so separate instances do not share one list.
        self.frshtt_holder_lst = []
self.frshtt_holder_lst.append(FRSHTTHolder('000000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010000'))
self.frshtt_holder_lst.append(FRSHTTHolder('010010'))
self.frshtt_holder_lst.append(FRSHTTHolder('100000'))
self.frshtt_holder_lst.append(FRSHTTHolder('001000'))
self.frshtt_holder_lst.append(FRSHTTHolder('011000'))
self.frshtt_holder_lst.append(FRSHTTHolder('000010'))
self.frshtt_holder_lst.append(FRSHTTHolder('000100'))
def add_rating(self, rating, frshtt_code):
for frshtt_holder in self.frshtt_holder_lst:
if frshtt_holder.is_in_code(frshtt_code):
frshtt_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = 'Breakdown by Code:\n'
return_str += '-------------------------\n'
for frshtt_holder in self.frshtt_holder_lst:
return_str += str(frshtt_holder) + '\n'
return return_str
| '''
Temperature Container
'''
class TempHolder:
range_start = 0
range_end = 0
star_count_lst = [0,0,0,0,0,0]
counter = 0
def __init__(self, in_range_start, in_range_end):
self.range_start = in_range_start
self.range_end = in_range_end
self.counter = 0
self.star_count_lst = [0,0,0,0,0,0]
def is_in_temp_range(self, temp):
if self.range_start <= temp and temp < self.range_end:
return True
else:
return False
def add_rating(self, rating):
if int(rating) == 0:
self.star_count_lst[0] += 1
if int(rating) == 1:
self.star_count_lst[1] += 1
if int(rating) == 2:
self.star_count_lst[2] += 1
if int(rating) == 3:
self.star_count_lst[3] += 1
if int(rating) == 4:
self.star_count_lst[4] += 1
if int(rating) == 5:
self.star_count_lst[5] += 1
self.counter += 1
    def __str__(self):
        return_str = ""
        return_str += "Temp: " + str(self.range_start) + "-" + str(self.range_end) + "\n"
        return_str += "Count: " + str(self.counter) + "\n"
        # Round the final percentage, not the ratio: round(x, 4) * 100 left
        # float artifacts such as "23.450000000000003%" in the output.
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + " Stars: 0.00%\n"
            else:
                return_str += str(stars) + " Stars: " + str(round(count * 100.0 / self.counter, 2)) + "%\n"
        return return_str
class TempAnalysis:
    def __init__(self):
        # Instance attribute: a class-level list() would be shared by every
        # TempAnalysis instance, duplicating holders on each construction.
        self.temp_holder_lst = []
temp_counter = 0
while temp_counter < 110:
self.temp_holder_lst.append(TempHolder(temp_counter, temp_counter + 10))
temp_counter += 10
def add_rating(self, rating, temp):
for temp_holder in self.temp_holder_lst:
if temp_holder.is_in_temp_range(temp):
temp_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = "Breakdown by Temperature:\n"
return_str += "-------------------------\n"
for temp_holder in self.temp_holder_lst:
return_str += str(temp_holder) + "\n"
return return_str
'''
FRSHTT Code Container
'''
class FRSHTTHolder:
frshtt_code = ""
star_count_lst = [0,0,0,0,0,0]
counter = 0
def __init__(self, in_frshtt_code):
self.frshtt_code = in_frshtt_code
self.counter = 0
self.star_count_lst = [0,0,0,0,0,0]
def is_in_code(self, in_frshtt_code):
if self.frshtt_code == in_frshtt_code:
return True
else:
return False
def add_rating(self, rating):
if int(rating) == 0:
self.star_count_lst[0] += 1
if int(rating) == 1:
self.star_count_lst[1] += 1
if int(rating) == 2:
self.star_count_lst[2] += 1
if int(rating) == 3:
self.star_count_lst[3] += 1
if int(rating) == 4:
self.star_count_lst[4] += 1
if int(rating) == 5:
self.star_count_lst[5] += 1
self.counter += 1
    def __str__(self):
        return_str = ""
        return_str += "Code: " + str(self.frshtt_code) + "\n"
        return_str += "Count: " + str(self.counter) + "\n"
        # See TempHolder.__str__: round the final percentage to avoid float artifacts.
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + " Stars: 0.00%\n"
            else:
                return_str += str(stars) + " Stars: " + str(round(count * 100.0 / self.counter, 2)) + "%\n"
        return return_str
class FRSHTTAnalysis:
    def __init__(self):
        # Instance attribute: a class-level list() would be shared by every
        # FRSHTTAnalysis instance, duplicating holders on each construction.
        self.frshtt_holder_lst = []
# no weather
self.frshtt_holder_lst.append(FRSHTTHolder("000000"))
# rain
self.frshtt_holder_lst.append(FRSHTTHolder("010000"))
        # thunder storm
self.frshtt_holder_lst.append(FRSHTTHolder("010010"))
# fog
self.frshtt_holder_lst.append(FRSHTTHolder("100000"))
# snow
self.frshtt_holder_lst.append(FRSHTTHolder("001000"))
# mixed (snow/rain)
self.frshtt_holder_lst.append(FRSHTTHolder("011000"))
# dry thunder
self.frshtt_holder_lst.append(FRSHTTHolder("000010"))
# hail
self.frshtt_holder_lst.append(FRSHTTHolder("000100"))
def add_rating(self, rating, frshtt_code):
for frshtt_holder in self.frshtt_holder_lst:
if frshtt_holder.is_in_code(frshtt_code):
frshtt_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = "Breakdown by Code:\n"
return_str += "-------------------------\n"
for frshtt_holder in self.frshtt_holder_lst:
return_str += str(frshtt_holder) + "\n"
return return_str
| [
11,
13,
15,
19,
23
] |
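A usage sketch for the classes in the row above (the ratings are invented). The six-character codes appear to follow the NOAA GSOD FRSHTT indicator string, one flag each for Fog, Rain, Snow, Hail, Thunder and Tornado:

analysis = FRSHTTAnalysis()
for stars, code in [(5, '000000'), (3, '010000'), (1, '010000'), (4, '000000')]:
    analysis.add_rating(stars, code)   # returns False for codes with no holder
print(analysis)                        # per-code star distributions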
748 | 28d8f9d9b39c40c43a362e57a7907c0a38a6bd05 | <mask token>
class MyGame(arcade.Window):
<mask token>
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
<mask token>
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
<mask token>
| <mask token>
class MyGame(arcade.Window):
<mask token>
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player_list.append(self.player)
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
<mask token>
| <mask token>
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player_list.append(self.player)
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
<mask token>
| <mask token>
import arcade
import os
from Toad_arcade import Toad
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
SCREEN_TITLE = 'PyToads - Battletoads reimplementation'
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player_list.append(self.player)
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == '__main__':
main()
| """
Platformer Game
"""
import arcade
import os
from Toad_arcade import Toad
# Constants
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
SCREEN_TITLE = "PyToads - Battletoads reimplementation"
# Constants used to scale our sprites from their original size
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = None
# Set up the player
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
# Set up the player
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
#self.player.scale = 0.8
self.player_list.append(self.player)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.player_list.draw()
# Put the text on the screen.
output = f"Score: {self.score}"
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main() | [
6,
7,
9,
12,
13
] |
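One behaviour worth noting in the key handlers of the row above: releasing UP zeroes change_y even if DOWN is still held. A common fix, sketched here independently of arcade, is to track the set of held keys and derive the velocity from the full key state:

held = set()

def on_key_press(key):
    held.add(key)

def on_key_release(key):
    held.discard(key)

def vertical_speed(up_key, down_key, speed):
    # Recomputed from all held keys, so releasing one key never
    # cancels movement in the other direction.
    dy = 0
    if up_key in held:
        dy += speed
    if down_key in held:
        dy -= speed
    return dy

on_key_press('UP'); on_key_press('DOWN'); on_key_release('UP')
print(vertical_speed('UP', 'DOWN', 5))  # -5: DOWN is still held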
749 | b51591de921f6e153c1dd478cec7fad42ff4251a | <mask token>
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
| <mask token>
@app.route('/normal')
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
| <mask token>
app = Flask(__name__)
@app.route('/normal')
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
| from flask import request, Flask
import ldap3
app = Flask(__name__)
@app.route('/normal')
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
| from flask import request, Flask
import ldap3
app = Flask(__name__)
@app.route("/normal")
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = "dc={}".format(unsafe_dc)
search_filter = "(user={})".format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route("/direct")
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = "dc={}".format(unsafe_dc)
search_filter = "(user={})".format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(
dn, search_filter)
# if __name__ == "__main__":
# app.run(debug=True)
| [
1,
2,
3,
4,
5
] |
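Both handlers in the row above are deliberately injectable: request arguments are spliced straight into the DN and the search filter. A remediation sketch using ldap3's escaping helpers (escape_rdn and escape_filter_chars ship with recent ldap3 releases):

from ldap3.utils.dn import escape_rdn
from ldap3.utils.conv import escape_filter_chars

def build_query(unsafe_dc, unsafe_filter):
    # Escape user input before it touches the DN or the filter expression.
    dn = 'dc={}'.format(escape_rdn(unsafe_dc))
    search_filter = '(user={})'.format(escape_filter_chars(unsafe_filter))
    return dn, search_filter

print(build_query('example', 'admin)(objectClass=*'))  # metacharacters neutralised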
750 | 555f4e41661ff4cbf4b9d72feab41ca8b7da2d5f | <mask token>
| <mask token>
def saveListToCSV(filepath, _list):
with open(filepath, 'ab') as f:
np.savetxt(f, [_list], delimiter=',', fmt='%f')
| <mask token>
import numpy as np
def saveListToCSV(filepath, _list):
with open(filepath, 'ab') as f:
np.savetxt(f, [_list], delimiter=',', fmt='%f')
| # -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 12:54:27 2018
@author: Alex
"""
import numpy as np
def saveListToCSV(filepath, _list):
with open(filepath,'ab') as f:
np.savetxt(f, [_list], delimiter=',', fmt='%f') | null | [
0,
1,
2,
3
] |
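Because the helper above opens the file in 'ab' (append) mode, repeated calls accumulate one comma-separated row per call rather than overwriting, and '%f' fixes six decimal places per value:

saveListToCSV('metrics.csv', [0.1, 0.25, 0.5])
saveListToCSV('metrics.csv', [0.2, 0.3, 0.4])
# metrics.csv now holds two rows of fixed-point floats.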
751 | 81f5753e8d0004244b4ee8e26895cb2b38fbb8b6 | <mask token>
| <mask token>
z1.write('file05.txt')
z1.write('file03.txt')
z1.close()
<mask token>
z2.extractall('电影')
z2.close()
| <mask token>
z1 = zipfile.ZipFile('a.zip', 'w')
z1.write('file05.txt')
z1.write('file03.txt')
z1.close()
z2 = zipfile.ZipFile('a.zip', 'r')
z2.extractall('电影')
z2.close()
| import shutil
import zipfile
z1 = zipfile.ZipFile('a.zip', 'w')
z1.write('file05.txt')
z1.write('file03.txt')
z1.close()
z2 = zipfile.ZipFile('a.zip', 'r')
z2.extractall('电影')
z2.close()
| #coding=utf-8
import shutil
import zipfile
# shutil.copyfile("file03.txt","file05.txt") #拷贝
# shutil.copytree("movie/大陆","电影") #拷贝文件夹
#忽略不需要拷贝的文件
# shutil.copytree("movie/大陆","电影",ignore=shutil.ignore_patterns("*.txt","*.html"))
#压缩和解压缩
# shutil.make_archive("电影/压缩","zip","movie/大陆")
z1 = zipfile.ZipFile("a.zip","w")
z1.write("file05.txt")
z1.write("file03.txt")
z1.close()
#解压缩
z2 = zipfile.ZipFile("a.zip","r")
z2.extractall("电影")
z2.close()
| [
0,
1,
2,
3,
4
] |
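The same archive round-trip written with context managers, so both archives are closed even if a write or extract raises:

import zipfile

with zipfile.ZipFile('a.zip', 'w') as zf:
    zf.write('file05.txt')
    zf.write('file03.txt')

with zipfile.ZipFile('a.zip', 'r') as zf:
    zf.extractall('电影')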
752 | ea2e9399a8384600d8457a9de3f263db44dc883d | <mask token>
| <mask token>
for i in cctv['구분']:
gu_list.append(gu_dict_num[i])
<mask token>
cctv.drop(['구분'], axis=1, inplace=True)
<mask token>
print(new_data.info())
new_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)
| <mask token>
train_data = pd.read_csv('./dataset/train_park_daycare.csv')
cctv = pd.read_csv('./dataset/cctv_origin.csv', encoding='EUC-KR')
cctv = cctv.iloc[1:, :2]
gu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5,
'영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11,
'강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15,
'광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구':
22, '송파구': 23, '은평구': 24}
gu_list = []
for i in cctv['구분']:
gu_list.append(gu_dict_num[i])
cctv['gu'] = gu_list
cctv.drop(['구분'], axis=1, inplace=True)
cctv = cctv.rename(columns={'총계': 'cctv_num'})
cctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: ''.join(x.split(',')))
cctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])
new_data = pd.merge(train_data, cctv, on='gu', how='left')
print(new_data.info())
new_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)
| import pandas as pd
train_data = pd.read_csv('./dataset/train_park_daycare.csv')
cctv = pd.read_csv('./dataset/cctv_origin.csv', encoding='EUC-KR')
cctv = cctv.iloc[1:, :2]
gu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5,
'영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11,
'강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15,
'광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구':
22, '송파구': 23, '은평구': 24}
gu_list = []
for i in cctv['구분']:
gu_list.append(gu_dict_num[i])
cctv['gu'] = gu_list
cctv.drop(['구분'], axis=1, inplace=True)
cctv = cctv.rename(columns={'총계': 'cctv_num'})
cctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: ''.join(x.split(',')))
cctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])
new_data = pd.merge(train_data, cctv, on='gu', how='left')
print(new_data.info())
new_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)
| import pandas as pd
# Load the data
train_data = pd.read_csv('./dataset/train_park_daycare.csv')
cctv = pd.read_csv("./dataset/cctv_origin.csv", encoding="EUC-KR")
## Data preprocessing
# Extract the columns of interest
cctv = cctv.iloc[1:, :2]
# Map district (gu) names to numeric codes
gu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5, '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11, '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15, '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구': 22, '송파구': 23, '은평구': 24}
gu_list = []
for i in cctv['구분']:
gu_list.append(gu_dict_num[i])
cctv['gu'] = gu_list
cctv.drop(['구분'], axis=1, inplace=True)
# Rename the column
cctv = cctv.rename(columns={'총계': 'cctv_num'})
# Convert the data type
cctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: "".join(x.split(',')))
cctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])
# Join
new_data = pd.merge(train_data, cctv, on='gu', how='left')
print(new_data.info())
# Save
new_data.to_csv("./dataset/train_add_cctv.csv", header=True, index=False)
| [
0,
1,
2,
3,
4
] |
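Continuing from the script above: a left join leaves NaN in cctv_num for any gu value missing from the CCTV table, which is worth checking before the file is consumed downstream:

missing = new_data['cctv_num'].isna().sum()
print('rows without a cctv match:', missing)
new_data['cctv_num'] = new_data['cctv_num'].fillna(0)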
753 | 86d42716e05155f9e659b22c42635a8f5b8c4a60 | <mask token>
def generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[
int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=
False, preprocess_input: Optional[Callable]=None, use_data_augmentation:
bool=True):
    inputs = tf.keras.layers.Input(shape=img_shape)
    x = inputs
    if use_data_augmentation:
        x = _data_augmentation(x)
    if preprocess_input is not None:
        x = preprocess_input(x)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
_freeze_model(base_model, freeze)
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=
base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
return model
| <mask token>
def _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False
):
if isinstance(freeze, int):
freeze_len = freeze
elif isinstance(freeze, float):
freeze_len = int(freeze * len(model.layers))
elif freeze:
freeze_len = len(model.layers)
else:
freeze_len = 0
if freeze_len != len(model.layers):
model.trainable = True
for layer in model.layers[:freeze_len]:
layer.trainable = False
def generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[
int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=
False, preprocess_input: Optional[Callable]=None, use_data_augmentation:
bool=True):
    inputs = tf.keras.layers.Input(shape=img_shape)
    x = inputs
    if use_data_augmentation:
        x = _data_augmentation(x)
    if preprocess_input is not None:
        x = preprocess_input(x)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
_freeze_model(base_model, freeze)
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=
base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
return model
| <mask token>
_data_augmentation = tf.keras.Sequential([tf.keras.layers.experimental.
preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.
preprocessing.RandomRotation(0.2)])
def _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False
):
if isinstance(freeze, int):
freeze_len = freeze
elif isinstance(freeze, float):
freeze_len = int(freeze * len(model.layers))
elif freeze:
freeze_len = len(model.layers)
else:
freeze_len = 0
if freeze_len != len(model.layers):
model.trainable = True
for layer in model.layers[:freeze_len]:
layer.trainable = False
def generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[
int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=
False, preprocess_input: Optional[Callable]=None, use_data_augmentation:
bool=True):
    inputs = tf.keras.layers.Input(shape=img_shape)
    x = inputs
    if use_data_augmentation:
        x = _data_augmentation(x)
    if preprocess_input is not None:
        x = preprocess_input(x)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
_freeze_model(base_model, freeze)
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=
base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
return model
| import tensorflow as tf
from typing import Optional, Tuple, Union, Callable
_data_augmentation = tf.keras.Sequential([tf.keras.layers.experimental.
preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.
preprocessing.RandomRotation(0.2)])
def _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False
):
if isinstance(freeze, int):
freeze_len = freeze
elif isinstance(freeze, float):
freeze_len = int(freeze * len(model.layers))
elif freeze:
freeze_len = len(model.layers)
else:
freeze_len = 0
if freeze_len != len(model.layers):
model.trainable = True
for layer in model.layers[:freeze_len]:
layer.trainable = False
def generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[
int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=
False, preprocess_input: Optional[Callable]=None, use_data_augmentation:
bool=True):
    inputs = tf.keras.layers.Input(shape=img_shape)
    x = inputs
    if use_data_augmentation:
        x = _data_augmentation(x)
    if preprocess_input is not None:
        x = preprocess_input(x)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
_freeze_model(base_model, freeze)
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=
base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
return model
| import tensorflow as tf
from typing import Optional, Tuple, Union, Callable
_data_augmentation = tf.keras.Sequential(
[
tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
]
)
def _freeze_model(
model: tf.keras.Model,
freeze: Union[bool, int, float] = False,
):
# Obs:
# When you set layer.trainable = False, the BatchNormalization layer will
# run in inference mode, and will not update its mean and variance statistics
# https://www.tensorflow.org/tutorials/images/transfer_learning#important_note_about_batchnormalization_layers
if isinstance(freeze, int):
freeze_len = freeze
elif isinstance(freeze, float):
freeze_len = int(freeze * len(model.layers))
else: # isinstance(freeze, bool):
if freeze:
freeze_len = len(model.layers)
else:
freeze_len = 0
if freeze_len != len(model.layers):
model.trainable = True
for layer in model.layers[:freeze_len]:
layer.trainable = False
def generate_model(
base_model: tf.keras.Model,
img_shape: Tuple[Optional[int], Optional[int], Optional[int]],
freeze: Union[bool, int, float] = False,
preprocess_input: Optional[Callable] = None,
use_data_augmentation: bool = True,
):
    inputs = tf.keras.layers.Input(shape=img_shape)
    # Chain from x so augmentation is not discarded when preprocess_input is
    # also given, and so x is defined even when both options are disabled.
    x = inputs
    if use_data_augmentation:
        x = _data_augmentation(x)
    if preprocess_input is not None:
        x = preprocess_input(x)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(inputs, outputs)
_freeze_model(base_model, freeze)
base_learning_rate = 0.0001
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
return model
| [
1,
2,
3,
4,
5
] |
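A usage sketch for generate_model above, assuming an ImageNet MobileNetV2 backbone; any tf.keras.applications model with its matching preprocess_input works the same way:

IMG_SHAPE = (160, 160, 3)
base = tf.keras.applications.MobileNetV2(
    input_shape=IMG_SHAPE, include_top=False, weights='imagenet')
model = generate_model(
    base,
    IMG_SHAPE,
    freeze=True,  # feature extraction: keep the whole backbone frozen
    preprocess_input=tf.keras.applications.mobilenet_v2.preprocess_input,
)
model.summary()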
754 | cac9d84f20a79b115c84ff4fe8cf4640182a42d7 | <mask token>
class Test(TestItem):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def abortRun(self):
self._runHistory.pop()
<mask token>
<mask token>
<mask token>
<mask token>
@property
def funcName(self):
return self.item.__name__
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
@property
def state(self):
rec = self.runRecord
if rec:
return rec.state
return NOT_RUN
<mask token>
<mask token>
<mask token>
def addRunRecord(self, record):
self._runHistory.append(record)
if len(self._runHistory) > 5:
self._runHistory[:] = self._runHistory[-5:]
<mask token>
<mask token>
<mask token>
<mask token>
class NullTest(Test):
amNull = True
def __init__(self):
context = Context.getContext(dirPath=os.getcwd())
super(NullTest, self).__init__(None, None, None, context)
self.number = 0
self.startNewRun()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
@property
def isBug(self):
return False
def __bool__(self):
return False
class Suite(TestItem):
typeName = 'Suite'
isSuite = True
def __init__(self, *args, **kwargs):
self.myDir = kwargs.pop('myDir')
super(Suite, self).__init__(*args, **kwargs)
self.exited = False
self.number = 0
self.skipTests = False
self.entered = False
def reset(self):
self.entered = False
self.skipTests = False
@intelliprop
def children(self):
"""All the direct children of this item."""
tests = [t for t in self._collection if t.parent is self]
suites = [t for t in self._collection.suites if t.parent is self]
return suites + tests
@intelliprop
def tests(self):
"""All the direct test children of this item."""
return [t for t in self._collection if t.parent is self]
@property
def suite(self):
return self.item
@property
def runAfter(self):
"""The _run_after for this source."""
return self.namespace.get('_run_after', [])
@property
def postCheck(self):
return self.namespace.get('postCheck', lambda : None)
@property
def setUp(self):
return self.namespace.get('setUp', lambda : None)
@property
def postSetUp(self):
return self.namespace.get('postSetUp', lambda : None)
@property
def tearDown(self):
return self.namespace.get('tearDown', lambda : None)
@property
def suiteSetUp(self):
return self.namespace.get('suiteSetUp', lambda : None)
@property
def suiteTearDown(self):
return self.namespace.get('suiteTearDown', lambda : None)
def getResult(self, name=None):
runCount = 0
childStates = {}
result = Result(PASS, NONE)
if not self.children:
result.state = NOT_RUN
return result
for c in self.children:
state = c.state
runCount += state is not NOT_RUN
childStates[state] = None
if FAIL in childStates:
result.state = CHILD_FAIL
elif CHILD_FAIL in childStates:
result.state = CHILD_FAIL
elif BAD_SETUP in childStates:
result.state = CHILD_FAIL
elif PART_RUN in childStates:
result.state = PART_RUN
elif NOT_RUN in childStates:
if runCount:
result.state = PART_RUN
else:
result.state = NOT_RUN
return result
@property
def result(self):
return self.getResult()
@property
def state(self):
result = self.getResult()
return result.reportCode
def hasTests(self):
for t in self._collection:
if t.parent is self:
return True
class ModuleSuite(Suite):
pass
class ClassSuite(Suite):
@property
def klass(self):
return self.item.__class__.__name__
<mask token>
| <mask token>
class TestItem:
<mask token>
def __init__(self, item, uid, parentUid, context, namespace=None):
"""Constructor:
:Parameters:
item
The concrete test item. For a test function/method this is the
function/method itself. For a `ClassSuite` this is the instance and
for a `ModuleSuite` this is the the module instance.
uid
The unique ID for this item, which is a tuple of strings.
parentUid
The unique ID of the parent item or ``None``. Only the root `Suite`
of a test tree has a parent of ``None``.
namespace
A dictionary that provides the containing namespace for the test
item.
"""
self.item = item
self.uid = uid
self.context = context
self.parentUid = parentUid
self.namespace = self._getNamespace(namespace)
self._collection = None
self._running = False
self._marks = {}
self.extraInfo = {}
<mask token>
<mask token>
def isMarked(self, mark):
return mark in self._marks
<mask token>
<mask token>
@intelliprop
def state(self):
"""The current state of the test.
"""
result = self.getResult()
return result.state
def setState(self, state):
if state is PASS:
pass
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
@property
def path(self):
p = self.namespace.get('__file__', None)
if p is None:
return self.parent.path
if p.endswith('.pyc'):
p = p[:-1]
return p
<mask token>
<mask token>
class Test(TestItem):
typeName = 'Test'
isSuite = False
amNull = False
def __init__(self, *args, **kwargs):
super(Test, self).__init__(*args, **kwargs)
self._runHistory = []
self.stopAll = False
if self.func:
self.func.cs_test_info.test = weakref.proxy(self)
def getHistory(self):
return self._runHistory
def dumpHist(self):
return self._runHistory[-1].dump()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
def addStepRecord(self, name):
runRecord = self._runHistory[-1]
return runRecord.addStepRecord(name)
@property
def postCheck(self):
return self.parent.postCheck
@intelliprop
def hasFailed(self):
"""Check if this test has properly failed.
:Return:
``True`` if the test has failed to run for any reason.
"""
record = self.getRunRecord().getRecord('run')
return record.state is FAIL
@property
def klass(self):
return self.parent.klass
@property
def funcName(self):
return self.item.__name__
@property
def func(self):
return self.item
@property
def info(self):
return self.func.cs_test_info
@property
def isBroken(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
flag = self.info.reserved_cs_flags.get('broken', None)
if flag is None:
flag = self.info.cs_flags.get('broken', False)
return flag
@property
def isTodo(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
return self.info.cs_flags.get('todo', False)
@property
def isBug(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
flag = self.info.reserved_cs_flags.get('bug', None)
if flag is None:
flag = self.info.cs_flags.get('bug', False)
return flag
@property
def shouldFork(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
if self.info.reserved_cs_flags.get('fork', False):
return True
parent = self.parent
try:
return parent.suite.cs_attrs.fork_all
except AttributeError:
return False
@property
def testID(self):
return self.info.cs_tags.get('testID', None)
@property
def title(self):
return self.info.cs_flags.get('title', None)
@property
def isRunnable(self):
if self.parent.exited:
return False
return True
@property
def state(self):
rec = self.runRecord
if rec:
return rec.state
return NOT_RUN
@property
def result(self):
rec = self.runRecord
if rec:
return rec.result
return NOT_RUN
@property
def hasRunProblem(self):
rec = self.runRecord
if rec:
return rec.hasRunProblem
return False
@property
def hasFailed(self):
rec = self.runRecord
if rec:
return rec.hasFailed
return False
def addRunRecord(self, record):
self._runHistory.append(record)
if len(self._runHistory) > 5:
self._runHistory[:] = self._runHistory[-5:]
@property
def runRecord(self):
"""The XXX TODO"""
if self._runHistory:
for rec in reversed(self._runHistory):
if not rec.invalid:
return rec
@property
def phaseRecord(self):
"""The XXX TODO"""
if not self._runHistory:
return None, None
return self._runHistory[-1].phaseRecord
def getStepRecord(self, phase):
return self._runHistory[-1].getStepRecord(phase)
def getTestProcedure(self):
return self._collection.spec.getThing(self)
class NullTest(Test):
amNull = True
def __init__(self):
context = Context.getContext(dirPath=os.getcwd())
super(NullTest, self).__init__(None, None, None, context)
self.number = 0
self.startNewRun()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
@property
def isBug(self):
return False
def __bool__(self):
return False
class Suite(TestItem):
typeName = 'Suite'
isSuite = True
def __init__(self, *args, **kwargs):
self.myDir = kwargs.pop('myDir')
super(Suite, self).__init__(*args, **kwargs)
self.exited = False
self.number = 0
self.skipTests = False
self.entered = False
def reset(self):
self.entered = False
self.skipTests = False
@intelliprop
def children(self):
"""All the direct children of this item."""
tests = [t for t in self._collection if t.parent is self]
suites = [t for t in self._collection.suites if t.parent is self]
return suites + tests
@intelliprop
def tests(self):
"""All the direct test children of this item."""
return [t for t in self._collection if t.parent is self]
@property
def suite(self):
return self.item
@property
def runAfter(self):
"""The _run_after for this source."""
return self.namespace.get('_run_after', [])
@property
def postCheck(self):
return self.namespace.get('postCheck', lambda : None)
@property
def setUp(self):
return self.namespace.get('setUp', lambda : None)
@property
def postSetUp(self):
return self.namespace.get('postSetUp', lambda : None)
@property
def tearDown(self):
return self.namespace.get('tearDown', lambda : None)
@property
def suiteSetUp(self):
return self.namespace.get('suiteSetUp', lambda : None)
@property
def suiteTearDown(self):
return self.namespace.get('suiteTearDown', lambda : None)
def getResult(self, name=None):
runCount = 0
childStates = {}
result = Result(PASS, NONE)
if not self.children:
result.state = NOT_RUN
return result
for c in self.children:
state = c.state
runCount += state is not NOT_RUN
childStates[state] = None
if FAIL in childStates:
result.state = CHILD_FAIL
elif CHILD_FAIL in childStates:
result.state = CHILD_FAIL
elif BAD_SETUP in childStates:
result.state = CHILD_FAIL
elif PART_RUN in childStates:
result.state = PART_RUN
elif NOT_RUN in childStates:
if runCount:
result.state = PART_RUN
else:
result.state = NOT_RUN
return result
@property
def result(self):
return self.getResult()
@property
def state(self):
result = self.getResult()
return result.reportCode
def hasTests(self):
for t in self._collection:
if t.parent is self:
return True
class ModuleSuite(Suite):
pass
class ClassSuite(Suite):
@property
def klass(self):
return self.item.__class__.__name__
<mask token>
| <mask token>
class TestItem:
<mask token>
def __init__(self, item, uid, parentUid, context, namespace=None):
"""Constructor:
:Parameters:
item
The concrete test item. For a test function/method this is the
function/method itself. For a `ClassSuite` this is the instance and
for a `ModuleSuite` this is the the module instance.
uid
The unique ID for this item, which is a tuple of strings.
parentUid
The unique ID of the parent item or ``None``. Only the root `Suite`
of a test tree has a parent of ``None``.
namespace
A dictionary that provides the containing namespace for the test
item.
"""
self.item = item
self.uid = uid
self.context = context
self.parentUid = parentUid
self.namespace = self._getNamespace(namespace)
self._collection = None
self._running = False
self._marks = {}
self.extraInfo = {}
def setMark(self, mark):
self._marks[mark] = None
def clearMark(self, mark):
if mark in self._marks:
del self._marks[mark]
def isMarked(self, mark):
return mark in self._marks
def setCollection(self, collection):
self._collection = weakref.proxy(collection)
<mask token>
@intelliprop
def state(self):
"""The current state of the test.
"""
result = self.getResult()
return result.state
def setState(self, state):
if state is PASS:
pass
@intelliprop
def level(self):
"""This item's level in the test tree.
This is the number of ancestors this item has. If zero then this is
the 'root' item.
"""
return len(self.ancestors)
@intelliprop
def parent(self):
"""The parent of this item, which may be ``None``.
If this is ``None`` then this item is the root of a (possibly nested)
suite of tests.
"""
return self._collection.parent(self)
@intelliprop
def ancestors(self):
"""A list of all ancestors for this item.
        Each entry is a UID. The first entry is the oldest ancestor and the
last entry is the immediate parent's UID.
"""
return self._collection.getAncestors(self)
def hasFailingAncestor(self):
"""Check if any ancestor is considered to have failed.
An ancestor suite has failed if, for example, its ``suiteSetup``
failed.
:Return:
``True`` if any ancestors has failed.
"""
parent = self.parent
if parent is None:
return
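        # NOTE: this unconditional return disables the ancestor check;
        # the recursive lookup below is unreachable.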
return
return parent.hasFailed or parent.hasFailingAncestor()
<mask token>
@intelliprop
def rawDoc(self):
"""The raw docstring, no cleaning up performed at all."""
return self.namespace['__doc__']
@intelliprop
def docLines(self):
"""The docstring as lines.
This is cleaned up to remove leading and trailing blank lines from
the summary and details.
:Return:
A sequence of (non-nul terminated) lines for the docstring. The
summary (if present) is separated from the details by a single
empty line.
"""
summary, description = self._getDocParts()
if description:
return summary + [''] + description
return summary
@intelliprop
def doc(self):
"""The docstring after being cleaned up.
:Return:
            The cleaned up docstring as a multiline string. Leading and
            trailing blank lines are removed and the summary is separated from
            any details by a single blank line. Common leading whitespace is
also removed from each line.
"""
return '\n'.join(self.docLines)
def _getDocParts(self):
lines = self.rawDoc.splitlines()
while lines and not lines[0].strip():
lines.pop(0)
summary = []
while lines and lines[0].strip():
summary.append(lines.pop(0))
while lines and not lines[0].strip():
lines.pop(0)
while lines and not lines[-1].strip():
lines.pop()
summary = summary[:1] + dedentLines(summary[1:])
details = dedentLines(lines)
return summary, details
@property
def summary(self):
summary, description = self._getDocParts()
return ' '.join(summary)
@property
def details(self):
summary, description = self._getDocParts()
return description
@property
def sourcesUnderTest(self):
sources = []
for p in self.namespace.get('sources_under_test', []):
if not os.path.isabs(p):
p = os.path.abspath(os.path.join(self.dirname, p))
sources.append(p)
p = self.parent
if p is not None:
sources.extend(p.sourcesUnderTest)
return sources
@property
def klass(self):
return None
@property
def path(self):
p = self.namespace.get('__file__', None)
if p is None:
return self.parent.path
if p.endswith('.pyc'):
p = p[:-1]
return p
@property
def dirname(self):
f = self.path
if f:
return os.path.dirname(f)
@property
def isBug(self):
return False
class Test(TestItem):
typeName = 'Test'
isSuite = False
amNull = False
def __init__(self, *args, **kwargs):
super(Test, self).__init__(*args, **kwargs)
self._runHistory = []
self.stopAll = False
if self.func:
self.func.cs_test_info.test = weakref.proxy(self)
def getHistory(self):
return self._runHistory
def dumpHist(self):
return self._runHistory[-1].dump()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
def addStepRecord(self, name):
runRecord = self._runHistory[-1]
return runRecord.addStepRecord(name)
@property
def postCheck(self):
return self.parent.postCheck
@intelliprop
def hasFailed(self):
"""Check if this test has properly failed.
:Return:
``True`` if the test has failed to run for any reason.
"""
record = self.getRunRecord().getRecord('run')
return record.state is FAIL
@property
def klass(self):
return self.parent.klass
@property
def funcName(self):
return self.item.__name__
@property
def func(self):
return self.item
@property
def info(self):
return self.func.cs_test_info
@property
def isBroken(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
flag = self.info.reserved_cs_flags.get('broken', None)
if flag is None:
flag = self.info.cs_flags.get('broken', False)
return flag
@property
def isTodo(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
return self.info.cs_flags.get('todo', False)
@property
def isBug(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
flag = self.info.reserved_cs_flags.get('bug', None)
if flag is None:
flag = self.info.cs_flags.get('bug', False)
return flag
@property
def shouldFork(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
if self.info.reserved_cs_flags.get('fork', False):
return True
parent = self.parent
try:
return parent.suite.cs_attrs.fork_all
except AttributeError:
return False
@property
def testID(self):
return self.info.cs_tags.get('testID', None)
@property
def title(self):
return self.info.cs_flags.get('title', None)
@property
def isRunnable(self):
if self.parent.exited:
return False
return True
@property
def state(self):
rec = self.runRecord
if rec:
return rec.state
return NOT_RUN
@property
def result(self):
rec = self.runRecord
if rec:
return rec.result
return NOT_RUN
@property
def hasRunProblem(self):
rec = self.runRecord
if rec:
return rec.hasRunProblem
return False
@property
def hasFailed(self):
rec = self.runRecord
if rec:
return rec.hasFailed
return False
def addRunRecord(self, record):
self._runHistory.append(record)
if len(self._runHistory) > 5:
self._runHistory[:] = self._runHistory[-5:]
@property
def runRecord(self):
"""The XXX TODO"""
if self._runHistory:
for rec in reversed(self._runHistory):
if not rec.invalid:
return rec
@property
def phaseRecord(self):
"""The XXX TODO"""
if not self._runHistory:
return None, None
return self._runHistory[-1].phaseRecord
def getStepRecord(self, phase):
return self._runHistory[-1].getStepRecord(phase)
def getTestProcedure(self):
return self._collection.spec.getThing(self)
class NullTest(Test):
amNull = True
def __init__(self):
context = Context.getContext(dirPath=os.getcwd())
super(NullTest, self).__init__(None, None, None, context)
self.number = 0
self.startNewRun()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
@property
def isBug(self):
return False
def __bool__(self):
return False
class Suite(TestItem):
typeName = 'Suite'
isSuite = True
def __init__(self, *args, **kwargs):
self.myDir = kwargs.pop('myDir')
super(Suite, self).__init__(*args, **kwargs)
self.exited = False
self.number = 0
self.skipTests = False
self.entered = False
def reset(self):
self.entered = False
self.skipTests = False
@intelliprop
def children(self):
"""All the direct children of this item."""
tests = [t for t in self._collection if t.parent is self]
suites = [t for t in self._collection.suites if t.parent is self]
return suites + tests
@intelliprop
def tests(self):
"""All the direct test children of this item."""
return [t for t in self._collection if t.parent is self]
@property
def suite(self):
return self.item
@property
def runAfter(self):
"""The _run_after for this source."""
return self.namespace.get('_run_after', [])
@property
def postCheck(self):
return self.namespace.get('postCheck', lambda : None)
@property
def setUp(self):
return self.namespace.get('setUp', lambda : None)
@property
def postSetUp(self):
return self.namespace.get('postSetUp', lambda : None)
@property
def tearDown(self):
return self.namespace.get('tearDown', lambda : None)
@property
def suiteSetUp(self):
return self.namespace.get('suiteSetUp', lambda : None)
@property
def suiteTearDown(self):
return self.namespace.get('suiteTearDown', lambda : None)
def getResult(self, name=None):
runCount = 0
childStates = {}
result = Result(PASS, NONE)
if not self.children:
result.state = NOT_RUN
return result
for c in self.children:
state = c.state
runCount += state is not NOT_RUN
childStates[state] = None
if FAIL in childStates:
result.state = CHILD_FAIL
elif CHILD_FAIL in childStates:
result.state = CHILD_FAIL
elif BAD_SETUP in childStates:
result.state = CHILD_FAIL
elif PART_RUN in childStates:
result.state = PART_RUN
elif NOT_RUN in childStates:
if runCount:
result.state = PART_RUN
else:
result.state = NOT_RUN
return result
@property
def result(self):
return self.getResult()
@property
def state(self):
result = self.getResult()
return result.reportCode
def hasTests(self):
for t in self._collection:
if t.parent is self:
return True
class ModuleSuite(Suite):
pass
class ClassSuite(Suite):
@property
def klass(self):
return self.item.__class__.__name__
<mask token>
| <mask token>
class StepRecord:
<mask token>
<mask token>
<mask token>
@property
def state(self):
return self.result
<mask token>
<mask token>
class StepRecordList:
def __init__(self):
self.entries = []
class RunRecord:
"""A set of records containing all information about a single test's run.
This stores multiple `StepRecord` instances. The records are stored in a
dictionary keyed by the following names:
    setUp, tearDown, prevTearDown, run
Each maps to a single `StepRecord`.
suiteSetUp, suiteTearDown, prevSuiteTearDown
Each mapping to a list of `StepRecord` instances, in execution order.
"""
_simpleNames = 'setUp tearDown prevTearDown run postCheck'.split()
_listNames = 'suiteSetUp suiteTearDown prevSuiteTearDown'.split()
_recordNames = _simpleNames + _listNames
_runTime = None
def __init__(self):
self.runTime = RunRecord._runTime
self.invalid = False
self._records = {}
self.extraInfo = {}
def __setstate__(self, state):
self.invalid = False
self.__dict__.update(state)
@classmethod
def startNewRun(cls):
cls._runTime = time.time()
@classmethod
def finishRun(cls):
cls._runTime = None
def addStepRecord(self, name):
"""Add a new phase record to this run record.
Adds a new `StepRecord` for a test phase, which must be one of those
defined for a `RunRecord`.
:Parameters:
name
The name for the record. It must be the name of a defined test
phase.
:Return:
The newly added `StepRecord` instance. This will always be a newly
created instance.
"""
assert name in RunRecord._recordNames
record = StepRecord()
if name in RunRecord._simpleNames:
assert name not in self._records
self._records[name] = record
else:
if name not in self._records:
self._records[name] = StepRecordList()
self._records[name].entries.append(record)
return record
def getResult(self, name):
pass
@property
def result(self):
try:
rec = self._records['setUp']
except KeyError:
pass
else:
if rec.state is not PASS:
result = Result(BAD_SETUP, BAD_SETUP)
return result
try:
rec = self._records['run']
except KeyError:
pass
else:
result = Result(rec.state, rec.reason)
return result
for rec in self._records.get('suiteSetUp', []):
if rec.state is not PASS:
return Result(NOT_RUN, BAD_SUITE_SETUP)
try:
rec = self._records['setUp']
except KeyError:
pass
else:
if rec.state is not PASS:
return Result(NOT_RUN, BAD_SETUP)
return Result(NOT_RUN, NONE)
@property
def state(self):
try:
rec = self._records['setUp']
except KeyError:
pass
else:
if rec.state is NOT_RUN:
return NOT_RUN
if rec.state not in (PASS, BUG, BUG_PASS):
return BAD_SETUP
try:
rec = self._records['run']
return rec.state
except KeyError:
pass
return NOT_RUN
@property
def isRunnable(self):
for name in ('suiteTearDown', 'tearDown', 'suiteSetUp', 'setUp'):
try:
if self._records[name].state not in (PASS, SKIPPED, NOT_RUN):
return True
except KeyError:
pass
return False
@property
def hasRunProblem(self):
for name in ('tearDown', 'suiteTearDown', 'suiteSetUp', 'setUp',
'run', 'postCheck'):
try:
record = self._records[name]
except KeyError:
continue
if name in ('tearDown', 'setUp', 'run', 'postCheck'):
if record.state not in (PASS, SKIPPED, NOT_RUN, TODO, BUG,
BUG_PASS):
return True
else:
for rec in record.entries:
if rec.state not in (PASS, SKIPPED, NOT_RUN, TODO, BUG,
BUG_PASS):
return True
return False
@property
def hasFailed(self):
for name in ('suiteSetUp', 'setUp', 'run'):
try:
record = self._records[name]
except KeyError:
continue
if name in ('setUp', 'run'):
if record.state not in (PASS, SKIPPED, NOT_RUN, TODO, BUG,
BUG_PASS):
return True
else:
for rec in record.entries:
if rec.state not in (PASS, SKIPPED, NOT_RUN, TODO, BUG,
BUG_PASS):
return True
return False
@property
def phaseRecord(self):
"""Get the most recent phaseRecord.
This is used to get the most pertinent record for this test; i.e. the
one that provides the most useful result for the test.
TODO: This is not yet well defined.
"""
for name in ('tearDown', 'run', 'setUp'):
try:
return name, self._records[name]
except KeyError:
pass
seq = self._records.get('suiteSetUp', None)
if seq is None:
return None, None
for ent in seq.entries:
if ent.hasFailed:
return 'suiteSetUp', ent
return 'suiteSetUp', seq.entries[0]
def getStepRecord(self, phase):
"""Get the record details for a test run phase."""
ent = self._records.get(phase, None)
if hasattr(ent, 'append'):
seq = ent
for ent in seq:
if ent.hasFailed:
return ent
return seq.entries[0]
if hasattr(ent, 'entries'):
seq = ent.entries
for ent in seq:
if ent.hasFailed:
return ent
if seq:
return seq[0]
return
return ent
class TestItem:
"""Base class for `Test` and `Suite` classes.
"""
def __init__(self, item, uid, parentUid, context, namespace=None):
"""Constructor:
:Parameters:
item
The concrete test item. For a test function/method this is the
function/method itself. For a `ClassSuite` this is the instance and
            for a `ModuleSuite` this is the module instance.
uid
The unique ID for this item, which is a tuple of strings.
parentUid
The unique ID of the parent item or ``None``. Only the root `Suite`
of a test tree has a parent of ``None``.
namespace
A dictionary that provides the containing namespace for the test
item.
"""
self.item = item
self.uid = uid
self.context = context
self.parentUid = parentUid
self.namespace = self._getNamespace(namespace)
self._collection = None
self._running = False
self._marks = {}
self.extraInfo = {}
def setMark(self, mark):
self._marks[mark] = None
def clearMark(self, mark):
if mark in self._marks:
del self._marks[mark]
def isMarked(self, mark):
return mark in self._marks
def setCollection(self, collection):
self._collection = weakref.proxy(collection)
def setPhase(self, phase):
self._phase = phase
@intelliprop
def state(self):
"""The current state of the test.
"""
result = self.getResult()
return result.state
def setState(self, state):
if state is PASS:
pass
@intelliprop
def level(self):
"""This item's level in the test tree.
This is the number of ancestors this item has. If zero then this is
the 'root' item.
"""
return len(self.ancestors)
@intelliprop
def parent(self):
"""The parent of this item, which may be ``None``.
If this is ``None`` then this item is the root of a (possibly nested)
suite of tests.
"""
return self._collection.parent(self)
@intelliprop
def ancestors(self):
"""A list of all ancestors for this item.
        Each entry is a UID. The first entry is the oldest ancestor and the
last entry is the immediate parent's UID.
"""
return self._collection.getAncestors(self)
def hasFailingAncestor(self):
"""Check if any ancestor is considered to have failed.
An ancestor suite has failed if, for example, its ``suiteSetup``
failed.
:Return:
``True`` if any ancestors has failed.
"""
parent = self.parent
if parent is None:
return
return
return parent.hasFailed or parent.hasFailingAncestor()
def _getNamespace(self, namespace=None):
return namespace or dict([(n, getattr(self.item, n)) for n in dir(
self.item)])
@intelliprop
def rawDoc(self):
"""The raw docstring, no cleaning up performed at all."""
return self.namespace['__doc__']
@intelliprop
def docLines(self):
"""The docstring as lines.
This is cleaned up to remove leading and trailing blank lines from
the summary and details.
:Return:
            A sequence of (non-newline-terminated) lines for the docstring. The
summary (if present) is separated from the details by a single
empty line.
"""
summary, description = self._getDocParts()
if description:
return summary + [''] + description
return summary
@intelliprop
def doc(self):
"""The docstring after being cleaned up.
:Return:
            The cleaned up docstring as a multiline string. Leading and
            trailing blank lines are removed and the summary is separated from
            any details by a single blank line. Common leading whitespace is
            also removed from each line.
"""
return '\n'.join(self.docLines)
def _getDocParts(self):
lines = self.rawDoc.splitlines()
while lines and not lines[0].strip():
lines.pop(0)
summary = []
while lines and lines[0].strip():
summary.append(lines.pop(0))
while lines and not lines[0].strip():
lines.pop(0)
while lines and not lines[-1].strip():
lines.pop()
summary = summary[:1] + dedentLines(summary[1:])
details = dedentLines(lines)
return summary, details
@property
def summary(self):
summary, description = self._getDocParts()
return ' '.join(summary)
@property
def details(self):
summary, description = self._getDocParts()
return description
@property
def sourcesUnderTest(self):
sources = []
for p in self.namespace.get('sources_under_test', []):
if not os.path.isabs(p):
p = os.path.abspath(os.path.join(self.dirname, p))
sources.append(p)
p = self.parent
if p is not None:
sources.extend(p.sourcesUnderTest)
return sources
@property
def klass(self):
return None
@property
def path(self):
p = self.namespace.get('__file__', None)
if p is None:
return self.parent.path
if p.endswith('.pyc'):
p = p[:-1]
return p
@property
def dirname(self):
f = self.path
if f:
return os.path.dirname(f)
@property
def isBug(self):
return False
class Test(TestItem):
typeName = 'Test'
isSuite = False
amNull = False
def __init__(self, *args, **kwargs):
super(Test, self).__init__(*args, **kwargs)
self._runHistory = []
self.stopAll = False
if self.func:
self.func.cs_test_info.test = weakref.proxy(self)
def getHistory(self):
return self._runHistory
def dumpHist(self):
return self._runHistory[-1].dump()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
def addStepRecord(self, name):
runRecord = self._runHistory[-1]
return runRecord.addStepRecord(name)
@property
def postCheck(self):
return self.parent.postCheck
@intelliprop
def hasFailed(self):
"""Check if this test has properly failed.
:Return:
``True`` if the test has failed to run for any reason.
"""
record = self.getRunRecord().getRecord('run')
return record.state is FAIL
@property
def klass(self):
return self.parent.klass
@property
def funcName(self):
return self.item.__name__
@property
def func(self):
return self.item
@property
def info(self):
return self.func.cs_test_info
@property
def isBroken(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
flag = self.info.reserved_cs_flags.get('broken', None)
if flag is None:
flag = self.info.cs_flags.get('broken', False)
return flag
@property
def isTodo(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
return self.info.cs_flags.get('todo', False)
@property
def isBug(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
flag = self.info.reserved_cs_flags.get('bug', None)
if flag is None:
flag = self.info.cs_flags.get('bug', False)
return flag
@property
def shouldFork(self):
if not hasattr(self, 'info'):
raise PropertyError('%r has no attribute %r' % (self.__class__.
__name__, 'info'))
if self.info.reserved_cs_flags.get('fork', False):
return True
parent = self.parent
try:
return parent.suite.cs_attrs.fork_all
except AttributeError:
return False
@property
def testID(self):
return self.info.cs_tags.get('testID', None)
@property
def title(self):
return self.info.cs_flags.get('title', None)
@property
def isRunnable(self):
if self.parent.exited:
return False
return True
@property
def state(self):
rec = self.runRecord
if rec:
return rec.state
return NOT_RUN
@property
def result(self):
rec = self.runRecord
if rec:
return rec.result
return NOT_RUN
@property
def hasRunProblem(self):
rec = self.runRecord
if rec:
return rec.hasRunProblem
return False
@property
def hasFailed(self):
rec = self.runRecord
if rec:
return rec.hasFailed
return False
def addRunRecord(self, record):
self._runHistory.append(record)
if len(self._runHistory) > 5:
self._runHistory[:] = self._runHistory[-5:]
@property
def runRecord(self):
"""The XXX TODO"""
if self._runHistory:
for rec in reversed(self._runHistory):
if not rec.invalid:
return rec
@property
def phaseRecord(self):
"""The XXX TODO"""
if not self._runHistory:
return None, None
return self._runHistory[-1].phaseRecord
def getStepRecord(self, phase):
return self._runHistory[-1].getStepRecord(phase)
def getTestProcedure(self):
return self._collection.spec.getThing(self)
class NullTest(Test):
amNull = True
def __init__(self):
context = Context.getContext(dirPath=os.getcwd())
super(NullTest, self).__init__(None, None, None, context)
self.number = 0
self.startNewRun()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
@property
def isBug(self):
return False
def __bool__(self):
return False
class Suite(TestItem):
typeName = 'Suite'
isSuite = True
def __init__(self, *args, **kwargs):
self.myDir = kwargs.pop('myDir')
super(Suite, self).__init__(*args, **kwargs)
self.exited = False
self.number = 0
self.skipTests = False
self.entered = False
def reset(self):
self.entered = False
self.skipTests = False
@intelliprop
def children(self):
"""All the direct children of this item."""
tests = [t for t in self._collection if t.parent is self]
suites = [t for t in self._collection.suites if t.parent is self]
return suites + tests
@intelliprop
def tests(self):
"""All the direct test children of this item."""
return [t for t in self._collection if t.parent is self]
@property
def suite(self):
return self.item
@property
def runAfter(self):
"""The _run_after for this source."""
return self.namespace.get('_run_after', [])
@property
def postCheck(self):
return self.namespace.get('postCheck', lambda : None)
@property
def setUp(self):
return self.namespace.get('setUp', lambda : None)
@property
def postSetUp(self):
return self.namespace.get('postSetUp', lambda : None)
@property
def tearDown(self):
return self.namespace.get('tearDown', lambda : None)
@property
def suiteSetUp(self):
return self.namespace.get('suiteSetUp', lambda : None)
@property
def suiteTearDown(self):
return self.namespace.get('suiteTearDown', lambda : None)
def getResult(self, name=None):
runCount = 0
childStates = {}
result = Result(PASS, NONE)
if not self.children:
result.state = NOT_RUN
return result
for c in self.children:
state = c.state
runCount += state is not NOT_RUN
childStates[state] = None
if FAIL in childStates:
result.state = CHILD_FAIL
elif CHILD_FAIL in childStates:
result.state = CHILD_FAIL
elif BAD_SETUP in childStates:
result.state = CHILD_FAIL
elif PART_RUN in childStates:
result.state = PART_RUN
elif NOT_RUN in childStates:
if runCount:
result.state = PART_RUN
else:
result.state = NOT_RUN
return result
@property
def result(self):
return self.getResult()
@property
def state(self):
result = self.getResult()
return result.reportCode
def hasTests(self):
for t in self._collection:
if t.parent is self:
return True
class ModuleSuite(Suite):
pass
class ClassSuite(Suite):
@property
def klass(self):
return self.item.__class__.__name__
<mask token>
| """Those things that are core to tests.
This module provides the most fundamental test entities, which include such
things as:
- Tests
- Suites
- Test states
"""
from __future__ import print_function
__docformat__ = "restructuredtext"
import os
import textwrap
import time
import weakref
import inspect
from cleversheep3.Prog.Aspects import intelliprop
from cleversheep3.Prog.Enum import Enum
from cleversheep3.Test.Tester import Context
from cleversheep3.Test.Tester import Coordinator
from cleversheep3.Test.Tester import options
State = Enum("State")
# Static test states.
NOT_RUN = State("NOT_RUN")
PASS = State("PASS")
FAIL = State("FAIL")
SKIPPED = State("SKIPPED")
BROKEN = State("BROKEN")
DISABLED = State("DISABLED")
BUG = State("BUG")
BUG_PASS = State("BUG_PASS")
TODO = State("TODO")
# Dynamic test states.
SET_UP = State("SET_UP")
TEAR_DOWN = State("TEAR_DOWN")
RUNNING = State("RUNNING")
FINISHED = State("FINISHED") # TODO: To remove!
# Suite specific states.
CHILD_FAIL = State("CHILD_FAIL")
PART_RUN = State("PART_RUN")
# Reason codes. Note that this list is those codes that are only used as
# reasons, some states are also used as reason code.
NONE = State("NONE")
ERROR = State("ERROR")
BAD_SETUP = State("BAD_SETUP")
BAD_SUITE_SETUP = State("BAD_SUITE_SETUP")
EXIT_SUITE = State("EXIT_SUITE")
EXIT_ALL = State("EXIT_ALL")
USER_STOPPED = State("USER_STOPPED")
def dedentLines(lines):
return textwrap.dedent("\n".join(lines)).splitlines()
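# Illustrative sketch (not part of the original module): dedentLines strips the
# common leading whitespace from a block of lines while keeping their relative
# indentation.
if __name__ == "__main__":  # pragma: no cover - example only
    _example = ["    first line", "        indented detail"]
    assert dedentLines(_example) == ["first line", "    indented detail"]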
class TestInfo:
"""Information about a test function.
This class supports the ``@test(...)`` decorator that is used by
cleversheep3 to mark test functions. All such marked functions are given
an attribute called ``cs_test_info``, which is in an instance of this
class.
This is most commonly used in test filter functions, as registered by
`addTestFilter`. When tests are marked using the ``@test`` decorator
they can be given one or more tags and/or flags.
Currently this is little more than a struct, except that it provides
a `__getattr__` that returns ``None`` by default.
"""
def __init__(self, *args, **kwargs):
"""Constructor:
:Parameters:
args
Non-keyword arguments are interpreted by the test framework.
Each argument is a string. The supported forms are:
plat:<platformType>
The test will only be executed if ``<platformType>`` matches
`Sys.Platform.platformType`. For example: ``"plat:windows"``.
kwargs
Any keyword arguments are simply stored as attributes. So, if
you decorate a ``test_x`` with ``test(xyz=123)`` then
``test_x.cs_test_info.xyz`` will be ``123``.
"""
self.reserved_cs_flags = {}
self.cs_flags = {}
self.cs_tags = {}
for arg in args:
if ":" in arg:
name, value = arg.split(":", 1)
self.cs_flags[name] = value
else:
self.cs_flags[arg] = True
for name in kwargs:
if name.startswith("cs_"):
self.reserved_cs_flags[name[3:]] = kwargs[name]
else:
self.cs_tags[name] = kwargs[name]
self.reserved_cs_flags['defined_in_base'] = None
def __getattr__(self, name):
"""Attribute access:
Provides read access to test method tags. For example, if you mark a
test:<py>:
@test(abc="Hello")
Then, if ``info`` is the test's `TestInfo` object, ``info.abc`` will
be ``"Hello"``. If the test does not have ``abc`` set then the result
is ``None``.
"""
if name in self.__dict__:
return self.__dict__.get(name)
return self.cs_tags.get(name, None)
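# Hedged sketch (assumed, not from the original source): the ``@test(...)``
# decorator itself lives elsewhere in cleversheep3; a minimal stand-in that
# attaches a TestInfo as ``cs_test_info``, the attribute this module reads,
# could look like the following. The decorator name here is hypothetical.
def _example_test_decorator(*args, **kwargs):
    def wrap(func):
        func.cs_test_info = TestInfo(*args, **kwargs)
        return func
    return wrap
# Usage sketch: positional flags and keyword tags become TestInfo attributes.
#   @_example_test_decorator("plat:windows", testID="T-123")
#   def test_something(self): ...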
class Result:
"""Full result details for a test."""
def __init__(self, state, reason):
self.state, self.reason = state, reason
@property
def reportCode(self):
if self.reason is NONE:
return self.state
return self.reason
class StepRecord:
"""A single record used to store informmation about the success or
otherwise of a test phase.
A test phase is one of:
- A suite set-up/tear-down.
- A test set-up/tear-down.
- The execution of the test-method itself.
:Ivariables:
result
TODO
reason
TODO
"""
def __init__(self, result=NOT_RUN, reason=NONE, details=None):
self.result, self.reason = result, reason
self.exc = None
self.reported = False
    def setResult(self, state, reason=NONE, details=None):
        # Note: this stores to _state/_reason/_details, which the ``state``
        # property below does not read; ``state`` reflects ``self.result``.
        self._state, self._reason = state, reason
        self._details = details
# TODO: Just transitional.
@property
def state(self):
return self.result
@property
def hasFailed(self):
return self.result in (FAIL, BAD_SETUP)
def __str__(self):
return "StepRecord: %s/%s" % (self.state, self.reason)
class StepRecordList:
def __init__(self):
self.entries = []
class RunRecord:
"""A set of records containing all information about a single test's run.
This stores multiple `StepRecord` instances. The records are stored in a
dictionary keyed by the following names:
    setUp, tearDown, prevTearDown, run
Each maps to a single `StepRecord`.
suiteSetUp, suiteTearDown, prevSuiteTearDown
Each mapping to a list of `StepRecord` instances, in execution order.
"""
_simpleNames = """setUp tearDown prevTearDown run postCheck""".split()
_listNames = """suiteSetUp suiteTearDown prevSuiteTearDown""".split()
_recordNames = _simpleNames + _listNames
_runTime = None
def __init__(self):
#assert RunRecord._runTime is not None
self.runTime = RunRecord._runTime
self.invalid = False
self._records = {}
self.extraInfo = {}
def __setstate__(self, state):
self.invalid = False
self.__dict__.update(state)
@classmethod
def startNewRun(cls):
cls._runTime = time.time()
@classmethod
def finishRun(cls):
cls._runTime = None
def addStepRecord(self, name):
"""Add a new phase record to this run record.
Adds a new `StepRecord` for a test phase, which must be one of those
defined for a `RunRecord`.
:Parameters:
name
The name for the record. It must be the name of a defined test
phase.
:Return:
The newly added `StepRecord` instance. This will always be a newly
created instance.
"""
assert name in RunRecord._recordNames
record = StepRecord()
if name in RunRecord._simpleNames:
assert name not in self._records
self._records[name] = record
else:
if name not in self._records:
self._records[name] = StepRecordList()
self._records[name].entries.append(record)
return record
def getResult(self, name):
pass
@property
def result(self):
        # If set-up failed then we report that as bad setup.
try:
rec = self._records["setUp"]
except KeyError:
pass
else:
if rec.state is not PASS:
result = Result(BAD_SETUP, BAD_SETUP)
return result
# See if the test was actually executed.
try:
rec = self._records["run"]
except KeyError:
pass
else:
result = Result(rec.state, rec.reason)
return result
# Test was not run, so we need to find out why. A suite set-up
# failure means we consider the test not-run.
for rec in self._records.get("suiteSetUp", []):
if rec.state is not PASS:
return Result(NOT_RUN, BAD_SUITE_SETUP)
try:
rec = self._records["setUp"]
except KeyError:
pass
else:
if rec.state is not PASS:
return Result(NOT_RUN, BAD_SETUP)
return Result(NOT_RUN, NONE)
@property
def state(self):
# If set-up failed then we report that as bad setup.
try:
rec = self._records["setUp"]
except KeyError:
pass
else:
if rec.state is NOT_RUN:
return NOT_RUN
if rec.state not in (PASS, BUG, BUG_PASS):
return BAD_SETUP
# If the test has a 'run' entry then that defines the state.
try:
rec = self._records["run"]
#if rec.state is NOT_RUN:
# return RUNNING
return rec.state
except KeyError:
pass
# Otherwise the state is not-run.
return NOT_RUN
@property
def isRunnable(self):
for name in ("suiteTearDown", "tearDown", "suiteSetUp", "setUp"):
try:
if self._records[name].state not in (PASS, SKIPPED, NOT_RUN):
return True
except KeyError:
pass
return False
@property
def hasRunProblem(self):
for name in ("tearDown", "suiteTearDown", "suiteSetUp", "setUp",
"run", "postCheck"):
try:
record = self._records[name]
except KeyError:
continue
if name in ("tearDown", "setUp", "run", "postCheck"):
if record.state not in (PASS, SKIPPED, NOT_RUN,
TODO, BUG, BUG_PASS):
return True
else:
for rec in record.entries:
if rec.state not in (PASS, SKIPPED, NOT_RUN,
TODO, BUG, BUG_PASS):
return True
return False
@property
def hasFailed(self):
for name in ("suiteSetUp", "setUp", "run"):
try:
record = self._records[name]
except KeyError:
continue
if name in ("setUp", "run"):
if record.state not in (PASS, SKIPPED, NOT_RUN, TODO, BUG,
BUG_PASS):
return True
else:
for rec in record.entries:
if rec.state not in (PASS, SKIPPED, NOT_RUN,
TODO, BUG, BUG_PASS):
return True
return False
@property
def phaseRecord(self):
"""Get the most recent phaseRecord.
This is used to get the most pertinent record for this test; i.e. the
one that provides the most useful result for the test.
TODO: This is not yet well defined.
"""
for name in ("tearDown", "run", "setUp"):
try:
return name, self._records[name]
except KeyError:
pass
#return None, None
seq = self._records.get("suiteSetUp", None)
if seq is None:
return None, None
for ent in seq.entries:
if ent.hasFailed:
return "suiteSetUp", ent
return "suiteSetUp", seq.entries[0]
def getStepRecord(self, phase):
"""Get the record details for a test run phase."""
ent = self._records.get(phase, None)
if hasattr(ent, "append"): # Yurk!
seq = ent
for ent in seq:
if ent.hasFailed:
return ent
return seq.entries[0]
if hasattr(ent, "entries"): # Double yurk!
seq = ent.entries
for ent in seq:
if ent.hasFailed:
return ent
if seq:
return seq[0]
return
return ent
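# Illustrative sketch (assumed usage, not from the original source): one
# RunRecord is opened per test run, per-phase StepRecords are added by name,
# and the aggregate ``state``/``hasFailed`` are derived from them.
if __name__ == "__main__":  # pragma: no cover - example only
    RunRecord.startNewRun()
    _run = RunRecord()
    _run.addStepRecord("setUp").result = PASS
    _run.addStepRecord("run").result = FAIL
    assert _run.state is FAIL and _run.hasFailed
    RunRecord.finishRun()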
class TestItem:
"""Base class for `Test` and `Suite` classes.
"""
def __init__(self, item, uid, parentUid, context, namespace=None):
"""Constructor:
:Parameters:
item
The concrete test item. For a test function/method this is the
function/method itself. For a `ClassSuite` this is the instance and
            for a `ModuleSuite` this is the module instance.
uid
The unique ID for this item, which is a tuple of strings.
parentUid
The unique ID of the parent item or ``None``. Only the root `Suite`
of a test tree has a parent of ``None``.
namespace
A dictionary that provides the containing namespace for the test
item.
"""
self.item = item
self.uid = uid
self.context = context
self.parentUid = parentUid
self.namespace = self._getNamespace(namespace)
self._collection = None
self._running = False
self._marks = {}
self.extraInfo = {}
def setMark(self, mark):
self._marks[mark] = None
def clearMark(self, mark):
if mark in self._marks:
del self._marks[mark]
def isMarked(self, mark):
return mark in self._marks
def setCollection(self, collection):
self._collection = weakref.proxy(collection)
# TODO: To remove.
def setPhase(self, phase):
self._phase = phase
@intelliprop
def state(self):
"""The current state of the test.
"""
result = self.getResult()
return result.state
def setState(self, state):
if state is PASS:
pass
@intelliprop
def level(self):
"""This item's level in the test tree.
This is the number of ancestors this item has. If zero then this is
the 'root' item.
"""
return len(self.ancestors)
@intelliprop
def parent(self):
"""The parent of this item, which may be ``None``.
If this is ``None`` then this item is the root of a (possibly nested)
suite of tests.
"""
return self._collection.parent(self)
@intelliprop
def ancestors(self):
"""A list of all ancestors for this item.
        Each entry is a UID. The first entry is the oldest ancestor and the
last entry is the immediate parent's UID.
"""
return self._collection.getAncestors(self)
def hasFailingAncestor(self):
"""Check if any ancestor is considered to have failed.
An ancestor suite has failed if, for example, its ``suiteSetup``
failed.
:Return:
``True`` if any ancestors has failed.
"""
parent = self.parent
if parent is None:
return
# TODO: Temporarily disabled.
return
return parent.hasFailed or parent.hasFailingAncestor()
def _getNamespace(self, namespace=None):
return namespace or dict([(n, getattr(self.item, n))
for n in dir(self.item)])
@intelliprop
def rawDoc(self):
"""The raw docstring, no cleaning up performed at all."""
return self.namespace["__doc__"]
@intelliprop
def docLines(self):
"""The docstring as lines.
This is cleaned up to remove leading and trailing blank lines from
the summary and details.
:Return:
            A sequence of (non-newline-terminated) lines for the docstring. The
summary (if present) is separated from the details by a single
empty line.
"""
summary, description = self._getDocParts()
if description:
return summary + [""] + description
return summary
@intelliprop
def doc(self):
"""The docstring after being cleaned up.
:Return:
            The cleaned up docstring as a multiline string. Leading and
            trailing blank lines are removed and the summary is separated from
            any details by a single blank line. Common leading whitespace is
            also removed from each line.
"""
return "\n".join(self.docLines)
def _getDocParts(self):
# Lose leading blank lines.
lines = self.rawDoc.splitlines()
while lines and not lines[0].strip():
lines.pop(0)
# All lines up to next blank line are the summary.
summary = []
while lines and lines[0].strip():
summary.append(lines.pop(0))
# Strip leading and trailing blank lines from the remaining details.
while lines and not lines[0].strip():
lines.pop(0)
while lines and not lines[-1].strip():
lines.pop()
# Dedent the summary and details before returning them.
summary = summary[:1] + dedentLines(summary[1:])
details = dedentLines(lines)
return summary, details
@property
def summary(self):
summary, description = self._getDocParts()
return " ".join(summary)
@property
def details(self):
summary, description = self._getDocParts()
return description
@property
def sourcesUnderTest(self):
sources = []
for p in self.namespace.get("sources_under_test", []):
if not os.path.isabs(p):
p = os.path.abspath(os.path.join(self.dirname, p))
sources.append(p)
p = self.parent
if p is not None:
sources.extend(p.sourcesUnderTest)
return sources
@property
def klass(self):
return None
@property
def path(self):
p = self.namespace.get("__file__", None)
if p is None:
return self.parent.path
if p.endswith(".pyc"):
p = p[:-1]
return p
@property
def dirname(self):
f = self.path
if f:
return os.path.dirname(f)
@property
def isBug(self):
return False
class Test(TestItem):
typeName = "Test"
isSuite = False
amNull = False
def __init__(self, *args, **kwargs):
super(Test, self).__init__(*args, **kwargs)
self._runHistory = []
self.stopAll = False
if self.func:
self.func.cs_test_info.test = weakref.proxy(self)
def getHistory(self):
return self._runHistory
def dumpHist(self):
return self._runHistory[-1].dump()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
def addStepRecord(self, name):
runRecord = self._runHistory[-1]
return runRecord.addStepRecord(name)
@property
def postCheck(self):
return self.parent.postCheck
@intelliprop
def hasFailed(self):
"""Check if this test has properly failed.
:Return:
``True`` if the test has failed to run for any reason.
"""
record = self.getRunRecord().getRecord("run")
return record.state is FAIL
@property
def klass(self):
return self.parent.klass
@property
def funcName(self):
return self.item.__name__
@property
def func(self):
return self.item
@property
def info(self):
return self.func.cs_test_info
@property
def isBroken(self):
if not hasattr(self, "info"):
raise PropertyError("%r has no attribute %r" % (
self.__class__.__name__, "info"))
flag = self.info.reserved_cs_flags.get("broken", None)
if flag is None:
flag = self.info.cs_flags.get("broken", False) # deprecated
return flag
@property
def isTodo(self):
if not hasattr(self, "info"):
raise PropertyError("%r has no attribute %r" % (
self.__class__.__name__, "info"))
return self.info.cs_flags.get("todo", False)
@property
def isBug(self):
if not hasattr(self, "info"):
raise PropertyError("%r has no attribute %r" % (
self.__class__.__name__, "info"))
flag = self.info.reserved_cs_flags.get("bug", None)
if flag is None:
flag = self.info.cs_flags.get("bug", False) # deprecated
return flag
@property
def shouldFork(self):
if not hasattr(self, "info"):
raise PropertyError("%r has no attribute %r" % (
self.__class__.__name__, "info"))
if self.info.reserved_cs_flags.get("fork", False):
return True
parent = self.parent
try:
return parent.suite.cs_attrs.fork_all
except AttributeError:
return False
@property
def testID(self):
return self.info.cs_tags.get("testID", None)
@property
def title(self):
return self.info.cs_flags.get("title", None)
@property
def isRunnable(self):
#if self.isBroken and not options.no_disabled:
# return False
if self.parent.exited:
return False
return True
@property
def state(self):
rec = self.runRecord
if rec:
return rec.state
return NOT_RUN
@property
def result(self):
rec = self.runRecord
if rec:
return rec.result
return NOT_RUN
@property
def hasRunProblem(self):
rec = self.runRecord
if rec:
return rec.hasRunProblem
return False
@property
def hasFailed(self):
rec = self.runRecord
if rec:
return rec.hasFailed
return False
def addRunRecord(self, record):
self._runHistory.append(record)
if len(self._runHistory) > 5:
self._runHistory[:] = self._runHistory[-5:]
@property
def runRecord(self):
"""The XXX TODO"""
if self._runHistory:
for rec in reversed(self._runHistory):
if not rec.invalid:
return rec
# TODO: Should now be StepRecord.
@property
def phaseRecord(self):
"""The XXX TODO"""
if not self._runHistory:
return None, None
return self._runHistory[-1].phaseRecord
def getStepRecord(self, phase):
return self._runHistory[-1].getStepRecord(phase)
def getTestProcedure(self):
return self._collection.spec.getThing(self)
class NullTest(Test):
amNull = True
def __init__(self):
context = Context.getContext(dirPath=os.getcwd())
super(NullTest, self).__init__(None, None, None, context)
self.number = 0
self.startNewRun()
def startNewRun(self):
rec = RunRecord()
self._runHistory.append(rec)
def abortRun(self):
self._runHistory.pop()
@property
def isBug(self):
return False
def __bool__(self):
return False
class Suite(TestItem):
typeName = "Suite"
isSuite = True
def __init__(self, *args, **kwargs):
self.myDir = kwargs.pop("myDir")
super(Suite, self).__init__(*args, **kwargs)
self.exited = False
self.number = 0
self.skipTests = False
self.entered = False
def reset(self):
self.entered = False
self.skipTests = False
@intelliprop
def children(self):
"""All the direct children of this item."""
tests = [t for t in self._collection if t.parent is self]
suites = [t for t in self._collection.suites if t.parent is self]
return suites + tests
@intelliprop
def tests(self):
"""All the direct test children of this item."""
return [t for t in self._collection if t.parent is self]
@property
def suite(self):
return self.item
@property
def runAfter(self):
"""The _run_after for this source."""
return self.namespace.get("_run_after", [])
@property
def postCheck(self):
return self.namespace.get("postCheck", lambda: None)
@property
def setUp(self):
return self.namespace.get("setUp", lambda: None)
@property
def postSetUp(self):
return self.namespace.get("postSetUp", lambda: None)
@property
def tearDown(self):
return self.namespace.get("tearDown", lambda: None)
@property
def suiteSetUp(self):
return self.namespace.get("suiteSetUp", lambda: None)
@property
def suiteTearDown(self):
return self.namespace.get("suiteTearDown", lambda: None)
def getResult(self, name=None):
runCount = 0
childStates = {}
result = Result(PASS, NONE)
if not self.children:
result.state = NOT_RUN
return result
for c in self.children:
state = c.state
runCount += state is not NOT_RUN
childStates[state] = None
if FAIL in childStates:
result.state = CHILD_FAIL
elif CHILD_FAIL in childStates:
result.state = CHILD_FAIL
elif BAD_SETUP in childStates:
result.state = CHILD_FAIL
elif PART_RUN in childStates:
result.state = PART_RUN
elif NOT_RUN in childStates:
if runCount:
result.state = PART_RUN
else:
result.state = NOT_RUN
return result
@property
def result(self):
return self.getResult()
@property
def state(self):
result = self.getResult()
return result.reportCode
def hasTests(self):
# Deprecated. Only used for old reporter support.
for t in self._collection:
if t.parent is self:
return True
class ModuleSuite(Suite):
pass
class ClassSuite(Suite):
@property
def klass(self):
return self.item.__class__.__name__
def enter_pdb():
"""Enter the python debugger."""
import sys, pdb
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
pdb.set_trace()
| [
33,
64,
81,
104,
122
] |
755 | 7b1c7228c1fc9501ab857cba62a7e073691e75c9 | <mask token>
class Person:
def __call__(self, name):
print('__call__' + ' Hello ' + name)
<mask token>
<mask token>
| <mask token>
class Person:
def __call__(self, name):
print('__call__' + ' Hello ' + name)
def hello(self, name):
print('hello ' + name)
<mask token>
| <mask token>
class Person:
def __call__(self, name):
print('__call__' + ' Hello ' + name)
def hello(self, name):
print('hello ' + name)
<mask token>
person('hcq')
person.hello('hcq')
| <mask token>
class Person:
def __call__(self, name):
print('__call__' + ' Hello ' + name)
def hello(self, name):
print('hello ' + name)
person = Person()
person('hcq')
person.hello('hcq')
| """
@Description:
@Author : HCQ
@Contact_1: [email protected]
@Project : pytorch
@File : call_test
@Time : 2022/5/24 下午10:19
@Last Modify Time @Version @Desciption
-------------------- -------- -----------
2022/5/24 下午10:19 1.0 None
"""
class Person():
def __call__(self, name):
print("__call__" + " Hello " + name)
def hello(self, name):
print("hello " + name)
person = Person()
person("hcq") # 直接调用call
person.hello("hcq") | [
2,
3,
4,
5,
6
] |
756 | 6a7e5a78f516cecf083ca3900bdaaf427bedd497 | <mask token>
def get_stock_time_series(data_df, stock_id):
curr_ID_data = data_df.loc[stock_id]
output = np.array(curr_ID_data[0])
for i in range(1, len(curr_ID_data.index)):
output = np.vstack((output, curr_ID_data[i]))
return output
<mask token>
| <mask token>
def get_stock_time_series(data_df, stock_id):
curr_ID_data = data_df.loc[stock_id]
output = np.array(curr_ID_data[0])
for i in range(1, len(curr_ID_data.index)):
output = np.vstack((output, curr_ID_data[i]))
return output
<mask token>
for ETF_ID in ETF_ID_list:
Nm_conf = conf.config('feature_conf').config['Nm']
if Nm_conf['enable'] is True:
Nm_method = Nm_conf['method']
file_postfix = '_Nm_' + str(Nm_conf['type'][0]
) + '_' + Nm_method + '_' + str(94) + '_' + ETF_ID + '.pkl'
else:
file_postfix = '_' + str(94) + '.pkl'
src_file_path = './Data/all_feature_data' + file_postfix
meta_file_path = './Data/all_meta_data' + file_postfix
data = pd.read_pickle(src_file_path)
f = open(meta_file_path, 'rb')
tasharep_ID = pickle.load(f)
member_ID = pickle.load(f)
Date = pickle.load(f)
feature_list = pickle.load(f)
price_scaler = pickle.load(f)
trade_scaler = pickle.load(f)
f_idx = 59
src_time_period = ['20000101', '20180511']
eval_time_period = ['20180402', '20180518']
eval_time_len = Date.index(eval_time_period[1]) - Date.index(
eval_time_period[0]) + 1
total_acc = 0
for day_shift in range(eval_time_len - 5):
eval_start_date = Date.index(eval_time_period[0]) + day_shift
target_start_date = eval_start_date - 21
target_time_period = [Date[target_start_date], Date[eval_start_date]]
next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]
date_mask = (data.columns > src_time_period[0]) & (data.columns <=
src_time_period[1])
src_data = data.iloc[:, date_mask]
date_mask = (data.columns > target_time_period[0]) & (data.columns <=
target_time_period[1])
target_data = data.iloc[:, date_mask]
date_mask = (data.columns > next_time_period[0]) & (data.columns <=
next_time_period[1])
next_data = data.iloc[:, date_mask]
src_TS = get_stock_time_series(src_data, ETF_ID)
target_TS = get_stock_time_series(target_data, ETF_ID)
next_TS = get_stock_time_series(next_data, ETF_ID)
overall_TS = get_stock_time_series(data, ETF_ID)
target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx],
mode='valid')
target_len = len(target_TS)
max_target_xcorr_idx = target_xcorr.argsort()[::-1]
predict_target_idx = max_target_xcorr_idx + target_len
next_len = len(next_TS)
        next_xcorr = np.correlate(src_TS[:, f_idx], next_TS[:, f_idx], mode='valid')
        max_next_xcorr_idx = next_xcorr.argsort()[::-1]
top_num = 10
acc = []
label = np.argmax(next_TS[:, -3:], axis=-1)
for idx in max_target_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:
predict_target_idx[idx] + next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
max_acc_idx = np.argsort(acc)[::-1]
output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in
max_acc_idx]
top_num = 3
avg_acc = 0
acc = []
for idx in output_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:
predict_target_idx[idx] + next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
avg_acc = avg_acc + acc[-1]
print('Avg. Acc.: [{}]'.format(avg_acc / top_num))
total_acc = total_acc + avg_acc / top_num
print('[{}] Overall Acc.: [{}]'.format(ETF_ID, total_acc / (
eval_time_len - 5)))
output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]
<mask token>
pickle.dump(output_date, f, True)
f.close()
| <mask token>
def get_stock_time_series(data_df, stock_id):
curr_ID_data = data_df.loc[stock_id]
output = np.array(curr_ID_data[0])
for i in range(1, len(curr_ID_data.index)):
output = np.vstack((output, curr_ID_data[i]))
return output
ID_conf = conf.config('feature_conf').config['ID']
ETF_ID_list = ['0050', '0052', '0053', '0054', '0055', '0056', '0057',
'0058', '0059', '006201', '006203', '006204', '006208']
output_date = {}
for ETF_ID in ETF_ID_list:
Nm_conf = conf.config('feature_conf').config['Nm']
if Nm_conf['enable'] is True:
Nm_method = Nm_conf['method']
file_postfix = '_Nm_' + str(Nm_conf['type'][0]
) + '_' + Nm_method + '_' + str(94) + '_' + ETF_ID + '.pkl'
else:
file_postfix = '_' + str(94) + '.pkl'
src_file_path = './Data/all_feature_data' + file_postfix
meta_file_path = './Data/all_meta_data' + file_postfix
data = pd.read_pickle(src_file_path)
f = open(meta_file_path, 'rb')
tasharep_ID = pickle.load(f)
member_ID = pickle.load(f)
Date = pickle.load(f)
feature_list = pickle.load(f)
price_scaler = pickle.load(f)
trade_scaler = pickle.load(f)
f_idx = 59
src_time_period = ['20000101', '20180511']
eval_time_period = ['20180402', '20180518']
eval_time_len = Date.index(eval_time_period[1]) - Date.index(
eval_time_period[0]) + 1
total_acc = 0
for day_shift in range(eval_time_len - 5):
eval_start_date = Date.index(eval_time_period[0]) + day_shift
target_start_date = eval_start_date - 21
target_time_period = [Date[target_start_date], Date[eval_start_date]]
next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]
date_mask = (data.columns > src_time_period[0]) & (data.columns <=
src_time_period[1])
src_data = data.iloc[:, date_mask]
date_mask = (data.columns > target_time_period[0]) & (data.columns <=
target_time_period[1])
target_data = data.iloc[:, date_mask]
date_mask = (data.columns > next_time_period[0]) & (data.columns <=
next_time_period[1])
next_data = data.iloc[:, date_mask]
src_TS = get_stock_time_series(src_data, ETF_ID)
target_TS = get_stock_time_series(target_data, ETF_ID)
next_TS = get_stock_time_series(next_data, ETF_ID)
overall_TS = get_stock_time_series(data, ETF_ID)
target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx],
mode='valid')
target_len = len(target_TS)
max_target_xcorr_idx = target_xcorr.argsort()[::-1]
predict_target_idx = max_target_xcorr_idx + target_len
next_len = len(next_TS)
        next_xcorr = np.correlate(src_TS[:, f_idx], next_TS[:, f_idx], mode='valid')
        max_next_xcorr_idx = next_xcorr.argsort()[::-1]
top_num = 10
acc = []
label = np.argmax(next_TS[:, -3:], axis=-1)
for idx in max_target_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:
predict_target_idx[idx] + next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
max_acc_idx = np.argsort(acc)[::-1]
output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in
max_acc_idx]
top_num = 3
avg_acc = 0
acc = []
for idx in output_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:
predict_target_idx[idx] + next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
avg_acc = avg_acc + acc[-1]
print('Avg. Acc.: [{}]'.format(avg_acc / top_num))
total_acc = total_acc + avg_acc / top_num
print('[{}] Overall Acc.: [{}]'.format(ETF_ID, total_acc / (
eval_time_len - 5)))
output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]
f = open('./Data/xcorr_date_data.pkl', 'wb')
pickle.dump(output_date, f, True)
f.close()
| <mask token>
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import config as conf
def get_stock_time_series(data_df, stock_id):
curr_ID_data = data_df.loc[stock_id]
output = np.array(curr_ID_data[0])
for i in range(1, len(curr_ID_data.index)):
output = np.vstack((output, curr_ID_data[i]))
return output
ID_conf = conf.config('feature_conf').config['ID']
ETF_ID_list = ['0050', '0052', '0053', '0054', '0055', '0056', '0057',
'0058', '0059', '006201', '006203', '006204', '006208']
output_date = {}
for ETF_ID in ETF_ID_list:
Nm_conf = conf.config('feature_conf').config['Nm']
if Nm_conf['enable'] is True:
Nm_method = Nm_conf['method']
file_postfix = '_Nm_' + str(Nm_conf['type'][0]
) + '_' + Nm_method + '_' + str(94) + '_' + ETF_ID + '.pkl'
else:
file_postfix = '_' + str(94) + '.pkl'
src_file_path = './Data/all_feature_data' + file_postfix
meta_file_path = './Data/all_meta_data' + file_postfix
data = pd.read_pickle(src_file_path)
f = open(meta_file_path, 'rb')
tasharep_ID = pickle.load(f)
member_ID = pickle.load(f)
Date = pickle.load(f)
feature_list = pickle.load(f)
price_scaler = pickle.load(f)
trade_scaler = pickle.load(f)
f_idx = 59
src_time_period = ['20000101', '20180511']
eval_time_period = ['20180402', '20180518']
eval_time_len = Date.index(eval_time_period[1]) - Date.index(
eval_time_period[0]) + 1
total_acc = 0
for day_shift in range(eval_time_len - 5):
eval_start_date = Date.index(eval_time_period[0]) + day_shift
target_start_date = eval_start_date - 21
target_time_period = [Date[target_start_date], Date[eval_start_date]]
next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]
date_mask = (data.columns > src_time_period[0]) & (data.columns <=
src_time_period[1])
src_data = data.iloc[:, date_mask]
date_mask = (data.columns > target_time_period[0]) & (data.columns <=
target_time_period[1])
target_data = data.iloc[:, date_mask]
date_mask = (data.columns > next_time_period[0]) & (data.columns <=
next_time_period[1])
next_data = data.iloc[:, date_mask]
src_TS = get_stock_time_series(src_data, ETF_ID)
target_TS = get_stock_time_series(target_data, ETF_ID)
next_TS = get_stock_time_series(next_data, ETF_ID)
overall_TS = get_stock_time_series(data, ETF_ID)
target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx],
mode='valid')
target_len = len(target_TS)
max_target_xcorr_idx = target_xcorr.argsort()[::-1]
predict_target_idx = max_target_xcorr_idx + target_len
next_len = len(next_TS)
        next_xcorr = np.correlate(src_TS[:, f_idx], next_TS[:, f_idx], mode='valid')
        max_next_xcorr_idx = next_xcorr.argsort()[::-1]
top_num = 10
acc = []
label = np.argmax(next_TS[:, -3:], axis=-1)
for idx in max_target_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:
predict_target_idx[idx] + next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
max_acc_idx = np.argsort(acc)[::-1]
output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in
max_acc_idx]
top_num = 3
avg_acc = 0
acc = []
for idx in output_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:
predict_target_idx[idx] + next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
avg_acc = avg_acc + acc[-1]
print('Avg. Acc.: [{}]'.format(avg_acc / top_num))
total_acc = total_acc + avg_acc / top_num
print('[{}] Overall Acc.: [{}]'.format(ETF_ID, total_acc / (
eval_time_len - 5)))
output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]
f = open('./Data/xcorr_date_data.pkl', 'wb')
pickle.dump(output_date, f, True)
f.close()
| # -*- coding: utf-8 -*-
"""
Created on Tue May 22 15:01:21 2018
@author: Weiyu_Lee
"""
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import config as conf
def get_stock_time_series(data_df, stock_id):
    # Stack the per-date feature vectors of one stock into a 2-D array,
    # one row per trading day, in the order the columns appear.
    curr_ID_data = data_df.loc[stock_id]
    output = np.array(curr_ID_data[0])
    for i in range(1, len(curr_ID_data.index)):
        output = np.vstack((output, curr_ID_data[i]))
    return output
ID_conf = conf.config('feature_conf').config['ID']
#ETF_ID = ID_conf["ID"]
#ETF_ID_list = ["0050"]
ETF_ID_list = ["0050", "0052", "0053", "0054", "0055", "0056", "0057", "0058", "0059",
"006201", "006203", "006204", "006208"]
output_date = {}
for ETF_ID in ETF_ID_list:
Nm_conf = conf.config('feature_conf').config['Nm']
if Nm_conf["enable"] is True:
Nm_method = Nm_conf["method"]
file_postfix = '_Nm_' + str(Nm_conf["type"][0]) + '_' + Nm_method + '_' + str(94) + "_" + ETF_ID + '.pkl'
else:
file_postfix = "_" + str(94) + '.pkl'
src_file_path = './Data/all_feature_data' + file_postfix
meta_file_path = './Data/all_meta_data' + file_postfix
data = pd.read_pickle(src_file_path)
f = open(meta_file_path, "rb")
tasharep_ID = pickle.load(f)
member_ID = pickle.load(f)
Date = pickle.load(f)
feature_list = pickle.load(f)
price_scaler = pickle.load(f)
trade_scaler = pickle.load(f)
f_idx = 59 # MACD
src_time_period = ['20000101', '20180511']
# eval_time_period = ['20180511', '20180518']
eval_time_period = ['20180402', '20180518']
eval_time_len = Date.index(eval_time_period[1]) - Date.index(eval_time_period[0]) + 1
total_acc = 0
for day_shift in range(eval_time_len-5):
eval_start_date = Date.index(eval_time_period[0]) + day_shift
target_start_date = eval_start_date - 21
target_time_period = [Date[target_start_date], Date[eval_start_date]]
next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]
date_mask = (data.columns > src_time_period[0]) & (data.columns <= src_time_period[1])
src_data = data.iloc[:, date_mask]
date_mask = (data.columns > target_time_period[0]) & (data.columns <= target_time_period[1])
target_data = data.iloc[:, date_mask]
date_mask = (data.columns > next_time_period[0]) & (data.columns <= next_time_period[1])
next_data = data.iloc[:, date_mask]
src_TS = get_stock_time_series(src_data, ETF_ID)
target_TS = get_stock_time_series(target_data, ETF_ID)
next_TS = get_stock_time_series(next_data, ETF_ID)
overall_TS = get_stock_time_series(data, ETF_ID)
target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx], mode='valid')
        next_xcorr = np.correlate(src_TS[:, f_idx], next_TS[:, f_idx], mode='valid')  # used by max_next_xcorr_idx below
target_len = len(target_TS)
max_target_xcorr_idx = target_xcorr.argsort()[::-1]
predict_target_idx = max_target_xcorr_idx + target_len
next_len = len(next_TS)
max_next_xcorr_idx = next_xcorr.argsort()[::-1]
# plt.plot(target_xcorr)
# plt.savefig("target_xcorr_{}.png".format(ETF_ID))
#for idx in max_target_xcorr_idx[:10]:
# plt.figure()
# plt.plot(target_TS[:, 84])
# plt.plot(src_TS[max_target_xcorr_idx[idx]:max_target_xcorr_idx[idx]+target_len, 84])
#plt.figure()
#plt.plot(target_xcorr)
#plt.plot(next_xcorr)
top_num = 10
acc = []
label = np.argmax(next_TS[:, -3:], axis=-1)
for idx in max_target_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
max_acc_idx = np.argsort(acc)[::-1]
output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in max_acc_idx]
top_num = 3
avg_acc = 0
acc = []
for idx in output_xcorr_idx[:top_num]:
#plt.figure()
#plt.plot(next_TS[:, 84])
#plt.plot(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, 84])
#plt.figure()
#plt.plot(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, 3])
predict = np.argmax(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
avg_acc = avg_acc + acc[-1]
#print("Acc.: [{}]".format(acc[-1]))
print("Avg. Acc.: [{}]".format(avg_acc/top_num))
total_acc = total_acc + avg_acc/top_num
print("[{}] Overall Acc.: [{}]".format(ETF_ID, total_acc/(eval_time_len-5)))
output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]
f = open('./Data/xcorr_date_data.pkl', 'wb')
pickle.dump(output_date, f, True)
f.close()
| [
1,
2,
3,
4,
5
] |
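The script above ranks historical windows by similarity to a recent window using np.correlate; a minimal self-contained sketch of that idea on synthetic data (no get_stock_time_series or pickled inputs needed):

import numpy as np

history = np.sin(np.linspace(0, 40, 400))   # stands in for src_TS[:, f_idx]
recent = history[300:321]                   # stands in for target_TS[:, f_idx]
xcorr = np.correlate(history, recent, mode='valid')
best_starts = xcorr.argsort()[::-1][:5]     # top-5 most similar window start positions
# each start + len(recent) is where the corresponding "what happened next" window begins
print(best_starts)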
757 | 5a33aeffa740a41bd0bd1d80f45796ae37377a4c | <mask token>
class AbstractLayer(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
<mask token>
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
| <mask token>
class BaseManager(models.Manager):
<mask token>
<mask token>
<mask token>
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
    default_manager = BaseManager()
    objects = BaseManager()
    all_objects = models.Manager()
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) ->(object or None):
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
return None
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) ->None:
"""
We won't be using auto_now and auto_add_now for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
| <mask token>
class BaseManager(models.Manager):
<mask token>
<mask token>
def get_queryset(self):
        return super(BaseManager, self).get_queryset().order_by('-created_time')
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
    default_manager = BaseManager()
    objects = BaseManager()
    all_objects = models.Manager()
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) ->(object or None):
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
return None
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) ->None:
"""
We won't be using auto_now and auto_add_now for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
| <mask token>
class BaseManager(models.Manager):
<mask token>
use_for_related_fields = True
def get_queryset(self):
        return super(BaseManager, self).get_queryset().order_by('-created_time')
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
    default_manager = BaseManager()
    objects = BaseManager()
    all_objects = models.Manager()
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) ->(object or None):
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
return None
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) ->None:
"""
We won't be using auto_now and auto_add_now for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
| from django.contrib.postgres.fields import JSONField
from django.db import models
from core.utils.time import get_now
class BaseManager(models.Manager):
"""
Our basic manager is used to order all child models of AbstractLayer
by created time (descending), therefore it creates a LIFO order,
    causing the recent ones to appear first in results.
"""
use_for_related_fields = True
def get_queryset(self):
        return super(BaseManager, self).get_queryset().order_by('-created_time')
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
# let's configure managers
    default_manager = BaseManager()
    objects = BaseManager()
    all_objects = models.Manager()
# All objects in our database are gonna have time of creation and last updated time.
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) -> object or None:
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
# if objects does not exist, we use None
return None
@classmethod
def filter(cls, *args, **kwargs) -> models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) -> None:
"""
We won't be using auto_now and auto_add_now for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(
*args,
**kwargs,
created_time=now,
last_updated_time=now
)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name='forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text='Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
| [
12,
17,
18,
19,
22
] |
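A hedged usage sketch of the shortcut API defined above (assumes a configured Django project with these models migrated; the values are illustrative):

us = Country.create(code='US')           # create() stamps created_time / last_updated_time
assert Country.get(code='US') == us      # get() returns None instead of raising when missing
assert Country.get(code='XX') is None
ny = City.create(name='New York', state='NY', country=us, lon='-74.0', lat='40.7')
print(us.cities.all())                   # reverse access through related_name='cities'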
758 | 5cfd7744f98c80483cb4dd318c17a7cd83ed3ae3 | <mask token>
class Solution:
<mask token>
| <mask token>
class Solution:
def findDuplicates(self, nums: List[int]) ->List[int]:
res = []
for num in nums:
if nums[abs(num) - 1] < 0:
res.append(abs(num))
else:
nums[abs(num) - 1] *= -1
return res
| <mask token>
class Solution:
<mask token>
class Solution:
def findDuplicates(self, nums: List[int]) ->List[int]:
res = []
for num in nums:
if nums[abs(num) - 1] < 0:
res.append(abs(num))
else:
nums[abs(num) - 1] *= -1
return res
| <mask token>
class Solution:
def findDuplicates(self, nums: List[int]) ->List[int]:
cnt = {}
for num in nums:
cnt[num] = cnt.get(num, 0) + 1
res = []
for k, v in cnt.items():
if v > 1:
res.append(k)
return res
class Solution:
def findDuplicates(self, nums: List[int]) ->List[int]:
res = []
for num in nums:
if nums[abs(num) - 1] < 0:
res.append(abs(num))
else:
nums[abs(num) - 1] *= -1
return res
| """
Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.
Find all the elements that appear twice in this array.
Could you do it without extra space and in O(n) runtime?
Example:
Input:
[4,3,2,7,8,2,3,1]
Output:
[2,3]
"""
from typing import List

# O(n) TC and SC
class Solution:
def findDuplicates(self, nums: List[int]) -> List[int]:
cnt = {}
for num in nums:
cnt[num] = cnt.get(num, 0) + 1
res = []
for k, v in cnt.items():
if v > 1:
res.append(k)
return res
# O(n) TC and O(1) SC
class Solution:
def findDuplicates(self, nums: List[int]) -> List[int]:
res = []
for num in nums:
if nums[abs(num)-1] < 0:
res.append(abs(num))
else:
nums[abs(num)-1] *= -1
return res
| [
1,
2,
3,
4,
5
] |
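A quick sanity check of the O(1)-extra-space sign-flip solution above (note it mutates nums in place):

from typing import List

nums = [4, 3, 2, 7, 8, 2, 3, 1]
# the first visit to a value v flips nums[v-1] negative; a second visit finds it
# already negative, so v is reported as a duplicate
print(Solution().findDuplicates(nums))  # [2, 3]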
759 | cfa862988edf9d70aa5e975cca58b4e61a4de847 | <mask token>
| <mask token>
setup(name='gromacsplotter', version='0.1', description=
'Read xvg files created with gromacs for plotting with matplotlib', url
='', author='Ilyas Kuhlemann', author_email='[email protected]',
license='MIT', packages=['gromacsplotter'], scripts=[],
install_requires=['numpy', 'matplotlib'], entry_points={
'console_scripts': [
'gromacsplotter = gromacsplotter.plot_xvg_data:main']}, zip_safe=False)
| <mask token>
from setuptools import setup
setup(name='gromacsplotter', version='0.1', description=
'Read xvg files created with gromacs for plotting with matplotlib', url
='', author='Ilyas Kuhlemann', author_email='[email protected]',
license='MIT', packages=['gromacsplotter'], scripts=[],
install_requires=['numpy', 'matplotlib'], entry_points={
'console_scripts': [
'gromacsplotter = gromacsplotter.plot_xvg_data:main']}, zip_safe=False)
| """
USAGE:
o install in develop mode: navigate to the folder containing this file,
and type 'python setup.py develop --user'.
    (omit '--user' if you want to install for
all users)
"""
from setuptools import setup
setup(name='gromacsplotter',
version='0.1',
description='Read xvg files created with gromacs for plotting with matplotlib',
url='',
author='Ilyas Kuhlemann',
author_email='[email protected]',
license='MIT',
packages=["gromacsplotter"],
scripts=[],
install_requires=['numpy',
"matplotlib"],
entry_points = {
'console_scripts': ["gromacsplotter = gromacsplotter.plot_xvg_data:main"]
},
zip_safe=False)
| null | [
0,
1,
2,
3
] |
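A hedged sketch of driving the console entry point from Python (assumes the package is installed and that plot_xvg_data.main reads file names from sys.argv, which this setup script does not show; the .xvg file name is hypothetical):

import sys
from gromacsplotter import plot_xvg_data  # module named in entry_points above

sys.argv = ['gromacsplotter', 'energy.xvg']
plot_xvg_data.main()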
760 | 7b9660bba6fcb8c725251971f3733a1cc915c0e7 | <mask token>
class Point(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
| <mask token>
class Point(object):
<mask token>
<mask token>
def __repr__(self):
return 'Point({},{})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __eq__(self, other):
assert isinstance(other, Point)
return self.x == other.x and self.y == other.y
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
| <mask token>
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class Point(object):
"""A point is an ordered pair, (x,y)"""
def __init__(self, x, y):
assert isinstance(x, numbers.Number)
assert isinstance(y, numbers.Number)
self.x = x
self.y = y
log.debug('Created Point {}'.format(repr(self)))
def __repr__(self):
return 'Point({},{})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __eq__(self, other):
assert isinstance(other, Point)
return self.x == other.x and self.y == other.y
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
| <mask token>
import numbers
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class Point(object):
"""A point is an ordered pair, (x,y)"""
def __init__(self, x, y):
assert isinstance(x, numbers.Number)
assert isinstance(y, numbers.Number)
self.x = x
self.y = y
log.debug('Created Point {}'.format(repr(self)))
def __repr__(self):
return 'Point({},{})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __eq__(self, other):
assert isinstance(other, Point)
return self.x == other.x and self.y == other.y
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
| """
Rectangles: Compute overlapping region of two rectangles.
Point(x: number, y: number): Cartesian coordinate pair
Rect(ll: Point, ur: Point): A rectangle defined by lower left
and upper right coordinates
Rect.overlaps(other: Rect) -> boolean: True if non-empty overlap
Rect.intersect(other: Rect) -> Rect:
region of intersection if non-empty,
or empty Rect from 0,0 to 0,0 if not Rect.overlaps(other)
CIS 211 Project 1
Author: Noah Tigner
UO email: [email protected]
"""
import numbers
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# To turn on debugging output, change the above to
# log.setLevel(logging.DEBUG)
class Point(object):
"""A point is an ordered pair, (x,y)"""
def __init__(self, x, y):
assert isinstance(x, numbers.Number)
assert isinstance(y, numbers.Number)
self.x = x
self.y = y
log.debug("Created Point {}".format(repr(self)))
def __repr__(self):
return "Point({},{})".format(self.x, self.y)
def __str__(self):
return "({}, {})".format(self.x, self.y)
def __eq__(self, other):
assert(isinstance(other, Point))
return self.x == other.x and self.y == other.y
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug("Rect from ll {}, ur {}".format(repr(ll), repr(ur)))
# Ensure ll really is lower left and ur really is upper right
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug("ll will be {}".format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug("ur will be {}".format(self.ur))
log.debug("Created rect {}".format(repr(self)))
def __repr__(self):
return "Rect({},{})".format(self.ll, self.ur)
def __str__(self):
return "Rect({},{})".format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y, other.ll.y)),
Point(min(self.ur.x, other.ur.x), min(self.ur.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
| [
9,
12,
16,
17,
18
] |
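A short usage sketch for the classes above (corners may be passed in any order, since __init__ normalizes them):

r1 = Rect(Point(0, 0), Point(4, 4))
r2 = Rect(Point(6, 2), Point(2, 6))                   # corners deliberately given "backwards"
print(r1.overlaps(r2))                                # True
print(r1.intersect(r2))                               # Rect((2, 2),(4, 4))
print(r1.intersect(Rect(Point(5, 5), Point(7, 7))))   # Rect((0, 0),(0, 0))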
761 | f25351a3cb7bf583152baa8e7ec47b0f2161cb9c | <mask token>
class Notifier(object):
<mask token>
def __init__(self):
pass
<mask token>
@abc.abstractmethod
def send(self, msg):
pass
| <mask token>
class Notifier(object):
<mask token>
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
| <mask token>
class Notifier(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
| import abc
class Notifier(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
| # Copyright 2014 The crabapple Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import abc
class Notifier(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
| [
3,
4,
5,
6,
7
] |
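A hedged sketch of a concrete subclass (the hypothetical ConsoleNotifier is for illustration; note that __metaclass__ only enforces abstractness on Python 2 — under Python 3 the class header would need metaclass=abc.ABCMeta):

class ConsoleNotifier(Notifier):
    def config(self, kwargs):
        self.prefix = kwargs.get('prefix', 'crabapple')
    def send(self, msg):
        print('[%s] %s' % (self.prefix, msg))

n = ConsoleNotifier()
n.config({'prefix': 'deploy'})
n.send('build finished')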
762 | d0eb6ea2e816ac59ae93684edb38ff3a49909633 | <mask token>
| def usage_list(self):
print('Available modules')
print('=================')
for module in sorted(self.list()):
if 'module' not in self.mods[module]:
self.import_module(module)
if not self.mods[module]['module'].__doc__:
continue
text = self.mods[module]['module'].__doc__.strip('\n ')
text = text.split('\n')
if len(text) > 2:
if text[1].startswith('='):
text[1] = '=' * (14 + len(text[1]))
text = '\n'.join(text)
print('\n%-12s: %s' % (module, text))
| def usage_list(self):
print('Available modules')
print('=================')
for module in sorted(self.list()):
if ('module' not in self.mods[module]):
self.import_module(module)
if (not self.mods[module]['module'].__doc__):
continue
text = self.mods[module]['module'].__doc__.strip('\n ')
text = text.split('\n')
if (len(text) > 2):
if text[1].startswith('='):
text[1] = ('=' * (14 + len(text[1])))
text = '\n'.join(text)
print(('\n%-12s: %s' % (module, text))) | null | null | [
0,
1,
2
] |
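usage_list is written as an unbound method; a minimal host-object sketch showing the shape it assumes of its owner (the mods/list/import_module layout here is an assumption, not taken from the source):

import types

class Registry:
    def __init__(self):
        self.mods = {'ping': {}}            # per-module metadata keyed by name
    def list(self):
        return self.mods.keys()
    def import_module(self, name):
        mod = types.ModuleType(name)
        mod.__doc__ = 'Ping hosts\n=====\nSends ICMP echo requests.'
        self.mods[name]['module'] = mod
    usage_list = usage_list                 # attach the function above as a method

Registry().usage_list()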
763 | f3329962004a4454c04327da56d8dd1d0f1d45e7 | <mask token>
| <mask token>
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + 'data.json', 'w') as fdata:
json.dump({'date': datetime.datetime.now().isoformat(), 'amount':
m3.group(1), 'supporters': m2.group(1)}, fdata)
| <mask token>
r = requests.get(
'https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids')
path = os.path.dirname(os.path.realpath(__file__)) + '/'
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + 'data.json', 'w') as fdata:
json.dump({'date': datetime.datetime.now().isoformat(), 'amount':
m3.group(1), 'supporters': m2.group(1)}, fdata)
| import csv
import datetime
import json
import re
import requests
import os
r = requests.get(
'https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids')
path = os.path.dirname(os.path.realpath(__file__)) + '/'
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + 'data.json', 'w') as fdata:
json.dump({'date': datetime.datetime.now().isoformat(), 'amount':
m3.group(1), 'supporters': m2.group(1)}, fdata)
| import csv
import datetime
import json
import re
import requests
import os
r = requests.get("https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids")
path = os.path.dirname(os.path.realpath(__file__)) + "/"
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + "data.json", "w") as fdata:
json.dump({
"date": datetime.datetime.now().isoformat(),
"amount": m3.group(1),
"supporters": m2.group(1)
}, fdata)
| [
0,
1,
2,
3,
4
] |
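A small companion sketch that reads the snapshot back (same keys and relative path as the writer above):

import json
import os

path = os.path.dirname(os.path.realpath(__file__)) + '/'
with open(path + 'data.json') as fdata:
    snapshot = json.load(fdata)
print(snapshot['date'], snapshot['supporters'], snapshot['amount'])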
764 | 2c505f3f1dfdefae8edbea0916873229bcda901f | <mask token>
| <mask token>
class NestableBlueprint(Blueprint):
<mask token>
| <mask token>
class NestableBlueprint(Blueprint):
def register_blueprint(self, blueprint, **options):
def deferred(state):
url_prefix = (state.url_prefix or u'') + (options.get(
'url_prefix', blueprint.url_prefix) or u'')
if 'url_prefix' in options:
del options['url_prefix']
state.app.register_blueprint(blueprint, url_prefix=url_prefix,
**options)
self.record(deferred)
| from flask import Blueprint
class NestableBlueprint(Blueprint):
def register_blueprint(self, blueprint, **options):
def deferred(state):
url_prefix = (state.url_prefix or u'') + (options.get(
'url_prefix', blueprint.url_prefix) or u'')
if 'url_prefix' in options:
del options['url_prefix']
state.app.register_blueprint(blueprint, url_prefix=url_prefix,
**options)
self.record(deferred)
| from flask import Blueprint
class NestableBlueprint(Blueprint):
def register_blueprint(self, blueprint, **options):
def deferred(state):
            # state.url_prefix => the parent's own url prefix + blueprint.url_prefix => /v3/api/cmdb/
url_prefix = (state.url_prefix or u"") + (options.get('url_prefix', blueprint.url_prefix) or u"")
if 'url_prefix' in options:
del options['url_prefix']
# app.register_blueprint(blueprint, '/v3/api/cmdb/')
state.app.register_blueprint(blueprint, url_prefix=url_prefix, **options)
self.record(deferred)
| [
0,
1,
2,
3,
4
] |
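A hedged usage sketch of the nesting behavior (plain Flask app; the route and prefixes are illustrative):

from flask import Flask, Blueprint

app = Flask(__name__)
api = NestableBlueprint('api', __name__, url_prefix='/v3/api')
cmdb = Blueprint('cmdb', __name__, url_prefix='/cmdb')

@cmdb.route('/hosts')
def hosts():
    return 'hosts'

api.register_blueprint(cmdb)   # deferred: runs when api itself is registered
app.register_blueprint(api)    # final URL: /v3/api/cmdb/hosts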
765 | 5721786b61cf8706b1d401a46d06f2d32153df8b | <mask token>
| <mask token>
for i in range(1, n):
    rem = n % i
    if rem == 0:
        sum = sum + i
if sum == n:
    print('the number is perfect')
else:
    print('the number is not perfect')
| n = int(input('enter the number\n'))
sum = 0
for i in range(1, n):
    rem = n % i
    if rem == 0:
        sum = sum + i
if sum == n:
    print('the number is perfect')
else:
    print('the number is not perfect')
| n=int(input("enter the number\n"))
sum=0
for i in range(1,n):
    rem=n%i
    if(rem==0):
        sum=sum+i
if(sum==n):
    print("the number is perfect")
else:
    print("the number is not perfect")
| null | [
0,
1,
2,
3
] |
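The same check factored into a reusable function (a sketch; 6 and 28 are the first two perfect numbers):

def is_perfect(n):
    return n > 1 and sum(i for i in range(1, n) if n % i == 0) == n

print([n for n in range(2, 30) if is_perfect(n)])  # [6, 28]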
766 | 24f87bd6aab0ff65cf2153e27df31122818ad0ac | <mask token>
class TestColGroup(unittest.TestCase):
<mask token>
def test_col(self):
return
data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]
gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},
'attr_sort': 1})
self.assertEqual(
'<table><colgroup span="3" width="100"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': {}}), 'colgroup wraps col')
self.assertEqual(
'<table><colgroup span="3" width="100"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': [{}, {}, {}]}),
'colgroup wraps multiple cols')
self.assertEqual(
'<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': None, 'col': {}}),
            'colgroup can be overridden when col is present too')
gen = Table({'data': data, 'col': [{}, {}, {}]})
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': {}}),
'multiple cols against single colgroup')
self.assertEqual(
'<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),
'no cols against multiple colgroups')
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': [{}, {}, {}]}),
'multiple cols against multiple colgroups')
<mask token>
| <mask token>
class TestColGroup(unittest.TestCase):
def test_colgroup(self):
return
data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]
gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},
'attr_sort': 1})
self.assertEqual(
'<table><colgroup span="3" width="100" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate(), 'colgroup present from generate()')
self.assertEqual(
'<table><colgroup span="3" width="100" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'
, gen.generate({'tgroups': 2}),
'colgroup present from generate() with tgroups')
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
            , gen.generate({'colgroup': None}), 'colgroup can be overridden')
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')
self.assertEqual(
'<table><colgroup color="red" span="1" /><colgroup color="blue" span="2" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {
'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')
def test_col(self):
return
data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]
gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},
'attr_sort': 1})
self.assertEqual(
'<table><colgroup span="3" width="100"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': {}}), 'colgroup wraps col')
self.assertEqual(
'<table><colgroup span="3" width="100"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': [{}, {}, {}]}),
'colgroup wraps multiple cols')
self.assertEqual(
'<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': None, 'col': {}}),
            'colgroup can be overridden when col is present too')
gen = Table({'data': data, 'col': [{}, {}, {}]})
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': {}}),
'multiple cols against single colgroup')
self.assertEqual(
'<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),
'no cols against multiple colgroups')
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': [{}, {}, {}]}),
'multiple cols against multiple colgroups')
<mask token>
| <mask token>
class TestColGroup(unittest.TestCase):
def test_colgroup(self):
return
data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]
gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},
'attr_sort': 1})
self.assertEqual(
'<table><colgroup span="3" width="100" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate(), 'colgroup present from generate()')
self.assertEqual(
'<table><colgroup span="3" width="100" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'
, gen.generate({'tgroups': 2}),
'colgroup present from generate() with tgroups')
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
            , gen.generate({'colgroup': None}), 'colgroup can be overridden')
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')
self.assertEqual(
'<table><colgroup color="red" span="1" /><colgroup color="blue" span="2" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {
'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')
def test_col(self):
return
data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]
gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},
'attr_sort': 1})
self.assertEqual(
'<table><colgroup span="3" width="100"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': {}}), 'colgroup wraps col')
self.assertEqual(
'<table><colgroup span="3" width="100"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': [{}, {}, {}]}),
'colgroup wraps multiple cols')
self.assertEqual(
'<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': None, 'col': {}}),
            'colgroup can be overridden when col is present too')
gen = Table({'data': data, 'col': [{}, {}, {}]})
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': {}}),
'multiple cols against single colgroup')
self.assertEqual(
'<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),
'no cols against multiple colgroups')
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': [{}, {}, {}]}),
'multiple cols against multiple colgroups')
if __name__ == '__main__':
unittest.main()
| import unittest
from Spreadsheet.HTML import Table
class TestColGroup(unittest.TestCase):
def test_colgroup(self):
return
data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]
gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},
'attr_sort': 1})
self.assertEqual(
'<table><colgroup span="3" width="100" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate(), 'colgroup present from generate()')
self.assertEqual(
'<table><colgroup span="3" width="100" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'
, gen.generate({'tgroups': 2}),
'colgroup present from generate() with tgroups')
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
            , gen.generate({'colgroup': None}), 'colgroup can be overridden')
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')
self.assertEqual(
'<table><colgroup color="red" span="1" /><colgroup color="blue" span="2" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {
'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')
def test_col(self):
return
data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]
gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},
'attr_sort': 1})
self.assertEqual(
'<table><colgroup span="3" width="100"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': {}}), 'colgroup wraps col')
self.assertEqual(
'<table><colgroup span="3" width="100"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': [{}, {}, {}]}),
'colgroup wraps multiple cols')
self.assertEqual(
'<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': None, 'col': {}}),
            'colgroup can be overridden when col is present too')
gen = Table({'data': data, 'col': [{}, {}, {}]})
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': {}}),
'multiple cols against single colgroup')
self.assertEqual(
'<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),
'no cols against multiple colgroups')
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'
, gen.generate({'colgroup': [{}, {}, {}]}),
'multiple cols against multiple colgroups')
if __name__ == '__main__':
unittest.main()
| import unittest
from Spreadsheet.HTML import Table
class TestColGroup(unittest.TestCase):
def test_colgroup(self):
        return  # disabled by the author; remove this line to run the assertions below
data = [
['a','b','c'],
[1,2,3],
[4,5,6],
]
gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } )
self.assertEqual(
'<table><colgroup span="3" width="100" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate(),
"colgroup present from generate()"
)
self.assertEqual(
'<table><colgroup span="3" width="100" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>',
gen.generate( { 'tgroups': 2 } ),
"colgroup present from generate() with tgroups"
)
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': None } ),
"colgroup can be overriden"
)
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': 1 } ),
"colgroup yields no-op if scalar"
)
self.assertEqual(
'<table><colgroup color="red" span="1" /><colgroup color="blue" span="2" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': [ { 'span': 1, 'color': 'red' }, { 'span': 2, 'color': 'blue' } ] } ),
"can specify multiple colgroups"
)
def test_col(self):
        return  # disabled by the author; remove this line to run the assertions below
data = [
['a','b','c'],
[1,2,3],
[4,5,6],
]
gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } );
self.assertEqual(
'<table><colgroup span="3" width="100"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'col': {} } ),
"colgroup wraps col"
)
self.assertEqual(
'<table><colgroup span="3" width="100"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'col': [{},{},{}] } ),
"colgroup wraps multiple cols"
)
self.assertEqual(
'<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': None, 'col': {} } ),
"colgroup can be overriden when col is present too"
)
gen = Table( { 'data': data, 'col': [{},{},{}] } );
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': {} } ),
"multiple cols against single colgroup"
)
self.assertEqual(
'<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'col': None, 'colgroup': [{},{},{}] } ),
"no cols against multiple colgroups"
)
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': [{},{},{}] } ),
"multiple cols against multiple colgroups"
)
if __name__ == '__main__':
unittest.main()
| [
2,
3,
4,
5,
6
] |
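A hedged sketch of the generator these tests exercise (assumes the Python port of Spreadsheet::HTML is installed; the parameter names mirror the assertions above):

from Spreadsheet.HTML import Table

gen = Table({'data': [['a', 'b'], [1, 2]]})
print(gen.generate())                            # plain <table> markup
print(gen.generate({'colgroup': {'span': 2}}))   # same table with a <colgroup>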
767 | 2a0172641c48c47f048bf5e9f1889b29abbb0b7c | <mask token>
def shell(cmd):
subprocess.call(cmd, shell=True)
print('Done! {0}.'.format(cmd))
<mask token>
| <mask token>
def shell(cmd):
subprocess.call(cmd, shell=True)
print('Done! {0}.'.format(cmd))
<mask token>
shell(cmd)
<mask token>
shell(cmd)
<mask token>
shell(cmd)
<mask token>
shell(cmd)
print('time used = {0:.0f}'.format(time.time() - start))
| <mask token>
NR_THREAD = 20
def shell(cmd):
subprocess.call(cmd, shell=True)
print('Done! {0}.'.format(cmd))
start = time.time()
cmd = 'mkdir FTRL/tmp -p'
shell(cmd)
cmd = 'mkdir FTRL/data -p'
shell(cmd)
cmd = (
'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'
)
shell(cmd)
cmd = 'util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'
shell(cmd)
print('time used = {0:.0f}'.format(time.time() - start))
| from __future__ import division, absolute_import, print_function, unicode_literals
import argparse, csv, sys, subprocess, time
NR_THREAD = 20
def shell(cmd):
subprocess.call(cmd, shell=True)
print('Done! {0}.'.format(cmd))
start = time.time()
cmd = 'mkdir FTRL/tmp -p'
shell(cmd)
cmd = 'mkdir FTRL/data -p'
shell(cmd)
cmd = (
'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'
)
shell(cmd)
cmd = 'util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'
shell(cmd)
print('time used = {0:.0f}'.format(time.time() - start))
| #!/usr/bin/env python3
#coding=utf8
from __future__ import (division,absolute_import,print_function,unicode_literals)
import argparse, csv, sys,subprocess,time
NR_THREAD=20
def shell(cmd):
subprocess.call(cmd,shell=True)
print("Done! {0}.".format(cmd))
start=time.time()
cmd = 'mkdir FTRL/tmp -p'
shell(cmd)
cmd = 'mkdir FTRL/data -p'
shell(cmd)
#cmd = 'FTRL/ensamble/ensamble.py -s {nr_thread} -f 5 ffmData/Filter100/click_train.ffm ffmData/Filter100/click_test.ffm FTRL/data/click_train_out.txt FTRL/data/click_test_out.txt '.format(nr_thread=NR_THREAD)
#shell(cmd)
cmd = 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'
shell(cmd)
cmd='util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'
shell(cmd)
print('time used = {0:.0f}'.format(time.time()-start))
| [
1,
2,
3,
4,
5
] |
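subprocess.call ignores failing commands, so later steps run even if an earlier one died; a hedged hardening of the helper:

import subprocess

def shell(cmd):
    subprocess.run(cmd, shell=True, check=True)  # raises CalledProcessError on non-zero exit
    print('Done! {0}.'.format(cmd))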
768 | eab5bf4776582349615ad56ee1ed93bc8f868565 | <mask token>
class Communication(Module):
<mask token>
<mask token>
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
<mask token>
@property
def port(self):
return self._port
<mask token>
| <mask token>
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception('Port name must be a string.')
if not isinstance(baud_rate, int):
raise Exception('Baud rate must be an integer.')
if baud_rate not in BAUD_RATES:
raise Exception(
'%d is not a valid baud rate; check the SCI Specification for acceptable values.'
% baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception('Data must be a string.')
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
<mask token>
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
| <mask token>
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception('Port name must be a string.')
if not isinstance(baud_rate, int):
raise Exception('Baud rate must be an integer.')
if baud_rate not in BAUD_RATES:
raise Exception(
'%d is not a valid baud rate; check the SCI Specification for acceptable values.'
% baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception('Data must be a string.')
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
| from common import *
import serial
CMD_BAUD = chr(129)
BAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400,
57600, 115200]
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception('Port name must be a string.')
if not isinstance(baud_rate, int):
raise Exception('Baud rate must be an integer.')
if baud_rate not in BAUD_RATES:
raise Exception(
'%d is not a valid baud rate; check the SCI Specification for acceptable values.'
% baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception('Data must be a string.')
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
| from common import *
import serial
CMD_BAUD = chr(129)
BAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 57600, 115200]
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception("Port name must be a string.")
if not isinstance(baud_rate, int):
raise Exception("Baud rate must be an integer.")
if baud_rate not in BAUD_RATES:
raise Exception("%d is not a valid baud rate; check the SCI Specification for acceptable values." % baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception("Data must be a string.")
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception("Receive length must be an integer.")
return self.port.read(length)
_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
| [
3,
6,
7,
9,
10
] |
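A hedged usage sketch (assumes pyserial, a device on the named port, and the project's Module base from common; the code is Python 2 era, where str doubles as a byte string, and the payload byte here is a made-up example):

parent = None                                  # hypothetical owner object
comm = Communication(parent, '/dev/ttyUSB0', 57600)
comm.send(CMD_BAUD + chr(10))                  # command byte plus an example baud-code byte
reply = comm.receive(1)                        # blocking read of one byte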
769 | abbad57e945d2195021948a0e0838c6bfd9c6a1e | <mask token>
class ConnectWindow:
<mask token>
def startScreen(self):
"""This function creates the board and intializes the board count for each column"""
self.background = Rectangle(Point(0, 0), Point(690, 590))
self.background.setFill('blue')
self.background.draw(self.window)
for i in range(7):
for j in range(6):
self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)
self.Circles.setFill('white')
self.Circles.draw(self.window)
for i in range(6):
self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i *
100 + 100))
self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i +
100, 900))
self.horizLine.draw(self.window)
self.vertLine.draw(self.window)
self.grid = [[], [], [], [], [], [], []]
self.boardCount = [0, 0, 0, 0, 0, 0, 0]
counter = 2
for x in range(7):
for y in range(6):
self.grid[x].append(counter)
counter += 1
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class ConnectWindow:
<mask token>
def startScreen(self):
"""This function creates the board and intializes the board count for each column"""
self.background = Rectangle(Point(0, 0), Point(690, 590))
self.background.setFill('blue')
self.background.draw(self.window)
for i in range(7):
for j in range(6):
self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)
self.Circles.setFill('white')
self.Circles.draw(self.window)
for i in range(6):
self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i *
100 + 100))
self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i +
100, 900))
self.horizLine.draw(self.window)
self.vertLine.draw(self.window)
self.grid = [[], [], [], [], [], [], []]
self.boardCount = [0, 0, 0, 0, 0, 0, 0]
counter = 2
for x in range(7):
for y in range(6):
self.grid[x].append(counter)
counter += 1
def validClick(self, x):
"""This function checks if there is enough space vertically for move to be valid"""
if self.boardCount[x] >= 6:
print('Invalid Move')
return False
else:
return True
<mask token>
def handleClick(self, point):
"""This function works with the user to add each move into the board count and to the current grid"""
self.newX = point.getX()
self.x = self.newX // 100
self.y = self.boardCount[self.x]
if self.validClick(self.x):
self.boardCount[self.x] += 1
self.limitCounter += 1
self.grid[self.x][self.y] = self.currentUser
if self.isWon() == False:
self.limitCounter += 1
self.computerMove()
self.drawUmove()
<mask token>
def printWinner(self, winner):
"""This function prints who the winner is or if it is a tie"""
if winner == 3:
txt = Text(Point(345, 300), 'Tie Game!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
elif winner == 1:
txt = Text(Point(345, 300), 'You Won!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
else:
txt = Text(Point(345, 300), 'Computer Won!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
def validCmove(self, x, y):
"""This function checks if the computer's move will be valid"""
if self.boardCount[x] > y:
return False
""" if it tries to place below the highest piece"""
if self.boardCount[x] < y:
return False
"""if it tries to place it in a column with 6 pieces already"""
if self.boardCount[x] >= 6:
return False
else:
return True
def drawCmove(self, x, y):
"""This function adds the computer's move to the game board and adds it to the board count"""
piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)
piece.setFill('yellow')
piece.draw(self.window)
self.boardCount[x] += 1
self.grid[x][y] = -1
return
<mask token>
def randomMove(self):
"""This function creates a random coordinate for its move, checks if it's valid, then prints the move.
It will continue to run until numbers are valid for current board"""
randY = random.randint(0, 6)
randX = random.randint(0, 7)
if self.validCmove(randY, randX):
self.drawCmove(randY, randX)
return
else:
self.randomMove()
<mask token>
| <mask token>
class ConnectWindow:
<mask token>
def startScreen(self):
"""This function creates the board and intializes the board count for each column"""
self.background = Rectangle(Point(0, 0), Point(690, 590))
self.background.setFill('blue')
self.background.draw(self.window)
for i in range(7):
for j in range(6):
self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)
self.Circles.setFill('white')
self.Circles.draw(self.window)
for i in range(6):
self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i *
100 + 100))
self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i +
100, 900))
self.horizLine.draw(self.window)
self.vertLine.draw(self.window)
self.grid = [[], [], [], [], [], [], []]
self.boardCount = [0, 0, 0, 0, 0, 0, 0]
counter = 2
for x in range(7):
for y in range(6):
self.grid[x].append(counter)
counter += 1
def validClick(self, x):
"""This function checks if there is enough space vertically for move to be valid"""
if self.boardCount[x] >= 6:
print('Invalid Move')
return False
else:
return True
def drawUmove(self):
"""This function prints the pieces onto the board at the given position from the user"""
piece = Circle(Point(self.x * 100 + 50, 600 - (self.y * 100 + 50)), 30)
piece.setFill('red')
piece.draw(self.window)
return
def handleClick(self, point):
"""This function works with the user to add each move into the board count and to the current grid"""
self.newX = point.getX()
self.x = self.newX // 100
self.y = self.boardCount[self.x]
if self.validClick(self.x):
self.boardCount[self.x] += 1
self.limitCounter += 1
self.grid[self.x][self.y] = self.currentUser
if self.isWon() == False:
self.limitCounter += 1
self.computerMove()
self.drawUmove()
def isWon(self):
"""This function checks if there is a winner in the game (True/False) and calls printWinner function"""
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j + 1]
self.square3 = self.grid[i][j + 2]
self.square4 = self.grid[i][j + 3]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j + 1]
self.square3 = self.grid[i + 2][j + 2]
self.square4 = self.grid[i + 3][j + 3]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
for i in range(3, 7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i - 1][j + 1]
self.square3 = self.grid[i - 2][j + 2]
self.square4 = self.grid[i - 3][j + 3]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j]
self.square3 = self.grid[i + 2][j]
self.square4 = self.grid[i + 3][j]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
if self.limitCounter == 42:
self.printWinner(3)
return True
return False
def printWinner(self, winner):
"""This function prints who the winner is or if it is a tie"""
if winner == 3:
txt = Text(Point(345, 300), 'Tie Game!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
elif winner == 1:
txt = Text(Point(345, 300), 'You Won!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
else:
txt = Text(Point(345, 300), 'Computer Won!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
def validCmove(self, x, y):
"""This function checks if the computer's move will be valid"""
if self.boardCount[x] > y:
return False
""" if it tries to place below the highest piece"""
if self.boardCount[x] < y:
return False
"""if it tries to place it in a column with 6 pieces already"""
if self.boardCount[x] >= 6:
return False
else:
return True
def drawCmove(self, x, y):
"""This function adds the computer's move to the game board and adds it to the board count"""
piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)
piece.setFill('yellow')
piece.draw(self.window)
self.boardCount[x] += 1
self.grid[x][y] = -1
return
<mask token>
def randomMove(self):
"""This function creates a random coordinate for its move, checks if it's valid, then prints the move.
It will continue to run until numbers are valid for current board"""
randY = random.randint(0, 6)
randX = random.randint(0, 7)
if self.validCmove(randY, randX):
self.drawCmove(randY, randX)
return
else:
self.randomMove()
<mask token>
| <mask token>
class ConnectWindow:
<mask token>
def startScreen(self):
"""This function creates the board and intializes the board count for each column"""
self.background = Rectangle(Point(0, 0), Point(690, 590))
self.background.setFill('blue')
self.background.draw(self.window)
for i in range(7):
for j in range(6):
self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)
self.Circles.setFill('white')
self.Circles.draw(self.window)
for i in range(6):
self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i *
100 + 100))
self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i +
100, 900))
self.horizLine.draw(self.window)
self.vertLine.draw(self.window)
self.grid = [[], [], [], [], [], [], []]
self.boardCount = [0, 0, 0, 0, 0, 0, 0]
counter = 2
for x in range(7):
for y in range(6):
self.grid[x].append(counter)
counter += 1
def validClick(self, x):
"""This function checks if there is enough space vertically for move to be valid"""
if self.boardCount[x] >= 6:
print('Invalid Move')
return False
else:
return True
def drawUmove(self):
"""This function prints the pieces onto the board at the given position from the user"""
piece = Circle(Point(self.x * 100 + 50, 600 - (self.y * 100 + 50)), 30)
piece.setFill('red')
piece.draw(self.window)
return
def handleClick(self, point):
"""This function works with the user to add each move into the board count and to the current grid"""
self.newX = point.getX()
self.x = self.newX // 100
self.y = self.boardCount[self.x]
if self.validClick(self.x):
self.boardCount[self.x] += 1
self.limitCounter += 1
self.grid[self.x][self.y] = self.currentUser
if self.isWon() == False:
self.limitCounter += 1
self.computerMove()
self.drawUmove()
def isWon(self):
"""This function checks if there is a winner in the game (True/False) and calls printWinner function"""
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j + 1]
self.square3 = self.grid[i][j + 2]
self.square4 = self.grid[i][j + 3]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j + 1]
self.square3 = self.grid[i + 2][j + 2]
self.square4 = self.grid[i + 3][j + 3]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
for i in range(3, 7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i - 1][j + 1]
self.square3 = self.grid[i - 2][j + 2]
self.square4 = self.grid[i - 3][j + 3]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j]
self.square3 = self.grid[i + 2][j]
self.square4 = self.grid[i + 3][j]
if (self.square1 == self.square2 and self.square2 == self.
square3 and self.square3 == self.square4):
self.printWinner(self.square1)
return True
if self.limitCounter == 42:
self.printWinner(3)
return True
return False
def printWinner(self, winner):
"""This function prints who the winner is or if it is a tie"""
if winner == 3:
txt = Text(Point(345, 300), 'Tie Game!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
elif winner == 1:
txt = Text(Point(345, 300), 'You Won!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
else:
txt = Text(Point(345, 300), 'Computer Won!')
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
def validCmove(self, x, y):
"""This function checks if the computer's move will be valid"""
if self.boardCount[x] > y:
return False
""" if it tries to place below the highest piece"""
if self.boardCount[x] < y:
return False
"""if it tries to place it in a column with 6 pieces already"""
if self.boardCount[x] >= 6:
return False
else:
return True
def drawCmove(self, x, y):
"""This function adds the computer's move to the game board and adds it to the board count"""
piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)
piece.setFill('yellow')
piece.draw(self.window)
self.boardCount[x] += 1
self.grid[x][y] = -1
return
def computerMove(self):
"""This function computes where the computer will put its next move and calls the drawCmove() fxn to do so.
The computer will add its piece to wherever there are three in a row in either color then looks to see when
there are two in a row. Move will be placed randomly if no pieces are placed in a row"""
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j + 1]
self.square3 = self.grid[i][j + 2]
if (self.square1 == self.square2 and self.square2 == self.
square3):
if self.validCmove(i, j + 3):
self.drawCmove(i, j + 3)
return
else:
self.randomMove()
return
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j + 1]
self.square3 = self.grid[i + 2][j + 2]
if (self.square1 == self.square2 and self.square2 == self.
square3):
if self.validCmove(i + 3, j + 3):
self.drawCmove(i + 3, j + 3)
return
if self.validCmove(i - 1, j - 1):
self.drawCmove(i - 1, j - 1)
else:
self.randomMove()
return
for i in range(3, 7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i - 1][j + 1]
self.square3 = self.grid[i - 2][j + 2]
if (self.square1 == self.square2 and self.square2 == self.
square3):
if self.validCmove(i - 3, j + 3):
self.drawCmove(i - 3, j + 3)
return
if self.validCmove(i + 1, j - 1):
self.drawCmove(i + 1, j - 1)
else:
self.randomMove()
return
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j]
self.square3 = self.grid[i + 2][j]
if (self.square1 == self.square2 and self.square2 == self.
square3):
if self.validCmove(i + 3, j):
self.drawCmove(i + 3, j)
return
if self.validCmove(i - 1, j):
self.drawCmove(i - 1, j)
return
else:
self.randomMove()
return
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j + 1]
if self.square1 == self.square2:
if self.validCmove(i + 2, j + 2):
self.drawCmove(i + 2, j + 2)
return
if self.validCmove(i - 1, j - 1):
self.drawCmove(i - 1, j - 1)
else:
self.randomMove()
return
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j + 1]
if self.square1 == self.square2:
if self.validCmove(i, j + 2):
self.drawCmove(i, j + 2)
return
if self.validCmove(i, j - 1):
self.drawCmove(i, j - 1)
return
else:
self.randomMove()
return
for i in range(3, 7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i - 1][j + 1]
if self.square1 == self.square2:
if self.validCmove(i - 2, j + 2):
self.drawCmove(i - 2, j + 2)
return
if self.validCmove(i + 1, j - 1):
self.drawCmove(i + 1, j - 1)
else:
self.randomMove()
return
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i + 1][j]
if self.square1 == self.square2:
if self.validCmove(i + 2, j):
self.drawCmove(i + 2, j)
return
if self.validCmove(i - 1, j):
self.drawCmove(i - 1, j)
return
else:
self.randomMove()
return
else:
self.randomMove()
def randomMove(self):
"""This function creates a random coordinate for its move, checks if it's valid, then prints the move.
It will continue to run until numbers are valid for current board"""
randY = random.randint(0, 6)
randX = random.randint(0, 7)
if self.validCmove(randY, randX):
self.drawCmove(randY, randX)
return
else:
self.randomMove()
<mask token>
| #connect4_JayNa.py
#Jay Na
#CS111 Spring 2018
#This file creates a version of the game Connect4, where the user plays against an AI
from graphics import *
import random
class ConnectWindow:
def __init__(self):
self.window = GraphWin("Connect Four", 690, 590)
self.window.setMouseHandler(self.handleClick)
self.startScreen()
self.currentUser = 1
self.limitCounter = 0
def startScreen(self):
        '''This function creates the board and initializes the board count for each column'''
#draws blue rectangle as the background
self.background = Rectangle(Point(0,0), Point(690,590))
self.background.setFill('blue')
self.background.draw(self.window)
#draws white circles to represent the spots for the game
for i in range(7):
for j in range(6):
self.Circles = Circle(Point(i*100+50,j*100+50),(30))
self.Circles.setFill('white')
self.Circles.draw(self.window)
#draws lines to separate circles in rectangle
for i in range(6):
self.horizLine = Line(Point(0,i*100+100), Point(900,i*100+100))
self.vertLine = Line(Point(100*i+100,0), Point(100*i+100,900))
self.horizLine.draw(self.window)
self.vertLine.draw(self.window)
#initiates counts for each column and creates grid
self.grid = [[],[],[],[],[],[],[]]
self.boardCount = [0,0,0,0,0,0,0]
counter = 2
#help from CS Major, Joh Farmer
for x in range(7):
for y in range(6):
self.grid[x].append(counter)
counter += 1
def validClick(self, x):
'''This function checks if there is enough space vertically for move to be valid'''
if self.boardCount[x] >= 6:
print("Invalid Move")
return False
else:
return True
def drawUmove(self):
'''This function prints the pieces onto the board at the given position from the user'''
piece = Circle(Point(self.x*100+50, 600-(self.y*100+50)),30)
piece.setFill('red')
piece.draw(self.window)
return
def handleClick(self, point):
'''This function works with the user to add each move into the board count and to the current grid'''
self.newX = point.getX()
self.x = self.newX//100
self.y = self.boardCount[self.x]
if self.validClick(self.x):
self.boardCount[self.x] += 1
self.limitCounter += 1
self.grid[self.x][self.y] = self.currentUser
if self.isWon() == False:
self.limitCounter += 1
self.computerMove()
self.drawUmove()
def isWon(self):
'''This function checks if there is a winner in the game (True/False) and calls printWinner function'''
#checks to see if there is a winner vertically
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j+1]
self.square3 = self.grid[i][j+2]
self.square4 = self.grid[i][j+3]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks to see if there is a winner diagonally from lower left to upper right
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j+1]
self.square3 = self.grid[i+2][j+2]
self.square4 = self.grid[i+3][j+3]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks to see if there is a winner diagonally from upper left to lower right
for i in range(3,7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i-1][j+1]
self.square3 = self.grid[i-2][j+2]
self.square4 = self.grid[i-3][j+3]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks to see if there is a winner horizontally
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j]
self.square3 = self.grid[i+2][j]
self.square4 = self.grid[i+3][j]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks if board is full without a winner (tie)
if self.limitCounter == 42:
self.printWinner(3)
return True
return False
def printWinner(self, winner):
'''This function prints who the winner is or if it is a tie'''
#if input is 3 from isWon() fxn, game is tied and so "Tie Game!" is printed
if winner == 3:
txt = Text(Point(345, 300), "Tie Game!")
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
else:
#prints "You Won!" if user wins
if winner == 1:
txt = Text(Point(345, 300), "You Won!")
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
else:
#prints "Computer Won!" if computer wins
txt = Text(Point(345, 300), "Computer Won!")
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
def validCmove(self, x, y):
'''This function checks if the computer's move will be valid'''
        #checks if it tries to place a piece higher than the current top of the column
if self.boardCount[x] > y:
return False
''' if it tries to place below the highest piece'''
if self.boardCount[x] < y:
return False
'''if it tries to place it in a column with 6 pieces already'''
if self.boardCount[x] >= 6:
return False
else:
return True
def drawCmove(self, x ,y):
'''This function adds the computer's move to the game board and adds it to the board count'''
piece = Circle(Point((x)*100+50, 600 - ((y)*100+50)),30)
piece.setFill('yellow')
piece.draw(self.window)
self.boardCount[x] += 1
self.grid[x][y] = -1
return
def computerMove(self):
'''This function computes where the computer will put its next move and calls the drawCmove() fxn to do so.
The computer will add its piece to wherever there are three in a row in either color then looks to see when
there are two in a row. Move will be placed randomly if no pieces are placed in a row'''
#checks if there are three pieces lined up vertically in a row and places its move to win or prevent the win'''
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j+1]
self.square3 = self.grid[i][j+2]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i,j+3):
self.drawCmove(i,j+3)
return
else:
self.randomMove()
return
#checks if there are three pieces lined up diagonally from lower left to upper right and places its move to win or prevent the win
#help from CS major, Joh Farmer
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j+1]
self.square3 = self.grid[i+2][j+2]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i+3,j+3):
self.drawCmove(i+3,j+3)
return
if self.validCmove(i-1,j-1):
self.drawCmove(i-1,j-1)
else:
self.randomMove()
return
#checks if there are three pieces lined up diagonally from lower right to upper left and places its move to win or prevent the win
for i in range(3,7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i-1][j+1]
self.square3 = self.grid[i-2][j+2]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i-3,j+3):
self.drawCmove(i-3,j+3)
return
if self.validCmove(i+1,j-1):
self.drawCmove(i+1,j-1)
else:
self.randomMove()
return
#checks if there are three pieces lined up horizontally in a row and places its move to win or prevent the win (either side)'''
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j]
self.square3 = self.grid[i+2][j]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i+3,j):
self.drawCmove(i+3,j)
return
if self.validCmove(i-1,j):
self.drawCmove(i-1,j)
return
else:
self.randomMove()
return
#checks if there are two in a row diagonally from lower left to upper right and places its move accordingly
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j+1]
if self.square1 == self.square2:
if self.validCmove(i+2,j+2):
self.drawCmove(i+2,j+2)
return
if self.validCmove(i-1,j-1):
self.drawCmove(i-1,j-1)
else:
self.randomMove()
return
#checks if there are two in a row vertically and places its move accordingly
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j+1]
if self.square1 == self.square2:
if self.validCmove(i,j+2):
self.drawCmove(i,j+2)
return
if self.validCmove(i,j-1):
self.drawCmove(i,j-1)
return
else:
self.randomMove()
return
#checks if there are two in a row diagonally from lower right to upper left and places its move accordingly
for i in range(3,7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i-1][j+1]
if self.square1 == self.square2:
if self.validCmove(i-2,j+2):
self.drawCmove(i-2,j+2)
return
if self.validCmove(i+1,j-1):
self.drawCmove(i+1,j-1)
else:
self.randomMove()
return
#checks if there are two in a row horizontally and places its move accordingly
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j]
if self.square1 == self.square2:
if self.validCmove(i+2,j):
self.drawCmove(i+2,j)
return
if self.validCmove(i-1,j):
self.drawCmove(i-1,j)
return
else:
self.randomMove()
return
#places move randomly if no pieces are being placed in a row
else:
self.randomMove()
def randomMove(self):
'''This function creates a random coordinate for its move, checks if it's valid, then prints the move.
It will continue to run until numbers are valid for current board'''
randY = random.randint(0,6)
randX = random.randint(0,7)
if self.validCmove(randY,randX):
self.drawCmove(randY,randX)
return
else:
self.randomMove()
def main():
gameOver = False
connect4 = ConnectWindow()
while gameOver == False:
connect4.window.getMouse()
gameOver = connect4.isWon()
input("Hit enter to quit")
main()
| [
2,
8,
10,
11,
16
] |
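The four directional scans in isWon above share one pattern: test every 4-cell window along a fixed (dx, dy) step. A compact equivalent over a 7x6 grid like self.grid is sketched below; it relies on the record's trick of pre-filling empty cells with unique counters, so four equal cells can only be a real win. The helper name is illustrative.

def four_in_a_row(grid, cols=7, rows=6):
    # Directions: vertical, horizontal, rising diagonal, falling diagonal.
    for dx, dy in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for x in range(cols):
            for y in range(rows):
                end_x, end_y = x + 3 * dx, y + 3 * dy
                if not (0 <= end_x < cols and 0 <= end_y < rows):
                    continue  # window would run off the board
                cells = [grid[x + k * dx][y + k * dy] for k in range(4)]
                if cells[0] == cells[1] == cells[2] == cells[3]:
                    return cells[0]  # winning player's marker (1 or -1)
    return None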
770 | 96d5cf948a9b0f622889977e8b26993299bceead | <mask token>
| <mask token>
def file_name(file_dir):
root_tmp = []
dirs_tmp = []
files_tmp = []
for root, dirs, files in os.walk(file_dir):
root_tmp.append(root)
dirs_tmp.append(dirs)
files_tmp.append(files)
return root_tmp, dirs_tmp, files_tmp
<mask token>
for key in lot_list:
if key in combined_all.keys():
print('The Lot %d in %s already existed in %s' % (key, file_tmp,
combined_all[key]))
<mask token>
for row_tmp in df.index:
lot_tmp = df.iloc[row_tmp, :].Lot
if lot_tmp == lot_last:
list_tmp.append(df.iloc[row_tmp, :])
counter += 1
else:
df_tmp = pd.concat(list_tmp, axis=1)
combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T
feature_tmp = df_tmp.T.iloc[:, 7:]
feature_tmp = df_scaler.fit_transform(feature_tmp)
t1 = np.mean(feature_tmp)
t2 = np.std(feature_tmp)
t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2
t4 = np.sqrt(np.mean(feature_tmp ** 2))
t5 = np.max(feature_tmp)
t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 **
3)
t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 **
4)
t8 = t5 / t4
t9 = t5 / t3
t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))
t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))
feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10,
t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T.
recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last]
list_tmp = []
idx += 1
counter = 0
lot_last = lot_tmp
print(row_tmp)
| <mask token>
def file_name(file_dir):
root_tmp = []
dirs_tmp = []
files_tmp = []
for root, dirs, files in os.walk(file_dir):
root_tmp.append(root)
dirs_tmp.append(dirs)
files_tmp.append(files)
return root_tmp, dirs_tmp, files_tmp
root = '/home/ninja1mmm/Desktop/phm/data'
root_tmp, dirs_tmp, files_tmp = file_name(root)
combined_all = {}
feature_all = pd.DataFrame(columns=['mean', 'std', 'root amplitude', 'rms',
'max', 'skewness', 'kurtosis', 'peak factor', 'margin', 'waveform',
'pulse', 'start_time', 'end_time', 'recipe', 'stage', 'Lot'])
file_tmp = files_tmp[2][0]
path_tmp = root_tmp[2] + '/' + file_tmp
df = pd.read_pickle(path_tmp)
df = df.replace([np.inf, -np.inf], np.nan).dropna()
df = df.reset_index(drop=True)
df_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
lot_list = list(set(df.Lot))
for key in lot_list:
if key in combined_all.keys():
print('The Lot %d in %s already existed in %s' % (key, file_tmp,
combined_all[key]))
list_tmp = []
lot_last = df.Lot[0]
counter = 0
idx = 0
for row_tmp in df.index:
lot_tmp = df.iloc[row_tmp, :].Lot
if lot_tmp == lot_last:
list_tmp.append(df.iloc[row_tmp, :])
counter += 1
else:
df_tmp = pd.concat(list_tmp, axis=1)
combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T
feature_tmp = df_tmp.T.iloc[:, 7:]
feature_tmp = df_scaler.fit_transform(feature_tmp)
t1 = np.mean(feature_tmp)
t2 = np.std(feature_tmp)
t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2
t4 = np.sqrt(np.mean(feature_tmp ** 2))
t5 = np.max(feature_tmp)
t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 **
3)
t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 **
4)
t8 = t5 / t4
t9 = t5 / t3
t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))
t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))
feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10,
t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T.
recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last]
list_tmp = []
idx += 1
counter = 0
lot_last = lot_tmp
print(row_tmp)
| <mask token>
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
def file_name(file_dir):
root_tmp = []
dirs_tmp = []
files_tmp = []
for root, dirs, files in os.walk(file_dir):
root_tmp.append(root)
dirs_tmp.append(dirs)
files_tmp.append(files)
return root_tmp, dirs_tmp, files_tmp
root = '/home/ninja1mmm/Desktop/phm/data'
root_tmp, dirs_tmp, files_tmp = file_name(root)
combined_all = {}
feature_all = pd.DataFrame(columns=['mean', 'std', 'root amplitude', 'rms',
'max', 'skewness', 'kurtosis', 'peak factor', 'margin', 'waveform',
'pulse', 'start_time', 'end_time', 'recipe', 'stage', 'Lot'])
file_tmp = files_tmp[2][0]
path_tmp = root_tmp[2] + '/' + file_tmp
df = pd.read_pickle(path_tmp)
df = df.replace([np.inf, -np.inf], np.nan).dropna()
df = df.reset_index(drop=True)
df_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
lot_list = list(set(df.Lot))
for key in lot_list:
if key in combined_all.keys():
print('The Lot %d in %s already existed in %s' % (key, file_tmp,
combined_all[key]))
list_tmp = []
lot_last = df.Lot[0]
counter = 0
idx = 0
for row_tmp in df.index:
lot_tmp = df.iloc[row_tmp, :].Lot
if lot_tmp == lot_last:
list_tmp.append(df.iloc[row_tmp, :])
counter += 1
else:
df_tmp = pd.concat(list_tmp, axis=1)
combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T
feature_tmp = df_tmp.T.iloc[:, 7:]
feature_tmp = df_scaler.fit_transform(feature_tmp)
t1 = np.mean(feature_tmp)
t2 = np.std(feature_tmp)
t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2
t4 = np.sqrt(np.mean(feature_tmp ** 2))
t5 = np.max(feature_tmp)
t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 **
3)
t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 **
4)
t8 = t5 / t4
t9 = t5 / t3
t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))
t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))
feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10,
t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T.
recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last]
list_tmp = []
idx += 1
counter = 0
lot_last = lot_tmp
print(row_tmp)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 13:37:10 2018
@author: ninja1mmm
"""
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
def file_name(file_dir):
root_tmp=[]
dirs_tmp=[]
files_tmp=[]
for root, dirs, files in os.walk(file_dir):
root_tmp.append(root)
dirs_tmp.append(dirs)
files_tmp.append(files)
return root_tmp, dirs_tmp, files_tmp
root = '/home/ninja1mmm/Desktop/phm/data'
root_tmp, dirs_tmp, files_tmp = file_name(root)
combined_all = {}
feature_all = pd.DataFrame(columns = ['mean', 'std','root amplitude',
'rms','max','skewness','kurtosis',
'peak factor','margin','waveform',
'pulse','start_time', 'end_time',
'recipe', 'stage', 'Lot'])
#df_check = pd.DataFrame()
# read the first file to test here
file_tmp = files_tmp[2][0]
# iterate through the files if needed
#for file_tmp in files_tmp[2]:
path_tmp = root_tmp[2]+'/'+file_tmp
df = pd.read_pickle(path_tmp)
#df_tmp = df[df['Lot']==28113]
#if len(df_tmp)>0:
# df_tmp = df_tmp.iloc[0,:]
# df_check = df_check.append(df_tmp)
#------------------------------------------------------------------------------
# Crucial step
df=df.replace([np.inf, -np.inf], np.nan).dropna()
df=df.reset_index(drop=True)
df_scaler = preprocessing.MinMaxScaler(feature_range = (0,1))
#------------------------------------------------------------------------------
lot_list = list(set(df.Lot))
# Check if Lot already existed
for key in lot_list:
if key in combined_all.keys():
print('The Lot %d in %s already existed in %s' % (key, file_tmp,
combined_all[key]))
# for tmp in lot_list:
# combined_all[tmp] = file_tmp
# Select and save all the wafer processing cycles
list_tmp = []
lot_last = df.Lot[0]
counter = 0
idx = 0
# Specify the range. Here set to 100000 for the ease of test
for row_tmp in df.index:
lot_tmp = df.iloc[row_tmp,:].Lot
if lot_tmp == lot_last:
list_tmp.append(df.iloc[row_tmp,:])
counter += 1
else:
df_tmp = pd.concat(list_tmp, axis = 1)
# lot_last serves as the key, can be changed
# combined_all[lot_last] = df_tmp.T
combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T
# Calculate mean and save in feature dictionary as an example
# Normalize the data again because for some parameters we need the local (within cycle) feature
        feature_tmp = df_tmp.T.iloc[:,7:] # Not a correct way, because the shutter position also needs to be excluded
feature_tmp = df_scaler.fit_transform(feature_tmp)
# ------------------------------------------------------------------
# Add features here. Remember to add new columns when initialzing df
t1 = np.mean(feature_tmp)
t2 = np.std(feature_tmp)
t3 = np.mean(np.sqrt(np.abs(feature_tmp)))**2
t4 = np.sqrt(np.mean(feature_tmp**2))
t5 = np.max(feature_tmp)
t6 = np.sum((feature_tmp-t1)**3)/((len(feature_tmp)-1)*(t2**3))
t7 = np.sum((feature_tmp-t1)**4)/((len(feature_tmp)-1)*(t2**4))
t8 = t5/t4
t9 = t5/t3
t10 = t4/(np.sum(np.abs(feature_tmp))/len(feature_tmp))
t11 = t5/(np.sum(np.abs(feature_tmp))/(len(feature_tmp)))
# Newly added
# First order difference
# ---------------------------------------------------------------------
feature_all.loc[idx,:] = [t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,
df_tmp.T.time.iloc[0],df_tmp.T.time.iloc[-1],
df_tmp.T.recipe.iloc[0],df_tmp.T.stage.iloc[0],
lot_last]
list_tmp = []
idx += 1
counter = 0
lot_last = lot_tmp
print(row_tmp)
#------------------------------------------------------------------------------
| [
0,
2,
3,
4,
5
] |
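The t1-t11 block in the record above computes standard time-domain condition-monitoring features. The same formulas as a named, standalone helper (x is the flattened, min-max-scaled cycle signal), which may be easier to audit than the numbered temporaries:

import numpy as np

def time_domain_features(x):
    x = np.asarray(x, dtype=float).ravel()
    mean, std = x.mean(), x.std()
    root_amp = np.mean(np.sqrt(np.abs(x))) ** 2   # squared mean root amplitude
    rms = np.sqrt(np.mean(x ** 2))
    peak = x.max()
    abs_mean = np.mean(np.abs(x))
    return {
        'mean': mean, 'std': std, 'root amplitude': root_amp,
        'rms': rms, 'max': peak,
        'skewness': np.sum((x - mean) ** 3) / ((len(x) - 1) * std ** 3),
        'kurtosis': np.sum((x - mean) ** 4) / ((len(x) - 1) * std ** 4),
        'peak factor': peak / rms,      # crest factor
        'margin': peak / root_amp,
        'waveform': rms / abs_mean,     # shape factor
        'pulse': peak / abs_mean,       # impulse factor
    }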
771 | d09984c6e6a0ce82389dbbbade63507e9687355d | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('Pages', '0014_auto_20191223_2032')]
operations = [migrations.AlterField(model_name='dept', name=
'Hospital_id', field=models.ForeignKey(default='null', on_delete=
django.db.models.deletion.CASCADE, to='Pages.Hospital'))]
| from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('Pages', '0014_auto_20191223_2032')]
operations = [migrations.AlterField(model_name='dept', name=
'Hospital_id', field=models.ForeignKey(default='null', on_delete=
django.db.models.deletion.CASCADE, to='Pages.Hospital'))]
| # Generated by Django 2.2.6 on 2019-12-23 16:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Pages', '0014_auto_20191223_2032'),
]
operations = [
migrations.AlterField(
model_name='dept',
name='Hospital_id',
field=models.ForeignKey(default='null', on_delete=django.db.models.deletion.CASCADE, to='Pages.Hospital'),
),
]
| [
0,
1,
2,
3,
4
] |
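One detail worth flagging in the migration above: default='null' sets the column default to the four-character string 'null', not SQL NULL. If a nullable foreign key was the intent, the usual spelling is null=True; a hedged sketch of that variant (not the project's actual code):

from django.db import migrations, models
import django.db.models.deletion

operations = [
    migrations.AlterField(
        model_name='dept',
        name='Hospital_id',
        field=models.ForeignKey(
            null=True,   # store SQL NULL rather than the string 'null'
            blank=True,
            on_delete=django.db.models.deletion.CASCADE,
            to='Pages.Hospital',
        ),
    ),
]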
772 | 5d4ef314bb7169f5de4795e5c1aca62a1a060bae | <mask token>
class AdminCityTable(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
| <mask token>
class AdminStateModel(models.Model):
<mask token>
<mask token>
<mask token>
class AdminCityTable(models.Model):
city_id = models.AutoField(primary_key=True)
city_name = models.CharField(max_length=30, unique=True)
state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)
def __str__(self):
return self.city_name
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
| <mask token>
class AdminLoginModel(models.Model):
user_name = models.CharField(max_length=30, unique=True)
password = models.CharField(max_length=16)
class AdminStateModel(models.Model):
state_id = models.AutoField(primary_key=True)
state_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.state_name
class AdminCityTable(models.Model):
city_id = models.AutoField(primary_key=True)
city_name = models.CharField(max_length=30, unique=True)
state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)
def __str__(self):
return self.city_name
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
| from django.db import models
class AdminLoginModel(models.Model):
user_name = models.CharField(max_length=30, unique=True)
password = models.CharField(max_length=16)
class AdminStateModel(models.Model):
state_id = models.AutoField(primary_key=True)
state_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.state_name
class AdminCityTable(models.Model):
city_id = models.AutoField(primary_key=True)
city_name = models.CharField(max_length=30, unique=True)
state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)
def __str__(self):
return self.city_name
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
| from django.db import models
# Login Admin Model
class AdminLoginModel(models.Model):
user_name = models.CharField(max_length=30,unique=True)
password = models.CharField(max_length=16)
# Swiggy Admin State Table
class AdminStateModel(models.Model):
state_id = models.AutoField(primary_key=True)
state_name = models.CharField(max_length=30,unique=True)
def __str__(self):
return self.state_name
# Admin City Table
class AdminCityTable(models.Model):
city_id = models.AutoField(primary_key = True)
city_name = models.CharField(max_length=30,unique=True)
state = models.ForeignKey(AdminStateModel,on_delete=models.CASCADE)
def __str__(self):
return self.city_name
#Admin Area Models for Area Operations
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key = True)
area_name = models.CharField(max_length=30,unique=True)
city = models.ForeignKey(AdminCityTable,on_delete=models.CASCADE)
def __str__(self):
return self.area_name
#Admin Restaurant type Model
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key = True)
restaurant_type_name = models.CharField(max_length=30,unique=True)
def __str__(self):
return self.restaurant_type_name
| [
7,
10,
14,
15,
16
] |
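The models above form a State -> City -> Area chain through foreign keys. A short ORM sketch of traversing that chain in both directions; the row values are illustrative, and the reverse accessor name follows Django's default lowercased-model_set convention:

state = AdminStateModel.objects.create(state_name='Gujarat')
city = AdminCityTable.objects.create(city_name='Surat', state=state)
AdminAreaModel.objects.create(area_name='Adajan', city=city)

# Forward: filter areas by the state two hops up the chain.
areas = AdminAreaModel.objects.filter(city__state=state)
# Reverse: all cities registered under the state.
cities = state.admincitytable_set.all()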
773 | 5c5f00084f37837b749e1fbb52a18d515e09ba06 | <mask token>
| <mask token>
def BFS(graph: Graph.Graph, start, end):
visited = set()
parent = dict()
parent[start] = None
queue = []
queue.append(start)
visited.add(start)
while queue:
current = queue.pop(0)
if current == end:
break
for v in graph.neighbors(current):
if v not in visited:
queue.append(v)
visited.add(v)
parent[v] = current
return parent
| import graph as Graph
def BFS(graph: Graph.Graph, start, end):
visited = set()
parent = dict()
parent[start] = None
queue = []
queue.append(start)
visited.add(start)
while queue:
current = queue.pop(0)
if current == end:
break
for v in graph.neighbors(current):
if v not in visited:
queue.append(v)
visited.add(v)
parent[v] = current
return parent
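BFS above returns the parent map rather than a path; on an unweighted graph the shortest start-to-end route falls out by walking parents back from the goal. A small companion sketch (helper name is illustrative):

def reconstruct_path(parent, start, end):
    if end not in parent:
        return None              # goal was never reached
    path = [end]
    while path[-1] != start:     # parent[start] is None by construction
        path.append(parent[path[-1]])
    path.reverse()
    return path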
| null | null | [
0,
1,
2
] |
774 | 6b785502e8a8983c164ebdffdd304da47c926acb | <mask token>
| <mask token>
class LaughsappConfig(AppConfig):
<mask token>
| <mask token>
class LaughsappConfig(AppConfig):
name = 'laughsApp'
| from django.apps import AppConfig
class LaughsappConfig(AppConfig):
name = 'laughsApp'
| null | [
0,
1,
2,
3
] |
775 | 4a7f8221208e8252c7f5c0adff2949f0e552def1 | <mask token>
| <mask token>
print(rest_endpoint)
<mask token>
print(run_id)
| <mask token>
ws = Workspace.from_config()
step1 = PythonScriptStep(name='prepare data', source_directory='scripts',
script_name='data_prep.py', compute_target='aml-cluster')
step2 = PythonScriptStep(name='train model', source_directory='scripts',
script_name='train_model.py', compute_target='aml-cluster')
train_pipeline = Pipeline(workspace=ws, steps=[step1, step2])
experiment = Experiment(workspace=ws, name='training-pipeline')
pipeline_run = experiment.submit(train_pipeline)
published_pipeline = pipeline_run.publish(name='training_pipeline',
description='Model training pipeline', version='1.0')
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
response = requests.post(rest_endpoint, headers=some_auth_header, json={
'ExperimentName': 'run_training_pipeline'})
run_id = response.json()['Id']
print(run_id)
| from azureml.core import Workspace
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
from azureml.pipeline.steps import PythonScriptStep
import requests
ws = Workspace.from_config()
step1 = PythonScriptStep(name='prepare data', source_directory='scripts',
script_name='data_prep.py', compute_target='aml-cluster')
step2 = PythonScriptStep(name='train model', source_directory='scripts',
script_name='train_model.py', compute_target='aml-cluster')
train_pipeline = Pipeline(workspace=ws, steps=[step1, step2])
experiment = Experiment(workspace=ws, name='training-pipeline')
pipeline_run = experiment.submit(train_pipeline)
published_pipeline = pipeline_run.publish(name='training_pipeline',
description='Model training pipeline', version='1.0')
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
response = requests.post(rest_endpoint, headers=some_auth_header, json={
'ExperimentName': 'run_training_pipeline'})
run_id = response.json()['Id']
print(run_id)
| from azureml.core import Workspace
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
from azureml.pipeline.steps import PythonScriptStep
import requests
ws = Workspace.from_config()
# Step to run a Python script
step1 = PythonScriptStep(
name = "prepare data",
source_directory = "scripts",
script_name = "data_prep.py",
compute_target = "aml-cluster"
)
# Step to train a model
step2 = PythonScriptStep(
name = "train model",
source_directory = "scripts",
script_name = "train_model.py",
compute_target = "aml-cluster"
)
# Construct the pipeline
train_pipeline = Pipeline(workspace = ws, steps = [step1, step2])
# Create an experiment and run the pipeline with it
experiment = Experiment(workspace = ws, name = "training-pipeline")
pipeline_run = experiment.submit(train_pipeline)
# To run all pipeline steps without cached results
# pipeline_run = experiment.submit(train_pipeline, regenerate_outputs=True)
# Publish the pipeline run
published_pipeline = pipeline_run.publish(
name="training_pipeline",
description="Model training pipeline",
version="1.0"
)
# Get the endpoint for the published pipeline
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
# Consume the pipeline through REST request
response = requests.post(
rest_endpoint,
headers=some_auth_header,
json={"ExperimentName": "run_training_pipeline"})
run_id = response.json()["Id"]
print(run_id) | [
0,
1,
2,
3,
4
] |
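The REST call in the record above depends on an undefined some_auth_header. One common way to build it with the same SDK is interactive Azure AD authentication, sketched below; ServicePrincipalAuthentication exposes the same get_authentication_header() for non-interactive runs.

from azureml.core.authentication import InteractiveLoginAuthentication

interactive_auth = InteractiveLoginAuthentication()
# Yields a dict like {'Authorization': 'Bearer <token>'} for the POST above.
some_auth_header = interactive_auth.get_authentication_header()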
776 | 03fb1cf0aac0c37858dd8163562a7139ed4e1179 | <mask token>
| <mask token>
def buildExpectations(queryPath, searchPatternPath):
"""
Based on SpeechCommand_v0.02 directory structure.
"""
expectations = []
currentDirectory = ''
queryFilename = queryPath.split('/')[-1]
queryDirectory = queryPath.split('/')[-2]
queryCode = queryFilename.split('_')[0]
searchFileList = sorted(glob.glob(searchPatternPath))
for searchFile in searchFileList:
searchFilename = searchFile.split('/')[-1]
searchDirectory = searchFile.split('/')[-2]
searchCode = searchFilename.split('_')[0]
if searchDirectory != currentDirectory:
currentDirectory = searchDirectory
if searchCode == queryCode:
if currentDirectory == queryDirectory:
expectations.append([[0, 1]])
else:
expectations.append([[0, 0]])
return expectations
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dynamic Time Warping')
parser.add_argument('-g', '--graph', action='store_true', help=
'Enable graph display')
parser.add_argument('-t', '--threshold', type=float, default=0.4, help=
'Set score threshold')
parser.add_argument('query_path')
parser.add_argument('search_pattern_path')
printGroup = parser.add_mutually_exclusive_group()
printGroup.add_argument('-p', '--percentage', action='store_true', help
='Enable percentage display')
printGroup.add_argument('-v', '--verbose', action='store_true', help=
'Enable verbose display')
args = parser.parse_args()
GRAPH = args.graph
PERCENTAGE = args.percentage
threshold = args.threshold
VERBOSE = args.verbose
queryPath = args.query_path
searchPatternPath = args.search_pattern_path
dtw.VERBOSE = VERBOSE
stats.VERBOSE = VERBOSE
labels, sweepList, bestList = dtw.runSearch(queryPath, searchPatternPath)
results = dtw.computeResultsPrecisely(sweepList, threshold,
positiveOnly=True)
for i, result in enumerate(results):
print(labels[i] + ': ', end='')
for j, (hitIndex, _) in enumerate(result):
print(hitIndex * 3, end='')
if j < len(result) - 1:
print(' | ', end='')
print()
if GRAPH:
dtw.showSweeps(labels, sweepList, bestList)
plt.show()
| <mask token>
GRAPH = False
PERCENTAGE = False
VERBOSE = False
def buildExpectations(queryPath, searchPatternPath):
"""
Based on SpeechCommand_v0.02 directory structure.
"""
expectations = []
currentDirectory = ''
queryFilename = queryPath.split('/')[-1]
queryDirectory = queryPath.split('/')[-2]
queryCode = queryFilename.split('_')[0]
searchFileList = sorted(glob.glob(searchPatternPath))
for searchFile in searchFileList:
searchFilename = searchFile.split('/')[-1]
searchDirectory = searchFile.split('/')[-2]
searchCode = searchFilename.split('_')[0]
if searchDirectory != currentDirectory:
currentDirectory = searchDirectory
if searchCode == queryCode:
if currentDirectory == queryDirectory:
expectations.append([[0, 1]])
else:
expectations.append([[0, 0]])
return expectations
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dynamic Time Warping')
parser.add_argument('-g', '--graph', action='store_true', help=
'Enable graph display')
parser.add_argument('-t', '--threshold', type=float, default=0.4, help=
'Set score threshold')
parser.add_argument('query_path')
parser.add_argument('search_pattern_path')
printGroup = parser.add_mutually_exclusive_group()
printGroup.add_argument('-p', '--percentage', action='store_true', help
='Enable percentage display')
printGroup.add_argument('-v', '--verbose', action='store_true', help=
'Enable verbose display')
args = parser.parse_args()
GRAPH = args.graph
PERCENTAGE = args.percentage
threshold = args.threshold
VERBOSE = args.verbose
queryPath = args.query_path
searchPatternPath = args.search_pattern_path
dtw.VERBOSE = VERBOSE
stats.VERBOSE = VERBOSE
labels, sweepList, bestList = dtw.runSearch(queryPath, searchPatternPath)
results = dtw.computeResultsPrecisely(sweepList, threshold,
positiveOnly=True)
for i, result in enumerate(results):
print(labels[i] + ': ', end='')
for j, (hitIndex, _) in enumerate(result):
print(hitIndex * 3, end='')
if j < len(result) - 1:
print(' | ', end='')
print()
if GRAPH:
dtw.showSweeps(labels, sweepList, bestList)
plt.show()
| import dtw
import stats
import glob
import argparse
import matplotlib.pyplot as plt
GRAPH = False
PERCENTAGE = False
VERBOSE = False
def buildExpectations(queryPath, searchPatternPath):
"""
Based on SpeechCommand_v0.02 directory structure.
"""
expectations = []
currentDirectory = ''
queryFilename = queryPath.split('/')[-1]
queryDirectory = queryPath.split('/')[-2]
queryCode = queryFilename.split('_')[0]
searchFileList = sorted(glob.glob(searchPatternPath))
for searchFile in searchFileList:
searchFilename = searchFile.split('/')[-1]
searchDirectory = searchFile.split('/')[-2]
searchCode = searchFilename.split('_')[0]
if searchDirectory != currentDirectory:
currentDirectory = searchDirectory
if searchCode == queryCode:
if currentDirectory == queryDirectory:
expectations.append([[0, 1]])
else:
expectations.append([[0, 0]])
return expectations
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dynamic Time Warping')
parser.add_argument('-g', '--graph', action='store_true', help=
'Enable graph display')
parser.add_argument('-t', '--threshold', type=float, default=0.4, help=
'Set score threshold')
parser.add_argument('query_path')
parser.add_argument('search_pattern_path')
printGroup = parser.add_mutually_exclusive_group()
printGroup.add_argument('-p', '--percentage', action='store_true', help
='Enable percentage display')
printGroup.add_argument('-v', '--verbose', action='store_true', help=
'Enable verbose display')
args = parser.parse_args()
GRAPH = args.graph
PERCENTAGE = args.percentage
threshold = args.threshold
VERBOSE = args.verbose
queryPath = args.query_path
searchPatternPath = args.search_pattern_path
dtw.VERBOSE = VERBOSE
stats.VERBOSE = VERBOSE
labels, sweepList, bestList = dtw.runSearch(queryPath, searchPatternPath)
results = dtw.computeResultsPrecisely(sweepList, threshold,
positiveOnly=True)
for i, result in enumerate(results):
print(labels[i] + ': ', end='')
for j, (hitIndex, _) in enumerate(result):
print(hitIndex * 3, end='')
if j < len(result) - 1:
print(' | ', end='')
print()
if GRAPH:
dtw.showSweeps(labels, sweepList, bestList)
plt.show()
| import dtw
import stats
import glob
import argparse
import matplotlib.pyplot as plt
GRAPH = False
PERCENTAGE = False
VERBOSE = False
def buildExpectations(queryPath, searchPatternPath):
"""
Based on SpeechCommand_v0.02 directory structure.
"""
expectations = []
currentDirectory = ""
queryFilename = queryPath.split("/")[-1]
queryDirectory = queryPath.split("/")[-2]
queryCode = queryFilename.split("_")[0]
searchFileList = sorted(glob.glob(searchPatternPath))
for searchFile in searchFileList:
searchFilename = searchFile.split("/")[-1]
searchDirectory = searchFile.split("/")[-2]
searchCode = searchFilename.split("_")[0]
if searchDirectory != currentDirectory:
currentDirectory = searchDirectory
if searchCode == queryCode:
if currentDirectory == queryDirectory:
expectations.append([[0, 1]])
else:
expectations.append([[0, 0]])
return expectations
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description='Dynamic Time Warping')
parser.add_argument('-g', '--graph', action='store_true', help='Enable graph display')
parser.add_argument('-t', '--threshold', type=float, default=0.4, help='Set score threshold')
parser.add_argument('query_path')
parser.add_argument('search_pattern_path')
printGroup = parser.add_mutually_exclusive_group()
printGroup.add_argument('-p', '--percentage', action='store_true', help='Enable percentage display')
printGroup.add_argument('-v', '--verbose', action='store_true', help='Enable verbose display')
args = parser.parse_args()
GRAPH = args.graph
PERCENTAGE = args.percentage
threshold = args.threshold
VERBOSE = args.verbose
queryPath = args.query_path
searchPatternPath = args.search_pattern_path
dtw.VERBOSE = VERBOSE
stats.VERBOSE = VERBOSE
labels, sweepList, bestList = dtw.runSearch(queryPath, searchPatternPath)
results = dtw.computeResultsPrecisely(sweepList, threshold, positiveOnly=True)
for i, result in enumerate(results):
print(labels[i] + ": ", end='')
for j, (hitIndex, _) in enumerate(result):
print(hitIndex * 3, end='')
if j < len(result) - 1:
print(" | ", end='')
print()
if GRAPH:
dtw.showSweeps(labels, sweepList, bestList)
plt.show()
| [
0,
2,
3,
4,
5
] |
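The script above drives a local dtw module that is not included in this record. For reference, the dynamic-time-warping recurrence it is built around is small enough to sketch directly; this is the plain O(n*m) dynamic program, not the project's actual implementation:

def dtw_distance(a, b):
    n, m = len(a), len(b)
    INF = float('inf')
    cost = [[INF] * (m + 1) for _ in range(n + 1)]
    cost[0][0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            d = abs(a[i - 1] - b[j - 1])
            cost[i][j] = d + min(cost[i - 1][j],      # step in a
                                 cost[i][j - 1],      # step in b
                                 cost[i - 1][j - 1])  # step in both
    return cost[n][m]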
777 | 303a8609cb21c60a416160264c3d3da805674920 | <mask token>
@pytest.mark.slow
def test_x_noise_reg():
x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),
scale_diag=abs(x_train))
y_train = noise.sample().numpy()
too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)
too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)
x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),
scale_diag=abs(x_test))
y_test = noise.sample().numpy()
out1 = too_much_noise.pdf(x_test, y_test).numpy()
out2 = too_much_noise.pdf(x_test, y_test).numpy()
assert all(out1 == out2)
little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)
little_noise.fit(x_train, y_train, epochs=700, verbose=0)
little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)
) / 700.0
too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)
) / 700.0
assert little_noise_score > too_much_noise_score
def test_y_noise_reg():
x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),
trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))
noise.fit(x_train, y_train, epochs=10, verbose=0)
input_model = noise._get_input_model()
y1 = input_model(y_train, training=False).numpy()
y2 = input_model(y_train, training=False).numpy()
assert np.all(y1 == y2)
y1 = input_model(y_train, training=True).numpy()
y2 = input_model(y_train, training=True).numpy()
assert not np.all(y1 == y2)
| <mask token>
tf.random.set_seed(22)
np.random.seed(22)
@pytest.mark.slow
def test_x_noise_reg():
x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),
scale_diag=abs(x_train))
y_train = noise.sample().numpy()
too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)
too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)
x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),
scale_diag=abs(x_test))
y_test = noise.sample().numpy()
out1 = too_much_noise.pdf(x_test, y_test).numpy()
out2 = too_much_noise.pdf(x_test, y_test).numpy()
assert all(out1 == out2)
little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)
little_noise.fit(x_train, y_train, epochs=700, verbose=0)
little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)
) / 700.0
too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)
) / 700.0
assert little_noise_score > too_much_noise_score
def test_y_noise_reg():
x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),
trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))
noise.fit(x_train, y_train, epochs=10, verbose=0)
input_model = noise._get_input_model()
y1 = input_model(y_train, training=False).numpy()
y2 = input_model(y_train, training=False).numpy()
assert np.all(y1 == y2)
y1 = input_model(y_train, training=True).numpy()
y2 = input_model(y_train, training=True).numpy()
assert not np.all(y1 == y2)
| <mask token>
tfd = tfp.distributions
tf.random.set_seed(22)
np.random.seed(22)
@pytest.mark.slow
def test_x_noise_reg():
x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),
scale_diag=abs(x_train))
y_train = noise.sample().numpy()
too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)
too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)
x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),
scale_diag=abs(x_test))
y_test = noise.sample().numpy()
out1 = too_much_noise.pdf(x_test, y_test).numpy()
out2 = too_much_noise.pdf(x_test, y_test).numpy()
assert all(out1 == out2)
little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)
little_noise.fit(x_train, y_train, epochs=700, verbose=0)
little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)
) / 700.0
too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)
) / 700.0
assert little_noise_score > too_much_noise_score
def test_y_noise_reg():
x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),
trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))
noise.fit(x_train, y_train, epochs=10, verbose=0)
input_model = noise._get_input_model()
y1 = input_model(y_train, training=False).numpy()
y2 = input_model(y_train, training=False).numpy()
assert np.all(y1 == y2)
y1 = input_model(y_train, training=True).numpy()
y2 = input_model(y_train, training=True).numpy()
assert not np.all(y1 == y2)
| import tensorflow as tf
import tensorflow_probability as tfp
import pytest
import numpy as np
from estimators import NormalizingFlowNetwork
tfd = tfp.distributions
tf.random.set_seed(22)
np.random.seed(22)
@pytest.mark.slow
def test_x_noise_reg():
x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train),
scale_diag=abs(x_train))
y_train = noise.sample().numpy()
too_much_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('fixed_rate', 3.0), trainable_base_dist=True)
too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)
x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test),
scale_diag=abs(x_test))
y_test = noise.sample().numpy()
out1 = too_much_noise.pdf(x_test, y_test).numpy()
out2 = too_much_noise.pdf(x_test, y_test).numpy()
assert all(out1 == out2)
little_noise = NormalizingFlowNetwork(1, n_flows=2, hidden_sizes=(16,
16), noise_reg=('rule_of_thumb', 0.1), trainable_base_dist=True)
little_noise.fit(x_train, y_train, epochs=700, verbose=0)
little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)
) / 700.0
too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)
) / 700.0
assert little_noise_score > too_much_noise_score
def test_y_noise_reg():
x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape(
(10, 3))
noise = NormalizingFlowNetwork(3, n_flows=3, hidden_sizes=(16, 16),
trainable_base_dist=True, noise_reg=('fixed_rate', 1.0))
noise.fit(x_train, y_train, epochs=10, verbose=0)
input_model = noise._get_input_model()
y1 = input_model(y_train, training=False).numpy()
y2 = input_model(y_train, training=False).numpy()
assert np.all(y1 == y2)
y1 = input_model(y_train, training=True).numpy()
y2 = input_model(y_train, training=True).numpy()
assert not np.all(y1 == y2)
| import tensorflow as tf
import tensorflow_probability as tfp
import pytest
import numpy as np
from estimators import NormalizingFlowNetwork
tfd = tfp.distributions
tf.random.set_seed(22)
np.random.seed(22)
@pytest.mark.slow
def test_x_noise_reg():
x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train), scale_diag=abs(x_train))
y_train = noise.sample().numpy()
too_much_noise = NormalizingFlowNetwork(
1,
n_flows=2,
hidden_sizes=(16, 16),
noise_reg=("fixed_rate", 3.0),
trainable_base_dist=True,
)
too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)
x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test), scale_diag=abs(x_test))
y_test = noise.sample().numpy()
out1 = too_much_noise.pdf(x_test, y_test).numpy()
out2 = too_much_noise.pdf(x_test, y_test).numpy()
# making sure that the noise regularisation is deactivated in testing mode
assert all(out1 == out2)
little_noise = NormalizingFlowNetwork(
1,
n_flows=2,
hidden_sizes=(16, 16),
noise_reg=("rule_of_thumb", 0.1),
trainable_base_dist=True,
)
little_noise.fit(x_train, y_train, epochs=700, verbose=0)
little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)) / 700.0
too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)) / 700.0
assert little_noise_score > too_much_noise_score
def test_y_noise_reg():
x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))
y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))
noise = NormalizingFlowNetwork(
3,
n_flows=3,
hidden_sizes=(16, 16),
trainable_base_dist=True,
noise_reg=("fixed_rate", 1.0),
)
noise.fit(x_train, y_train, epochs=10, verbose=0)
input_model = noise._get_input_model()
# y_input should not include randomness during evaluation
y1 = input_model(y_train, training=False).numpy()
y2 = input_model(y_train, training=False).numpy()
assert np.all(y1 == y2)
# loss should include randomness during learning
y1 = input_model(y_train, training=True).numpy()
y2 = input_model(y_train, training=True).numpy()
assert not np.all(y1 == y2)
| [
2,
3,
4,
5,
6
] |
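For reference, a minimal standalone sketch of the training/inference contract that the row above asserts for the estimator's input model; it uses the stock tf.keras.layers.GaussianNoise layer, which is an assumption — the NormalizingFlowNetwork may implement its noise regularisation with a different layer.

import numpy as np
import tensorflow as tf

# GaussianNoise is the identity at inference time and draws fresh noise on
# every call in training mode, mirroring the asserts in test_y_noise_reg.
noise_layer = tf.keras.layers.GaussianNoise(stddev=0.5)
x = np.linspace(-1.0, 1.0, 12, dtype=np.float32).reshape((4, 3))

y1 = noise_layer(x, training=False).numpy()
y2 = noise_layer(x, training=False).numpy()
assert np.all(y1 == y2)  # deterministic at evaluation time

y1 = noise_layer(x, training=True).numpy()
y2 = noise_layer(x, training=True).numpy()
assert not np.all(y1 == y2)  # stochastic during learning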
778 | 830e7e84eebd6a4adb411cc95c9e9c8ff7bdac30 | <mask token>
| def isSubsetSum(set, n, sum):
subset = [[(False) for i in range(sum + 1)] for i in range(n + 1)]
for i in range(n + 1):
subset[i][0] = True
for i in range(1, sum + 1):
subset[0][i] = False
for i in range(1, n + 1):
for j in range(1, sum + 1):
if j < set[i - 1]:
subset[i][j] = subset[i - 1][j]
if j >= set[i - 1]:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - set[i - 1]
]
return subset[n][sum]
<mask token>
| def isSubsetSum(set, n, sum):
subset = [[(False) for i in range(sum + 1)] for i in range(n + 1)]
for i in range(n + 1):
subset[i][0] = True
for i in range(1, sum + 1):
subset[0][i] = False
for i in range(1, n + 1):
for j in range(1, sum + 1):
if j < set[i - 1]:
subset[i][j] = subset[i - 1][j]
if j >= set[i - 1]:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - set[i - 1]
]
return subset[n][sum]
<mask token>
for i in range(t):
n, k = map(int, input().split())
lst = list(map(int, input().strip().split(' ')))[:n]
if isSubsetSum(lst, n, k) == True:
print('YES')
else:
print('NO')
| def isSubsetSum(set, n, sum):
subset = [[(False) for i in range(sum + 1)] for i in range(n + 1)]
for i in range(n + 1):
subset[i][0] = True
for i in range(1, sum + 1):
subset[0][i] = False
for i in range(1, n + 1):
for j in range(1, sum + 1):
if j < set[i - 1]:
subset[i][j] = subset[i - 1][j]
if j >= set[i - 1]:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - set[i - 1]
]
return subset[n][sum]
t = int(input())
for i in range(t):
n, k = map(int, input().split())
lst = list(map(int, input().strip().split(' ')))[:n]
if isSubsetSum(lst, n, k) == True:
print('YES')
else:
print('NO')
| def isSubsetSum(set, n, sum):
subset =([[False for i in range(sum + 1)] for i in range(n + 1)])
for i in range(n + 1):
subset[i][0] = True
for i in range(1, sum + 1):
subset[0][i]= False
for i in range(1, n + 1):
for j in range(1, sum + 1):
if j<set[i-1]:
subset[i][j] = subset[i-1][j]
if j>= set[i-1]:
subset[i][j] = (subset[i-1][j] or subset[i - 1][j-set[i-1]])
return subset[n][sum]
t=int(input())
for i in range(t):
n,k=map(int,input().split())
lst=list(map(int,input().strip().split(' ')))[:n]
if (isSubsetSum(lst, n, k) == True):
print("YES")
else:
print("NO")
| [
0,
1,
2,
3,
4
] |
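For reference, a space-optimised sketch of the same subset-sum check: a single boolean row updated right-to-left replaces the (n+1) x (sum+1) table. The function name is illustrative, not taken from the row above.

def is_subset_sum(values, target):
    # reachable[j] is True when some subset of the values seen so far sums to j
    reachable = [False] * (target + 1)
    reachable[0] = True  # the empty subset sums to 0
    for v in values:
        # iterate downwards so each value is used at most once
        for j in range(target, v - 1, -1):
            if reachable[j - v]:
                reachable[j] = True
    return reachable[target]

assert is_subset_sum([3, 34, 4, 12, 5, 2], 9)       # 4 + 5
assert not is_subset_sum([3, 34, 4, 12, 5, 2], 30)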
779 | e60c3a6aececd97ec08ae32b552bcda795375b3b | import os
import sys
from shutil import copyfile
def buildDocumentation():
"""
Build eMonitor Documentation with sphinx
:param sys.argv:
* html: build html documentation in directory */docs/output/html*
* pdf: build pdf documentation in directory */docs/output/pdf*
"""
helptext = 'usage: build_doc.py <output format> <type of documentation>' \
'\n - html: for html output' \
'\n - pdf: for pdf output' \
'\n\n - all: complete documentation' \
'\n - dev: only developer documentation' \
'\n - user: only user documentation'
if len(sys.argv) != 3:
print(helptext)
sys.exit(1)
if sys.argv[1] not in ['pdf', 'html']:
print(helptext)
sys.exit(1)
if sys.argv[2] not in ['all', 'dev', 'user']:
print(helptext)
sys.exit(1)
copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory
os.system('sphinx-build -b %s -c docs -D master_doc=index . docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))
os.remove('index.rst') # delete config file from root directory
if __name__ == '__main__':
buildDocumentation()
| null | null | null | null | [
0
] |
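For reference, a Python 3 sketch of the same build flow that swaps os.system for subprocess.run; the template path and sphinx-build options are carried over from the row above as assumptions, not verified against an eMonitor checkout.

import os
import subprocess
from shutil import copyfile

def build_docs(fmt, kind):
    # copy the chosen master file into the project root, build, then clean up
    copyfile('docs/index_%s.rst.template' % kind, 'index.rst')
    try:
        subprocess.run(
            ['sphinx-build', '-b', fmt, '-c', 'docs',
             '-D', 'master_doc=index', '.',
             'docs/output/%s/%s' % (fmt, kind)],
            check=True,  # surface sphinx-build failures instead of ignoring them
        )
    finally:
        os.remove('index.rst')  # remove the temporary master file either way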
780 | 00a0668d5fcb8358b4bd7736c48e4867afc0f5b6 | <mask token>
class SolverError(Exception):
pass
<mask token>
def ecos_solve(A, b, c, dim_dict, **kwargs):
"""Wraps ecos.solve for convenience."""
ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q':
dim_dict['q'] if 'q' in dim_dict else []}
if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(
dim_dict['s']) > 0:
raise SolverError(
'Only zero, linear, and second order cones supported.')
zero = 0 if 'z' not in dim_dict else dim_dict['z']
ecos_A, ecos_G = A[:zero, :], A[zero:, :]
ecos_b, ecos_h = b[:zero], b[zero:]
sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, A=ecos_A, b=
ecos_b, **kwargs)
solution = True
x = sol['x']
s = np.concatenate([np.zeros(zero), sol['s']])
y = np.concatenate([sol['y'], sol['z']])
if sol['info']['exitFlag'] == 0:
print('prim abs res.', np.linalg.norm(A @ x + s - b))
print('dua abs res.', np.linalg.norm(A.T @ y + c))
print('s^T y', s @ y)
if sol['info']['exitFlag'] in [1, 11]:
solution = False
obj = b @ y
assert obj < 0
y /= -obj
print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))
proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))
print('primal infeas dist from cone', np.linalg.norm(proj))
x = np.zeros_like(x)
s = np.zeros_like(s)
if sol['info']['exitFlag'] in [2, 12]:
solution = False
obj = c @ x
assert obj < 0
x /= -obj
s /= -obj
print('dual infeas. cert residual norm', np.linalg.norm(A @ x + s))
proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))
print('dual infeas cert dist from cone', np.linalg.norm(s - proj))
y = np.zeros_like(y)
z = xsy2z(x, s, y, tau=solution, kappa=not solution)
return z, sol['info']
<mask token>
| <mask token>
class SolverError(Exception):
pass
def scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):
"""Wraps scs.solve for convenience."""
scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q':
dim_dict['q'] if 'q' in dim_dict else [], 's': dim_dict['s'] if 's' in
dim_dict else [], 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,
'ed': dim_dict['ed'] if 'ed' in dim_dict else 0, 'f': dim_dict['z'] if
'z' in dim_dict else 0}
sol = scs.solve({'A': A, 'b': b, 'c': c}, cone=scs_cones, **kwargs)
info = sol['info']
if info['statusVal'] > 0:
z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1.0, kappa=0.0)
if info['statusVal'] < 0:
x = np.zeros_like(sol['x']) if np.any(np.isnan(sol['x'])) else sol['x']
s = np.zeros_like(sol['s']) if np.any(np.isnan(sol['s'])) else sol['s']
y = np.zeros_like(sol['y']) if np.any(np.isnan(sol['y'])) else sol['y']
if np.allclose(y, 0.0) and c @ x < 0:
obj = c @ x
x /= -obj
s /= -obj
if np.allclose(s, 0.0) and b @ y < 0:
obj = b @ y
y /= -obj
z = xsy2z(x, s, y, tau=0.0, kappa=1.0)
return z, info
def ecos_solve(A, b, c, dim_dict, **kwargs):
"""Wraps ecos.solve for convenience."""
ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q':
dim_dict['q'] if 'q' in dim_dict else []}
if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(
dim_dict['s']) > 0:
raise SolverError(
'Only zero, linear, and second order cones supported.')
zero = 0 if 'z' not in dim_dict else dim_dict['z']
ecos_A, ecos_G = A[:zero, :], A[zero:, :]
ecos_b, ecos_h = b[:zero], b[zero:]
sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, A=ecos_A, b=
ecos_b, **kwargs)
solution = True
x = sol['x']
s = np.concatenate([np.zeros(zero), sol['s']])
y = np.concatenate([sol['y'], sol['z']])
if sol['info']['exitFlag'] == 0:
print('prim abs res.', np.linalg.norm(A @ x + s - b))
print('dua abs res.', np.linalg.norm(A.T @ y + c))
print('s^T y', s @ y)
if sol['info']['exitFlag'] in [1, 11]:
solution = False
obj = b @ y
assert obj < 0
y /= -obj
print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))
proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))
print('primal infeas dist from cone', np.linalg.norm(proj))
x = np.zeros_like(x)
s = np.zeros_like(s)
if sol['info']['exitFlag'] in [2, 12]:
solution = False
obj = c @ x
assert obj < 0
x /= -obj
s /= -obj
print('dual infeas. cert residual norm', np.linalg.norm(A @ x + s))
proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))
print('dual infeas cert dist from cone', np.linalg.norm(s - proj))
y = np.zeros_like(y)
z = xsy2z(x, s, y, tau=solution, kappa=not solution)
return z, sol['info']
<mask token>
| <mask token>
class SolverError(Exception):
pass
def scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):
"""Wraps scs.solve for convenience."""
scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q':
dim_dict['q'] if 'q' in dim_dict else [], 's': dim_dict['s'] if 's' in
dim_dict else [], 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,
'ed': dim_dict['ed'] if 'ed' in dim_dict else 0, 'f': dim_dict['z'] if
'z' in dim_dict else 0}
sol = scs.solve({'A': A, 'b': b, 'c': c}, cone=scs_cones, **kwargs)
info = sol['info']
if info['statusVal'] > 0:
z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1.0, kappa=0.0)
if info['statusVal'] < 0:
x = np.zeros_like(sol['x']) if np.any(np.isnan(sol['x'])) else sol['x']
s = np.zeros_like(sol['s']) if np.any(np.isnan(sol['s'])) else sol['s']
y = np.zeros_like(sol['y']) if np.any(np.isnan(sol['y'])) else sol['y']
if np.allclose(y, 0.0) and c @ x < 0:
obj = c @ x
x /= -obj
s /= -obj
if np.allclose(s, 0.0) and b @ y < 0:
obj = b @ y
y /= -obj
z = xsy2z(x, s, y, tau=0.0, kappa=1.0)
return z, info
def ecos_solve(A, b, c, dim_dict, **kwargs):
"""Wraps ecos.solve for convenience."""
ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q':
dim_dict['q'] if 'q' in dim_dict else []}
if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(
dim_dict['s']) > 0:
raise SolverError(
'Only zero, linear, and second order cones supported.')
zero = 0 if 'z' not in dim_dict else dim_dict['z']
ecos_A, ecos_G = A[:zero, :], A[zero:, :]
ecos_b, ecos_h = b[:zero], b[zero:]
sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, A=ecos_A, b=
ecos_b, **kwargs)
solution = True
x = sol['x']
s = np.concatenate([np.zeros(zero), sol['s']])
y = np.concatenate([sol['y'], sol['z']])
if sol['info']['exitFlag'] == 0:
print('prim abs res.', np.linalg.norm(A @ x + s - b))
print('dua abs res.', np.linalg.norm(A.T @ y + c))
print('s^T y', s @ y)
if sol['info']['exitFlag'] in [1, 11]:
solution = False
obj = b @ y
assert obj < 0
y /= -obj
print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))
proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))
print('primal infeas dist from cone', np.linalg.norm(proj))
x = np.zeros_like(x)
s = np.zeros_like(s)
if sol['info']['exitFlag'] in [2, 12]:
solution = False
obj = c @ x
assert obj < 0
x /= -obj
s /= -obj
print('dual infeas. cert residual norm', np.linalg.norm(A @ x + s))
proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))
print('dual infeas cert dist from cone', np.linalg.norm(s - proj))
y = np.zeros_like(y)
z = xsy2z(x, s, y, tau=solution, kappa=not solution)
return z, sol['info']
def solve(A, b, c, dim_dict, solver='scs', solver_options={},
refine_solver_time_ratio=1.0, max_iters=10, verbose=False,
max_lsqr_iters=20, return_z=False):
solver_start = time.time()
if solver == 'scs':
z, info = scs_solve(A, b, c, dim_dict, **solver_options)
elif solver == 'ecos':
z, info = ecos_solve(A, b, c, dim_dict, **solver_options)
else:
raise Exception('The only supported solvers are ecos and scs')
solver_time = time.time() - solver_start
A = sp.csc_matrix(A)
new_residual, u, v = residual_and_uv(z, (A.indptr, A.indices, A.data),
b, c, make_prod_cone_cache(dim_dict))
x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])
pres = np.linalg.norm(A @ x + s - b) / (1 + np.linalg.norm(b))
dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))
gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))
print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))
z_plus = refine(A, b, c, dim_dict, z, verbose=verbose, iters=max_iters,
lsqr_iters=max_lsqr_iters)
if return_z:
return z_plus, info
else:
new_residual, u, v = residual_and_uv(z_plus, (A.indptr, A.indices,
A.data), b, c, make_prod_cone_cache(dim_dict))
x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])
pres = np.linalg.norm(A @ x + s - b) / (1 + np.linalg.norm(b))
dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))
gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))
print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))
return x, s, y, info
| <mask token>
import numpy as np
import scs
import ecos
import time
from .problem import *
from .refine import *
class SolverError(Exception):
pass
def scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):
"""Wraps scs.solve for convenience."""
scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q':
dim_dict['q'] if 'q' in dim_dict else [], 's': dim_dict['s'] if 's' in
dim_dict else [], 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,
'ed': dim_dict['ed'] if 'ed' in dim_dict else 0, 'f': dim_dict['z'] if
'z' in dim_dict else 0}
sol = scs.solve({'A': A, 'b': b, 'c': c}, cone=scs_cones, **kwargs)
info = sol['info']
if info['statusVal'] > 0:
z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1.0, kappa=0.0)
if info['statusVal'] < 0:
x = np.zeros_like(sol['x']) if np.any(np.isnan(sol['x'])) else sol['x']
s = np.zeros_like(sol['s']) if np.any(np.isnan(sol['s'])) else sol['s']
y = np.zeros_like(sol['y']) if np.any(np.isnan(sol['y'])) else sol['y']
if np.allclose(y, 0.0) and c @ x < 0:
obj = c @ x
x /= -obj
s /= -obj
if np.allclose(s, 0.0) and b @ y < 0:
obj = b @ y
y /= -obj
z = xsy2z(x, s, y, tau=0.0, kappa=1.0)
return z, info
def ecos_solve(A, b, c, dim_dict, **kwargs):
"""Wraps ecos.solve for convenience."""
ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q':
dim_dict['q'] if 'q' in dim_dict else []}
if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(
dim_dict['s']) > 0:
raise SolverError(
'Only zero, linear, and second order cones supported.')
zero = 0 if 'z' not in dim_dict else dim_dict['z']
ecos_A, ecos_G = A[:zero, :], A[zero:, :]
ecos_b, ecos_h = b[:zero], b[zero:]
sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, A=ecos_A, b=
ecos_b, **kwargs)
solution = True
x = sol['x']
s = np.concatenate([np.zeros(zero), sol['s']])
y = np.concatenate([sol['y'], sol['z']])
if sol['info']['exitFlag'] == 0:
print('prim abs res.', np.linalg.norm(A @ x + s - b))
print('dua abs res.', np.linalg.norm(A.T @ y + c))
print('s^T y', s @ y)
if sol['info']['exitFlag'] in [1, 11]:
solution = False
obj = b @ y
assert obj < 0
y /= -obj
print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))
proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))
print('primal infeas dist from cone', np.linalg.norm(proj))
x = np.zeros_like(x)
s = np.zeros_like(s)
if sol['info']['exitFlag'] in [2, 12]:
solution = False
obj = c @ x
assert obj < 0
x /= -obj
s /= -obj
print('dual infeas. cert residual norm', np.linalg.norm(A @ x + s))
proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))
print('dual infeas cert dist from cone', np.linalg.norm(s - proj))
y = np.zeros_like(y)
z = xsy2z(x, s, y, tau=solution, kappa=not solution)
return z, sol['info']
def solve(A, b, c, dim_dict, solver='scs', solver_options={},
refine_solver_time_ratio=1.0, max_iters=10, verbose=False,
max_lsqr_iters=20, return_z=False):
solver_start = time.time()
if solver == 'scs':
z, info = scs_solve(A, b, c, dim_dict, **solver_options)
elif solver == 'ecos':
z, info = ecos_solve(A, b, c, dim_dict, **solver_options)
else:
raise Exception('The only supported solvers are ecos and scs')
solver_time = time.time() - solver_start
A = sp.csc_matrix(A)
new_residual, u, v = residual_and_uv(z, (A.indptr, A.indices, A.data),
b, c, make_prod_cone_cache(dim_dict))
x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])
pres = np.linalg.norm(A @ x + s - b) / (1 + np.linalg.norm(b))
dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))
gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))
print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))
z_plus = refine(A, b, c, dim_dict, z, verbose=verbose, iters=max_iters,
lsqr_iters=max_lsqr_iters)
if return_z:
return z_plus, info
else:
new_residual, u, v = residual_and_uv(z_plus, (A.indptr, A.indices,
A.data), b, c, make_prod_cone_cache(dim_dict))
x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])
pres = np.linalg.norm(A @ x + s - b) / (1 + np.linalg.norm(b))
dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))
gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))
print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))
return x, s, y, info
| """
Copyright 2019 Enzo Busseti, Walaa Moursi, and Stephen Boyd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#__all__ = ['solve']
import numpy as np
import scs
import ecos
import time
from .problem import *
from .refine import *
class SolverError(Exception):
pass
def scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):
"""Wraps scs.solve for convenience."""
scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0,
'q': dim_dict['q'] if 'q' in dim_dict else [],
's': dim_dict['s'] if 's' in dim_dict else [],
'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,
'ed': dim_dict['ed'] if 'ed' in dim_dict else 0,
'f': dim_dict['z'] if 'z' in dim_dict else 0}
#print('scs_cones', scs_cones)
sol = scs.solve({'A': A, 'b': b,
'c': c},
cone=scs_cones,
**kwargs)
info = sol['info']
if info['statusVal'] > 0:
z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1., kappa=0.)
if info['statusVal'] < 0:
x = np.zeros_like(sol['x']) \
if np.any(np.isnan(sol['x'])) else sol['x']
s = np.zeros_like(sol['s']) \
if np.any(np.isnan(sol['s'])) else sol['s']
y = np.zeros_like(sol['y']) \
if np.any(np.isnan(sol['y'])) else sol['y']
if np.allclose(y, 0.) and c@x < 0:
obj = c@x
# assert obj < 0
x /= -obj
s /= -obj
# print('primal res:', np.linalg.norm(A@x + s))
if np.allclose(s, 0.) and b@y < 0:
obj = b@y
# assert obj < 0
y /= -obj
# print('dual res:', np.linalg.norm(A.T@y))
# print('SCS NONSOLVED')
# print('x', x)
# print('s', s)
# print('y', y)
z = xsy2z(x, s, y, tau=0., kappa=1.)
return z, info
def ecos_solve(A, b, c, dim_dict, **kwargs):
"""Wraps ecos.solve for convenience."""
###
# ECOS uses a different definition of the exp cone,
# with y and z switched. In the future I might wrap it
# (i.e., switch rows of A and elements of b, and switch
# elements of the solutions s and y) but for now
# I'm not supporting exp cones in ecos.
###
ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0,
'q': dim_dict['q'] if 'q' in dim_dict else []} # ,
# 'e': dim_dict['ep'] if 'ep' in dim_dict else 0}
# print(ecos_cones)
if ('ep' in dim_dict and dim_dict['ep'] > 0
or 's' in dim_dict and len(dim_dict['s']) > 0):
raise SolverError(
'Only zero, linear, and second order cones supported.')
zero = 0 if 'z' not in dim_dict else dim_dict['z']
ecos_A, ecos_G = A[:zero, :], A[zero:, :]
ecos_b, ecos_h = b[:zero], b[zero:]
sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones,
A=ecos_A, b=ecos_b, **kwargs)
solution = True
x = sol['x']
s = np.concatenate([np.zeros(zero), sol['s']])
# not sure we can trust this
# s = b - A@x
y = np.concatenate([sol['y'], sol['z']])
if sol['info']['exitFlag'] == 0: # check that things make sense
print('prim abs res.', np.linalg.norm(A@x + s - b))
print('dua abs res.', np.linalg.norm(A.T@y + c))
print('s^T y', s@y)
if sol['info']['exitFlag'] in [1, 11]: # infeas
solution = False
obj = b@y
assert (obj < 0)
y /= -obj
print('primal infeas. cert residual norm', np.linalg.norm(A.T@y))
#cones = dim2cones(dim_dict)
proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))
print('primal infeas dist from cone', np.linalg.norm(proj))
# if not (np.linalg.norm(proj) == 0.) and sol['info']['exitFlag'] == 1.:
# raise SolverError
x = np.zeros_like(x)
s = np.zeros_like(s)
if sol['info']['exitFlag'] in [2, 12]: # unbound
solution = False
obj = c@x
assert (obj < 0)
x /= -obj
s /= -obj
print('dual infeas. cert residual norm', np.linalg.norm(A@x + s))
proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))
print('dual infeas cert dist from cone', np.linalg.norm(s - proj))
# if not (np.linalg.norm(s - proj) == 0.) and sol['info']['exitFlag'] == 2.:
# raise SolverError
y = np.zeros_like(y)
# print('ECOS SOLUTION')
# print('solution', solution)
# print('x', x)
# print('s', s)
# print('y', y)
z = xsy2z(x, s, y, tau=solution, kappa=not solution)
return z, sol['info']
def solve(A, b, c, dim_dict,
solver='scs',
solver_options={},
refine_solver_time_ratio=1.,
max_iters=10,
verbose=False,
max_lsqr_iters=20,
return_z=False):
solver_start = time.time()
if solver == 'scs':
z, info = scs_solve(A, b, c, dim_dict, **solver_options)
elif solver == 'ecos':
z, info = ecos_solve(A, b, c, dim_dict, **solver_options)
else:
raise Exception('The only supported solvers are ecos and scs')
solver_time = time.time() - solver_start
A = sp.csc_matrix(A)
#A_tr = sp.csc_matrix(A.T)
new_residual, u, v = residual_and_uv(
z, (A.indptr, A.indices, A.data), b, c, make_prod_cone_cache(dim_dict))
x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])
pres = np.linalg.norm(A@x + s - b) / (1 + np.linalg.norm(b))
dres = np.linalg.norm(A.T@y + c) / (1 + np.linalg.norm(c))
gap = np.abs(c@x + b@y) / (1 + np.abs(c@x) + np.abs(b@y))
print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))
z_plus = refine(A, b, c, dim_dict, z,
verbose=verbose,
iters=max_iters,
lsqr_iters=max_lsqr_iters) # ,
# max_runtime=solver_time * refine_solver_time_ratio)
if return_z:
return z_plus, info
else:
new_residual, u, v =\
residual_and_uv(z_plus, (A.indptr, A.indices, A.data), b, c,
make_prod_cone_cache(dim_dict))
x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])
pres = np.linalg.norm(A@x + s - b) / (1 + np.linalg.norm(b))
dres = np.linalg.norm(A.T@y + c) / (1 + np.linalg.norm(c))
gap = np.abs(c@x + b@y) / (1 + np.abs(c@x) + np.abs(b@y))
print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))
return x, s, y, info
| [
2,
3,
4,
5,
6
] |
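For reference, a toy problem in the (A, b, c, dim_dict) format the wrappers above consume — one zero-cone (equality) row followed by two nonnegative-orthant rows, i.e. minimise c @ x subject to A @ x + s = b with s in {0} x R+^2. Only numpy/scipy are needed to check feasibility; the commented call shows where scs_solve would take over. The cone-key conventions ('z', 'l') are read off the row above.

import numpy as np
import scipy.sparse as sp

A = sp.csc_matrix(np.array([[1.0, 1.0],     # x1 + x2 == 1  (zero cone)
                            [-1.0, 0.0],    # x1 >= 0       (linear cone)
                            [0.0, -1.0]]))  # x2 >= 0       (linear cone)
b = np.array([1.0, 0.0, 0.0])
c = np.array([1.0, 1.0])
dim_dict = {'z': 1, 'l': 2}

x = np.array([0.5, 0.5])   # a feasible point on the simplex
s = b - A @ x              # slack implied by A @ x + s = b
assert abs(s[0]) < 1e-12   # zero-cone component must vanish
assert np.all(s[1:] >= 0)  # linear-cone components must be nonnegative
# z, info = scs_solve(A, b, c, dim_dict)  # hand off to the wrapper above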
781 | f5c277da2b22debe26327464ae736892360059b4 | <mask token>
| <mask token>
plt.pcolor(df)
plt.colorbar()
plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)
plt.show()
| <mask token>
month = ['Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb',
'Mar', 'Apr', 'May']
df = pd.DataFrame([[53, 0, 5, 3, 3], [51, 0, 1, 3, 2], [70, 4, 7, 5, 1], [
66, 4, 1, 4, 2], [64, 4, 4, 3, 2], [69, 4, 7, 8, 2], [45, 2, 8, 4, 2],
[29, 1, 6, 6, 1], [56, 4, 4, 2, 2], [41, 2, 2, 2, 1], [3, 0, 0, 0, 0],
[8, 0, 0, 0, 0]], columns=['1000', '2000', '3000', '4000', '5000'],
index=month)
plt.pcolor(df)
plt.colorbar()
plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
month = ['Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb',
'Mar', 'Apr', 'May']
df = pd.DataFrame([[53, 0, 5, 3, 3], [51, 0, 1, 3, 2], [70, 4, 7, 5, 1], [
66, 4, 1, 4, 2], [64, 4, 4, 3, 2], [69, 4, 7, 8, 2], [45, 2, 8, 4, 2],
[29, 1, 6, 6, 1], [56, 4, 4, 2, 2], [41, 2, 2, 2, 1], [3, 0, 0, 0, 0],
[8, 0, 0, 0, 0]], columns=['1000', '2000', '3000', '4000', '5000'],
index=month)
plt.pcolor(df)
plt.colorbar()
plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)
plt.show()
| null | [
0,
1,
2,
3
] |
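For reference, the same month-by-bucket heatmap pattern with pcolormesh, which is generally faster than pcolor on regular grids; the 0.5 tick offsets centre the labels on the cells exactly as above. The small DataFrame here is placeholder data, not the counts from the row above.

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

df = pd.DataFrame(np.arange(12).reshape(4, 3),
                  columns=['1000', '2000', '3000'],
                  index=['Jun', 'Jul', 'Aug', 'Sep'])
plt.pcolormesh(df.values)
plt.colorbar()
plt.yticks(np.arange(0.5, len(df.index)), df.index)      # centre row labels
plt.xticks(np.arange(0.5, len(df.columns)), df.columns)  # centre column labels
plt.show()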
782 | 5c2a6802e89314c25f0264bbe2bc7ed2689a255a | <mask token>
| <mask token>
for i in range(n):
a[i] = int(a[i])
for i in range(n - 1):
for j in range(n - i - 1):
if a[j] > a[j + 1]:
a[j], a[j + 1] = a[j + 1], a[j]
print('Sorted array :', a)
| a = input('Please enter the elements with spaces between them:').split()
n = len(a)
for i in range(n):
a[i] = int(a[i])
for i in range(n - 1):
for j in range(n - i - 1):
if a[j] > a[j + 1]:
a[j], a[j + 1] = a[j + 1], a[j]
print('Sorted array :', a)
| a=input("Please enter the elements with spaces between them:").split()
n=len(a)
for i in range(n):
a[i]=int(a[i])
for i in range(n-1):
for j in range(n-i-1):
if a[j]>a[j+1]:
a[j],a[j+1]=a[j+1],a[j]
print("Sortes array :",a) | null | [
0,
1,
2,
3
] |
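For reference, an early-exit refinement of the bubble sort above: when a full inner pass performs no swap the list is already sorted and the remaining passes can be skipped, giving O(n) behaviour on already-sorted input.

def bubble_sort(a):
    n = len(a)
    for i in range(n - 1):
        swapped = False
        for j in range(n - i - 1):
            if a[j] > a[j + 1]:
                a[j], a[j + 1] = a[j + 1], a[j]
                swapped = True
        if not swapped:  # no swap in this pass: already sorted
            break
    return a

assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]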
783 | 20f0480ee7e0782b23ec8ade150cdd8d8ad718bb | <mask token>
def euler():
h = 0.1
x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
y_eval = [0.0]
delta_y = [0.0]
y_real = [0.0]
eps = [0.0]
for i in range(1, len(x)):
y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))
delta_y.append(h * fun(y_eval[i], x[i]))
y_real.append(real_fun(x[i]))
eps.append(abs(y_real[i] - y_eval[i]))
print(y_eval)
print(delta_y)
print(y_real)
print(eps)
data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]
for i in range(0, len(x)):
data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])
table = ff.create_table(data_matrix)
plot(table)
def fun(x, y):
return pow(x + y, 2)
<mask token>
| <mask token>
def euler():
h = 0.1
x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
y_eval = [0.0]
delta_y = [0.0]
y_real = [0.0]
eps = [0.0]
for i in range(1, len(x)):
y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))
delta_y.append(h * fun(y_eval[i], x[i]))
y_real.append(real_fun(x[i]))
eps.append(abs(y_real[i] - y_eval[i]))
print(y_eval)
print(delta_y)
print(y_real)
print(eps)
data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]
for i in range(0, len(x)):
data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])
table = ff.create_table(data_matrix)
plot(table)
def fun(x, y):
return pow(x + y, 2)
def real_fun(x):
return tan(x) - x
<mask token>
| <mask token>
def euler():
h = 0.1
x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
y_eval = [0.0]
delta_y = [0.0]
y_real = [0.0]
eps = [0.0]
for i in range(1, len(x)):
y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))
delta_y.append(h * fun(y_eval[i], x[i]))
y_real.append(real_fun(x[i]))
eps.append(abs(y_real[i] - y_eval[i]))
print(y_eval)
print(delta_y)
print(y_real)
print(eps)
data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]
for i in range(0, len(x)):
data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])
table = ff.create_table(data_matrix)
plot(table)
def fun(x, y):
return pow(x + y, 2)
def real_fun(x):
return tan(x) - x
euler()
| from math import pow
from math import tan
import plotly.figure_factory as ff
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
def euler():
h = 0.1
x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
y_eval = [0.0]
delta_y = [0.0]
y_real = [0.0]
eps = [0.0]
for i in range(1, len(x)):
y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))
delta_y.append(h * fun(y_eval[i], x[i]))
y_real.append(real_fun(x[i]))
eps.append(abs(y_real[i] - y_eval[i]))
print(y_eval)
print(delta_y)
print(y_real)
print(eps)
data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]
for i in range(0, len(x)):
data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])
table = ff.create_table(data_matrix)
plot(table)
def fun(x, y):
return pow(x + y, 2)
def real_fun(x):
return tan(x) - x
euler()
| from math import pow
from math import tan
import plotly.figure_factory as ff
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
def euler():
h = 0.1
x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
y_eval = [0.0]
delta_y = [0.0]
y_real = [0.0]
eps = [0.0]
for i in range(1, len(x)):
y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))
delta_y.append(h * fun(y_eval[i], x[i]))
y_real.append(real_fun(x[i]))
eps.append(abs(y_real[i] - y_eval[i]))
# print in table format
print(y_eval)
print(delta_y)
print(y_real)
print(eps)
data_matrix = [
['k', 'x', 'y', 'delta_y', 'y_real', 'eps']
]
for i in range(0, len(x)):
data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])
table = ff.create_table(data_matrix)
plot(table)
def fun(x, y):
return pow(x + y, 2)
def real_fun(x):
return tan(x) - x
euler()
| [
2,
3,
4,
5,
6
] |
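For reference, Heun's method (improved Euler) on the same IVP y' = (x + y)^2, y(0) = 0, whose exact solution is y = tan(x) - x; a plotly-free sketch showing the second-order scheme landing closer to the exact value at x = 0.5 than the forward-Euler iterate above.

from math import tan

def f(x, y):
    return (x + y) ** 2

h, x, y_euler, y_heun = 0.1, 0.0, 0.0, 0.0
for _ in range(5):  # five steps of size 0.1, integrating to x = 0.5
    y_euler += h * f(x, y_euler)
    k1 = f(x, y_heun)
    k2 = f(x + h, y_heun + h * k1)  # slope at the predicted endpoint
    y_heun += h * (k1 + k2) / 2.0   # trapezoidal average of the two slopes
    x += h

exact = tan(0.5) - 0.5
assert abs(y_heun - exact) < abs(y_euler - exact)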
784 | f145274c8caa1e725d12003874eb54a580a6e35e | <mask token>
| <mask token>
print(dic.get('country', 'Russia'))
<mask token>
print(dic)
| dic = {'city': 'Moscow', 'temperature': 20}
print(dic.get('country', 'Russia'))
dic['date'] = '27.05.2019'
print(dic)
| dic = {"city": "Moscow", "temperature": 20}
# print(dic["city"])
# dic["temperature"] -= 5
# print(dic)
print(dic.get("country", "Russia"))
dic["date"] = "27.05.2019"
print(dic) | null | [
0,
1,
2,
3
] |
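For reference, how the .get fallback used above differs from setdefault: .get never mutates the dictionary, while setdefault performs the lookup and the insertion in one step.

dic = {'city': 'Moscow', 'temperature': 20}
assert dic.get('country', 'Russia') == 'Russia'
assert 'country' not in dic                      # .get left dic unchanged
assert dic.setdefault('country', 'Russia') == 'Russia'
assert dic['country'] == 'Russia'                # setdefault inserted the key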
785 | 038b8206f77b325bf43fc753f6cee8b4278f4bc9 | <mask token>
def calculate_recovery_clifford(cl_in, desired_cl=0):
"""
Extracts the clifford that has to be applied to cl_in to make the net
operation correspond to desired_cl from the clifford lookuptable.
This operation should perform the inverse of calculate_net_clifford
"""
row = list(clifford_lookuptable[cl_in])
return row.index(desired_cl)
@deprecated(version='0.4', reason='not used within pyqed')
def decompose_clifford_seq(clifford_sequence, gate_decomposition=
gate_decomposition):
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
return decomposed_seq
<mask token>
def randomized_benchmarking_sequence(n_cl: int, desired_net_cl: int=0,
number_of_qubits: int=1, max_clifford_idx: int=11520, interleaving_cl:
int=None, seed: int=None) ->np.ndarray:
"""
Generates a randomized benchmarking sequence for the one or two qubit
clifford group.
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford, if None is
specified no recovery Clifford is calculated
number_of_qubits(int): used to determine if Cliffords are drawn
from the single qubit or two qubit clifford group.
max_clifford_idx (int): used to set the index of the highest random
clifford generated. Useful to generate e.g., simultaneous two
qubit RB sequences.
FIXME: seems useless, because none of the callers set this for real, and we trim it to the group size
interleaving_cl (int): interleaves the sequence with a specific
clifford if desired
seed (int) : seed used to initialize the random number
generator.
Returns:
list of clifford indices (ints)
N.B. in the case of the 1 qubit clifford group this function does the
same as "randomized_benchmarking_sequence_old" but
does not use the 24 by 24 lookuptable method to calculate the
net clifford. It instead uses the "Clifford" objects used in
constructing the two qubit Clifford classes.
The old method exists to establish the equivalence between the two methods.
"""
if number_of_qubits == 1:
Cl = SingleQubitClifford
group_size = np.min([24, max_clifford_idx])
elif number_of_qubits == 2:
Cl = TwoQubitClifford
group_size = np.min([11520, max_clifford_idx])
else:
raise NotImplementedError()
rng_seed = np.random.RandomState(seed)
rb_clifford_indices = rng_seed.randint(0, group_size, int(n_cl))
if interleaving_cl is not None:
rb_clif_ind_intl = np.empty(rb_clifford_indices.size * 2, dtype=int)
rb_clif_ind_intl[0::2] = rb_clifford_indices
rb_clif_ind_intl[1::2] = interleaving_cl
rb_clifford_indices = rb_clif_ind_intl
if desired_net_cl is not None:
net_clifford = calculate_net_clifford(rb_clifford_indices, Cl)
recovery_to_idx_clifford = net_clifford.get_inverse()
recovery_clifford = Cl(desired_net_cl) * recovery_to_idx_clifford
rb_clifford_indices = np.append(rb_clifford_indices,
recovery_clifford.idx)
return rb_clifford_indices
| <mask token>
def calculate_recovery_clifford(cl_in, desired_cl=0):
"""
Extracts the clifford that has to be applied to cl_in to make the net
operation correspond to desired_cl from the clifford lookuptable.
This operation should perform the inverse of calculate_net_clifford
"""
row = list(clifford_lookuptable[cl_in])
return row.index(desired_cl)
@deprecated(version='0.4', reason='not used within pyqed')
def decompose_clifford_seq(clifford_sequence, gate_decomposition=
gate_decomposition):
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
return decomposed_seq
@deprecated(version='0.4', reason='not used within pyqed')
def convert_clifford_sequence_to_tape(clifford_sequence, lutmapping,
gate_decomposition=gate_decomposition):
"""
Converts a list of qubit operations to the relevant pulse elements
This method will be overwritten depending on the hardware implementation.
"""
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
tape = []
for g in decomposed_seq:
tape.append(lutmapping.index(g))
return tape
def randomized_benchmarking_sequence_old(n_cl: int, desired_net_cl: int=0,
seed: int=None):
"""
Generates a sequence of "n_cl" random single qubit Cliffords followed
by a recovery Clifford to make the net result correspond
to the "desired_net_cl".
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford
seed (int) : seed used to initialize the random number
generator.
The default behaviour is that the net clifford corresponds to an
identity ("0"). If you want e.g. an inverting sequence you should set
the desired_net_cl to "3" (corresponds to Pauli X).
"""
logging.warning(
'deprecation warning, only exists for testing equivalence to new function.'
)
if seed is None:
rb_cliffords = np.random.randint(0, 24, int(n_cl))
else:
rng_seed = np.random.RandomState(seed)
rb_cliffords = rng_seed.randint(0, 24, int(n_cl))
net_clifford = calculate_net_clifford(rb_cliffords).idx
recovery_clifford = calculate_recovery_clifford(net_clifford,
desired_net_cl)
rb_cliffords = np.append(rb_cliffords, recovery_clifford)
return rb_cliffords
def randomized_benchmarking_sequence(n_cl: int, desired_net_cl: int=0,
number_of_qubits: int=1, max_clifford_idx: int=11520, interleaving_cl:
int=None, seed: int=None) ->np.ndarray:
"""
Generates a randomized benchmarking sequence for the one or two qubit
clifford group.
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford, if None is
specified no recovery Clifford is calculated
number_of_qubits(int): used to determine if Cliffords are drawn
from the single qubit or two qubit clifford group.
max_clifford_idx (int): used to set the index of the highest random
clifford generated. Useful to generate e.g., simultaneous two
qubit RB sequences.
FIXME: seems useless, because none of the callers set this for real, and we trim it to the group size
interleaving_cl (int): interleaves the sequence with a specific
clifford if desired
seed (int) : seed used to initialize the random number
generator.
Returns:
list of clifford indices (ints)
N.B. in the case of the 1 qubit clifford group this function does the
same as "randomized_benchmarking_sequence_old" but
does not use the 24 by 24 lookuptable method to calculate the
net clifford. It instead uses the "Clifford" objects used in
constructing the two qubit Clifford classes.
The old method exists to establish the equivalence between the two methods.
"""
if number_of_qubits == 1:
Cl = SingleQubitClifford
group_size = np.min([24, max_clifford_idx])
elif number_of_qubits == 2:
Cl = TwoQubitClifford
group_size = np.min([11520, max_clifford_idx])
else:
raise NotImplementedError()
rng_seed = np.random.RandomState(seed)
rb_clifford_indices = rng_seed.randint(0, group_size, int(n_cl))
if interleaving_cl is not None:
rb_clif_ind_intl = np.empty(rb_clifford_indices.size * 2, dtype=int)
rb_clif_ind_intl[0::2] = rb_clifford_indices
rb_clif_ind_intl[1::2] = interleaving_cl
rb_clifford_indices = rb_clif_ind_intl
if desired_net_cl is not None:
net_clifford = calculate_net_clifford(rb_clifford_indices, Cl)
recovery_to_idx_clifford = net_clifford.get_inverse()
recovery_clifford = Cl(desired_net_cl) * recovery_to_idx_clifford
rb_clifford_indices = np.append(rb_clifford_indices,
recovery_clifford.idx)
return rb_clifford_indices
| <mask token>
def calculate_net_clifford(rb_clifford_indices: np.ndarray, Cliff: Clifford
=SingleQubitClifford) ->Clifford:
"""
Calculate the net-clifford from a list of cliffords indices.
Args:
rb_clifford_indices: list or array of integers specifying the cliffords.
Cliff : Clifford object used to determine what
inversion technique to use and what indices are valid.
Valid choices are `SingleQubitClifford` and `TwoQubitClifford`
Returns:
net_clifford: a `Clifford` object containing the net-clifford.
the Clifford index is contained in the Clifford.idx attribute.
Note: the order corresponds to the order in a pulse sequence but is
the reverse of what it would be in a chained dot product.
"""
net_clifford = Cliff(0)
for idx in rb_clifford_indices:
assert idx > -1, 'The convention for interleaved gates has changed! ' + 'See notes in this function. ' + 'You probably need to specify {}'.format(
100000 + abs(idx))
cliff = Cliff(idx % 100000)
net_clifford = cliff * net_clifford
return net_clifford
def calculate_recovery_clifford(cl_in, desired_cl=0):
"""
Extracts the clifford that has to be applied to cl_in to make the net
operation correspond to desired_cl from the clifford lookuptable.
This operation should perform the inverse of calculate_net_clifford
"""
row = list(clifford_lookuptable[cl_in])
return row.index(desired_cl)
@deprecated(version='0.4', reason='not used within pyqed')
def decompose_clifford_seq(clifford_sequence, gate_decomposition=
gate_decomposition):
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
return decomposed_seq
@deprecated(version='0.4', reason='not used within pyqed')
def convert_clifford_sequence_to_tape(clifford_sequence, lutmapping,
gate_decomposition=gate_decomposition):
"""
Converts a list of qubit operations to the relevant pulse elements
This method will be overwritten depending on the hardware implementation.
"""
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
tape = []
for g in decomposed_seq:
tape.append(lutmapping.index(g))
return tape
def randomized_benchmarking_sequence_old(n_cl: int, desired_net_cl: int=0,
seed: int=None):
"""
Generates a sequence of "n_cl" random single qubit Cliffords followed
by a recovery Clifford to make the net result correspond
to the "desired_net_cl".
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford
seed (int) : seed used to initialize the random number
generator.
The default behaviour is that the net clifford corresponds to an
identity ("0"). If you want e.g. an inverting sequence you should set
the desired_net_cl to "3" (corresponds to Pauli X).
"""
logging.warning(
'deprecation warning, only exists for testing equivalence to new function.'
)
if seed is None:
rb_cliffords = np.random.randint(0, 24, int(n_cl))
else:
rng_seed = np.random.RandomState(seed)
rb_cliffords = rng_seed.randint(0, 24, int(n_cl))
net_clifford = calculate_net_clifford(rb_cliffords).idx
recovery_clifford = calculate_recovery_clifford(net_clifford,
desired_net_cl)
rb_cliffords = np.append(rb_cliffords, recovery_clifford)
return rb_cliffords
def randomized_benchmarking_sequence(n_cl: int, desired_net_cl: int=0,
number_of_qubits: int=1, max_clifford_idx: int=11520, interleaving_cl:
int=None, seed: int=None) ->np.ndarray:
"""
Generates a randomized benchmarking sequence for the one or two qubit
clifford group.
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford, if None is
specified no recovery Clifford is calculated
number_of_qubits(int): used to determine if Cliffords are drawn
from the single qubit or two qubit clifford group.
max_clifford_idx (int): used to set the index of the highest random
clifford generated. Useful to generate e.g., simultaneous two
qubit RB sequences.
FIXME: seems useless, because none of the callers set this for real, and we trim it to the group size
interleaving_cl (int): interleaves the sequence with a specific
clifford if desired
seed (int) : seed used to initialize the random number
generator.
Returns:
list of clifford indices (ints)
N.B. in the case of the 1 qubit clifford group this function does the
same as "randomized_benchmarking_sequence_old" but
does not use the 24 by 24 lookuptable method to calculate the
net clifford. It instead uses the "Clifford" objects used in
constructing the two qubit Clifford classes.
The old method exists to establish the equivalence between the two methods.
"""
if number_of_qubits == 1:
Cl = SingleQubitClifford
group_size = np.min([24, max_clifford_idx])
elif number_of_qubits == 2:
Cl = TwoQubitClifford
group_size = np.min([11520, max_clifford_idx])
else:
raise NotImplementedError()
rng_seed = np.random.RandomState(seed)
rb_clifford_indices = rng_seed.randint(0, group_size, int(n_cl))
if interleaving_cl is not None:
rb_clif_ind_intl = np.empty(rb_clifford_indices.size * 2, dtype=int)
rb_clif_ind_intl[0::2] = rb_clifford_indices
rb_clif_ind_intl[1::2] = interleaving_cl
rb_clifford_indices = rb_clif_ind_intl
if desired_net_cl is not None:
net_clifford = calculate_net_clifford(rb_clifford_indices, Cl)
recovery_to_idx_clifford = net_clifford.get_inverse()
recovery_clifford = Cl(desired_net_cl) * recovery_to_idx_clifford
rb_clifford_indices = np.append(rb_clifford_indices,
recovery_clifford.idx)
return rb_clifford_indices
| import logging
import numpy as np
from deprecated import deprecated
from pycqed.measurement.randomized_benchmarking.clifford_group import clifford_lookuptable
from pycqed.measurement.randomized_benchmarking.clifford_decompositions import gate_decomposition
from pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group import Clifford, SingleQubitClifford, TwoQubitClifford
def calculate_net_clifford(rb_clifford_indices: np.ndarray, Cliff: Clifford
=SingleQubitClifford) ->Clifford:
"""
Calculate the net-clifford from a list of cliffords indices.
Args:
rb_clifford_indices: list or array of integers specifying the cliffords.
Cliff : Clifford object used to determine what
inversion technique to use and what indices are valid.
Valid choices are `SingleQubitClifford` and `TwoQubitClifford`
Returns:
net_clifford: a `Clifford` object containing the net-clifford.
the Clifford index is contained in the Clifford.idx attribute.
Note: the order corresponds to the order in a pulse sequence but is
the reverse of what it would be in a chained dot product.
"""
net_clifford = Cliff(0)
for idx in rb_clifford_indices:
assert idx > -1, 'The convention for interleaved gates has changed! ' + 'See notes in this function. ' + 'You probably need to specify {}'.format(
100000 + abs(idx))
cliff = Cliff(idx % 100000)
net_clifford = cliff * net_clifford
return net_clifford
def calculate_recovery_clifford(cl_in, desired_cl=0):
"""
Extracts the clifford that has to be applied to cl_in to make the net
operation correspond to desired_cl from the clifford lookuptable.
This operation should perform the inverse of calculate_net_clifford
"""
row = list(clifford_lookuptable[cl_in])
return row.index(desired_cl)
@deprecated(version='0.4', reason='not used within pyqed')
def decompose_clifford_seq(clifford_sequence, gate_decomposition=
gate_decomposition):
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
return decomposed_seq
@deprecated(version='0.4', reason='not used within pyqed')
def convert_clifford_sequence_to_tape(clifford_sequence, lutmapping,
gate_decomposition=gate_decomposition):
"""
Converts a list of qubit operations to the relevant pulse elements
This method will be overwritten depending on the hardware implementation.
"""
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
tape = []
for g in decomposed_seq:
tape.append(lutmapping.index(g))
return tape
def randomized_benchmarking_sequence_old(n_cl: int, desired_net_cl: int=0,
seed: int=None):
"""
Generates a sequence of "n_cl" random single qubit Cliffords followed
by a recovery Clifford to make the net result correspond
to the "desired_net_cl".
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford
seed (int) : seed used to initialize the random number
generator.
The default behaviour is that the net clifford corresponds to an
identity ("0"). If you want e.g. an inverting sequence you should set
the desired_net_cl to "3" (corresponds to Pauli X).
"""
logging.warning(
'deprecation warning, only exists for testing equivalence to new function.'
)
if seed is None:
rb_cliffords = np.random.randint(0, 24, int(n_cl))
else:
rng_seed = np.random.RandomState(seed)
rb_cliffords = rng_seed.randint(0, 24, int(n_cl))
net_clifford = calculate_net_clifford(rb_cliffords).idx
recovery_clifford = calculate_recovery_clifford(net_clifford,
desired_net_cl)
rb_cliffords = np.append(rb_cliffords, recovery_clifford)
return rb_cliffords
def randomized_benchmarking_sequence(n_cl: int, desired_net_cl: int=0,
number_of_qubits: int=1, max_clifford_idx: int=11520, interleaving_cl:
int=None, seed: int=None) ->np.ndarray:
"""
Generates a randomized benchmarking sequence for the one or two qubit
clifford group.
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford, if None is
specified no recovery Clifford is calculated
number_of_qubits(int): used to determine if Cliffords are drawn
from the single qubit or two qubit clifford group.
max_clifford_idx (int): used to set the index of the highest random
clifford generated. Useful to generate e.g., simultaneous two
qubit RB sequences.
FIXME: seems useless, because none of the callers set this for real, and we trim it to the group size
interleaving_cl (int): interleaves the sequence with a specific
clifford if desired
seed (int) : seed used to initialize the random number
generator.
Returns:
list of clifford indices (ints)
N.B. in the case of the 1 qubit clifford group this function does the
same as "randomized_benchmarking_sequence_old" but
does not use the 24 by 24 lookuptable method to calculate the
net clifford. It instead uses the "Clifford" objects used in
constructing the two qubit Clifford classes.
The old method exists to establish the equivalence between the two methods.
"""
if number_of_qubits == 1:
Cl = SingleQubitClifford
group_size = np.min([24, max_clifford_idx])
elif number_of_qubits == 2:
Cl = TwoQubitClifford
group_size = np.min([11520, max_clifford_idx])
else:
raise NotImplementedError()
rng_seed = np.random.RandomState(seed)
rb_clifford_indices = rng_seed.randint(0, group_size, int(n_cl))
if interleaving_cl is not None:
rb_clif_ind_intl = np.empty(rb_clifford_indices.size * 2, dtype=int)
rb_clif_ind_intl[0::2] = rb_clifford_indices
rb_clif_ind_intl[1::2] = interleaving_cl
rb_clifford_indices = rb_clif_ind_intl
if desired_net_cl is not None:
net_clifford = calculate_net_clifford(rb_clifford_indices, Cl)
recovery_to_idx_clifford = net_clifford.get_inverse()
recovery_clifford = Cl(desired_net_cl) * recovery_to_idx_clifford
rb_clifford_indices = np.append(rb_clifford_indices,
recovery_clifford.idx)
return rb_clifford_indices
| import logging
import numpy as np
from deprecated import deprecated
from pycqed.measurement.randomized_benchmarking.clifford_group import clifford_lookuptable
from pycqed.measurement.randomized_benchmarking.clifford_decompositions import gate_decomposition
from pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group import Clifford, SingleQubitClifford, TwoQubitClifford
def calculate_net_clifford(
rb_clifford_indices: np.ndarray,
Cliff:Clifford = SingleQubitClifford
) -> Clifford:
"""
Calculate the net-clifford from a list of cliffords indices.
Args:
rb_clifford_indices: list or array of integers specifying the cliffords.
Cliff : Clifford object used to determine what
inversion technique to use and what indices are valid.
Valid choices are `SingleQubitClifford` and `TwoQubitClifford`
Returns:
net_clifford: a `Clifford` object containing the net-clifford.
the Clifford index is contained in the Clifford.idx attribute.
Note: the order corresponds to the order in a pulse sequence but is
the reverse of what it would be in a chained dot product.
"""
# Calculate the net clifford
net_clifford = Cliff(0) # assumes element 0 is the Identity
for idx in rb_clifford_indices:
# [2020-07-03 Victor] the `abs` below was to remove the sign that was
# used to treat CZ as CZ and not the member of CNOT-like set of gates
# Using negative sign convention (i.e. `-4368` for the interleaved CZ)
# was a bad choice because there is no such thing as negative zero and
# the clifford numer 0 is the identity that is necessary for
# benchmarking an idling identity with the same duration as the time
# allocated to the flux pulses, for example
# cliff = Clifford(abs(idx)) # Deprecated!
assert idx > -1, (
"The convention for interleaved gates has changed! "
+ "See notes in this function. "
+ "You probably need to specify {}".format(100_000 + abs(idx))
)
# In order to benchmark specific gates (and not cliffords), e.g. CZ but
# not as a member of the CNOT-like set of gates, or an identity with
# the same duration as the CZ we use, by convention, when specifying
# the interleaved gate, the index of the corresponding
# clifford + 100000, this is to keep it readable and bigger than the
# 11520 elements of the Two-qubit Clifford group C2
# corresponding clifford
cliff = Cliff(idx % 100_000)
# order of operators applied in is right to left, therefore
# the new operator is applied on the left side.
net_clifford = cliff * net_clifford
return net_clifford
# FIXME: deprecate along with randomized_benchmarking_sequence_old()
def calculate_recovery_clifford(cl_in, desired_cl=0):
"""
Extracts the clifford that has to be applied to cl_in to make the net
operation correspond to desired_cl from the clifford lookuptable.
This operation should perform the inverse of calculate_net_clifford
"""
row = list(clifford_lookuptable[cl_in])
return row.index(desired_cl)
@deprecated(version='0.4', reason='not used within pyqed')
def decompose_clifford_seq(clifford_sequence, gate_decomposition=gate_decomposition):
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
return decomposed_seq
@deprecated(version='0.4', reason='not used within pyqed')
def convert_clifford_sequence_to_tape(
clifford_sequence, lutmapping, gate_decomposition=gate_decomposition
):
"""
Converts a list of qubit operations to the relevant pulse elements
This method will be overwritten depending on the hardware implementation.
"""
# This is intended to replace the block below but not done because
# I cannot test it at this moment (MAR)
# decomposed_seq = decompose_clifford_seq(clifford_sequence,
# gate_decomposition)
decomposed_seq = []
for cl in clifford_sequence:
decomposed_seq.extend(gate_decomposition[cl])
tape = []
for g in decomposed_seq:
tape.append(lutmapping.index(g))
return tape
# FIXME: deprecate, also including calculate_recovery_clifford() and clifford_lookuptable
def randomized_benchmarking_sequence_old(
n_cl: int, desired_net_cl: int = 0, seed: int = None
):
"""
Generates a sequence of "n_cl" random single qubit Cliffords followed
by a recovery Clifford to make the net result correspond
to the "desired_net_cl".
Args:
n_cl (int) : number of Cliffords
desired_net_cl (int) : idx of the desired net clifford
seed (int) : seed used to initialize the random number
generator.
The default behaviour is that the net clifford corresponds to an
identity ("0"). If you want e.g. an inverting sequence you should set
the desired_net_cl to "3" (corresponds to Pauli X).
"""
logging.warning(
"deprecation warning, only exists for testing equivalence to new function."
)
if seed is None:
rb_cliffords = np.random.randint(0, 24, int(n_cl))
else:
rng_seed = np.random.RandomState(seed)
rb_cliffords = rng_seed.randint(0, 24, int(n_cl))
net_clifford = calculate_net_clifford(rb_cliffords).idx
recovery_clifford = calculate_recovery_clifford(net_clifford, desired_net_cl)
rb_cliffords = np.append(rb_cliffords, recovery_clifford)
return rb_cliffords
##############################################################################
# New style RB sequences (using the hash-table method) compatible
# with Clifford object.
# More advanced sequences are available using this method.
##############################################################################
def randomized_benchmarking_sequence(
n_cl: int,
desired_net_cl: int = 0,
number_of_qubits: int = 1,
max_clifford_idx: int = 11520,
interleaving_cl: int = None,
seed: int = None,
) -> np.ndarray:
"""
Generates a randomized benchmarking sequence for the one or two qubit
clifford group.
Args:
n_cl (int) : number of Cliffords
        desired_net_cl (int) : idx of the desired net clifford; if None is
            specified, no recovery Clifford is calculated
number_of_qubits(int): used to determine if Cliffords are drawn
from the single qubit or two qubit clifford group.
max_clifford_idx (int): used to set the index of the highest random
clifford generated. Useful to generate e.g., simultaneous two
qubit RB sequences.
FIXME: seems useless, because none of the callers set this for real, and we trim it to the group size
interleaving_cl (int): interleaves the sequence with a specific
clifford if desired
seed (int) : seed used to initialize the random number
generator.
Returns:
list of clifford indices (ints)
N.B. in the case of the 1 qubit clifford group this function does the
same as "randomized_benchmarking_sequence_old" but
    does not use the 24 by 24 lookup table method to calculate the
net clifford. It instead uses the "Clifford" objects used in
constructing the two qubit Clifford classes.
The old method exists to establish the equivalence between the two methods.
"""
if number_of_qubits == 1:
Cl = SingleQubitClifford
group_size = np.min([24, max_clifford_idx])
elif number_of_qubits == 2:
Cl = TwoQubitClifford
group_size = np.min([11520, max_clifford_idx])
else:
raise NotImplementedError()
# Generate a random sequence of Cliffords
# Even if no seed is provided make sure we pick a new state such that
    # it is safe to generate and compile the random sequences in
# parallel using multiprocess
rng_seed = np.random.RandomState(seed)
rb_clifford_indices = rng_seed.randint(0, group_size, int(n_cl))
# Add interleaving cliffords if applicable
if interleaving_cl is not None:
rb_clif_ind_intl = np.empty(rb_clifford_indices.size * 2, dtype=int)
rb_clif_ind_intl[0::2] = rb_clifford_indices
rb_clif_ind_intl[1::2] = interleaving_cl
rb_clifford_indices = rb_clif_ind_intl
if desired_net_cl is not None:
# Calculate the net clifford
net_clifford = calculate_net_clifford(rb_clifford_indices, Cl)
# determine the inverse of the sequence
recovery_to_idx_clifford = net_clifford.get_inverse()
recovery_clifford = Cl(desired_net_cl) * recovery_to_idx_clifford
rb_clifford_indices = np.append(rb_clifford_indices, recovery_clifford.idx)
return rb_clifford_indices
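# Minimal usage sketch (added for illustration; not part of the original
# module): generate a short single-qubit RB sequence and verify that the
# appended recovery Clifford brings the net operation back to the identity.
# The n_cl and seed values are arbitrary.
if __name__ == "__main__":
    demo_seq = randomized_benchmarking_sequence(
        n_cl=20, desired_net_cl=0, number_of_qubits=1, seed=42
    )
    # the net Clifford of the full sequence, recovery included, is identity
    assert calculate_net_clifford(demo_seq, SingleQubitClifford).idx == 0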
| [
3,
5,
6,
7,
8
] |
786 | 3421c3b839721694945bdbb4f17183bceaed5296 | <mask token>
class MyDictTestCase(unittest.TestCase):
<mask token>
<mask token>
| <mask token>
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])
expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict1, expectedDict1)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',
'd', 'e', 'f'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict2, expectedDict2)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',
'b', 'c'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,
(6): None, (7): None}
self.assertEqual(actualDict2, expectedDict2)
<mask token>
| <mask token>
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])
expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict1, expectedDict1)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',
'd', 'e', 'f'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict2, expectedDict2)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',
'b', 'c'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,
(6): None, (7): None}
self.assertEqual(actualDict2, expectedDict2)
if __name__ == '__main__':
unittest.main()
| import unittest
import ConvertListToDict as cldf
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])
expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict1, expectedDict1)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',
'd', 'e', 'f'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict2, expectedDict2)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',
'b', 'c'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,
(6): None, (7): None}
self.assertEqual(actualDict2, expectedDict2)
if __name__ == '__main__':
unittest.main()
| import unittest
import ConvertListToDict as cldf
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
# Testcase1 (len(keys) == len(values))
actualDict1 = cldf.ConvertListsToDict([1, 2, 3],['a','b','c'])
expectedDict1 = {1: 'a', 2: 'b', 3: 'c'}
self.assertEqual(actualDict1, expectedDict1)
# Testcase2 (len(keys) < len(values))
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c','d','e','f'])
expectedDict2 = {1: 'a', 2: 'b', 3: 'c'}
self.assertEqual(actualDict2, expectedDict2)
# Testcase3 (len(keys) > len(values))
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a', 'b', 'c'])
expectedDict2 = {1: 'a', 2: 'b', 3: 'c', 4: None, 5: None, 6: None, 7: None}
self.assertEqual(actualDict2, expectedDict2)
if __name__ == '__main__':
unittest.main()
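# The ConvertListToDict module under test is not shown in this file; a
# minimal implementation that would satisfy all three cases above
# (truncating surplus values, padding missing keys' values with None) is
# this sketch:
#
#   from itertools import zip_longest
#
#   def ConvertListsToDict(keys, values):
#       # pair keys with values; zip_longest pads with None when values run out
#       return dict(zip_longest(keys, values[:len(keys)]))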
| [
1,
2,
3,
4,
5
] |
787 | 1ea61ab4003de80ffe9fb3e284b6686d4bf20b15 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Restaurant', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('restaurant_name', models.CharField(
max_length=200)), ('city', models.CharField(max_length=200)), (
'district', models.CharField(max_length=200)), ('rating', models.
FloatField(validators=[django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(5)])), ('photo', models.
ImageField(upload_to='uploads'))]), migrations.CreateModel(name=
'Reservation', fields=[('id', models.BigAutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('adult',
models.IntegerField()), ('entry_date', models.DateTimeField()), (
'restaurant', models.ForeignKey(on_delete=django.db.models.deletion
.CASCADE, to='restaurant.restaurant'))])]
| import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Restaurant', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('restaurant_name', models.CharField(
max_length=200)), ('city', models.CharField(max_length=200)), (
'district', models.CharField(max_length=200)), ('rating', models.
FloatField(validators=[django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(5)])), ('photo', models.
ImageField(upload_to='uploads'))]), migrations.CreateModel(name=
'Reservation', fields=[('id', models.BigAutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('adult',
models.IntegerField()), ('entry_date', models.DateTimeField()), (
'restaurant', models.ForeignKey(on_delete=django.db.models.deletion
.CASCADE, to='restaurant.restaurant'))])]
| # Generated by Django 3.2.3 on 2021-08-26 09:18
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Restaurant',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('restaurant_name', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('district', models.CharField(max_length=200)),
('rating', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('photo', models.ImageField(upload_to='uploads')),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('adult', models.IntegerField()),
('entry_date', models.DateTimeField()),
('restaurant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurant.restaurant')),
],
),
]
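# For reference, a models.py sketch (an inference from the operations above,
# not shipped with this migration) that `makemigrations` would turn into
# these CreateModel operations:
#
#   from django.core.validators import MinValueValidator, MaxValueValidator
#   from django.db import models
#
#   class Restaurant(models.Model):
#       restaurant_name = models.CharField(max_length=200)
#       city = models.CharField(max_length=200)
#       district = models.CharField(max_length=200)
#       rating = models.FloatField(
#           validators=[MinValueValidator(0), MaxValueValidator(5)])
#       photo = models.ImageField(upload_to='uploads')
#
#   class Reservation(models.Model):
#       adult = models.IntegerField()
#       entry_date = models.DateTimeField()
#       restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE)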
| [
0,
1,
2,
3,
4
] |
788 | 6db7189d26c63ca9f9667045b780ec11994bac28 | <mask token>
class PageRankReader(Reader):
def read_num_of_vertices(self):
line = self.config_fp.readline()
return int(line)
def read_vertex(self):
line = self.graph_fp.readline()
if not line:
return None
line = line.strip().split(':')
vertex_id = int(line[0])
edges = []
if line[1]:
for e in line[1].split(' '):
edges.append(Edge(int(e), None))
return PageRankVertex(vertex_id, None, edges)
class PageRankWriter(Writer):
def write_vertex(self, vertex):
return vertex.get_vertex_id(), str(vertex.get_value())
class PageRankCombiner(Combiner):
def combine(self, msg_x, msg_y):
msg_x_value = msg_x[1]
msg_y_value = msg_y[1]
return None, msg_x_value + msg_y_value
<mask token>
| <mask token>
class PageRankVertex(Vertex):
def compute(self):
if self.superstep() >= 1:
s = 0
while self.has_message():
msg = self.get_message()
s += msg
self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)
if self.superstep() < 30:
n = len(self.get_out_edges())
if n > 0:
self.send_message_to_all_neighbors(self.get_value() / n)
else:
self.vote_to_halt()
class PageRankReader(Reader):
def read_num_of_vertices(self):
line = self.config_fp.readline()
return int(line)
def read_vertex(self):
line = self.graph_fp.readline()
if not line:
return None
line = line.strip().split(':')
vertex_id = int(line[0])
edges = []
if line[1]:
for e in line[1].split(' '):
edges.append(Edge(int(e), None))
return PageRankVertex(vertex_id, None, edges)
class PageRankWriter(Writer):
def write_vertex(self, vertex):
return vertex.get_vertex_id(), str(vertex.get_value())
class PageRankCombiner(Combiner):
def combine(self, msg_x, msg_y):
msg_x_value = msg_x[1]
msg_y_value = msg_y[1]
return None, msg_x_value + msg_y_value
<mask token>
| <mask token>
class PageRankVertex(Vertex):
def compute(self):
if self.superstep() >= 1:
s = 0
while self.has_message():
msg = self.get_message()
s += msg
self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)
if self.superstep() < 30:
n = len(self.get_out_edges())
if n > 0:
self.send_message_to_all_neighbors(self.get_value() / n)
else:
self.vote_to_halt()
class PageRankReader(Reader):
def read_num_of_vertices(self):
line = self.config_fp.readline()
return int(line)
def read_vertex(self):
line = self.graph_fp.readline()
if not line:
return None
line = line.strip().split(':')
vertex_id = int(line[0])
edges = []
if line[1]:
for e in line[1].split(' '):
edges.append(Edge(int(e), None))
return PageRankVertex(vertex_id, None, edges)
class PageRankWriter(Writer):
def write_vertex(self, vertex):
return vertex.get_vertex_id(), str(vertex.get_value())
class PageRankCombiner(Combiner):
def combine(self, msg_x, msg_y):
msg_x_value = msg_x[1]
msg_y_value = msg_y[1]
return None, msg_x_value + msg_y_value
def main():
if len(sys.argv) < 4:
print('usage: python %s [config] [graph] [out_file]' % sys.argv[0])
return
pagerank_reader = PageRankReader(sys.argv[1], sys.argv[2])
pagerank_writer = PageRankWriter(sys.argv[3])
pagerank_combiner = PageRankCombiner()
pagerank = Pypregel(reader=pagerank_reader, writer=pagerank_writer,
combiner=pagerank_combiner)
pagerank.run()
<mask token>
| import sys
from pypregel import Pypregel
from pypregel.vertex import Vertex, Edge
from pypregel.reader import Reader
from pypregel.writer import Writer
from pypregel.combiner import Combiner
class PageRankVertex(Vertex):
def compute(self):
if self.superstep() >= 1:
s = 0
while self.has_message():
msg = self.get_message()
s += msg
self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)
if self.superstep() < 30:
n = len(self.get_out_edges())
if n > 0:
self.send_message_to_all_neighbors(self.get_value() / n)
else:
self.vote_to_halt()
class PageRankReader(Reader):
def read_num_of_vertices(self):
line = self.config_fp.readline()
return int(line)
def read_vertex(self):
line = self.graph_fp.readline()
if not line:
return None
line = line.strip().split(':')
vertex_id = int(line[0])
edges = []
if line[1]:
for e in line[1].split(' '):
edges.append(Edge(int(e), None))
return PageRankVertex(vertex_id, None, edges)
class PageRankWriter(Writer):
def write_vertex(self, vertex):
return vertex.get_vertex_id(), str(vertex.get_value())
class PageRankCombiner(Combiner):
def combine(self, msg_x, msg_y):
msg_x_value = msg_x[1]
msg_y_value = msg_y[1]
return None, msg_x_value + msg_y_value
def main():
if len(sys.argv) < 4:
print('usage: python %s [config] [graph] [out_file]' % sys.argv[0])
return
pagerank_reader = PageRankReader(sys.argv[1], sys.argv[2])
pagerank_writer = PageRankWriter(sys.argv[3])
pagerank_combiner = PageRankCombiner()
pagerank = Pypregel(reader=pagerank_reader, writer=pagerank_writer,
combiner=pagerank_combiner)
pagerank.run()
if __name__ == '__main__':
main()
| import sys
from pypregel import Pypregel
from pypregel.vertex import Vertex, Edge
from pypregel.reader import Reader
from pypregel.writer import Writer
from pypregel.combiner import Combiner
class PageRankVertex(Vertex):
def compute(self):
if self.superstep() >= 1:
s = 0
while self.has_message():
msg = self.get_message()
s += msg
self.set_value(0.15 / self.get_num_of_vertices() + 0.85 * s)
if self.superstep() < 30:
n = len(self.get_out_edges())
if n > 0:
self.send_message_to_all_neighbors(self.get_value() / n)
else:
self.vote_to_halt()
class PageRankReader(Reader):
def read_num_of_vertices(self):
line = self.config_fp.readline()
return int(line)
def read_vertex(self):
line = self.graph_fp.readline()
if not line:
return None
line = line.strip().split(':')
vertex_id = int(line[0])
edges = []
if line[1]:
for e in line[1].split(' '):
edges.append(Edge(int(e), None))
return PageRankVertex(vertex_id, None, edges)
class PageRankWriter(Writer):
def write_vertex(self, vertex):
return vertex.get_vertex_id(), str(vertex.get_value())
class PageRankCombiner(Combiner):
def combine(self, msg_x, msg_y):
msg_x_value = msg_x[1]
msg_y_value = msg_y[1]
return None, msg_x_value + msg_y_value
def main():
if len(sys.argv) < 4:
print("usage: python %s [config] [graph] [out_file]" % sys.argv[0])
return
pagerank_reader = PageRankReader(sys.argv[1], sys.argv[2])
pagerank_writer = PageRankWriter(sys.argv[3])
pagerank_combiner = PageRankCombiner()
pagerank = Pypregel(
reader=pagerank_reader,
writer=pagerank_writer,
combiner=pagerank_combiner
)
pagerank.run()
if __name__ == "__main__":
main()
| [
7,
9,
10,
12,
13
] |
789 | 78dc2193c05ddb4cd4c80b1c0322890eca7fcf19 | <mask token>
| <mask token>
if __name__ == '__main__':
n.notify('READY=1')
time.sleep(2)
| <mask token>
n = sdnotify.SystemdNotifier()
if __name__ == '__main__':
n.notify('READY=1')
time.sleep(2)
| import signal
import time
import sdnotify
n = sdnotify.SystemdNotifier()
if __name__ == '__main__':
n.notify('READY=1')
time.sleep(2)
| import signal
import time
import sdnotify
n = sdnotify.SystemdNotifier()
if __name__ == '__main__':
n.notify("READY=1")
time.sleep(2)
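# The READY notification only has an effect when systemd supervises the
# process, so this script is meant to run from a unit with Type=notify.
# A minimal unit sketch (paths illustrative):
#
#   [Service]
#   Type=notify
#   ExecStart=/usr/bin/python /opt/app/notify_demo.py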
| [
0,
1,
2,
3,
4
] |
790 | 2fadc5c90d1bae14c57fc3bf02582e12aa8abdf6 | <mask token>
class PLTT(Editable):
<mask token>
<mask token>
<mask token>
def define(self, clr):
self.clr = clr
self.string('magic', length=4, default='PLTT')
self.uint32('size_')
self.uint32('format')
self.uint32('extended')
self.uint32('datasize')
self.uint32('offset')
self.data = ''
def load(self, reader):
Editable.load(self, reader)
self.data = array.array('H', reader.read(self.datasize))
def save(self, writer):
writer = Editable.save(self, writer)
ofs = writer.tell()
writer.write(self.data.tostring())
writer.writePadding(ofs + self.datasize)
return writer
def get_palettes(self):
palettes = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = 0
for pal_id in range(len(self.data) / num):
palette = []
for i in range(num):
val = self.data[start + i]
palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,
(val >> 10 & 31) << 3, 255))
start += num
palettes.append(palette)
return palettes
<mask token>
<mask token>
class NCLR(Editable):
"""2d color information
"""
def define(self):
self.string('magic', length=4, default='RLCN')
self.uint16('endian', default=65534)
self.uint16('version', default=257)
self.uint32('size_')
self.uint16('headersize', default=16)
self.uint16('numblocks', default=1)
self.pltt = PLTT(self)
def load(self, reader):
Editable.load(self, reader)
        assert self.magic == 'RLCN', 'Expected RLCN got {}'.format(self.magic)
self.pltt.load(reader)
def save(self, writer=None):
writer = Editable.save(self, writer)
writer = self.pltt.save(writer)
return writer
def get_palette(self, pal_id=0, transparent=True):
return self.pltt.get_palette(pal_id, transparent)
def get_palettes(self):
return self.pltt.get_palettes()
def set_palette(self, pal_id, palette):
return self.pltt.set_palette(pal_id, palette)
| <mask token>
class PLTT(Editable):
<mask token>
<mask token>
<mask token>
def define(self, clr):
self.clr = clr
self.string('magic', length=4, default='PLTT')
self.uint32('size_')
self.uint32('format')
self.uint32('extended')
self.uint32('datasize')
self.uint32('offset')
self.data = ''
def load(self, reader):
Editable.load(self, reader)
self.data = array.array('H', reader.read(self.datasize))
def save(self, writer):
writer = Editable.save(self, writer)
ofs = writer.tell()
writer.write(self.data.tostring())
writer.writePadding(ofs + self.datasize)
return writer
def get_palettes(self):
palettes = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = 0
for pal_id in range(len(self.data) / num):
palette = []
for i in range(num):
val = self.data[start + i]
palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,
(val >> 10 & 31) << 3, 255))
start += num
palettes.append(palette)
return palettes
<mask token>
def set_palette(self, pal_id, palette):
"""
Parameters
----------
pal_id : int
palette : list of tuple
List of 4-/3-int-tuple colors
"""
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = pal_id * num
for i, color in enumerate(palette):
if i > num:
break
r, g, b = color[:3]
self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10
class NCLR(Editable):
"""2d color information
"""
def define(self):
self.string('magic', length=4, default='RLCN')
self.uint16('endian', default=65534)
self.uint16('version', default=257)
self.uint32('size_')
self.uint16('headersize', default=16)
self.uint16('numblocks', default=1)
self.pltt = PLTT(self)
def load(self, reader):
Editable.load(self, reader)
        assert self.magic == 'RLCN', 'Expected RLCN got {}'.format(self.magic)
self.pltt.load(reader)
def save(self, writer=None):
writer = Editable.save(self, writer)
writer = self.pltt.save(writer)
return writer
def get_palette(self, pal_id=0, transparent=True):
return self.pltt.get_palette(pal_id, transparent)
def get_palettes(self):
return self.pltt.get_palettes()
def set_palette(self, pal_id, palette):
return self.pltt.set_palette(pal_id, palette)
| <mask token>
class PLTT(Editable):
<mask token>
<mask token>
<mask token>
def define(self, clr):
self.clr = clr
self.string('magic', length=4, default='PLTT')
self.uint32('size_')
self.uint32('format')
self.uint32('extended')
self.uint32('datasize')
self.uint32('offset')
self.data = ''
def load(self, reader):
Editable.load(self, reader)
self.data = array.array('H', reader.read(self.datasize))
def save(self, writer):
writer = Editable.save(self, writer)
ofs = writer.tell()
writer.write(self.data.tostring())
writer.writePadding(ofs + self.datasize)
return writer
def get_palettes(self):
palettes = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = 0
for pal_id in range(len(self.data) / num):
palette = []
for i in range(num):
val = self.data[start + i]
palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,
(val >> 10 & 31) << 3, 255))
start += num
palettes.append(palette)
return palettes
def get_palette(self, pal_id, transparent=True):
palette = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = pal_id * num
for i in range(num):
            if not i and transparent:  # entry 0 is transparent ('not num' was a dead branch)
palette.append(chr(0) * 4)
continue
val = self.data[start + i]
palette.append(chr((val >> 0 & 31) << 3) + chr((val >> 5 & 31) <<
3) + chr((val >> 10 & 31) << 3) + chr(255))
return palette
def set_palette(self, pal_id, palette):
"""
Parameters
----------
pal_id : int
palette : list of tuple
List of 4-/3-int-tuple colors
"""
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = pal_id * num
for i, color in enumerate(palette):
if i > num:
break
r, g, b = color[:3]
self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10
class NCLR(Editable):
"""2d color information
"""
def define(self):
self.string('magic', length=4, default='RLCN')
self.uint16('endian', default=65534)
self.uint16('version', default=257)
self.uint32('size_')
self.uint16('headersize', default=16)
self.uint16('numblocks', default=1)
self.pltt = PLTT(self)
def load(self, reader):
Editable.load(self, reader)
        assert self.magic == 'RLCN', 'Expected RLCN got {}'.format(self.magic)
self.pltt.load(reader)
def save(self, writer=None):
writer = Editable.save(self, writer)
writer = self.pltt.save(writer)
return writer
def get_palette(self, pal_id=0, transparent=True):
return self.pltt.get_palette(pal_id, transparent)
def get_palettes(self):
return self.pltt.get_palettes()
def set_palette(self, pal_id, palette):
return self.pltt.set_palette(pal_id, palette)
| <mask token>
class PLTT(Editable):
<mask token>
FORMAT_16BIT = 3
FORMAT_256BIT = 4
def define(self, clr):
self.clr = clr
self.string('magic', length=4, default='PLTT')
self.uint32('size_')
self.uint32('format')
self.uint32('extended')
self.uint32('datasize')
self.uint32('offset')
self.data = ''
def load(self, reader):
Editable.load(self, reader)
self.data = array.array('H', reader.read(self.datasize))
def save(self, writer):
writer = Editable.save(self, writer)
ofs = writer.tell()
writer.write(self.data.tostring())
writer.writePadding(ofs + self.datasize)
return writer
def get_palettes(self):
palettes = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = 0
for pal_id in range(len(self.data) / num):
palette = []
for i in range(num):
val = self.data[start + i]
palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,
(val >> 10 & 31) << 3, 255))
start += num
palettes.append(palette)
return palettes
def get_palette(self, pal_id, transparent=True):
palette = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = pal_id * num
for i in range(num):
            if not i and transparent:  # entry 0 is transparent ('not num' was a dead branch)
palette.append(chr(0) * 4)
continue
val = self.data[start + i]
palette.append(chr((val >> 0 & 31) << 3) + chr((val >> 5 & 31) <<
3) + chr((val >> 10 & 31) << 3) + chr(255))
return palette
def set_palette(self, pal_id, palette):
"""
Parameters
----------
pal_id : int
palette : list of tuple
List of 4-/3-int-tuple colors
"""
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = pal_id * num
for i, color in enumerate(palette):
if i > num:
break
r, g, b = color[:3]
self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10
class NCLR(Editable):
"""2d color information
"""
def define(self):
self.string('magic', length=4, default='RLCN')
self.uint16('endian', default=65534)
self.uint16('version', default=257)
self.uint32('size_')
self.uint16('headersize', default=16)
self.uint16('numblocks', default=1)
self.pltt = PLTT(self)
def load(self, reader):
Editable.load(self, reader)
        assert self.magic == 'RLCN', 'Expected RLCN got {}'.format(self.magic)
self.pltt.load(reader)
def save(self, writer=None):
writer = Editable.save(self, writer)
writer = self.pltt.save(writer)
return writer
def get_palette(self, pal_id=0, transparent=True):
return self.pltt.get_palette(pal_id, transparent)
def get_palettes(self):
return self.pltt.get_palettes()
def set_palette(self, pal_id, palette):
return self.pltt.set_palette(pal_id, palette)
|
import array
from PIL import Image
from generic.editable import XEditable as Editable
class PLTT(Editable):
"""Palette information"""
FORMAT_16BIT = 3
FORMAT_256BIT = 4
def define(self, clr):
self.clr = clr
self.string('magic', length=4, default='PLTT') # not reversed
self.uint32('size_')
self.uint32('format')
self.uint32('extended')
self.uint32('datasize')
self.uint32('offset')
self.data = ''
def load(self, reader):
Editable.load(self, reader)
self.data = array.array('H', reader.read(self.datasize))
def save(self, writer):
writer = Editable.save(self, writer)
ofs = writer.tell()
writer.write(self.data.tostring())
writer.writePadding(ofs+self.datasize)
return writer
def get_palettes(self):
palettes = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = 0
for pal_id in range(len(self.data)/num):
palette = []
for i in range(num):
val = self.data[start+i]
palette.append((((val >> 0) & 0x1f) << 3,
((val >> 5) & 0x1f) << 3,
((val >> 10) & 0x1f) << 3,
255))
start += num
palettes.append(palette)
return palettes
def get_palette(self, pal_id, transparent=True):
palette = []
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = pal_id*num
for i in range(num):
            if not i and transparent:  # entry 0 is transparent ('not num' was a dead branch)
palette.append(chr(0)*4)
continue
val = self.data[start+i]
palette.append(chr(((val >> 0) & 0x1f) << 3) +
chr(((val >> 5) & 0x1f) << 3) +
chr(((val >> 10) & 0x1f) << 3) +
chr(255))
return palette
def set_palette(self, pal_id, palette):
"""
Parameters
----------
pal_id : int
palette : list of tuple
List of 4-/3-int-tuple colors
"""
if self.format == self.FORMAT_16BIT:
num = 16
elif self.format == self.FORMAT_256BIT:
num = 256
start = pal_id*num
for i, color in enumerate(palette):
if i > num:
break
r, g, b = color[:3]
self.data[start+i] = ((r >> 3) |
(g >> 3 << 5) |
(b >> 3 << 10))
class NCLR(Editable):
"""2d color information
"""
def define(self):
self.string('magic', length=4, default='RLCN')
self.uint16('endian', default=0xFFFE)
self.uint16('version', default=0x101)
self.uint32('size_')
self.uint16('headersize', default=0x10)
self.uint16('numblocks', default=1)
self.pltt = PLTT(self)
def load(self, reader):
Editable.load(self, reader)
        assert self.magic == 'RLCN', 'Expected RLCN got {}'.format(self.magic)
self.pltt.load(reader)
def save(self, writer=None):
writer = Editable.save(self, writer)
writer = self.pltt.save(writer)
return writer
def get_palette(self, pal_id=0, transparent=True):
return self.pltt.get_palette(pal_id, transparent)
def get_palettes(self):
return self.pltt.get_palettes()
def set_palette(self, pal_id, palette):
return self.pltt.set_palette(pal_id, palette)
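# Worked example of the 15-bit BGR packing used above (added for
# illustration, not part of the original file): each 8-bit channel is
# truncated to 5 bits with >> 3 and blue occupies the high bits, so pure
# red (255, 0, 0) packs to 0x001F.
if __name__ == '__main__':
    r, g, b = 255, 0, 0
    packed = (r >> 3) | (g >> 3 << 5) | (b >> 3 << 10)
    assert packed == 0x001F  # 0b11111: the five red bits
    # get_palette expands back with << 3, so channels round-trip to multiples of 8
    assert ((packed >> 0) & 0x1f) << 3 == 248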
| [
13,
14,
15,
16,
19
] |
791 | bd06b04666ade1e7591b02f8211bc9b62fd08936 | #!/usr/bin/env python
import sys
import errno
# read first line from stdin and discard it
first_line = sys.stdin.readline()
# print all other lines
for line in sys.stdin:
try:
print line,
except IOError, e:
if e.errno == errno.EPIPE:
exit(0)
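# A Python 3 port of the same idea (a sketch; the script above is Python 2):
#
#   import sys, errno
#   sys.stdin.readline()            # discard the first line
#   try:
#       for line in sys.stdin:
#           print(line, end='')
#   except OSError as e:
#       if e.errno == errno.EPIPE:
#           sys.exit(0)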
| null | null | null | null | [
0
] |
792 | 360063940bb82defefc4195a5e17c9778b47e9e5 | <mask token>
def get_iam_token(iam_url, oauth_token):
response = post(iam_url, json={'yandexPassportOauthToken': oauth_token})
json_data = json.loads(response.text)
if json_data is not None and 'iamToken' in json_data:
return json_data['iamToken']
return None
<mask token>
| <mask token>
def get_iam_token(iam_url, oauth_token):
response = post(iam_url, json={'yandexPassportOauthToken': oauth_token})
json_data = json.loads(response.text)
if json_data is not None and 'iamToken' in json_data:
return json_data['iamToken']
return None
<mask token>
def MainYandex():
iam_url = 'https://iam.api.cloud.yandex.net/iam/v1/tokens'
oauth_token = 'AgAAAAAGg6eyAATuwWwJRFQmXUwDp4RCH-96fRc'
iam_token = get_iam_token(iam_url, oauth_token)
vision_url = 'https://vision.api.cloud.yandex.net/vision/v1/batchAnalyze'
folder_id = 'b1gneif5vbojdoubsq1d'
image_data = new_image_string()
response_text = request_analyze(vision_url, iam_token, folder_id,
image_data)
print(ParsOnText(response_text))
<mask token>
| <mask token>
def get_iam_token(iam_url, oauth_token):
response = post(iam_url, json={'yandexPassportOauthToken': oauth_token})
json_data = json.loads(response.text)
if json_data is not None and 'iamToken' in json_data:
return json_data['iamToken']
return None
def request_analyze(vision_url, iam_token, folder_id, image_data):
response = post(vision_url, headers={'Authorization': 'Bearer ' +
iam_token}, json={'folderId': folder_id, 'analyzeSpecs': [{
'content': image_data, 'features': [{'type': 'TEXT_DETECTION',
'textDetectionConfig': {'languageCodes': ['en', 'ru']}}]}]})
return response.text
def MainYandex():
iam_url = 'https://iam.api.cloud.yandex.net/iam/v1/tokens'
oauth_token = 'AgAAAAAGg6eyAATuwWwJRFQmXUwDp4RCH-96fRc'
iam_token = get_iam_token(iam_url, oauth_token)
vision_url = 'https://vision.api.cloud.yandex.net/vision/v1/batchAnalyze'
folder_id = 'b1gneif5vbojdoubsq1d'
image_data = new_image_string()
response_text = request_analyze(vision_url, iam_token, folder_id,
image_data)
print(ParsOnText(response_text))
<mask token>
| from requests import post
import json
import argparse
import base64
from ReadFromWindow import new_image_string
from ParsOnText import ParsOnText
def get_iam_token(iam_url, oauth_token):
response = post(iam_url, json={'yandexPassportOauthToken': oauth_token})
json_data = json.loads(response.text)
if json_data is not None and 'iamToken' in json_data:
return json_data['iamToken']
return None
def request_analyze(vision_url, iam_token, folder_id, image_data):
response = post(vision_url, headers={'Authorization': 'Bearer ' +
iam_token}, json={'folderId': folder_id, 'analyzeSpecs': [{
'content': image_data, 'features': [{'type': 'TEXT_DETECTION',
'textDetectionConfig': {'languageCodes': ['en', 'ru']}}]}]})
return response.text
def MainYandex():
iam_url = 'https://iam.api.cloud.yandex.net/iam/v1/tokens'
oauth_token = 'AgAAAAAGg6eyAATuwWwJRFQmXUwDp4RCH-96fRc'
iam_token = get_iam_token(iam_url, oauth_token)
vision_url = 'https://vision.api.cloud.yandex.net/vision/v1/batchAnalyze'
folder_id = 'b1gneif5vbojdoubsq1d'
image_data = new_image_string()
response_text = request_analyze(vision_url, iam_token, folder_id,
image_data)
print(ParsOnText(response_text))
if __name__ == '__main__':
MainYandex()
| from requests import post
import json
import argparse
import base64
from ReadFromWindow import new_image_string
from ParsOnText import ParsOnText
# Returns an IAM token for the Yandex account.
def get_iam_token(iam_url, oauth_token):
response = post(iam_url, json={"yandexPassportOauthToken": oauth_token})
json_data = json.loads(response.text)
if json_data is not None and 'iamToken' in json_data:
return json_data['iamToken']
return None
# Sends an image-recognition request to the server and returns the server's response.
def request_analyze(vision_url, iam_token, folder_id, image_data):
response = post(vision_url, headers={'Authorization': 'Bearer '+ iam_token}, json={
'folderId': folder_id,
'analyzeSpecs': [
{
'content': image_data,
'features': [
{
'type': 'TEXT_DETECTION',
'textDetectionConfig': {'languageCodes': ['en', 'ru']}
}
],
}
]})
return response.text
def MainYandex():
iam_url = 'https://iam.api.cloud.yandex.net/iam/v1/tokens'
oauth_token = "AgAAAAAGg6eyAATuwWwJRFQmXUwDp4RCH-96fRc"
iam_token = get_iam_token(iam_url, oauth_token)
vision_url = 'https://vision.api.cloud.yandex.net/vision/v1/batchAnalyze'
folder_id="b1gneif5vbojdoubsq1d"
# iam_token = "CggaATEVAgAAABKABGpvdJWrMiN0GIJtioMMmFcbtp8oIyD15ply3SU8iLfpSS8yabiP2U8ar1vYibjhCbmbLcwQZu12fLA3wtelEAFb6WiHyMtpXIKsStx3w9K0QVfW9n-6CEzrbzeIpfdRgcCH6oonbDJusNPYePXJ-bfXGqAXcVJVsBJ8W1WKmy1LRJdIZh3stv9dP23-334JnnlO0Hna2uKrb_wwuKruBu1P_EFECnn8f11N8UllADo5MbD5YFdvRhLCHvsAaAPH0lzwGadUDSqvU1OmZOMZqGNktgmiKUIH7QJpYb-879VZtEFtCm7TVSBAKPZCDF_kBPDEymLZY5foRWvb0nTrI9-7XspfCdyoUVcH9fGyni5d7NtFtydsv9Vyuf0EQUcCv8cJ03SZWZZXze63i785VUq1rYoCc12j_Qo8Qela_RWNnsDWTw0Va0rzk9csN0vhUz9aYnpJhb-F0i_T0NCrABsBGShAauhz20FgEaUgrQ7NdA0GwTFApJ6zsCQfzc1o0YMhUS7C2YDQ9RmTTbe1PRr5s4qNx8vVuJ-whdz0aeKUdgPVOOdxyGXFxhpkDY8ykHgQMWeFr6MomppHrXAf8qwt6Ob__rehJYEVV8iOcAxb9ust3gaobxv-QRspyRnNLvWrp7fa-iWqB2nwdXL0bRz6be6B--Qjg8PRbbyVkixxGmYKIGYyMjM1M2EwMzBlODRkYWFhNjY0ODJiZjUzMjMzMGE2EJCmpuoFGND3qOoFIiQKFGFqZTJ2b2VyZWc3aHNidmgwbjFhEgx0YWtzaGVldi5rcmlaADACOAFKCBoBMRUCAAAAUAEg9QQ"
image_data = new_image_string()
response_text = request_analyze(vision_url, iam_token, folder_id, image_data)
# with open('output.json', 'w') as f:
# f.write(response_text)
print(ParsOnText(response_text))
if __name__ == '__main__':
MainYandex()
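# The IAM exchange in get_iam_token can be reproduced from a shell for
# debugging (endpoint and payload taken from the code above; substitute
# your own OAuth token):
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"yandexPassportOauthToken": "<oauth_token>"}' \
#        https://iam.api.cloud.yandex.net/iam/v1/tokens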
| [
1,
2,
3,
5,
6
] |
793 | 75716aaaca63f8ca6d32c885021c1dc0f9a12dac | <mask token>
| <mask token>
def loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
    Normative reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
signal : numpy.array or DataTime object
Signal time values [Pa]
fs : float, optional
Sampling frequency, can be omitted if the input is a DataTime
object. Default to None
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse").
is_sdt_output : Bool, optional
If True, the outputs are returned as SciDataTool objects.
Default to False
Outputs
-------
N : float or numpy.array
The overall loudness array [sones], size (Ntime,).
N_specific : numpy.ndarray or DataFreq object
The specific loudness array [sones/bark], size (Nbark, Ntime).
bark_axis: numpy.array
The Bark axis array, size (Nbark,).
"""
if DataTime is not None and isinstance(signal, DataTime):
time = signal.get_along('time')['time']
fs = 1 / (time[1] - time[0])
signal = signal.get_along('time')[signal.symbol]
spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
spec_third = amp2db(spec_third, ref=2e-05)
Nm = _main_loudness(spec_third, field_type)
N, N_specific = _calc_slopes(Nm)
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
if is_sdt_output:
if DataLinspace is None:
raise RuntimeError(
"In order to handle Data objects you need the 'SciDataTool' package."
)
else:
bark_data = DataLinspace(name='Critical band rate', unit='Bark',
initial=0, final=24, number=int(24 / 0.1), include_endpoint
=True)
N_specific = DataFreq(name=
                'Specific loudness (Zwicker method for stationary signal)',
symbol="N'_{zwst}", axes=[bark_data], values=N_specific,
unit='sone/Bark')
return N, N_specific, bark_axis
| <mask token>
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
    Normative reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
signal : numpy.array or DataTime object
Signal time values [Pa]
fs : float, optional
Sampling frequency, can be omitted if the input is a DataTime
object. Default to None
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse").
is_sdt_output : Bool, optional
If True, the outputs are returned as SciDataTool objects.
Default to False
Outputs
-------
N : float or numpy.array
The overall loudness array [sones], size (Ntime,).
N_specific : numpy.ndarray or DataFreq object
The specific loudness array [sones/bark], size (Nbark, Ntime).
bark_axis: numpy.array
The Bark axis array, size (Nbark,).
"""
if DataTime is not None and isinstance(signal, DataTime):
time = signal.get_along('time')['time']
fs = 1 / (time[1] - time[0])
signal = signal.get_along('time')[signal.symbol]
spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
spec_third = amp2db(spec_third, ref=2e-05)
Nm = _main_loudness(spec_third, field_type)
N, N_specific = _calc_slopes(Nm)
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
if is_sdt_output:
if DataLinspace is None:
raise RuntimeError(
"In order to handle Data objects you need the 'SciDataTool' package."
)
else:
bark_data = DataLinspace(name='Critical band rate', unit='Bark',
initial=0, final=24, number=int(24 / 0.1), include_endpoint
=True)
N_specific = DataFreq(name=
                'Specific loudness (Zwicker method for stationary signal)',
symbol="N'_{zwst}", axes=[bark_data], values=N_specific,
unit='sone/Bark')
return N, N_specific, bark_axis
| import numpy as np
from mosqito.sound_level_meter import noct_spectrum
from mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness
from mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes
from mosqito.utils.conversion import amp2db
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
    Normative reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
signal : numpy.array or DataTime object
Signal time values [Pa]
fs : float, optional
Sampling frequency, can be omitted if the input is a DataTime
object. Default to None
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse").
is_sdt_output : Bool, optional
If True, the outputs are returned as SciDataTool objects.
Default to False
Outputs
-------
N : float or numpy.array
The overall loudness array [sones], size (Ntime,).
N_specific : numpy.ndarray or DataFreq object
The specific loudness array [sones/bark], size (Nbark, Ntime).
bark_axis: numpy.array
The Bark axis array, size (Nbark,).
"""
if DataTime is not None and isinstance(signal, DataTime):
time = signal.get_along('time')['time']
fs = 1 / (time[1] - time[0])
signal = signal.get_along('time')[signal.symbol]
spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
spec_third = amp2db(spec_third, ref=2e-05)
Nm = _main_loudness(spec_third, field_type)
N, N_specific = _calc_slopes(Nm)
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
if is_sdt_output:
if DataLinspace is None:
raise RuntimeError(
"In order to handle Data objects you need the 'SciDataTool' package."
)
else:
bark_data = DataLinspace(name='Critical band rate', unit='Bark',
initial=0, final=24, number=int(24 / 0.1), include_endpoint
=True)
N_specific = DataFreq(name=
                'Specific loudness (Zwicker method for stationary signal)',
symbol="N'_{zwst}", axes=[bark_data], values=N_specific,
unit='sone/Bark')
return N, N_specific, bark_axis
| # -*- coding: utf-8 -*-
# Third party imports
import numpy as np
# Local application imports
from mosqito.sound_level_meter import noct_spectrum
from mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness
from mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes
from mosqito.utils.conversion import amp2db
# Optional package import
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type="free", is_sdt_output=False):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
    Normative reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
signal : numpy.array or DataTime object
Signal time values [Pa]
fs : float, optional
Sampling frequency, can be omitted if the input is a DataTime
object. Default to None
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse").
is_sdt_output : Bool, optional
If True, the outputs are returned as SciDataTool objects.
Default to False
Outputs
-------
N : float or numpy.array
The overall loudness array [sones], size (Ntime,).
N_specific : numpy.ndarray or DataFreq object
The specific loudness array [sones/bark], size (Nbark, Ntime).
bark_axis: numpy.array
The Bark axis array, size (Nbark,).
"""
# Manage SciDataTool input type
if DataTime is not None and isinstance(signal, DataTime):
time = signal.get_along("time")["time"]
fs = 1 / (time[1] - time[0])
signal = signal.get_along("time")[signal.symbol]
# Compute third octave band spectrum
spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
# Compute dB values
spec_third = amp2db(spec_third, ref=2e-5)
# Compute main loudness
Nm = _main_loudness(spec_third, field_type)
# Computation of specific loudness pattern and integration of overall
# loudness by attaching slopes towards higher frequencies
N, N_specific = _calc_slopes(Nm)
# Define Bark axis
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
# Manage SciDataTool output type
if is_sdt_output:
if DataLinspace is None:
raise RuntimeError(
"In order to handle Data objects you need the 'SciDataTool' package."
)
else:
bark_data = DataLinspace(
name="Critical band rate",
unit="Bark",
initial=0,
final=24,
number=int(24 / 0.1),
include_endpoint=True,
)
N_specific = DataFreq(
                name="Specific loudness (Zwicker method for stationary signal)",
symbol="N'_{zwst}",
axes=[bark_data],
values=N_specific,
unit="sone/Bark",
)
return N, N_specific, bark_axis
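# Minimal usage sketch (added for illustration; not part of the module):
# loudness of a 1 kHz sine tone. Amplitude and sampling rate are
# illustrative values.
if __name__ == "__main__":
    fs = 48000
    t = np.arange(0, 1, 1 / fs)
    sine = np.sin(2 * np.pi * 1000 * t)  # signal values in [Pa]
    N, N_specific, bark_axis = loudness_zwst(sine, fs)
    print(N, N_specific.shape, bark_axis.shape)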
| [
0,
1,
2,
3,
4
] |
794 | 0e9d0927e8d69b0c0fad98479d47f2409c95a751 | <mask token>
| <mask token>
for i in range(len(x)):
ans += abs(x[i] - y[i])
for i in range(1, len(y)):
ans += abs(x[i - 1] - y[i])
if n % 2 == 1:
ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))
print(ans)
| n = int(input())
a = sorted([int(input()) for _ in range(n)])
x = a[:n // 2]
y = a[(n + 1) // 2:]
ans = 0
for i in range(len(x)):
ans += abs(x[i] - y[i])
for i in range(1, len(y)):
ans += abs(x[i - 1] - y[i])
if n % 2 == 1:
ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))
print(ans)
| n = int(input())
a = sorted([int(input()) for _ in range(n)])
x = a[:n//2]
y = a[(n + 1)//2:]
ans = 0
for i in range(len(x)):
ans += abs(x[i] - y[i])
for i in range(1, len(y)):
ans += abs(x[i - 1] - y[i])
if n % 2 == 1:
ans += max(
abs(a[n // 2] - x[-1]),
abs(a[n // 2] - y[0]),
)
print(ans) | null | [
0,
1,
2,
3
] |
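# Worked example for the arrangement code above (an inference from the code,
# not stated in the source): it computes the maximum possible sum of
# absolute differences between adjacent elements when the n inputs are
# arranged in a line. For n=3 with inputs 1, 2, 3 it prints 3, matching the
# best ordering 1, 3, 2 with |1-3| + |3-2| = 3: the sorted values are split
# into a lower half x=[1] and an upper half y=[3], paired across the split,
# and the odd middle element 2 contributes its larger distance to either
# neighbor half.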
795 | e3dece36ba3e5b3df763e7119c485f6ed2155098 | <mask token>
def get_summary():
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')
device = torch.device('cpu')
text = str(url_display1.get('1.0', tk.END))
preprocess_text = text.strip().replace('\n', '')
t5_prepared_Text = 'summarize: ' + preprocess_text
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'
).to(device)
summary_ids = model.generate(tokenized_text, num_beams=4,
no_repeat_ngram_size=2, min_length=30, max_length=100,
early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
Str1 = text
str2 = output
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
edited = len(text) - len(output)
Precision = (len(text) + len(output) + edited) / 2
Precisioncalc = Precision / 100
result = ('\n\nSummarized text: \n', output
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab2_display_text.insert(tk.END, result)
def open_pdf():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display.insert(3.0, io)
def open_pdf1():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display1.insert(3.0, io)
<mask token>
def clear_url_entry():
url_entry.delete(0, END)
<mask token>
def get_text():
raw_text = str(url_entry.get())
page = urlopen(raw_text)
soup = BeautifulSoup(page)
fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))
url_display.insert(tk.END, fetched_text)
def get_url_summary():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
result = '\nSummary:{}'.format(final_text)
tab3_display_text.insert(tk.END, result)
def use_spacy():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = text_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nSpacy Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
def use_nltk():
raw_text = url_display.get('1.0', tk.END)
final_text = nltk_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = nltk_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nNLTK Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
<mask token>
| <mask token>
def get_summary():
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')
device = torch.device('cpu')
text = str(url_display1.get('1.0', tk.END))
preprocess_text = text.strip().replace('\n', '')
t5_prepared_Text = 'summarize: ' + preprocess_text
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'
).to(device)
summary_ids = model.generate(tokenized_text, num_beams=4,
no_repeat_ngram_size=2, min_length=30, max_length=100,
early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
Str1 = text
str2 = output
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
edited = len(text) - len(output)
Precision = (len(text) + len(output) + edited) / 2
Precisioncalc = Precision / 100
result = ('\n\nSummarized text: \n', output
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab2_display_text.insert(tk.END, result)
def open_pdf():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display.insert(3.0, io)
def open_pdf1():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display1.insert(3.0, io)
def clear_display_result():
tab3_display_text.delete('1.0', END)
def clear_url_entry():
url_entry.delete(0, END)
<mask token>
def get_text():
raw_text = str(url_entry.get())
page = urlopen(raw_text)
soup = BeautifulSoup(page)
fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))
url_display.insert(tk.END, fetched_text)
def get_url_summary():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
result = '\nSummary:{}'.format(final_text)
tab3_display_text.insert(tk.END, result)
def use_spacy():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = text_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nSpacy Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
def use_nltk():
raw_text = url_display.get('1.0', tk.END)
final_text = nltk_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = nltk_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nNLTK Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
<mask token>
| <mask token>
window = Tk()
window.title('Summaryzer GUI')
window.geometry('700x400')
window.config(background='black')
style = ttk.Style(window)
style.configure('lefttab.TNotebook', tabposition='wn')
tab_control = ttk.Notebook(window, style='lefttab.TNotebook')
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab3, text=f"{'Extractive':^20s}")
tab_control.add(tab2, text=f"{'Abstractive':^20s}")
label1 = Label(tab3, text='Extractive Summarize', padx=5, pady=5)
label1.grid(column=1, row=0)
label2 = Label(tab2, text='Abstractive Summarize', padx=5, pady=5)
label2.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
def get_summary():
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')
device = torch.device('cpu')
text = str(url_display1.get('1.0', tk.END))
preprocess_text = text.strip().replace('\n', '')
t5_prepared_Text = 'summarize: ' + preprocess_text
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'
).to(device)
summary_ids = model.generate(tokenized_text, num_beams=4,
no_repeat_ngram_size=2, min_length=30, max_length=100,
early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
Str1 = text
str2 = output
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
edited = len(text) - len(output)
Precision = (len(text) + len(output) + edited) / 2
Precisioncalc = Precision / 100
result = ('\n\nSummarized text: \n', output
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab2_display_text.insert(tk.END, result)
def open_pdf():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display.insert(3.0, io)
def open_pdf1():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display1.insert(3.0, io)
def clear_display_result():
tab3_display_text.delete('1.0', END)
def clear_url_entry():
url_entry.delete(0, END)
def openfiles():
file1 = tkinter.filedialog.askopenfilename(filetypes=(('Text Files',
'.txt'), ('All files', '*')))
read_text = open(file1).read()
url_display.insert(tk.END, read_text)
def get_text():
raw_text = str(url_entry.get())
page = urlopen(raw_text)
soup = BeautifulSoup(page)
fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))
url_display.insert(tk.END, fetched_text)
def get_url_summary():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
result = '\nSummary:{}'.format(final_text)
tab3_display_text.insert(tk.END, result)
def use_spacy():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = text_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nSpacy Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
def use_nltk():
raw_text = url_display.get('1.0', tk.END)
final_text = nltk_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = nltk_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nNLTK Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
def use_gensim():
raw_text = url_display.get('1.0', tk.END)
final_text = summarize(raw_text)
print(final_text)
Str1 = raw_text
str2 = summarize(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nGensim Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
l1 = Label(tab3, text='Enter URL To Summarize')
l1.grid(row=1, column=0)
raw_entry = StringVar()
url_entry = Entry(tab3, textvariable=raw_entry, width=50)
url_entry.grid(row=1, column=1)
button1 = Button(tab3, text='Reset', command=clear_url_entry, width=12, bg=
'#03A9F4', fg='#fff')
button1.grid(row=4, column=0, padx=10, pady=10)
button2 = Button(tab3, text='Get Text', command=get_text, width=12, bg=
'#03A9F4', fg='#fff')
button2.grid(row=4, column=1, padx=10, pady=10)
button3 = Button(tab3, text='Open File', width=12, command=openfiles, bg=
'#c5cae9')
button3.grid(row=5, column=0, padx=10, pady=10)
button4 = Button(tab3, text='Open PDF', width=12, command=open_pdf, bg=
'#c5cae9')
button4.grid(row=5, column=1, padx=10, pady=10)
button5 = Button(tab3, text='SpaCy', command=use_spacy, width=12, bg='red',
fg='#fff')
button5.grid(row=8, column=0, padx=10, pady=10)
button6 = Button(tab3, text='Clear Result', command=clear_display_result,
width=12, bg='#03A9F4', fg='#fff')
button6.grid(row=9, column=1, padx=10, pady=10)
button7 = Button(tab3, text='NLTK', command=use_nltk, width=12, bg=
'#03A9F4', fg='#fff')
button7.grid(row=8, column=1, padx=10, pady=10)
button8 = Button(tab3, text='Gensim', command=use_gensim, width=12, bg=
'#03A9F4', fg='#fff')
button8.grid(row=9, column=0, padx=10, pady=10)
url_display = ScrolledText(tab3, height=10)
url_display.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab3_display_text = ScrolledText(tab3, height=10)
tab3_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
l1 = Label(tab2, text='Enter URL To Summarize')
l1.grid(row=1, column=0)
raw_entry1 = StringVar()
url_entry1 = Entry(tab2, textvariable=raw_entry, width=50)
url_entry1.grid(row=1, column=1)
button9 = Button(tab2, text='Reset', command=clear_url_entry, width=12, bg=
'#03A9F4', fg='#fff')
button9.grid(row=4, column=0, padx=10, pady=10)
button10 = Button(tab2, text='Get Text', command=get_text, width=12, bg=
'#03A9F4', fg='#fff')
button10.grid(row=4, column=1, padx=10, pady=10)
button11 = Button(tab2, text='Open File', width=12, command=openfiles, bg=
'#c5cae9')
button11.grid(row=5, column=0, padx=10, pady=10)
button12 = Button(tab2, text='Open PDF', width=12, command=open_pdf1, bg=
'#c5cae9')
button12.grid(row=5, column=1, padx=10, pady=10)
button13 = Button(tab2, text='Clear Result', command=clear_display_result,
width=12, bg='#03A9F4', fg='#fff')
button13.grid(row=9, column=1, padx=10, pady=10)
button14 = Button(tab2, text='Abstract', command=get_summary, width=12, bg=
'#03A9F4', fg='#fff')
button14.grid(row=9, column=0, padx=10, pady=10)
url_display1 = ScrolledText(tab2, height=10)
url_display1.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab2_display_text = ScrolledText(tab2, height=10)
tab2_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
window.mainloop()
| import difflib
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.scrolledtext import *
import tkinter.filedialog
import PyPDF2
from tkinter import filedialog
import torch
import json
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
from spacy_summarization import text_summarizer
from gensim.summarization import summarize
from nltk_summarization import nltk_summarizer
from bs4 import BeautifulSoup
from urllib.request import urlopen
window = Tk()
window.title('Summaryzer GUI')
window.geometry('700x400')
window.config(background='black')
style = ttk.Style(window)
style.configure('lefttab.TNotebook', tabposition='wn')
tab_control = ttk.Notebook(window, style='lefttab.TNotebook')
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab3, text=f"{'Extractive':^20s}")
tab_control.add(tab2, text=f"{'Abstractive':^20s}")
label1 = Label(tab3, text='Extractive Summarize', padx=5, pady=5)
label1.grid(column=1, row=0)
label2 = Label(tab2, text='Abstractive Summarize', padx=5, pady=5)
label2.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
def get_summary():
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')
device = torch.device('cpu')
text = str(url_display1.get('1.0', tk.END))
preprocess_text = text.strip().replace('\n', '')
t5_prepared_Text = 'summarize: ' + preprocess_text
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'
).to(device)
summary_ids = model.generate(tokenized_text, num_beams=4,
no_repeat_ngram_size=2, min_length=30, max_length=100,
early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
Str1 = text
str2 = output
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
edited = len(text) - len(output)
Precision = (len(text) + len(output) + edited) / 2
Precisioncalc = Precision / 100
result = ('\n\nSummarized text: \n', output
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab2_display_text.insert(tk.END, result)
def open_pdf():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display.insert(3.0, io)
def open_pdf1():
open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=
'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))
)
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display1.insert(3.0, io)
def clear_display_result():
tab3_display_text.delete('1.0', END)
def clear_url_entry():
url_entry.delete(0, END)
def openfiles():
file1 = tkinter.filedialog.askopenfilename(filetypes=(('Text Files',
'.txt'), ('All files', '*')))
read_text = open(file1).read()
url_display.insert(tk.END, read_text)
def get_text():
raw_text = str(url_entry.get())
page = urlopen(raw_text)
soup = BeautifulSoup(page)
fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))
url_display.insert(tk.END, fetched_text)
def get_url_summary():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
result = '\nSummary:{}'.format(final_text)
tab3_display_text.insert(tk.END, result)
def use_spacy():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = text_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nSpacy Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
def use_nltk():
raw_text = url_display.get('1.0', tk.END)
final_text = nltk_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = nltk_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nNLTK Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
def use_gensim():
raw_text = url_display.get('1.0', tk.END)
final_text = summarize(raw_text)
print(final_text)
Str1 = raw_text
str2 = summarize(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nGensim Summary:{}\n'.format(final_text
), ' Precision = ', Precisioncalc, ' similarity = ', printt
tab3_display_text.insert(tk.END, result)
l1 = Label(tab3, text='Enter URL To Summarize')
l1.grid(row=1, column=0)
raw_entry = StringVar()
url_entry = Entry(tab3, textvariable=raw_entry, width=50)
url_entry.grid(row=1, column=1)
button1 = Button(tab3, text='Reset', command=clear_url_entry, width=12, bg=
'#03A9F4', fg='#fff')
button1.grid(row=4, column=0, padx=10, pady=10)
button2 = Button(tab3, text='Get Text', command=get_text, width=12, bg=
'#03A9F4', fg='#fff')
button2.grid(row=4, column=1, padx=10, pady=10)
button3 = Button(tab3, text='Open File', width=12, command=openfiles, bg=
'#c5cae9')
button3.grid(row=5, column=0, padx=10, pady=10)
button4 = Button(tab3, text='Open PDF', width=12, command=open_pdf, bg=
'#c5cae9')
button4.grid(row=5, column=1, padx=10, pady=10)
button5 = Button(tab3, text='SpaCy', command=use_spacy, width=12, bg='red',
fg='#fff')
button5.grid(row=8, column=0, padx=10, pady=10)
button6 = Button(tab3, text='Clear Result', command=clear_display_result,
width=12, bg='#03A9F4', fg='#fff')
button6.grid(row=9, column=1, padx=10, pady=10)
button7 = Button(tab3, text='NLTK', command=use_nltk, width=12, bg=
'#03A9F4', fg='#fff')
button7.grid(row=8, column=1, padx=10, pady=10)
button8 = Button(tab3, text='Gensim', command=use_gensim, width=12, bg=
'#03A9F4', fg='#fff')
button8.grid(row=9, column=0, padx=10, pady=10)
url_display = ScrolledText(tab3, height=10)
url_display.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab3_display_text = ScrolledText(tab3, height=10)
tab3_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
l1 = Label(tab2, text='Enter URL To Summarize')
l1.grid(row=1, column=0)
raw_entry1 = StringVar()
url_entry1 = Entry(tab2, textvariable=raw_entry, width=50)
url_entry1.grid(row=1, column=1)
button9 = Button(tab2, text='Reset', command=clear_url_entry, width=12, bg=
'#03A9F4', fg='#fff')
button9.grid(row=4, column=0, padx=10, pady=10)
button10 = Button(tab2, text='Get Text', command=get_text, width=12, bg=
'#03A9F4', fg='#fff')
button10.grid(row=4, column=1, padx=10, pady=10)
button11 = Button(tab2, text='Open File', width=12, command=openfiles, bg=
'#c5cae9')
button11.grid(row=5, column=0, padx=10, pady=10)
button12 = Button(tab2, text='Open PDF', width=12, command=open_pdf1, bg=
'#c5cae9')
button12.grid(row=5, column=1, padx=10, pady=10)
button13 = Button(tab2, text='Clear Result', command=clear_display_result,
width=12, bg='#03A9F4', fg='#fff')
button13.grid(row=9, column=1, padx=10, pady=10)
button14 = Button(tab2, text='Abstract', command=get_summary, width=12, bg=
'#03A9F4', fg='#fff')
button14.grid(row=9, column=0, padx=10, pady=10)
url_display1 = ScrolledText(tab2, height=10)
url_display1.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab2_display_text = ScrolledText(tab2, height=10)
tab2_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
window.mainloop()
| # Core Packages
import difflib
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.scrolledtext import *
import tkinter.filedialog
import PyPDF2
from tkinter import filedialog
import torch
import json
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
# NLP Pkgs
from spacy_summarization import text_summarizer
from gensim.summarization import summarize
from nltk_summarization import nltk_summarizer
# Web Scraping Pkg
from bs4 import BeautifulSoup
from urllib.request import urlopen
# Structure and Layout
window = Tk()
window.title("Summaryzer GUI")
window.geometry("700x400")
window.config(background='black')
style = ttk.Style(window)
style.configure('lefttab.TNotebook', tabposition='wn', )
# TAB LAYOUT
tab_control = ttk.Notebook(window, style='lefttab.TNotebook')
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
# ADD TABS TO NOTEBOOK
tab_control.add(tab3, text=f'{"Extractive":^20s}')
tab_control.add(tab2, text=f'{"Abstractive":^20s}')
label1 = Label(tab3, text='Extractive Summarize', padx=5, pady=5)
label1.grid(column=1, row=0)
label2 = Label(tab2, text='Abstractive Summarize',padx=5, pady=5)
label2.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
def get_summary():
model = T5ForConditionalGeneration.from_pretrained ('t5-small')
tokenizer = T5Tokenizer.from_pretrained ('t5-small')
device = torch.device ('cpu')
text = str(url_display1.get('1.0', tk.END))
preprocess_text = text.strip ().replace ("\n", "")
t5_prepared_Text = "summarize: " + preprocess_text
tokenized_text = tokenizer.encode (t5_prepared_Text, return_tensors="pt").to (device)
summary_ids = model.generate (tokenized_text,
num_beams=4,
no_repeat_ngram_size=2,
min_length=30,
max_length=100,
early_stopping=True)
output = tokenizer.decode (summary_ids[0], skip_special_tokens=True)
Str1 = text
str2 = output
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
edited = len(text)-len(output)
Precision = (len(text)+len(output)+edited)/2
Precisioncalc = Precision / 100
result =("\n\nSummarized text: \n", output)," Precision = " , Precisioncalc , " similarity = " , printt
tab2_display_text.insert(tk.END, result)
def open_pdf():
open_file = filedialog.askopenfilename(
initialdir="C:/gui/",
title="Open PDF File",
filetypes=(
("PDF Files", "*.pdf"),
("All Files", ".")))
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display.insert(3.0, io)
def open_pdf1():
open_file = filedialog.askopenfilename(
initialdir="C:/gui/",
title="Open PDF File",
filetypes=(
("PDF Files", "*.pdf"),
("All Files", ".")))
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display1.insert(3.0, io)
def clear_display_result():
tab3_display_text.delete('1.0', END)
# Clear For URL
def clear_url_entry():
url_entry.delete(0, END)
# Open File to Read and Process
def openfiles():
file1 = tkinter.filedialog.askopenfilename(filetypes=(("Text Files", ".txt"), ("All files", "*")))
read_text = open(file1).read()
url_display.insert(tk.END, read_text)
def get_text():
raw_text = str(url_entry.get())
page = urlopen(raw_text)
soup = BeautifulSoup(page)
fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))
url_display.insert(tk.END, fetched_text)
def get_url_summary():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
result = '\nSummary:{}'.format(final_text)
tab3_display_text.insert(tk.END, result)
def use_spacy ():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = text_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nSpacy Summary:{}\n'.format(final_text)," Precision = " , Precisioncalc , " similarity = " , printt
tab3_display_text.insert(tk.END, result)
def use_nltk():
raw_text = url_display.get ('1.0', tk.END)
final_text = nltk_summarizer (raw_text)
print (final_text)
Str1 = raw_text
str2 = nltk_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nNLTK Summary:{}\n'.format(final_text)," Precision = " , Precisioncalc , " similarity = " , printt
tab3_display_text.insert(tk.END, result)
def use_gensim():
raw_text = url_display.get ('1.0', tk.END)
final_text = summarize(raw_text)
print (final_text)
Str1 = raw_text
str2 = summarize(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result ='\nGensim Summary:{}\n'.format(final_text)," Precision = " , Precisioncalc , " similarity = " , printt
tab3_display_text.insert(tk.END, result)
# URL TAB
l1 = Label(tab3, text="Enter URL To Summarize")
l1.grid(row=1, column=0)
raw_entry = StringVar()
url_entry = Entry(tab3, textvariable=raw_entry, width=50)
url_entry.grid(row=1, column=1)
# BUTTONS
button1 = Button(tab3, text="Reset", command=clear_url_entry, width=12, bg='#03A9F4', fg='#fff')
button1.grid(row=4, column=0, padx=10, pady=10)
button2 = Button(tab3, text="Get Text", command=get_text, width=12, bg='#03A9F4', fg='#fff')
button2.grid(row=4, column=1, padx=10, pady=10)
button3 = Button(tab3, text="Open File", width=12, command=openfiles, bg='#c5cae9')
button3.grid(row=5, column=0, padx=10, pady=10)
button4 = Button(tab3, text="Open PDF", width=12, command=open_pdf, bg='#c5cae9')
button4.grid(row=5, column=1, padx=10, pady=10)
button5 = Button(tab3, text="SpaCy", command=use_spacy, width=12, bg='red', fg='#fff')
button5.grid(row=8, column=0, padx=10, pady=10)
button6 = Button(tab3, text="Clear Result", command=clear_display_result, width=12, bg='#03A9F4', fg='#fff')
button6.grid(row=9, column=1, padx=10, pady=10)
button7 = Button(tab3, text="NLTK", command=use_nltk, width=12, bg='#03A9F4', fg='#fff')
button7.grid(row=8, column=1, padx=10, pady=10)
button8 = Button(tab3, text="Gensim", command=use_gensim, width=12, bg='#03A9F4', fg='#fff')
button8.grid(row=9, column=0, padx=10, pady=10)
# Display Screen For Result
url_display = ScrolledText(tab3, height=10)
url_display.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab3_display_text = ScrolledText(tab3, height=10)
tab3_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
l1 = Label(tab2, text="Enter URL To Summarize")
l1.grid(row=1, column=0)
raw_entry1 = StringVar()
url_entry1 = Entry(tab2, textvariable=raw_entry, width=50)
url_entry1.grid(row=1, column=1)
# BUTTONS
button9 = Button(tab2, text="Reset", command=clear_url_entry, width=12, bg='#03A9F4', fg='#fff')
button9.grid(row=4, column=0, padx=10, pady=10)
button10 = Button(tab2, text="Get Text", command=get_text, width=12, bg='#03A9F4', fg='#fff')
button10.grid(row=4, column=1, padx=10, pady=10)
button11 = Button(tab2, text="Open File", width=12, command=openfiles, bg='#c5cae9')
button11.grid(row=5, column=0, padx=10, pady=10)
button12 = Button(tab2, text="Open PDF", width=12, command=open_pdf1, bg='#c5cae9')
button12.grid(row=5, column=1, padx=10, pady=10)
button13 = Button(tab2, text="Clear Result", command=clear_display_result, width=12, bg='#03A9F4', fg='#fff')
button13.grid(row=9, column=1, padx=10, pady=10)
button14 = Button(tab2, text="Abstract", command=get_summary, width=12, bg='#03A9F4', fg='#fff')
button14.grid(row=9, column=0, padx=10, pady=10)
url_display1 = ScrolledText(tab2, height=10)
url_display1.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab2_display_text = ScrolledText(tab2, height=10)
tab2_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
window.mainloop()
| [
8,
9,
13,
14,
15
] |
796 | 64366e8532ffe05db7e7b7313e1d573c78a4e030 | <mask token>
class TestPipRequirement:
    def test_is_packaging_subclass(self):
r = PipRequirement('prefect')
assert isinstance(r, packaging.requirements.Requirement)
def test_can_be_used_in_pydantic_model(self):
class MyModel(pydantic.BaseModel):
req: PipRequirement
inst = MyModel(req='prefect')
assert inst.req == PipRequirement('prefect')
assert isinstance(inst.req, PipRequirement)
def test_equality(self):
assert PipRequirement('prefect') == PipRequirement('prefect')
assert PipRequirement('prefect') != PipRequirement('prefect>=2')
<mask token>
def test_current_environment_requirements_warns_about_editable_prefect():
with pytest.warns(UserWarning, match='prefect.*is an editable installation'
):
requirements = current_environment_requirements(
on_uninstallable_requirement='warn')
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert 'prefect' not in names
assert len(names) == len(set(names)), 'Names should not be repeated'
<mask token>
def test_current_environment_requirements_top_level_only():
requirements = current_environment_requirements(exclude_nested=True,
on_uninstallable_requirement='ignore')
all_requirements = current_environment_requirements(
on_uninstallable_requirement='ignore')
assert {r.name for r in requirements}.issubset({r.name for r in
all_requirements})
assert len(requirements) < len(all_requirements)
assert all(isinstance(r, PipRequirement) for r in requirements)
| <mask token>
class TestPipRequirement:
    def test_is_packaging_subclass(self):
r = PipRequirement('prefect')
assert isinstance(r, packaging.requirements.Requirement)
def test_can_be_used_in_pydantic_model(self):
class MyModel(pydantic.BaseModel):
req: PipRequirement
inst = MyModel(req='prefect')
assert inst.req == PipRequirement('prefect')
assert isinstance(inst.req, PipRequirement)
def test_equality(self):
assert PipRequirement('prefect') == PipRequirement('prefect')
assert PipRequirement('prefect') != PipRequirement('prefect>=2')
<mask token>
def test_current_environment_requirements_warns_about_editable_prefect():
with pytest.warns(UserWarning, match='prefect.*is an editable installation'
):
requirements = current_environment_requirements(
on_uninstallable_requirement='warn')
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert 'prefect' not in names
assert len(names) == len(set(names)), 'Names should not be repeated'
def test_current_environment_requirements_raises_on_editable_prefect():
with pytest.raises(ValueError, match='prefect.*is an editable installation'
):
current_environment_requirements(on_uninstallable_requirement='raise')
<mask token>
def test_current_environment_requirements_top_level_only():
requirements = current_environment_requirements(exclude_nested=True,
on_uninstallable_requirement='ignore')
all_requirements = current_environment_requirements(
on_uninstallable_requirement='ignore')
assert {r.name for r in requirements}.issubset({r.name for r in
all_requirements})
assert len(requirements) < len(all_requirements)
assert all(isinstance(r, PipRequirement) for r in requirements)
| <mask token>
class TestPipRequirement:
    def test_is_packaging_subclass(self):
r = PipRequirement('prefect')
assert isinstance(r, packaging.requirements.Requirement)
def test_can_be_used_in_pydantic_model(self):
class MyModel(pydantic.BaseModel):
req: PipRequirement
inst = MyModel(req='prefect')
assert inst.req == PipRequirement('prefect')
assert isinstance(inst.req, PipRequirement)
def test_equality(self):
assert PipRequirement('prefect') == PipRequirement('prefect')
assert PipRequirement('prefect') != PipRequirement('prefect>=2')
def test_current_environment_requirements():
requirements = current_environment_requirements(
on_uninstallable_requirement='ignore')
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert 'prefect' not in names
assert len(names) == len(set(names)), 'Names should not be repeated'
def test_current_environment_requirements_warns_about_editable_prefect():
with pytest.warns(UserWarning, match='prefect.*is an editable installation'
):
requirements = current_environment_requirements(
on_uninstallable_requirement='warn')
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert 'prefect' not in names
assert len(names) == len(set(names)), 'Names should not be repeated'
def test_current_environment_requirements_raises_on_editable_prefect():
with pytest.raises(ValueError, match='prefect.*is an editable installation'
):
current_environment_requirements(on_uninstallable_requirement='raise')
<mask token>
def test_current_environment_requirements_top_level_only():
requirements = current_environment_requirements(exclude_nested=True,
on_uninstallable_requirement='ignore')
all_requirements = current_environment_requirements(
on_uninstallable_requirement='ignore')
assert {r.name for r in requirements}.issubset({r.name for r in
all_requirements})
assert len(requirements) < len(all_requirements)
assert all(isinstance(r, PipRequirement) for r in requirements)
| import packaging.requirements
import pydantic
import pytest
from prefect.software.pip import PipRequirement, current_environment_requirements
class TestPipRequirement:
    def test_is_packaging_subclass(self):
r = PipRequirement('prefect')
assert isinstance(r, packaging.requirements.Requirement)
def test_can_be_used_in_pydantic_model(self):
class MyModel(pydantic.BaseModel):
req: PipRequirement
inst = MyModel(req='prefect')
assert inst.req == PipRequirement('prefect')
assert isinstance(inst.req, PipRequirement)
def test_equality(self):
assert PipRequirement('prefect') == PipRequirement('prefect')
assert PipRequirement('prefect') != PipRequirement('prefect>=2')
def test_current_environment_requirements():
requirements = current_environment_requirements(
on_uninstallable_requirement='ignore')
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert 'prefect' not in names
assert len(names) == len(set(names)), 'Names should not be repeated'
def test_current_environment_requirements_warns_about_editable_prefect():
with pytest.warns(UserWarning, match='prefect.*is an editable installation'
):
requirements = current_environment_requirements(
on_uninstallable_requirement='warn')
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert 'prefect' not in names
assert len(names) == len(set(names)), 'Names should not be repeated'
def test_current_environment_requirements_raises_on_editable_prefect():
with pytest.raises(ValueError, match='prefect.*is an editable installation'
):
current_environment_requirements(on_uninstallable_requirement='raise')
def test_current_environment_requirements_raises_on_bad_mode():
with pytest.raises(ValueError, match=
'Unknown mode for `on_uninstallable_requirement`'):
current_environment_requirements(on_uninstallable_requirement='foo')
def test_current_environment_requirements_top_level_only():
requirements = current_environment_requirements(exclude_nested=True,
on_uninstallable_requirement='ignore')
all_requirements = current_environment_requirements(
on_uninstallable_requirement='ignore')
assert {r.name for r in requirements}.issubset({r.name for r in
all_requirements})
assert len(requirements) < len(all_requirements)
assert all(isinstance(r, PipRequirement) for r in requirements)
| import packaging.requirements
import pydantic
import pytest
from prefect.software.pip import PipRequirement, current_environment_requirements
class TestPipRequirement:
    def test_is_packaging_subclass(self):
r = PipRequirement("prefect")
assert isinstance(r, packaging.requirements.Requirement)
def test_can_be_used_in_pydantic_model(self):
class MyModel(pydantic.BaseModel):
req: PipRequirement
inst = MyModel(req="prefect")
assert inst.req == PipRequirement("prefect")
assert isinstance(inst.req, PipRequirement)
def test_equality(self):
assert PipRequirement("prefect") == PipRequirement("prefect")
assert PipRequirement("prefect") != PipRequirement("prefect>=2")
# TODO: Add tests that mock the working set so we can make meaningful assertions
def test_current_environment_requirements():
requirements = current_environment_requirements(
on_uninstallable_requirement="ignore"
)
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert "prefect" not in names # Editable install is excluded
assert len(names) == len(set(names)), "Names should not be repeated"
def test_current_environment_requirements_warns_about_editable_prefect():
with pytest.warns(
UserWarning,
match=r"prefect.*is an editable installation",
):
requirements = current_environment_requirements(
on_uninstallable_requirement="warn"
)
assert all(isinstance(r, PipRequirement) for r in requirements)
names = [r.name for r in requirements]
assert "prefect" not in names
assert len(names) == len(set(names)), "Names should not be repeated"
def test_current_environment_requirements_raises_on_editable_prefect():
with pytest.raises(
ValueError,
match=r"prefect.*is an editable installation",
):
current_environment_requirements(on_uninstallable_requirement="raise")
def test_current_environment_requirements_raises_on_bad_mode():
with pytest.raises(
ValueError,
match=r"Unknown mode for `on_uninstallable_requirement`",
):
current_environment_requirements(on_uninstallable_requirement="foo")
def test_current_environment_requirements_top_level_only():
requirements = current_environment_requirements(
exclude_nested=True, on_uninstallable_requirement="ignore"
)
all_requirements = current_environment_requirements(
on_uninstallable_requirement="ignore"
)
assert {r.name for r in requirements}.issubset({r.name for r in all_requirements})
assert len(requirements) < len(all_requirements)
assert all(isinstance(r, PipRequirement) for r in requirements)
| [
6,
7,
8,
10,
11
] |
797 | 8c318d7152bfdf2bc472258eb87dfa499b743193 | <mask token>
 | def application(env, handle_headers):
    status = '200 OK'
    response_headers = [('Server', '')]
    handle_headers(status, response_headers)
    return ''
 | # coding:utf-8
def application(env,handle_headers):
    status="200 OK"
    response_headers=[
        ('Server','')
    ]
    handle_headers(status,response_headers)
    return "" | null | null | [
0,
1,
2
] |
798 | 6a601d1c7c3c162c0902d03e6c39f8d75d4bcaf0 | import numpy as np, argparse, sys, itertools, os, errno, warnings
from mpi4py import MPI
from enlib import enmap as en, powspec, utils
from enlib.degrees_of_freedom import DOF, Arg
from enlib.cg import CG
warnings.filterwarnings("ignore")
#from matplotlib.pylab import *
parser = argparse.ArgumentParser()
parser.add_argument("freqs")
parser.add_argument("maps")
parser.add_argument("noise")
parser.add_argument("powspec")
parser.add_argument("posfile")
parser.add_argument("odir")
parser.add_argument("-R", "--radius", type=float, default=30)
parser.add_argument("--burnin", type=int, default=10)
parser.add_argument("-n", "--nsamp", type=int, default=50)
parser.add_argument("--dump", type=int, default=0)
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-i", type=int, default=0)
parser.add_argument("--nmax", type=int, default=0)
parser.add_argument("--mindist-group", type=float, default=10)
parser.add_argument("-c", "--cont", action="store_true")
args = parser.parse_args()
comm = MPI.COMM_WORLD
myid = comm.rank
nproc= comm.size
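# Unit conversions: r2c takes radians to degrees; r2b takes a gaussian
# sigma in radians to a FWHM in arcminutes (FWHM = sigma*sqrt(8 ln 2)).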
r2c = 180/np.pi
r2b = r2c*60*(8*np.log(2))**0.5
def read_maps(fmt, n, ntot=4):
try:
maps = en.read_map(fmt)
if maps.ndim == ntot-1: maps = en.enmap([maps]*n,maps.wcs)
if maps.ndim != ntot: raise ValueError("Map %s must have %d dimensions" % (fmt,ntot))
return maps
except (IOError, OSError):
maps = [en.read_map(fmt % i) for i in range(n)]
maps = en.ndmap(maps, maps[0].wcs)
if maps.ndim != ntot: maps = maps.reshape(maps.shape[:-2]+(1,)*(maps.ndim-ntot)+maps.shape[-2:])
return maps
def flat_noise(shape, wcs, sigmas):
res = en.zeros([len(sigmas),shape[-3],shape[-3],shape[-2],shape[-1]], wcs)
for i,s in enumerate(sigmas):
res[i] = (np.eye(shape[-3])*s**2)[:,:,None,None]
return res
def read_noise(info, shape, wcs, n):
try:
nmat = flat_noise(shape, wcs, parse_floats(info))
except ValueError:
nmat = read_maps(info, n, 5)
if len(nmat) != n: raise ValueError("Number of noise maps (%d) != number of signal maps (%d)!" % (len(nmat), n))
if np.any(nmat.shape[-2:] != shape[-2:]): raise ValueError("Noise and maps have inconsistent shape!")
return nmat
def parse_floats(strs): return np.array([float(w) for w in strs.split(",")])
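# Multiply map m by a window that rolls off over a distance rad (in the
# map's physical units) from each edge, using the profile apod_fun.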
def apodize(m, rad, apod_fun):
scale = m.extent()/m.shape[-2:]
y = np.arange(m.shape[-2])*scale[0]
x = np.arange(m.shape[-1])*scale[1]
yfun = apod_fun(y, rad)*apod_fun(y[-1]-y, rad)
xfun = apod_fun(x, rad)*apod_fun(x[-1]-x, rad)
a = yfun[:,None]*xfun[None,:]
return m*a
def apod_step(x, r): return x>r
def apod_butter(x, r): return (1+(x/r)**-4)**-1
def apod_cos(x,r): return (1-np.cos(np.minimum(1,x/r)*np.pi))/2
# Read our inputs
freqs = parse_floats(args.freqs)
maps = read_maps(args.maps, len(freqs))
ncomp = maps.shape[-3]
nfreq = maps.shape[-4]
noise = read_noise(args.noise, maps.shape, maps.wcs, len(freqs))
ps = powspec.read_spectrum(args.powspec, expand="diag")[:ncomp,:ncomp]
poss = np.loadtxt(args.posfile)[:,:2]/r2c
R = args.radius/r2c/60
beam_fiducial = 1.5/r2b
beam_range = [0.8/r2b,3.0/r2b]
beam_max_asym = 2
apod_rad = R/10
# We will cut out small mini-maps around each source candidate and
# sample the CMB and source parameters jointly. But some candidates
# are so near each other that they aren't independent. These must
# be handled jointly, so we collect them into groups.
def build_groups(poss):
def dist(a,b): return np.sum((poss[a]-poss[b])**2)**0.5*180*60/np.pi
rest = set(range(len(poss)))
groups = []
while len(rest) > 0:
group = []
tocheck = [rest.pop()]
# Find distance to all other points
while len(tocheck) > 0:
current = tocheck.pop()
rnew = set()
while rest:
other = rest.pop()
if dist(current,other) < args.mindist_group:
tocheck.append(other)
else:
rnew.add(other)
rest = rnew
group.append(current)
groups.append(group)
return groups
groups = build_groups(poss)
print "Found %d groups" % len(groups)
# We will sample (cmb,A,pos,ibeam) jointly in Gibbs fashion:
#  cmb,A     <- P(cmb,A|data,pos,ibeam)  # direct, but requires a constrained-realization (CR) solve
#  pos,ibeam <- P(pos,ibeam|data,cmb,A)  # MCMC
# To take into account the nonperiodicity of each submap, we must introduce
# a region of extra noise around the edge.
class CMBSampler:
"""Draws samples from P(s,a|d,Cl,N,T), where T[ntemp,nfreq,ncomp,ny,nx] is a set of templates.
a[ntemp] is the set of template amplitudes."""
def __init__(self, maps, inoise, ps, T=None):
self.d = maps
self.iN = inoise
self.hN = en.multi_pow(inoise, 0.5, axes=[1,2])
self.iS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -1.0)
self.hS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -0.5)
self.ps = ps
self.b, self.x = None, None
# Prepare the preconditioner. It approximates the noise as the
# same in every pixel, and ignores the cmb-template coupling.
# See M(self,u) for details.
iN_white = np.array(np.sum(np.mean(np.mean(self.iN,-1),-1),0))
# iN_white is now in pixel space, but the preconditioner needs it
# in harmonic space, which introduces a
#norm = np.prod((maps.box[1]-maps.box[0])/maps.shape[-2:])
#norm = 1./np.prod(maps.shape[-2:])
#iN_white /= norm
self.S_prec = en.multi_pow(self.iS + iN_white[:,:,None,None], -1)
# The template
self.set_template(T)
def set_template(self, T):
if T is None: T = np.zeros((0,)+self.d.shape)
self.T = T
self.TT = np.einsum("aijyx,bijyx->ab",self.T,self.T)
self.dof = DOF(Arg(default=self.d[0]), Arg(shape=T.shape[:1]))
def P(self, u):
s, a = self.dof.unzip(u)
return s[None,:,:,:] + np.sum(self.T*a[:,None,None,None,None],0)
def PT(self, d):
return self.dof.zip(np.sum(d,0), np.einsum("qijyx,ijyx->q",self.T, d))
def A(self, u):
s, a = self.dof.unzip(u)
# U"u = [S"s, 0a]
Uu = self.dof.zip(en.harm2map(en.map_mul(self.iS, en.map2harm(s))),a*0)
# P'N"P u
PNPu = self.PT(en.map_mul(self.iN, self.P(u)))
return Uu + PNPu
def M(self, u):
# Multiplying things out, the full expression for A is:
# [ S" + sum(N") sum(N"T) ]
# [ sum(T'N") sum(T'T) ]
# A reasonable approximation for this is
# [ S" + sum(sigma^{-2}) 0 ]
# [ 0 sum(T'T) ]
# which can be directly inverted.
s, a = self.dof.unzip(u)
# Solve for the cmb signal component
res_s = en.harm2map(en.map_mul(self.S_prec,en.map2harm(s)))
res_a = np.linalg.solve(self.TT, a)
return self.dof.zip(res_s, res_a)
def calc_b(self):
PNd = self.PT(en.map_mul(self.iN, self.d))
Uw1_s = en.harm2map(en.map_mul(self.hS, en.rand_gauss_harm(self.d.shape[-3:],self.d.wcs)))
Uw1_a = np.zeros(self.T.shape[0])
Uw1 = self.dof.zip(Uw1_s, Uw1_a)
PNw2 = self.PT(en.map_mul(self.hN, en.rand_gauss(self.d.shape, self.d.wcs)))
return PNd + Uw1 + PNw2
def solve(self, b, x0, verbose=False):
cg = CG(self.A, b, x0=x0*0, M=self.M)
while cg.err > 1e-6:
cg.step()
if verbose:
print "%5d %15.7e %15.7e" % (cg.i, cg.err, cg.err_true) #, self.dof.unzip(cg.x)[1]
#if cg.i % 10 == 0:
# s, a = self.dof.unzip(cg.x)
# matshow(s[0]); colorbar(); show()
return cg.x
def sample(self, verbose=False):
self.b = self.calc_b()
if self.x is None: self.x = self.dof.zip(self.d[0], np.zeros(self.T.shape[0]))
self.x = self.solve(self.b, self.x, verbose)
return self.dof.unzip(self.x)
class PtsrcModel:
"""This class converts from point source shape parameters to amplitude
basis functions."""
def __init__(self, template):
self.pos = template.posmap()
self.nfreq, self.ncomp = template.shape[:2]
self.nparam = self.nfreq*self.ncomp
def get_templates(self, pos, irads):
x = utils.rewind(self.pos - pos[:,None,None],0,2*np.pi)
W = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
xWx = np.sum(np.einsum("ab,byx->ayx", W, x)*x,0)
profile = np.exp(-0.5*xWx)
bases = np.eye(self.nfreq*self.ncomp).reshape(self.nfreq*self.ncomp,self.nfreq,self.ncomp)
return profile[None,None,None]*bases[:,:,:,None,None]
def get_model(self, amps, pos, irads):
return np.sum((self.get_templates(pos, irads).T*amps.T).T,0)
class ShapeSampler:
def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=200, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.maps = maps
self.inoise = inoise
self.model= model
self.nsamp= nsamp
self.stepsize = stepsize
self.amps = amps
self.pos, self.irads = pos, irads
self.pos0 = pos0
self.maxdist=maxdist
self.lik = self.getlik(self.amps, self.pos, self.irads)
def getlik(self, amps, pos, irads):
if irads[0] < 0 or irads[1] < 0: return np.inf
if irads[0]*irads[1]-irads[2]**2 <= 0: return np.inf
sigma, phi = expand_beam(irads)
# The beam has a tendency to run off in unrealistic directions,
# so we need a relatively strong prior on it.
if np.min(sigma) < beam_range[0] or np.max(sigma) > beam_range[1] or np.max(sigma)/np.min(sigma) > beam_max_asym: return np.inf
template = self.model.get_model(amps, pos, irads)
residual = self.maps-template
tmp = np.einsum("fabyx,abyx->fayx",self.inoise, residual)
deviation = np.sum((pos-self.pos0)**2)**0.5/self.maxdist
penalty = 1+max(deviation-1,0)**2
return 0.5*np.sum(tmp*residual)*penalty
def newpos(self, pos):
# Draw pos with gaussian prior centered on previous position
# With a width given by the fiducial beam size.
		step = self.stepsize
		if np.random.uniform() < 0.1: step *= 100 # Sometimes try larger steps to break out of ruts
		return pos + np.random.standard_normal(2) * beam_fiducial * step
def newshape(self, irads):
return irads + np.random.standard_normal(3) * 1.0/beam_fiducial**2 * self.stepsize * 0.5
def newamp(self, amps):
return amps + np.random.standard_normal(len(amps)) * 1000 * self.stepsize
def subsample(self, verbose=False):
pos = self.newpos(self.pos)
lik = self.getlik(self.amps, pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.pos, self.lik = pos, lik
irads = self.newshape(self.irads)
lik = self.getlik(self.amps, self.pos, irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.irads, self.lik = irads, lik
amps = self.newamp(self.amps)
lik = self.getlik(amps, self.pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.amps, self.lik = amps, lik
if verbose:
sigma, phi = expand_beam(self.irads)
print (" %9.2f"*len(self.amps)+" %10.5f %10.5f %8.3f %8.3f %8.3f") % (tuple(self.amps)+tuple(self.pos*r2c)+tuple(sigma*r2b)+(phi*r2c,))
return self.amps, self.pos, self.irads
def sample(self, verbose=False):
"""Draw a new, uncorrelated sample."""
for i in range(self.nsamp): self.subsample(verbose)
return self.amps, self.pos, self.irads
class ShapeSamplerMulti:
def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=1500, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.samplers = [ShapeSampler(maps, inoise, model, amp1, pos1, pos01, irads1, nsamp=1, stepsize=stepsize, maxdist=maxdist) for amp1, pos1, pos01, irads1 in zip(amps, pos, pos0, irads)]
self.nsamp = nsamp
def sample(self, verbose=False):
for i in range(self.nsamp):
for sampler in self.samplers:
sampler.sample(verbose)
amps = np.array([s.amps for s in self.samplers])
pos = np.array([s.pos for s in self.samplers])
irads= np.array([s.irads for s in self.samplers])
return amps, pos, irads
class GibbsSampler:
def __init__(self, maps, inoise, ps, pos0, amp0, irads0, cmb0):
self.maps = maps
self.inoise = inoise
self.ps = ps
self.src_model = PtsrcModel(maps)
self.pos, self.amp, self.irads, self.cmb = pos0, amp0, irads0, cmb0
self.pos0 = pos0
self.cmb_sampler = CMBSampler(maps, inoise, ps)
def sample(self, verbose=False):
# First draw cmb,amp <- P(cmb,amp|data,pos,irads)
src_template = self.src_model.get_templates(self.pos, self.irads)
self.cmb_sampler.set_template(src_template)
self.cmb, self.amp = self.cmb_sampler.sample(verbose)
# Then draw pos,irads <- P(pos,irads|data,cmb,amp)
maps_nocmb = self.maps - self.cmb[None,:,:,:]
shape_sampler = ShapeSampler(maps_nocmb, self.inoise, self.src_model, self.amp, self.pos, self.pos0, self.irads)
self.amp, self.pos, self.irads = shape_sampler.sample(verbose)
return self.pos, self.amp, self.irads, self.cmb
class GibbsSamplerMulti:
"""Like GibbsSampler, but samples multiple points jointly.
This means that the source amplitude parameters will be arrays."""
def __init__(self, maps, inoise, ps, pos0, amp0, irads0, cmb0):
self.maps = maps
self.inoise = inoise
self.ps = ps
self.src_model = PtsrcModel(maps)
self.pos, self.amp, self.irads, self.cmb = pos0, amp0, irads0, cmb0
self.pos0 = pos0
self.cmb_sampler = CMBSampler(maps, inoise, ps)
def sample(self, verbose=False):
# First draw cmb,amp <- P(cmb,amp|data,pos,irads)
src_template = np.concatenate([self.src_model.get_templates(pos, irads) for pos,irads in zip(self.pos, self.irads)])
self.cmb_sampler.set_template(src_template)
self.cmb, self.amp = self.cmb_sampler.sample(verbose)
# Separate amps for each source
self.amp = self.amp.reshape(self.pos.shape[0],-1)
# Then draw pos,irads <- P(pos,irads|data,cmb,amp)
maps_nocmb = self.maps - self.cmb[None,:,:,:]
shape_sampler = ShapeSamplerMulti(maps_nocmb, self.inoise, self.src_model, self.amp, self.pos, self.pos0, self.irads)
self.amp, self.pos, self.irads = shape_sampler.sample(verbose)
return self.pos, self.amp, self.irads, self.cmb
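# Decompose the inverse-covariance beam parameters (ixx,iyy,ixy) into
# physical widths (major and minor sigma) and a position angle.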
def expand_beam(irads):
C = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
E, V = np.linalg.eigh(C)
phi = np.arctan2(V[1,0],V[0,0])
sigma = E**-0.5
if sigma[1] > sigma[0]:
sigma = sigma[::-1]
phi += np.pi/2
phi %= np.pi
return sigma, phi
def smooth_gauss(m, sigma):
l = np.sum(m.lmap()**2,0)**0.5
return np.real(en.ifft(en.fft(m)*np.exp(-0.5*(l*sigma)**2)))
def get_startpoint(maps, inoise, ps, rad=5):
# Filter away the CMB
sampler = CMBSampler(maps, inoise, ps, maps[None][:0])
cmb, _ = sampler.sample()
residual = maps - cmb[None]
# Smooth based on fiducial beam
residual = smooth_gauss(residual, beam_fiducial)
# Extract best point near center
cpix = np.array(residual.shape[-2:])/2
center = np.sum(np.sum((residual[:,:,cpix[0]-rad:cpix[0]+rad,cpix[1]-rad:cpix[1]+rad])**2,0),0)
I = np.argmax(center)
ipix = np.unravel_index(I, center.shape)
pos = center.posmap()[:,ipix[0],ipix[1]]
return pos
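# Planck blackbody intensity B(T,nu) in SI units; used to convert CMB
# temperature fluctuations to flux densities.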
def B(T,nu):
c = 299792458.0
h = 6.62606957e-34
k = 1.3806488e-23
return 2*h*nu**3/c**2/(np.exp(h*nu/k/T)-1)
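# Convert an amplitude in uK CMB at 148 GHz to flux density in mJy for a
# gaussian beam with sigmas b1,b2 in radians (1 mJy = 1e-29 W/m^2/Hz).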
def uK2mJ(amp,b1,b2):
T0 = 2.73; nu=148e9
dB = B(T0+amp*1e-6,nu)-B(T0,nu)
return dB*2*np.pi*b1*b2/1e-29
def output_dummy(id):
with open(args.odir+"/samps%03d.txt" % id, "w") as ofile:
pass
utils.mkdir(args.odir)
if args.nmax > 0: groups = groups[:args.nmax]
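# Distribute the source groups round-robin across the MPI tasks.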
for i in range(myid, len(groups), nproc):
if i < args.i: continue
group = groups[i]
if args.cont:
# If all our members are done, skip to next group
try:
lens = [len(np.loadtxt(args.odir + "/samps%03d.txt" % j)) for j in group]
if np.min(lens) >= args.nsamp:
continue
except (IOError, OSError): pass
print "%5d/%d %3d:" % (i+1, len(groups), myid),
print (" %3d"*len(group)) % tuple(group)
pos0 = np.array([poss[j] for j in group])
# Cut out a relevant region
box = np.array([np.min(pos0,0)-R,np.max(pos0,0)+R])
submap = maps.submap(box)
if submap.size == 0:
for g in group:
output_dummy(g)
continue
subnoise = apodize(noise.submap(box), apod_rad, apod_step)
# Set up initial values for the sampler
irads = np.tile(np.array([1/beam_fiducial**2,1/beam_fiducial**2,0]),(len(group),1))
amp = np.zeros([len(group),ncomp*nfreq])
cmb = submap[0]
sampler = GibbsSamplerMulti(submap, subnoise, ps, pos0, amp, irads, cmb)
# Open ofiles
ofiles = [open(args.odir + "/samps%03d.txt" % j, "w") for j in group]
for j in xrange(-args.burnin, args.nsamp):
pos, amp, irad, cmb = sampler.sample(args.verbose)
if j >= 0:
for mypos, myamp, myirad, ofile, isrc in zip(pos, amp, irad, ofiles,group):
sigma, phi = expand_beam(myirad)
mJ = uK2mJ(myamp,sigma[0],sigma[1])
print >> ofile, (" %10.5f"*2 + " %6.1f"*len(myamp) + "%8.3f %8.3f %8.3f" + " %6.2f"*len(mJ)) % (tuple(mypos*r2c)+tuple(myamp)+tuple(sigma*r2b)+(phi*r2c,)+tuple(mJ))
ofile.flush()
if args.dump > 0 and j % args.dump == 0:
dumpdir = args.odir + "/dump%03d" % isrc
utils.mkdir(dumpdir)
src = sampler.src_model.get_model(myamp, mypos, myirad)
residual = submap - src - cmb[None]
# Cut out our area
mybox = np.array([poss[isrc]-R,poss[isrc]+R])
mycmb, myres, mymod, mysub = [a.submap(mybox) for a in [cmb,residual,src,submap]]
en.write_map(dumpdir + "/cmb%03d.hdf" % j, mycmb)
en.write_map(dumpdir + "/residual%03d.hdf" % j, myres)
en.write_map(dumpdir + "/model%03d.hdf" % j, mymod)
en.write_map(dumpdir + "/submap.hdf", mysub)
| null | null | null | null | [
0
] |
799 | af35075eaca9bba3d6bdb73353eaf944869cdede | <mask token>
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
<mask token>
| <mask token>
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp('', pdp_id, value)
<mask token>
| <mask token>
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp('', pdp_id, value)
def get_pdp(pdp_id=None):
from moon_manager.db_driver import PDPManager
return PDPManager.get_pdp('', pdp_id)
| def update_pdp(pdp_id, value):
from moon_manager.db_driver import PDPManager
return PDPManager.update_pdp('', pdp_id, value)
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp('', pdp_id, value)
def get_pdp(pdp_id=None):
from moon_manager.db_driver import PDPManager
return PDPManager.get_pdp('', pdp_id)
| # Software Name: MOON
# Version: 5.4
# SPDX-FileCopyrightText: Copyright (c) 2018-2020 Orange and its contributors
# SPDX-License-Identifier: Apache-2.0
# This software is distributed under the 'Apache License 2.0',
# the text of which is available at 'http://www.apache.org/licenses/LICENSE-2.0.txt'
# or see the "LICENSE" file for more details.
def update_pdp(pdp_id, value):
from moon_manager.db_driver import PDPManager
return PDPManager.update_pdp("", pdp_id, value)
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp("", pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp("", pdp_id, value)
def get_pdp(pdp_id=None):
from moon_manager.db_driver import PDPManager
return PDPManager.get_pdp("", pdp_id)
| [
1,
2,
3,
4,
5
] |