index
int64 0
10k
| blob_id
stringlengths 40
40
| step-1
stringlengths 0
305k
| step-2
stringlengths 6
1.1M
⌀ | step-3
stringlengths 15
1.23M
⌀ | step-4
stringlengths 23
1.34M
⌀ | step-5
stringlengths 55
1.2M
⌀ | step-ids
sequencelengths 1
5
|
---|---|---|---|---|---|---|---|
100 | d815c6e233d81dfb144442a83e6006aa4e29bfce | <mask token>
| <mask token>
__all__ = ['VERSION']
| from .version import VERSION
__all__ = ['VERSION']
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==================================================
# @Author : Copyright@Ryuchen
# ==================================================
from .version import VERSION
__all__ = [
"VERSION"
]
| null | [
0,
1,
2,
3
] |
101 | eec52695e5afcc21e5fed6453e96cc3a58e7c1df | <mask token>
@micropython.viper
def viper_int(x: int, y: int) ->int:
return x + y + 3
<mask token>
@micropython.viper
def viper_local(x: int) ->int:
y = 4
return x + y
<mask token>
@micropython.viper
def viper_no_annotation(x, y):
return x * y
<mask token>
@micropython.viper
def viper_access_global():
global gl
gl = 1
return gl
<mask token>
@micropython.viper
def viper_set(x, y: int):
return {x, y + 1}
<mask token>
@micropython.viper
def viper_raise(x: int):
raise OSError(x)
<mask token>
@micropython.viper
def viper_gc() ->int:
return 1
<mask token>
| <mask token>
@micropython.viper
def viper_int(x: int, y: int) ->int:
return x + y + 3
<mask token>
@micropython.viper
def viper_local(x: int) ->int:
y = 4
return x + y
<mask token>
@micropython.viper
def viper_no_annotation(x, y):
return x * y
<mask token>
@micropython.viper
def viper_for(a: int, b: int) ->int:
total = 0
for x in range(a, b):
total += x
return total
<mask token>
@micropython.viper
def viper_access_global():
global gl
gl = 1
return gl
<mask token>
@micropython.viper
def viper_set(x, y: int):
return {x, y + 1}
<mask token>
@micropython.viper
def viper_raise(x: int):
raise OSError(x)
<mask token>
@micropython.viper
def viper_gc() ->int:
return 1
<mask token>
| <mask token>
@micropython.viper
def viper_int(x: int, y: int) ->int:
return x + y + 3
<mask token>
@micropython.viper
def viper_local(x: int) ->int:
y = 4
return x + y
<mask token>
@micropython.viper
def viper_no_annotation(x, y):
return x * y
<mask token>
@micropython.viper
def viper_for(a: int, b: int) ->int:
total = 0
for x in range(a, b):
total += x
return total
<mask token>
@micropython.viper
def viper_access_global():
global gl
gl = 1
return gl
<mask token>
@micropython.viper
def viper_print(x, y: int):
print(x, y + 1)
<mask token>
@micropython.viper
def viper_tuple(x, y: int):
return x, y + 1
<mask token>
@micropython.viper
def viper_set(x, y: int):
return {x, y + 1}
<mask token>
@micropython.viper
def viper_raise(x: int):
raise OSError(x)
<mask token>
@micropython.viper
def viper_gc() ->int:
return 1
<mask token>
| <mask token>
@micropython.viper
def viper_int(x: int, y: int) ->int:
return x + y + 3
<mask token>
@micropython.viper
def viper_object(x: object, y: object) ->object:
return x + y
<mask token>
@micropython.viper
def viper_local(x: int) ->int:
y = 4
return x + y
<mask token>
@micropython.viper
def viper_no_annotation(x, y):
return x * y
<mask token>
@micropython.viper
def viper_for(a: int, b: int) ->int:
total = 0
for x in range(a, b):
total += x
return total
<mask token>
@micropython.viper
def viper_access_global():
global gl
gl = 1
return gl
<mask token>
@micropython.viper
def viper_print(x, y: int):
print(x, y + 1)
<mask token>
@micropython.viper
def viper_tuple(x, y: int):
return x, y + 1
<mask token>
@micropython.viper
def viper_list(x, y: int):
return [x, y + 1]
<mask token>
@micropython.viper
def viper_set(x, y: int):
return {x, y + 1}
<mask token>
@micropython.viper
def viper_raise(x: int):
raise OSError(x)
<mask token>
@micropython.viper
def viper_gc() ->int:
return 1
<mask token>
| import micropython
# viper function taking and returning ints
@micropython.viper
def viper_int(x:int, y:int) -> int:
return x + y + 3
print(viper_int(1, 2))
# viper function taking and returning objects
@micropython.viper
def viper_object(x:object, y:object) -> object:
return x + y
print(viper_object(1, 2))
# a local (should have automatic type int)
@micropython.viper
def viper_local(x:int) -> int:
y = 4
return x + y
print(viper_local(3))
# without type annotation, types should default to object
@micropython.viper
def viper_no_annotation(x, y):
return x * y
print(viper_no_annotation(4, 5))
# a for loop
@micropython.viper
def viper_for(a:int, b:int) -> int:
total = 0
for x in range(a, b):
total += x
return total
print(viper_for(10, 10000))
# accessing a global
@micropython.viper
def viper_access_global():
global gl
gl = 1
return gl
print(viper_access_global(), gl)
# calling print with object and int types
@micropython.viper
def viper_print(x, y:int):
print(x, y + 1)
viper_print(1, 2)
# making a tuple from an object and an int
@micropython.viper
def viper_tuple(x, y:int):
return (x, y + 1)
print(viper_tuple(1, 2))
# making a list from an object and an int
@micropython.viper
def viper_list(x, y:int):
return [x, y + 1]
print(viper_list(1, 2))
# making a set from an object and an int
@micropython.viper
def viper_set(x, y:int):
return {x, y + 1}
print(sorted(list(viper_set(1, 2))))
# raising an exception
@micropython.viper
def viper_raise(x:int):
raise OSError(x)
try:
viper_raise(1)
except OSError as e:
print(repr(e))
# this doesn't work at the moment
#@micropython.viper
#def g() -> uint:
# return -1
# calling GC after defining the function
@micropython.viper
def viper_gc() -> int:
return 1
print(viper_gc())
import gc
gc.collect()
print(viper_gc())
| [
7,
8,
10,
12,
15
] |
102 | 9119fc1c75de980bbcf74f1e06a36ba587fc490b | <mask token>
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
<mask token>
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
<mask token>
| <mask token>
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
<mask token>
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
def next(self):
"""Here the conditions for openinng and closing a position have been set."""
if self.position.size == 0:
if self.rsi_2 < 30 and self.rsi_3 < 40:
self.buyAlert = True
if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and
self.buyAlert):
size = round(self.broker.getcash() / self.data, 3)
self.order = self.buy(size=size)
self.buyAlert = False
print(round(self.broker.get_cash(), 1))
if self.position.size != 0:
if self.rsi_4 > 67:
self.sellAlert1 = True
if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
self.close()
self.successNum += 1
self.sellAlert1 = False
if self.rsi_4 > 85:
self.sellAlert2 = True
if self.rsi_4 < 80 and self.sellAlert2:
self.close()
self.successNum += 1
self.sellAlert1 = False
self.sellAlert2 = False
if 0.82 * self.order.executed.price > self.datas[0
].close > 0.8 * self.order.executed.price:
self.close()
self.failureNum += 1
print('Shit !!! Failed for {} times.'.format(self.failureNum))
| <mask token>
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
def log(self, txt, dt=None):
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
def next(self):
"""Here the conditions for openinng and closing a position have been set."""
if self.position.size == 0:
if self.rsi_2 < 30 and self.rsi_3 < 40:
self.buyAlert = True
if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and
self.buyAlert):
size = round(self.broker.getcash() / self.data, 3)
self.order = self.buy(size=size)
self.buyAlert = False
print(round(self.broker.get_cash(), 1))
if self.position.size != 0:
if self.rsi_4 > 67:
self.sellAlert1 = True
if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
self.close()
self.successNum += 1
self.sellAlert1 = False
if self.rsi_4 > 85:
self.sellAlert2 = True
if self.rsi_4 < 80 and self.sellAlert2:
self.close()
self.successNum += 1
self.sellAlert1 = False
self.sellAlert2 = False
if 0.82 * self.order.executed.price > self.datas[0
].close > 0.8 * self.order.executed.price:
self.close()
self.failureNum += 1
print('Shit !!! Failed for {} times.'.format(self.failureNum))
| import backtrader as bt
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
def log(self, txt, dt=None):
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
def next(self):
"""Here the conditions for openinng and closing a position have been set."""
if self.position.size == 0:
if self.rsi_2 < 30 and self.rsi_3 < 40:
self.buyAlert = True
if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and
self.buyAlert):
size = round(self.broker.getcash() / self.data, 3)
self.order = self.buy(size=size)
self.buyAlert = False
print(round(self.broker.get_cash(), 1))
if self.position.size != 0:
if self.rsi_4 > 67:
self.sellAlert1 = True
if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
self.close()
self.successNum += 1
self.sellAlert1 = False
if self.rsi_4 > 85:
self.sellAlert2 = True
if self.rsi_4 < 80 and self.sellAlert2:
self.close()
self.successNum += 1
self.sellAlert1 = False
self.sellAlert2 = False
if 0.82 * self.order.executed.price > self.datas[0
].close > 0.8 * self.order.executed.price:
self.close()
self.failureNum += 1
print('Shit !!! Failed for {} times.'.format(self.failureNum))
| import backtrader as bt
class RSIStrategy(bt.Strategy):
    """Multi-RSI backtrader strategy.

    Builds four RSI(7) indicators, one per data feed (presumably the same
    instrument at four different timeframes -- TODO confirm against the
    cerebro setup that feeds this strategy). Entries are armed on oversold
    readings and executed on the rebound; exits are armed on overbought
    readings, with a hard stop-loss band around the entry price.
    """
    def __init__(self):
        # Last buy order; read by the stop-loss check in next().
        # NOTE(review): it stays None until the first buy, so the stop-loss
        # comparison would raise AttributeError if reached before any entry.
        self.order = None
        self.position.size = 0
        # Latched alert flags: armed by one bar's condition, consumed later.
        self.sellAlert1 = False
        self.sellAlert2 = False
        self.buyAlert = False
        # Trade counters reported via print in notify_order()/next().
        self.failureNum = 0
        self.successNum = 0
        # One RSI(7) per feed.
        self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
        self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
        self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
        self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
    def log(self, txt, dt=None):
        """Print *txt* prefixed with the date of the primary feed's current bar."""
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))
    def notify_order(self, order):
        """Log executed buy/sell orders; other order states are ignored."""
        if order.status in [order.Completed]:
            if order.isbuy():
                return self.log(
                    'BUY Executed at price: {} with size: {}'.format(order.executed.price, order.executed.size))
            elif order.issell():
                print('Succeeded for {} times.'.format(self.successNum))
                return self.log(
                    'SELL Executed at price: {} with size: {}'.format(order.executed.price, order.executed.size))
    def next(self):
        """Here the conditions for opening and closing a position have been set."""
        if self.position.size == 0:
            # Arm the BUY alert on an oversold reading.
            if self.rsi_2 < 30 and self.rsi_3 < 40:
                self.buyAlert = True
            # If BUY is armed and the RSIs have rebounded, place a buy order
            # sized to spend (approximately) all available cash.
            if self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and self.buyAlert:
                size = round((self.broker.getcash() / self.data), 3)
                self.order = self.buy(size=size)
                self.buyAlert = False
                print(round(self.broker.get_cash(), 1))
                # print(self.datas[0].low[0])
        if self.position.size != 0:
            # SELL_1: arm once RSI reaches the overbought zone...
            if self.rsi_4 > 67:
                self.sellAlert1 = True
            # ...and close when momentum rolls over.
            if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
                self.close()
                self.successNum += 1
                self.sellAlert1 = False
            # SELL_2: arm on an extreme overbought reading (RSI > 85)...
            if self.rsi_4 > 85:
                self.sellAlert2 = True
            # ...and close as soon as it drops back below 80.
            if (self.rsi_4 < 80) and self.sellAlert2:
                self.close()
                self.successNum += 1
                self.sellAlert1 = False
                self.sellAlert2 = False
            # Stop-loss: close if price falls into the 80%-82% band of the
            # entry price. NOTE(review): prices below 80% of entry slip past
            # this band check -- confirm that is intentional.
            if 0.82 * self.order.executed.price > self.datas[0].close > 0.8 * self.order.executed.price:
                self.close()
                self.failureNum += 1
                print('Shit !!! Failed for {} times.'.format(self.failureNum))
| [
3,
4,
5,
6,
7
] |
103 | 99785ffb4b594db1fac05ca3d3f5764151b2b7b6 | <mask token>
@csrf_exempt
def create(request):
if request.method == 'POST':
json_data = request.body
stream = io.BytesIO(json_data)
pythondata = JSONParser().parse(stream)
serializer = StudentSerializer(data=pythondata)
if serializer.is_valid():
serializer.save()
res = {'msg': 'data inserted', 'code': 200}
json_data = JSONRenderer().render(res)
return HttpResponse(json_data)
else:
json_data = JSONRenderer().render(serializer.errors)
return HttpResponse(json_data)
| <mask token>
def student_detail(request, pk):
stu = Student.objects.get(id=pk)
serializers = StudentSerializer(stu)
return JsonResponse(serializers.data)
<mask token>
@csrf_exempt
def create(request):
if request.method == 'POST':
json_data = request.body
stream = io.BytesIO(json_data)
pythondata = JSONParser().parse(stream)
serializer = StudentSerializer(data=pythondata)
if serializer.is_valid():
serializer.save()
res = {'msg': 'data inserted', 'code': 200}
json_data = JSONRenderer().render(res)
return HttpResponse(json_data)
else:
json_data = JSONRenderer().render(serializer.errors)
return HttpResponse(json_data)
| <mask token>
def student_detail(request, pk):
stu = Student.objects.get(id=pk)
serializers = StudentSerializer(stu)
return JsonResponse(serializers.data)
def student_list(request):
stu = Student.objects.all()
serializers = StudentSerializer(stu, many=True)
return JsonResponse(serializers.data, safe=False)
@csrf_exempt
def create(request):
if request.method == 'POST':
json_data = request.body
stream = io.BytesIO(json_data)
pythondata = JSONParser().parse(stream)
serializer = StudentSerializer(data=pythondata)
if serializer.is_valid():
serializer.save()
res = {'msg': 'data inserted', 'code': 200}
json_data = JSONRenderer().render(res)
return HttpResponse(json_data)
else:
json_data = JSONRenderer().render(serializer.errors)
return HttpResponse(json_data)
| import django
from rest_framework import serializers
from django.shortcuts import render
from .models import Student
from .serializiers import StudentSerializer
from rest_framework.renderers import JSONRenderer
from django.http import HttpResponse, JsonResponse
import io
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from django.views.decorators.csrf import csrf_exempt
def student_detail(request, pk):
stu = Student.objects.get(id=pk)
serializers = StudentSerializer(stu)
return JsonResponse(serializers.data)
def student_list(request):
stu = Student.objects.all()
serializers = StudentSerializer(stu, many=True)
return JsonResponse(serializers.data, safe=False)
@csrf_exempt
def create(request):
if request.method == 'POST':
json_data = request.body
stream = io.BytesIO(json_data)
pythondata = JSONParser().parse(stream)
serializer = StudentSerializer(data=pythondata)
if serializer.is_valid():
serializer.save()
res = {'msg': 'data inserted', 'code': 200}
json_data = JSONRenderer().render(res)
return HttpResponse(json_data)
else:
json_data = JSONRenderer().render(serializer.errors)
return HttpResponse(json_data)
| import django
from rest_framework import serializers
from django.shortcuts import render
from .models import Student
from .serializiers import StudentSerializer
from rest_framework.renderers import JSONRenderer
from django.http import HttpResponse,JsonResponse
import io
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from django.views.decorators.csrf import csrf_exempt
# Single Model object.
def student_detail(request, pk):
    """Return the Student identified by *pk* serialized as a JSON response."""
    student = Student.objects.get(id=pk)
    serialized = StudentSerializer(student)
    # JsonResponse renders the serializer's dict straight to JSON, which is
    # equivalent to the manual JSONRenderer + HttpResponse combination.
    return JsonResponse(serialized.data)
def student_list(request):
    """Return every Student record as a JSON array."""
    students = Student.objects.all()
    serialized = StudentSerializer(students, many=True)
    # safe=False allows JsonResponse to emit a top-level list rather than a dict.
    return JsonResponse(serialized.data, safe=False)
@csrf_exempt
def create(request):
    """Create a Student from a JSON POST body and return a JSON status reply."""
    if request.method != 'POST':
        # Non-POST requests produce no response, matching the original
        # control flow (the function fell through and returned None).
        return None
    payload = JSONParser().parse(io.BytesIO(request.body))
    serializer = StudentSerializer(data=payload)
    if not serializer.is_valid():
        return HttpResponse(JSONRenderer().render(serializer.errors))
    serializer.save()
    reply = JSONRenderer().render({'msg': 'data inserted', 'code': 200})
    return HttpResponse(reply)
| [
1,
2,
3,
4,
5
] |
104 | cf0eb9685cdfc412871d3b36270ddab3e520bb8f | <mask token>
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field()
| <mask token>
class CnnArticleItem(scrapy.Item):
<mask token>
<mask token>
<mask token>
<mask token>
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field()
| <mask token>
class CnnArticleItem(scrapy.Item):
title = scrapy.Field()
developments = scrapy.Field()
body = scrapy.Field()
date = scrapy.Field()
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field()
| import scrapy
class CnnArticleItem(scrapy.Item):
    """Container for the fields scraped from a CNN article page."""
    title = scrapy.Field()
    developments = scrapy.Field()
    body = scrapy.Field()
    date = scrapy.Field()
class GoogleArticleItem(scrapy.Item):
    """Container for the fields scraped from a Google news search result."""
    title = scrapy.Field()
    date = scrapy.Field()
    snippet = scrapy.Field()
    source = scrapy.Field()
| # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CnnArticleItem(scrapy.Item):
title = scrapy.Field()
developments = scrapy.Field()
body = scrapy.Field()
date = scrapy.Field()
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field() | [
2,
3,
4,
5,
6
] |
105 | a42a94798d176e20646d41cf0f4b7e4f99e0790b | <mask token>
class StardogGraphStore(GraphStore):
<mask token>
def check_whether_db_exists(self):
logger.debug("Checking whether a triple store with db '{}' exists..."
.format(self._node_ts_url))
url = self._get_ts_db_url()
r = requests.get(url, auth=(self._ts_user, self._ts_pass))
status_code = r.status_code
logger.debug('Status type of response whether db exists: {}.'.
format(status_code))
return status_code == 200
<mask token>
def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:
ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
logger.debug(
"Checking whether graph '{}' is already in the triple store..."
.format(ihash))
query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)
sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),
self._get_sparql_endpoint_for_update())
sparql_query.setQuery(query)
sparql_query.method = 'POST'
sparql_query.setReturnFormat(JSON)
sparql_query.setCredentials(self._ts_user, self._ts_pass)
result = sparql_query.query()
return result.convert()['boolean']
| <mask token>
class StardogGraphStore(GraphStore):
<mask token>
def check_whether_db_exists(self):
logger.debug("Checking whether a triple store with db '{}' exists..."
.format(self._node_ts_url))
url = self._get_ts_db_url()
r = requests.get(url, auth=(self._ts_user, self._ts_pass))
status_code = r.status_code
logger.debug('Status type of response whether db exists: {}.'.
format(status_code))
return status_code == 200
def add_graph(self, raw_graph, graph_format, graph_hash):
logger.debug("Adding graph to the triple store with URL '{}'...".
format(self._get_sparql_endpoint_for_update()))
ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
g = Graph()
g.parse(data=raw_graph, format=graph_format)
sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),
self._get_sparql_endpoint_for_update())
query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.
serialize(format='nt').decode())
sparql_query.setQuery(query)
sparql_query.method = 'POST'
sparql_query.setCredentials(self._ts_user, self._ts_pass)
sparql_query.query()
def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:
ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
logger.debug(
"Checking whether graph '{}' is already in the triple store..."
.format(ihash))
query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)
sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),
self._get_sparql_endpoint_for_update())
sparql_query.setQuery(query)
sparql_query.method = 'POST'
sparql_query.setReturnFormat(JSON)
sparql_query.setCredentials(self._ts_user, self._ts_pass)
result = sparql_query.query()
return result.convert()['boolean']
| <mask token>
logger = get_debug_logger()
class StardogGraphStore(GraphStore):
def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):
super(StardogGraphStore, self).__init__(ts_db_name, ts_url)
self._ts_user = ts_user
self._ts_pass = ts_pass
msg = (
"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'."
.format(ts_user, self._node_ts_url))
logger.info(msg)
def check_whether_db_exists(self):
logger.debug("Checking whether a triple store with db '{}' exists..."
.format(self._node_ts_url))
url = self._get_ts_db_url()
r = requests.get(url, auth=(self._ts_user, self._ts_pass))
status_code = r.status_code
logger.debug('Status type of response whether db exists: {}.'.
format(status_code))
return status_code == 200
def add_graph(self, raw_graph, graph_format, graph_hash):
logger.debug("Adding graph to the triple store with URL '{}'...".
format(self._get_sparql_endpoint_for_update()))
ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
g = Graph()
g.parse(data=raw_graph, format=graph_format)
sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),
self._get_sparql_endpoint_for_update())
query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.
serialize(format='nt').decode())
sparql_query.setQuery(query)
sparql_query.method = 'POST'
sparql_query.setCredentials(self._ts_user, self._ts_pass)
sparql_query.query()
def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:
ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
logger.debug(
"Checking whether graph '{}' is already in the triple store..."
.format(ihash))
query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)
sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),
self._get_sparql_endpoint_for_update())
sparql_query.setQuery(query)
sparql_query.method = 'POST'
sparql_query.setReturnFormat(JSON)
sparql_query.setCredentials(self._ts_user, self._ts_pass)
result = sparql_query.query()
return result.convert()['boolean']
| import requests
from SPARQLWrapper import SPARQLWrapper, JSON
from rdflib import Graph
from plenum.server.plugin.graphchain.graph_store import GraphStore
from plenum.server.plugin.graphchain.logger import get_debug_logger
logger = get_debug_logger()
class StardogGraphStore(GraphStore):
def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):
super(StardogGraphStore, self).__init__(ts_db_name, ts_url)
self._ts_user = ts_user
self._ts_pass = ts_pass
msg = (
"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'."
.format(ts_user, self._node_ts_url))
logger.info(msg)
def check_whether_db_exists(self):
logger.debug("Checking whether a triple store with db '{}' exists..."
.format(self._node_ts_url))
url = self._get_ts_db_url()
r = requests.get(url, auth=(self._ts_user, self._ts_pass))
status_code = r.status_code
logger.debug('Status type of response whether db exists: {}.'.
format(status_code))
return status_code == 200
def add_graph(self, raw_graph, graph_format, graph_hash):
logger.debug("Adding graph to the triple store with URL '{}'...".
format(self._get_sparql_endpoint_for_update()))
ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
g = Graph()
g.parse(data=raw_graph, format=graph_format)
sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),
self._get_sparql_endpoint_for_update())
query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.
serialize(format='nt').decode())
sparql_query.setQuery(query)
sparql_query.method = 'POST'
sparql_query.setCredentials(self._ts_user, self._ts_pass)
sparql_query.query()
def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:
ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
logger.debug(
"Checking whether graph '{}' is already in the triple store..."
.format(ihash))
query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)
sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),
self._get_sparql_endpoint_for_update())
sparql_query.setQuery(query)
sparql_query.method = 'POST'
sparql_query.setReturnFormat(JSON)
sparql_query.setCredentials(self._ts_user, self._ts_pass)
result = sparql_query.query()
return result.convert()['boolean']
| import requests
from SPARQLWrapper import SPARQLWrapper, JSON
from rdflib import Graph
from plenum.server.plugin.graphchain.graph_store import GraphStore
from plenum.server.plugin.graphchain.logger import get_debug_logger
logger = get_debug_logger()
class StardogGraphStore(GraphStore):
    """GraphStore backed by a Stardog triple store reached over HTTP.

    All SPARQL traffic goes through SPARQLWrapper against the query/update
    endpoints derived from the database configured at construction; every
    request authenticates with HTTP basic auth using the stored credentials.
    """
    def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):
        # Credentials are retained for basic auth on every subsequent request.
        super(StardogGraphStore, self).__init__(ts_db_name, ts_url)
        self._ts_user = ts_user
        self._ts_pass = ts_pass
        msg = "Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'." \
            .format(ts_user, self._node_ts_url)
        logger.info(msg)
    def check_whether_db_exists(self):
        """Return True when the configured database URL answers with HTTP 200."""
        logger.debug("Checking whether a triple store with db '{}' exists...".format(self._node_ts_url))
        url = self._get_ts_db_url()
        r = requests.get(url, auth=(self._ts_user, self._ts_pass))
        status_code = r.status_code
        logger.debug("Status type of response whether db exists: {}.".format(status_code))
        return status_code == 200
    def add_graph(self, raw_graph, graph_format, graph_hash):
        """Parse *raw_graph* and INSERT it into a named graph keyed by *graph_hash*.

        The serialized input is round-tripped through rdflib so the INSERT
        template receives canonical N-Triples regardless of the input format.
        """
        logger.debug("Adding graph to the triple store with URL '{}'...".format(self._get_sparql_endpoint_for_update()))
        ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
        g = Graph()
        g.parse(data=raw_graph, format=graph_format)
        sparql_query = SPARQLWrapper(
            self._get_sparql_endpoint_for_query(),
            self._get_sparql_endpoint_for_update())
        query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.serialize(format='nt').decode())
        sparql_query.setQuery(query)
        sparql_query.method = 'POST'
        sparql_query.setCredentials(self._ts_user, self._ts_pass)
        sparql_query.query()
    def check_if_graph_is_already_stored(self, graph_hash: str) -> bool:
        """Return True when a named graph for *graph_hash* already exists (SPARQL ASK)."""
        ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
        logger.debug("Checking whether graph '{}' is already in the triple store...".format(ihash))
        query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)
        sparql_query = SPARQLWrapper(
            self._get_sparql_endpoint_for_query(),
            self._get_sparql_endpoint_for_update())
        sparql_query.setQuery(query)
        sparql_query.method = 'POST'
        sparql_query.setReturnFormat(JSON)
        sparql_query.setCredentials(self._ts_user, self._ts_pass)
        result = sparql_query.query()
        # ASK responses carry the answer in the JSON 'boolean' field.
        return result.convert()['boolean']
| [
3,
4,
6,
7,
8
] |
106 | 7affd79fb0bb47283bbd9a7fbcaa0ba43aa8e6a6 | <mask token>
| def countOfZeros(num):
cnt = 0
while num != 0:
cnt += 1
num = num & num - 1
return 32 - cnt
<mask token>
| def countOfZeros(num):
cnt = 0
while num != 0:
cnt += 1
num = num & num - 1
return 32 - cnt
def main():
num = eval(input("Enter number to count zeros in it's binary: "))
print('Assumung int is of 32 bits.')
result = countOfZeros(num)
print("Number of zero's in %d = %d" % (num, result))
<mask token>
| def countOfZeros(num):
cnt = 0
while num != 0:
cnt += 1
num = num & num - 1
return 32 - cnt
def main():
num = eval(input("Enter number to count zeros in it's binary: "))
print('Assumung int is of 32 bits.')
result = countOfZeros(num)
print("Number of zero's in %d = %d" % (num, result))
if __name__ == '__main__':
main()
| # Write a program to accept a no & count number of zeros in it.(int=32bits)
def countOfZeros(num):
    """Return the number of 0-bits in the 32-bit binary representation of num.

    The value is masked to its low 32 bits first. This preserves the
    original behaviour for 0 <= num < 2**32 and additionally terminates for
    negative inputs: the original Kernighan loop (num &= num - 1) never
    reaches zero for a negative Python int, whose sign extension is infinite.
    """
    # Zeros are the 32 positions not occupied by set bits.
    ones = bin(num & 0xFFFFFFFF).count('1')
    return 32 - ones
def main():
    """Prompt for an integer and report how many 0-bits its 32-bit form contains."""
    # int(..., 0) accepts decimal, hex (0x..), octal (0o..) and binary (0b..)
    # literals; unlike the original eval() it cannot execute arbitrary code.
    num = int(input('Enter number to count zeros in it\'s binary: '), 0)
    print('Assuming int is of 32 bits.')
    result = countOfZeros(num)
    print('Number of zero\'s in %d = %d' % (num, result))
if __name__ == '__main__':
main()
| [
0,
1,
2,
3,
4
] |
107 | 62a7958ba5ebb6da866d6ef156e52136df22f235 | <mask token>
| <mask token>
print('x is {}'.format(x))
print(type(x))
<mask token>
print('x is {}'.format(x))
print(type(x))
| x = 7
x = 7 // 3
<mask token>
x = 0.1 + 0.1 + 0.1 - 0.3
print('x is {}'.format(x))
print(type(x))
<mask token>
a = Decimal('.10')
b = Decimal('.30')
x = a + a + a - b
print('x is {}'.format(x))
print(type(x))
| x = 7
x = 7 // 3
from decimal import *
x = 0.1 + 0.1 + 0.1 - 0.3
print('x is {}'.format(x))
print(type(x))
from decimal import *
a = Decimal('.10')
b = Decimal('.30')
x = a + a + a - b
print('x is {}'.format(x))
print(type(x))
|
# =============> Normal mathematical operations <==========
x = 7
x = 7 // 3 # floor division: 7 // 3 == 2, class int
#x = 7 / 3 # true division: 7 / 3 == 2.3333333333333335, class float
#x = 7 % 3 # remainder: 7 % 3 == 1, class int
#print("x is {}" .format(x))
#print(type(x))
# ================> Float accuracy vs precision <================
# x = .1 + .1 + .1 - .3 gives 5.551115123125783e-17, not 0, because binary
# floats cannot represent 0.1 exactly; the decimal module fixes this.
from decimal import *
x = .1 + .1 + .1 -.3
print("x is {}" .format(x))
print(type(x))
# =============> Solving the accuracy problem with Decimal <===============
# The result type is class decimal.Decimal.
# When dealing with money, prefer Decimal over float.
from decimal import *
a = Decimal('.10') # constructed from a string to avoid float rounding
b = Decimal('.30')
x = a + a + a - b
print("x is {}" .format(x))
print(type(x))
| [
0,
1,
2,
3,
4
] |
108 | 0744ec646e7b9303c67c25dff2997568c6171b91 | <mask token>
| <mask token>
parser.add_argument('nex', help='path of the .nex file to be launched')
parser.add_argument('file', help='autoexec.bas file to be generated')
<mask token>
contents += bytearray((0, 10))
contents += struct.pack('<H', len(command))
contents += command.encode('ASCII')
<mask token>
with open(args.file, 'wb') as f:
f.write(contents)
| <mask token>
parser = argparse.ArgumentParser(description=
'Generate an autoexec.bas that launches a .nex file')
parser.add_argument('nex', help='path of the .nex file to be launched')
parser.add_argument('file', help='autoexec.bas file to be generated')
args = parser.parse_args()
command = '.nexload ' + args.nex + '\r'
contents = bytearray(128)
contents[0:8] = 'PLUS3DOS'.encode('ASCII')
contents[8] = 26
contents[9:11] = [1, 0]
contents += bytearray((0, 10))
contents += struct.pack('<H', len(command))
contents += command.encode('ASCII')
programLength = len(contents) - 128
contents[15] = 0
contents[16:18] = struct.pack('<H', programLength)
contents[18:20] = struct.pack('<H', 10)
contents[20:22] = struct.pack('<H', programLength)
contents[11:15] = struct.pack('<L', len(contents))
contents[127] = sum(contents[0:126]) & 255
with open(args.file, 'wb') as f:
f.write(contents)
| import argparse
import struct
import sys
parser = argparse.ArgumentParser(description=
'Generate an autoexec.bas that launches a .nex file')
parser.add_argument('nex', help='path of the .nex file to be launched')
parser.add_argument('file', help='autoexec.bas file to be generated')
args = parser.parse_args()
command = '.nexload ' + args.nex + '\r'
contents = bytearray(128)
contents[0:8] = 'PLUS3DOS'.encode('ASCII')
contents[8] = 26
contents[9:11] = [1, 0]
contents += bytearray((0, 10))
contents += struct.pack('<H', len(command))
contents += command.encode('ASCII')
programLength = len(contents) - 128
contents[15] = 0
contents[16:18] = struct.pack('<H', programLength)
contents[18:20] = struct.pack('<H', 10)
contents[20:22] = struct.pack('<H', programLength)
contents[11:15] = struct.pack('<L', len(contents))
contents[127] = sum(contents[0:126]) & 255
with open(args.file, 'wb') as f:
f.write(contents)
| #!/usr/bin/env python3
#
# nextskeleton - An assembler skeleton for the ZX Spectrum Next
#
# Copyright (C) 2020 Richard "Shred" Körber
# https://github.com/shred/nextskeleton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import struct
import sys
parser = argparse.ArgumentParser(description='Generate an autoexec.bas that launches a .nex file')
parser.add_argument('nex',
help='path of the .nex file to be launched')
parser.add_argument('file',
help='autoexec.bas file to be generated')
args = parser.parse_args()
command = '.nexload ' + args.nex + '\r'
contents = bytearray(128)
contents[0:8] = 'PLUS3DOS'.encode('ASCII') # +3DOS signature
contents[8] = 0x1A
contents[9:11] = [0x01, 0x00] # Issue and Version
contents += bytearray((0x00, 0x0A)) # Line number 10
contents += struct.pack('<H', len(command)) # Line length
contents += command.encode('ASCII') # BASIC line
programLength = len(contents) - 128 # Length of the BASIC program
contents[15] = 0x00 # DOS header: PROGRAM
contents[16:18] = struct.pack('<H', programLength) # DOS header: length
contents[18:20] = struct.pack('<H', 10) # DOS header: run at line 10
contents[20:22] = struct.pack('<H', programLength) # DOS header: offset to prog
contents[11:15] = struct.pack('<L', len(contents)) # Set total length
contents[127] = sum(contents[0:126]) & 0xFF # Compute checksum
with open(args.file, 'wb') as f:
f.write(contents)
| [
0,
1,
2,
3,
4
] |
109 | 8abfb6a9ca3a7a909a1e8125e8c03e29b2bacda8 | <mask token>
| <mask token>
database.read_data()
<mask token>
print(prices.shape)
<mask token>
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
preds = list(model(prices[:50, None, None])[:, 0])
for i in range(len(prices) - 50):
preds.append(model.forward_step(preds[-1][None, ...])[0])
print(preds)
print(prices[1:])
plt.plot(np.arange(len(prices) - 1), prices[1:])
plt.plot(np.arange(len(preds)), preds)
plt.show()
| <mask token>
database = StockDatabase()
database.read_data()
prices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',
length=2000)))
print(prices.shape)
model = RecurrentAnalyzer(100, 10).to('cpu')
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
preds = list(model(prices[:50, None, None])[:, 0])
for i in range(len(prices) - 50):
preds.append(model.forward_step(preds[-1][None, ...])[0])
print(preds)
print(prices[1:])
plt.plot(np.arange(len(prices) - 1), prices[1:])
plt.plot(np.arange(len(preds)), preds)
plt.show()
| from StockDatabase import StockDatabase
from RNNinner import RecurrentAnalyzer
import torch
import matplotlib.pyplot as plt
import numpy as np
database = StockDatabase()
database.read_data()
prices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',
length=2000)))
print(prices.shape)
model = RecurrentAnalyzer(100, 10).to('cpu')
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
preds = list(model(prices[:50, None, None])[:, 0])
for i in range(len(prices) - 50):
preds.append(model.forward_step(preds[-1][None, ...])[0])
print(preds)
print(prices[1:])
plt.plot(np.arange(len(prices) - 1), prices[1:])
plt.plot(np.arange(len(preds)), preds)
plt.show()
| from StockDatabase import StockDatabase
from RNNinner import RecurrentAnalyzer
import torch
import matplotlib.pyplot as plt
import numpy as np
database = StockDatabase()
database.read_data()
prices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',length=2000)))
print(prices.shape)
model = RecurrentAnalyzer(100,10).to('cpu')
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
preds = list(model(prices[:50,None,None])[:,0])
for i in range(len(prices)-50):
preds.append(model.forward_step(preds[-1][None,...])[0])
print(preds)
print(prices[1:])
plt.plot(np.arange(len(prices)-1),prices[1:])
plt.plot(np.arange(len(preds)), preds)
plt.show()
| [
0,
1,
2,
3,
4
] |
110 | 14f309d478de6de5a0b493503176941fdfa8b702 | <mask token>
| <mask token>
if __name__ == '__main__':
cap = cv2.VideoCapture()
while True:
ret, frame = cap.read()
cv2.imshow(frame)
| import cv2
import numpy as np
if __name__ == '__main__':
cap = cv2.VideoCapture()
while True:
ret, frame = cap.read()
cv2.imshow(frame)
| import cv2
import numpy as np
if __name__ == "__main__":
cap = cv2.VideoCapture()
while True:
ret, frame = cap.read()
cv2.imshow(frame)
| null | [
0,
1,
2,
3
] |
111 | 9cab749b915dbb808ac105caa5287b50729f5fd9 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Task', fields=[('file_id',
models.AutoField(primary_key=True, serialize=False)), ('task_id',
models.UUIDField(default=uuid.uuid4, editable=False)), ('file',
models.FileField(upload_to=dataUpload.models.task_directory_path)),
('path_to_tar', models.CharField(default='', max_length=1000)), (
'path_to_cache', models.CharField(default='', max_length=1000))])]
| import dataUpload.models
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Task', fields=[('file_id',
models.AutoField(primary_key=True, serialize=False)), ('task_id',
models.UUIDField(default=uuid.uuid4, editable=False)), ('file',
models.FileField(upload_to=dataUpload.models.task_directory_path)),
('path_to_tar', models.CharField(default='', max_length=1000)), (
'path_to_cache', models.CharField(default='', max_length=1000))])]
| # Generated by Django 3.2.4 on 2021-09-13 17:41
import dataUpload.models
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('file_id', models.AutoField(primary_key=True, serialize=False)),
('task_id', models.UUIDField(default=uuid.uuid4, editable=False)),
('file', models.FileField(upload_to=dataUpload.models.task_directory_path)),
('path_to_tar', models.CharField(default='', max_length=1000)),
('path_to_cache', models.CharField(default='', max_length=1000)),
],
),
]
| [
0,
1,
2,
3,
4
] |
112 | 93e5852df00733c024a59d37699bae58bd893030 | <mask token>
class BaseCard(object):
def __init__(self, field=[]):
self.data = {}
self.support_set_field = field
def add_cue_words(self, arr):
"""
为卡片添加cue words 提示用户输入
:param arr:
:return:
"""
if arr:
if isinstance(arr, str):
arr = [arr]
if 'cueWords' in self.data:
self.data['cueWords'] = self.data['cueWords']
else:
self.data['cueWords'] = []
self.data['cueWords'].extend(arr)
return self
<mask token>
def get_data(self):
return self.data
def __getattr__(self, item):
"""
添加魔术方法
:param item:
:return:
"""
operation = item[0:3]
field = item[4:]
if operation == 'set' and field and field.lower(
) in self.support_set_field:
def function(*args):
self.data[field.lower()] = args[0]
return function
else:
def function(*args):
logging.info('不支持 %s_%s' % (operation, field))
print('不支持', operation, field)
return function
<mask token>
| <mask token>
class BaseCard(object):
def __init__(self, field=[]):
self.data = {}
self.support_set_field = field
def add_cue_words(self, arr):
"""
为卡片添加cue words 提示用户输入
:param arr:
:return:
"""
if arr:
if isinstance(arr, str):
arr = [arr]
if 'cueWords' in self.data:
self.data['cueWords'] = self.data['cueWords']
else:
self.data['cueWords'] = []
self.data['cueWords'].extend(arr)
return self
def set_anchor(self, url, anchor_text):
"""
设置卡片链接
:param url: 比如:http(s)://....
:param anchor_text: 链接显示的文字
:return:
"""
if url:
self.data['url'] = url
if anchor_text:
self.data['anchorText'] = anchor_text
return self
def get_data(self):
return self.data
def __getattr__(self, item):
"""
添加魔术方法
:param item:
:return:
"""
operation = item[0:3]
field = item[4:]
if operation == 'set' and field and field.lower(
) in self.support_set_field:
def function(*args):
self.data[field.lower()] = args[0]
return function
else:
def function(*args):
logging.info('不支持 %s_%s' % (operation, field))
print('不支持', operation, field)
return function
<mask token>
| <mask token>
class BaseCard(object):
def __init__(self, field=[]):
self.data = {}
self.support_set_field = field
def add_cue_words(self, arr):
"""
为卡片添加cue words 提示用户输入
:param arr:
:return:
"""
if arr:
if isinstance(arr, str):
arr = [arr]
if 'cueWords' in self.data:
self.data['cueWords'] = self.data['cueWords']
else:
self.data['cueWords'] = []
self.data['cueWords'].extend(arr)
return self
def set_anchor(self, url, anchor_text):
"""
设置卡片链接
:param url: 比如:http(s)://....
:param anchor_text: 链接显示的文字
:return:
"""
if url:
self.data['url'] = url
if anchor_text:
self.data['anchorText'] = anchor_text
return self
def get_data(self):
return self.data
def __getattr__(self, item):
"""
添加魔术方法
:param item:
:return:
"""
operation = item[0:3]
field = item[4:]
if operation == 'set' and field and field.lower(
) in self.support_set_field:
def function(*args):
self.data[field.lower()] = args[0]
return function
else:
def function(*args):
logging.info('不支持 %s_%s' % (operation, field))
print('不支持', operation, field)
return function
if __name__ == '__main__':
pass
| <mask token>
import logging
class BaseCard(object):
def __init__(self, field=[]):
self.data = {}
self.support_set_field = field
def add_cue_words(self, arr):
"""
为卡片添加cue words 提示用户输入
:param arr:
:return:
"""
if arr:
if isinstance(arr, str):
arr = [arr]
if 'cueWords' in self.data:
self.data['cueWords'] = self.data['cueWords']
else:
self.data['cueWords'] = []
self.data['cueWords'].extend(arr)
return self
def set_anchor(self, url, anchor_text):
"""
设置卡片链接
:param url: 比如:http(s)://....
:param anchor_text: 链接显示的文字
:return:
"""
if url:
self.data['url'] = url
if anchor_text:
self.data['anchorText'] = anchor_text
return self
def get_data(self):
return self.data
def __getattr__(self, item):
"""
添加魔术方法
:param item:
:return:
"""
operation = item[0:3]
field = item[4:]
if operation == 'set' and field and field.lower(
) in self.support_set_field:
def function(*args):
self.data[field.lower()] = args[0]
return function
else:
def function(*args):
logging.info('不支持 %s_%s' % (operation, field))
print('不支持', operation, field)
return function
if __name__ == '__main__':
pass
| #!/usr/bin/env python3
# -*- coding=utf-8 -*-
# description:
# author:jack
# create_time: 2017/12/30
"""
卡片基类
"""
import logging
class BaseCard(object):
def __init__(self, field=[]):
self.data = {}
self.support_set_field = field
def add_cue_words(self, arr):
"""
为卡片添加cue words 提示用户输入
:param arr:
:return:
"""
if arr:
if isinstance(arr, str):
arr = [arr]
if 'cueWords' in self.data:
self.data['cueWords'] = self.data['cueWords']
else:
self.data['cueWords'] = []
self.data['cueWords'].extend(arr)
return self
def set_anchor(self, url, anchor_text):
"""
设置卡片链接
:param url: 比如:http(s)://....
:param anchor_text: 链接显示的文字
:return:
"""
if url:
self.data['url'] = url
if anchor_text:
self.data['anchorText'] = anchor_text
return self
def get_data(self):
return self.data
def __getattr__(self, item):
"""
添加魔术方法
:param item:
:return:
"""
# 获取操作类型 set
operation = item[0:3]
# 获取被操作的属性 set_xxxx 获取xxxx
field = item[4:]
if operation == 'set' and field and (field.lower() in self.support_set_field):
def function(*args):
self.data[field.lower()] = args[0]
return function
else:
def function(*args):
logging.info("不支持 %s_%s" % (operation, field))
print('不支持', operation, field)
return function
if __name__ == '__main__':
pass
| [
5,
6,
7,
8,
9
] |
113 | 8be4bf5c1a5a7b841edc915793571686ee0bffe6 | <mask token>
| <mask token>
class Solution:
<mask token>
<mask token>
| <mask token>
class Solution:
def remove_element(self, nums: list[int], val: int) ->int:
last_position = 0
for num in nums:
if num != val:
nums[last_position] = num
last_position += 1
return last_position
<mask token>
| """
Given an array nums and a value val, remove all instances of
that value in-place and return the new length.
Do not allocate extra space for another array, you must do
this by modifying the input array in-place with O(1) extra memory.
The order of elements can be changed. It doesn't matter
what you leave beyond the new length.
"""
class Solution:
def remove_element(self, nums: list[int], val: int) -> int:
last_position = 0
for num in nums:
if num != val:
nums[last_position] = num
last_position += 1
return last_position
"""
Complexity: Time : O(n) | Space: O(1)
""" | null | [
0,
1,
2,
3
] |
114 | 1de8c129769827c7fe763ce221cb9fdf8226e473 | def TongTien(m1,m2,s):
if s <=100:
tong = m1 * s
else:
tong = m1 * 100 + m2 * (s-100)
print tong
m1 = float(raw_input("nhap gia m1 :"))
m2 = float(raw_input("nhap gia m2 :"))
s = int (raw_input("nhap so dien da dung :"))
TongTien(m1,m2,s) | null | null | null | null | [
0
] |
115 | 9989d31dfe13809d67f629cc283cd02ce354a74e | <mask token>
def upgrade():
op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=64), nullable=True), sa.
Column('lastname', sa.String(length=64), nullable=True), sa.Column(
'email', sa.String(length=120), nullable=True), sa.Column(
'password', sa.String(length=64), nullable=True), sa.Column(
'address', sa.String(length=120), nullable=True), sa.Column('city',
sa.String(length=64), nullable=True), sa.Column('state', sa.String(
length=64), nullable=True), sa.Column('zipcode', sa.String(length=
64), nullable=True), sa.Column('country', sa.String(length=64),
nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.
Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa
.String(length=64), nullable=True), sa.Column('fitness', sa.Integer
(), nullable=True), sa.Column('experience', sa.Integer(), nullable=
True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('issue', sa.String(length=64), nullable=True), sa
.PrimaryKeyConstraint('id'))
op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('health_id', sa.Integer(), nullable=True), sa.
ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
op.create_table('positions', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('position_type', sa.String(length=64), nullable=True), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
<mask token>
| <mask token>
def upgrade():
op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=64), nullable=True), sa.
Column('lastname', sa.String(length=64), nullable=True), sa.Column(
'email', sa.String(length=120), nullable=True), sa.Column(
'password', sa.String(length=64), nullable=True), sa.Column(
'address', sa.String(length=120), nullable=True), sa.Column('city',
sa.String(length=64), nullable=True), sa.Column('state', sa.String(
length=64), nullable=True), sa.Column('zipcode', sa.String(length=
64), nullable=True), sa.Column('country', sa.String(length=64),
nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.
Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa
.String(length=64), nullable=True), sa.Column('fitness', sa.Integer
(), nullable=True), sa.Column('experience', sa.Integer(), nullable=
True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('issue', sa.String(length=64), nullable=True), sa
.PrimaryKeyConstraint('id'))
op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('health_id', sa.Integer(), nullable=True), sa.
ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
op.create_table('positions', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('position_type', sa.String(length=64), nullable=True), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
def downgrade():
op.drop_table('positions')
op.drop_table('users_health')
op.drop_table('health_types')
op.drop_table('users')
| <mask token>
revision = '35f6815c3112'
down_revision = None
<mask token>
def upgrade():
op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=64), nullable=True), sa.
Column('lastname', sa.String(length=64), nullable=True), sa.Column(
'email', sa.String(length=120), nullable=True), sa.Column(
'password', sa.String(length=64), nullable=True), sa.Column(
'address', sa.String(length=120), nullable=True), sa.Column('city',
sa.String(length=64), nullable=True), sa.Column('state', sa.String(
length=64), nullable=True), sa.Column('zipcode', sa.String(length=
64), nullable=True), sa.Column('country', sa.String(length=64),
nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.
Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa
.String(length=64), nullable=True), sa.Column('fitness', sa.Integer
(), nullable=True), sa.Column('experience', sa.Integer(), nullable=
True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('issue', sa.String(length=64), nullable=True), sa
.PrimaryKeyConstraint('id'))
op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('health_id', sa.Integer(), nullable=True), sa.
ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
op.create_table('positions', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('position_type', sa.String(length=64), nullable=True), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
def downgrade():
op.drop_table('positions')
op.drop_table('users_health')
op.drop_table('health_types')
op.drop_table('users')
| <mask token>
revision = '35f6815c3112'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=64), nullable=True), sa.
Column('lastname', sa.String(length=64), nullable=True), sa.Column(
'email', sa.String(length=120), nullable=True), sa.Column(
'password', sa.String(length=64), nullable=True), sa.Column(
'address', sa.String(length=120), nullable=True), sa.Column('city',
sa.String(length=64), nullable=True), sa.Column('state', sa.String(
length=64), nullable=True), sa.Column('zipcode', sa.String(length=
64), nullable=True), sa.Column('country', sa.String(length=64),
nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.
Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa
.String(length=64), nullable=True), sa.Column('fitness', sa.Integer
(), nullable=True), sa.Column('experience', sa.Integer(), nullable=
True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('issue', sa.String(length=64), nullable=True), sa
.PrimaryKeyConstraint('id'))
op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('health_id', sa.Integer(), nullable=True), sa.
ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
op.create_table('positions', sa.Column('id', sa.Integer(), nullable=
False), sa.Column('user_id', sa.Integer(), nullable=True), sa.
Column('position_type', sa.String(length=64), nullable=True), sa.
ForeignKeyConstraint(['user_id'], ['users.id']), sa.
PrimaryKeyConstraint('id'))
def downgrade():
op.drop_table('positions')
op.drop_table('users_health')
op.drop_table('health_types')
op.drop_table('users')
| """tables
Revision ID: 35f6815c3112
Revises: None
Create Date: 2013-07-28 21:15:38.385006
"""
# revision identifiers, used by Alembic.
revision = '35f6815c3112'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=64), nullable=True),
sa.Column('lastname', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password', sa.String(length=64), nullable=True),
sa.Column('address', sa.String(length=120), nullable=True),
sa.Column('city', sa.String(length=64), nullable=True),
sa.Column('state', sa.String(length=64), nullable=True),
sa.Column('zipcode', sa.String(length=64), nullable=True),
sa.Column('country', sa.String(length=64), nullable=True),
sa.Column('role', sa.Integer(), nullable=True),
sa.Column('dob', sa.DateTime(), nullable=True),
sa.Column('gender', sa.String(length=64), nullable=True),
sa.Column('fitness', sa.Integer(), nullable=True),
sa.Column('experience', sa.Integer(), nullable=True),
sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('health_types',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('issue', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users_health',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('health_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['health_id'], ['health_types.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('positions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('position_type', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('positions')
op.drop_table('users_health')
op.drop_table('health_types')
op.drop_table('users')
### end Alembic commands ###
| [
1,
2,
3,
4,
5
] |
116 | 540ae4be6a41d52d9c803f829fc8b13b523b31bc | <mask token>
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
<mask token>
<mask token>
<mask token>
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
<mask token>
<mask token>
| <mask token>
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
<mask token>
def CheckButton(self, sender):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith(
'_corner.')]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
<mask token>
<mask token>
| <mask token>
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences(self):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_({
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':
'0',
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':
'90'})
self.w.largerOrSmaller.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])
self.w.thresholdAngle.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])
except:
return False
return True
def CheckButton(self, sender):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith(
'_corner.')]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain(self, sender):
try:
fromSelection = self.w.searchForCorner.get()
fromCornerName = self.cornerList[fromSelection]
toSelection = self.w.replaceWithCorner.get()
toCornerName = self.cornerList[toSelection]
smallerThan = bool(self.w.largerOrSmaller.get())
thresholdAngle = float(self.w.thresholdAngle.get())
thisFont = Glyphs.font
masterIDs = [m.id for m in thisFont.masters]
selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
for thisGlyph in selectedGlyphs:
for masterID in masterIDs:
masterLayer = thisGlyph.layers[masterID]
print("Processing %s, layer '%s'" % (thisGlyph.name,
masterLayer.name))
if masterLayer.hints:
for thisHint in masterLayer.hints:
if (thisHint.type == CORNER and thisHint.name ==
fromCornerName):
node = thisHint.originNode
angle = self.angleBetweenVectors(node.
prevNode, node, node.nextNode)
if (smallerThan and angle < thresholdAngle or
not smallerThan and angle > thresholdAngle
):
thisHint.name = toCornerName
print(
'- replaced hint at %i, %i (angle: %.1f)'
% (node.x, node.y, angle))
else:
print(angle)
if not self.SavePreferences(self):
print(
"Note: 'Replace Corners At Certain Angles' could not write preferences."
)
except Exception as e:
Glyphs.showMacroWindow()
print('Replace Corners At Certain Angles Error: %s' % e)
ReplaceCornersAtCertainAngles()
| from __future__ import division, print_function, unicode_literals
__doc__ = """
Replace Corner Components at blunt or acute angles.
"""
import vanilla, math
from Foundation import NSPoint
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences(self):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_({
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':
'0',
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':
'90'})
self.w.largerOrSmaller.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])
self.w.thresholdAngle.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])
except:
return False
return True
def CheckButton(self, sender):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith(
'_corner.')]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain(self, sender):
try:
fromSelection = self.w.searchForCorner.get()
fromCornerName = self.cornerList[fromSelection]
toSelection = self.w.replaceWithCorner.get()
toCornerName = self.cornerList[toSelection]
smallerThan = bool(self.w.largerOrSmaller.get())
thresholdAngle = float(self.w.thresholdAngle.get())
thisFont = Glyphs.font
masterIDs = [m.id for m in thisFont.masters]
selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
for thisGlyph in selectedGlyphs:
for masterID in masterIDs:
masterLayer = thisGlyph.layers[masterID]
print("Processing %s, layer '%s'" % (thisGlyph.name,
masterLayer.name))
if masterLayer.hints:
for thisHint in masterLayer.hints:
if (thisHint.type == CORNER and thisHint.name ==
fromCornerName):
node = thisHint.originNode
angle = self.angleBetweenVectors(node.
prevNode, node, node.nextNode)
if (smallerThan and angle < thresholdAngle or
not smallerThan and angle > thresholdAngle
):
thisHint.name = toCornerName
print(
'- replaced hint at %i, %i (angle: %.1f)'
% (node.x, node.y, angle))
else:
print(angle)
if not self.SavePreferences(self):
print(
"Note: 'Replace Corners At Certain Angles' could not write preferences."
)
except Exception as e:
Glyphs.showMacroWindow()
print('Replace Corners At Certain Angles Error: %s' % e)
ReplaceCornersAtCertainAngles()
| #MenuTitle: Find and Replace Corner Components at Certain Angles
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Replace Corner Components at blunt or acute angles.
"""
import vanilla, math
from Foundation import NSPoint
class ReplaceCornersAtCertainAngles( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 250
windowHeight = 140
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Replace Corners At Certain Angles", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow" # stores last window position and size
)
self.cornerList = self.getAllCorners()
# UI elements:
self.w.text_1 = vanilla.TextBox( (15-1, 12+2, 75, 14), "Replace", sizeStyle='small' )
self.w.searchForCorner = vanilla.PopUpButton( (15+60, 12, -15, 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox( (15-1, 36+2, 75, 14), "with", sizeStyle='small' )
self.w.replaceWithCorner = vanilla.PopUpButton( (15+60, 36, -15, 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox( (15-1, 60+2, 75, 14), "at angles", sizeStyle='small' )
self.w.largerOrSmaller = vanilla.PopUpButton( (15+60, 60, 70, 17), ("larger","smaller"), sizeStyle='small', callback=self.SavePreferences )
self.w.text_3b = vanilla.TextBox( (150, 60+2, 30, 14), "than", sizeStyle='small' )
self.w.thresholdAngle = vanilla.EditText( (180, 60, -15, 15+3), "90", sizeStyle = 'small')
# Run Button:
self.w.runButton = vanilla.Button((-80-15, -20-15, -15, -15), "Replace", sizeStyle='regular', callback=self.ReplaceCornersAtCertainAnglesMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller"] = self.w.largerOrSmaller.get()
Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle"] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences( self ):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_(
{
"com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller": "0",
"com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle": "90"
}
)
self.w.largerOrSmaller.set( Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller"] )
self.w.thresholdAngle.set( Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle"] )
except:
return False
return True
def CheckButton( self, sender ):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith("_corner.")]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x-P1.x, P0.y-P1.y)
vector2 = NSPoint(P2.x-P1.x, P2.y-P1.y)
angle1 = math.degrees(math.atan2(vector1.y,vector1.x))
angle2 = math.degrees(math.atan2(vector2.y,vector2.x))
angleBetweenVectors = ( angle1 - angle2 ) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain( self, sender ):
try:
fromSelection = self.w.searchForCorner.get()
fromCornerName = self.cornerList[fromSelection]
toSelection = self.w.replaceWithCorner.get()
toCornerName = self.cornerList[toSelection]
smallerThan = bool(self.w.largerOrSmaller.get())
thresholdAngle = float(self.w.thresholdAngle.get())
thisFont = Glyphs.font # frontmost font
masterIDs = [m.id for m in thisFont.masters]
selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
for thisGlyph in selectedGlyphs:
for masterID in masterIDs:
masterLayer = thisGlyph.layers[masterID]
print("Processing %s, layer '%s'" % ( thisGlyph.name, masterLayer.name ))
if masterLayer.hints:
for thisHint in masterLayer.hints:
if thisHint.type == CORNER and thisHint.name == fromCornerName:
node = thisHint.originNode
angle = self.angleBetweenVectors( node.prevNode, node, node.nextNode )
if (smallerThan and angle < thresholdAngle) or (not smallerThan and angle > thresholdAngle):
thisHint.name = toCornerName
print("- replaced hint at %i, %i (angle: %.1f)" % (node.x, node.y, angle))
else:
print(angle)
if not self.SavePreferences( self ):
print("Note: 'Replace Corners At Certain Angles' could not write preferences.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Replace Corners At Certain Angles Error: %s" % e)
ReplaceCornersAtCertainAngles() | [
4,
6,
9,
11,
12
] |
117 | 91df15d6d89d070677704572d35218558317a6ec | <mask token>
| <mask token>
ax.plot(data['Date'], data['HCHFI'], label='HCHFI')
ax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')
ax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=
'Hushen300 Index')
plt.xlabel('Time/year')
plt.ylabel('Index Point')
plt.title('Comparison of HCHFI,HS300 and SSE Composite Index')
plt.legend(loc='upper right')
plt.ylim(0, 7000)
plt.show()
| <mask token>
data = pd.read_excel('data_SHA.xls')
fig, ax = plt.subplots()
ax.plot(data['Date'], data['HCHFI'], label='HCHFI')
ax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')
ax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=
'Hushen300 Index')
plt.xlabel('Time/year')
plt.ylabel('Index Point')
plt.title('Comparison of HCHFI,HS300 and SSE Composite Index')
plt.legend(loc='upper right')
plt.ylim(0, 7000)
plt.show()
| import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
data = pd.read_excel('data_SHA.xls')
fig, ax = plt.subplots()
ax.plot(data['Date'], data['HCHFI'], label='HCHFI')
ax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')
ax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=
'Hushen300 Index')
plt.xlabel('Time/year')
plt.ylabel('Index Point')
plt.title('Comparison of HCHFI,HS300 and SSE Composite Index')
plt.legend(loc='upper right')
plt.ylim(0, 7000)
plt.show()
| import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
data=pd.read_excel("data_SHA.xls")
fig,ax=plt.subplots()
ax.plot(data["Date"],data["HCHFI"],label="HCHFI")
ax.plot(data["Date"],data["SHA"]/2.67547,label="SSE Composite Index")
ax.plot(data["Date"],data["Hushen300 Index"]/3.20393,label="Hushen300 Index")
plt.xlabel("Time/year")
plt.ylabel("Index Point")
plt.title("Comparison of HCHFI,HS300 and SSE Composite Index")
plt.legend(loc='upper right')
plt.ylim(0,7000)
plt.show() | [
0,
1,
2,
3,
4
] |
118 | b38d23a7de3c805ddde4ed2d236e3c6e7bb5e2d0 | <mask token>
| <mask token>
for i in NICList:
os.system('sudo ifconfig ' + i + ' promisc')
os.system('sudo python ./src/top.py')
| <mask token>
NICList = [i for i in netifaces.interfaces() if i != 'lo']
for i in NICList:
os.system('sudo ifconfig ' + i + ' promisc')
os.system('sudo python ./src/top.py')
| import os
import netifaces
NICList = [i for i in netifaces.interfaces() if i != 'lo']
for i in NICList:
os.system('sudo ifconfig ' + i + ' promisc')
os.system('sudo python ./src/top.py')
| #!/usr/bin/python3
import os
import netifaces
# nicList = netifaces.interfaces()
NICList = [i for i in netifaces.interfaces() if i != "lo"]
for i in NICList:
os.system("sudo ifconfig " + i + " promisc")
os.system("sudo python ./src/top.py")
| [
0,
1,
2,
3,
4
] |
119 | 203e678d565753bb51e1bbd90ffec0f3260b22fb | <mask token>
class Metadata(object):
def __init__(self, num_groups, data_type, dimension):
self.num_groups = num_groups
self.data_type = data_type
self.dimension = dimension
self.groups = dict()
def add_group(self, name, path, val_frac):
print('group', name)
self.groups[name] = dict()
all_partitions = []
hdfs_used = check_hdfs_path(path)
client = None
if hdfs_used:
client = InsecureClient('http://namenode:9870')
path = path[len('hdfs://'):]
def fetch_info(filename):
if filename.endswith('.csv'):
file = path + filename
return {'file_path': file, 'num_examples': ray.get(
line_count_remote.remote(file, hdfs_used))}
if filename.endswith('.png'):
file = path + filename
return {'file_path': file, 'num_examples': 1}
if hdfs_used:
files = client.list(path)
executor = ThreadPoolExecutor(max_workers=160)
all_partitions += [i for i in executor.map(fetch_info, files) if
i is not None]
else:
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith('.csv'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': line_count(file)})
if filename.endswith('.png'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': 1})
num_files = len(all_partitions)
if val_frac * num_files < 1:
df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(
path + '*.csv')], ignore_index=True)
num_examples = df.shape[0]
val_examples = int(num_examples * val_frac)
val = df[:val_examples if val_examples != 0 else 1]
train = df[val_examples:]
for f in glob.glob(path + '*'):
os.remove(f)
train.to_csv(path + 'train.csv', index=False)
val.to_csv(path + 'val.csv', index=False)
self.groups[name]['train_files'] = [{'file_path': path +
'train.csv', 'num_examples': train.shape[0]}]
self.groups[name]['val_files'] = [{'file_path': path +
'val.csv', 'num_examples': val.shape[0]}]
self.groups[name]['total_examples'] = train.shape[0]
else:
num_val_files = int(val_frac * num_files)
self.groups[name]['train_files'] = all_partitions[:num_files -
num_val_files]
self.groups[name]['val_files'] = all_partitions[num_files -
num_val_files:]
self.groups[name]['total_examples'] = sum([p['num_examples'] for
p in self.groups[name]['train_files']])
def to_json(self):
return {'num_groups': self.num_groups, 'type': self.data_type,
'dimension': self.dimension, 'groups': self.groups}
<mask token>
| <mask token>
def line_count(filename, hdfs_used=False, client=None):
if hdfs_used:
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
<mask token>
def check_hdfs_path(path):
return path.startswith('hdfs')
class Metadata(object):
def __init__(self, num_groups, data_type, dimension):
self.num_groups = num_groups
self.data_type = data_type
self.dimension = dimension
self.groups = dict()
def add_group(self, name, path, val_frac):
print('group', name)
self.groups[name] = dict()
all_partitions = []
hdfs_used = check_hdfs_path(path)
client = None
if hdfs_used:
client = InsecureClient('http://namenode:9870')
path = path[len('hdfs://'):]
def fetch_info(filename):
if filename.endswith('.csv'):
file = path + filename
return {'file_path': file, 'num_examples': ray.get(
line_count_remote.remote(file, hdfs_used))}
if filename.endswith('.png'):
file = path + filename
return {'file_path': file, 'num_examples': 1}
if hdfs_used:
files = client.list(path)
executor = ThreadPoolExecutor(max_workers=160)
all_partitions += [i for i in executor.map(fetch_info, files) if
i is not None]
else:
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith('.csv'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': line_count(file)})
if filename.endswith('.png'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': 1})
num_files = len(all_partitions)
if val_frac * num_files < 1:
df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(
path + '*.csv')], ignore_index=True)
num_examples = df.shape[0]
val_examples = int(num_examples * val_frac)
val = df[:val_examples if val_examples != 0 else 1]
train = df[val_examples:]
for f in glob.glob(path + '*'):
os.remove(f)
train.to_csv(path + 'train.csv', index=False)
val.to_csv(path + 'val.csv', index=False)
self.groups[name]['train_files'] = [{'file_path': path +
'train.csv', 'num_examples': train.shape[0]}]
self.groups[name]['val_files'] = [{'file_path': path +
'val.csv', 'num_examples': val.shape[0]}]
self.groups[name]['total_examples'] = train.shape[0]
else:
num_val_files = int(val_frac * num_files)
self.groups[name]['train_files'] = all_partitions[:num_files -
num_val_files]
self.groups[name]['val_files'] = all_partitions[num_files -
num_val_files:]
self.groups[name]['total_examples'] = sum([p['num_examples'] for
p in self.groups[name]['train_files']])
def to_json(self):
return {'num_groups': self.num_groups, 'type': self.data_type,
'dimension': self.dimension, 'groups': self.groups}
<mask token>
| <mask token>
def line_count(filename, hdfs_used=False, client=None):
if hdfs_used:
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
@ray.remote(num_cpus=1, resources={'CustomResource': 1})
def line_count_remote(filename, hdfs_used=False, client=None):
if hdfs_used:
if client is None:
client = InsecureClient('http://namenode:9870')
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
def check_hdfs_path(path):
return path.startswith('hdfs')
class Metadata(object):
def __init__(self, num_groups, data_type, dimension):
self.num_groups = num_groups
self.data_type = data_type
self.dimension = dimension
self.groups = dict()
def add_group(self, name, path, val_frac):
print('group', name)
self.groups[name] = dict()
all_partitions = []
hdfs_used = check_hdfs_path(path)
client = None
if hdfs_used:
client = InsecureClient('http://namenode:9870')
path = path[len('hdfs://'):]
def fetch_info(filename):
if filename.endswith('.csv'):
file = path + filename
return {'file_path': file, 'num_examples': ray.get(
line_count_remote.remote(file, hdfs_used))}
if filename.endswith('.png'):
file = path + filename
return {'file_path': file, 'num_examples': 1}
if hdfs_used:
files = client.list(path)
executor = ThreadPoolExecutor(max_workers=160)
all_partitions += [i for i in executor.map(fetch_info, files) if
i is not None]
else:
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith('.csv'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': line_count(file)})
if filename.endswith('.png'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': 1})
num_files = len(all_partitions)
if val_frac * num_files < 1:
df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(
path + '*.csv')], ignore_index=True)
num_examples = df.shape[0]
val_examples = int(num_examples * val_frac)
val = df[:val_examples if val_examples != 0 else 1]
train = df[val_examples:]
for f in glob.glob(path + '*'):
os.remove(f)
train.to_csv(path + 'train.csv', index=False)
val.to_csv(path + 'val.csv', index=False)
self.groups[name]['train_files'] = [{'file_path': path +
'train.csv', 'num_examples': train.shape[0]}]
self.groups[name]['val_files'] = [{'file_path': path +
'val.csv', 'num_examples': val.shape[0]}]
self.groups[name]['total_examples'] = train.shape[0]
else:
num_val_files = int(val_frac * num_files)
self.groups[name]['train_files'] = all_partitions[:num_files -
num_val_files]
self.groups[name]['val_files'] = all_partitions[num_files -
num_val_files:]
self.groups[name]['total_examples'] = sum([p['num_examples'] for
p in self.groups[name]['train_files']])
def to_json(self):
return {'num_groups': self.num_groups, 'type': self.data_type,
'dimension': self.dimension, 'groups': self.groups}
def criteo_etl(base_path, in_file, block_size, val_frac):
spark = SparkSession.builder.appName('PreprocessCriteoData').config(
'spark.sql.files.maxRecordsPerFile', int(block_size / (4 * (4096 *
2 + 4) / 1024 / 1024))).getOrCreate()
sc = spark.sparkContext
field = [StructField('Sale', IntegerType(), True), StructField(
'SalesAmount', FloatType(), True), StructField('ConversionDelay',
FloatType(), True), StructField('ClickTimestamp', StringType(),
True), StructField('NumClicksPerWeek', FloatType(), True),
StructField('ProductPrice', FloatType(), True), StructField(
'ProductAgeGroup', StringType(), True), StructField('DeviceType',
StringType(), True), StructField('AudienceId', StringType(), True),
StructField('ProductGender', StringType(), True), StructField(
'ProductBrand', StringType(), True), StructField('ProductCategory1',
StringType(), True), StructField('ProductCategory2', StringType(),
True), StructField('ProductCategory3', StringType(), True),
StructField('ProductCategory4', StringType(), True), StructField(
'ProductCategory5', StringType(), True), StructField(
'ProductCategory6', StringType(), True), StructField(
'ProductCategory7', StringType(), True), StructField(
'ProductCountry', StringType(), True), StructField('ProductId',
StringType(), True), StructField('ProductTitle', StringType(), True
), StructField('PartnerId', StringType(), True), StructField(
'UserId', StringType(), True)]
schema = StructType(field)
df = spark.read.format('csv').option('delimiter', '\t').schema(schema
).load(base_path + in_file)
hasher = FeatureHasher(numFeatures=4096 * 2, inputCols=[
'ProductAgeGroup', 'DeviceType', 'AudienceId', 'ProductGender',
'ProductBrand', 'ProductCategory1', 'ProductCategory2',
'ProductCategory3', 'ProductCategory4', 'ProductCategory5',
'ProductCategory6', 'ProductCategory7', 'ProductId', 'UserId'],
outputCol='CatFeatures')
product_indexer = StringIndexer(inputCol='ProductCountry', outputCol=
'ProductCountryIndexed')
partner_indexer = StringIndexer(inputCol='PartnerId', outputCol=
'PartnerIdIndexed')
vector_assembler = VectorAssembler(inputCols=['NumClicksPerWeek',
'ProductPrice'], outputCol='NumFeatures')
scaler = StandardScaler(inputCol='NumFeatures', outputCol=
'ScaledNumFeatures')
final_assembler = VectorAssembler(inputCols=['ScaledNumFeatures',
'CatFeatures'], outputCol='Features')
pipeline = Pipeline().setStages([product_indexer, partner_indexer,
hasher, vector_assembler, scaler, final_assembler])
transformedDf = pipeline.fit(df).transform(df).select('Sale',
'ProductCountryIndexed', 'PartnerIdIndexed', 'Features')
asDense = udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))
transformedDf = transformedDf.withColumn('Features', asDense('Features'))
def extract(row):
return (row.Sale, row.ProductCountryIndexed, row.PartnerIdIndexed
) + tuple(row.Features)
transformedDf = transformedDf.rdd.map(extract).toDF(['Sale',
'ProductCountryIndexed', 'PartnerIdIndexed'])
transformedDf.write.partitionBy('ProductCountryIndexed').csv(base_path +
'country/')
| from pyspark.sql import SparkSession
from pyspark.ml.feature import FeatureHasher, StringIndexer, VectorAssembler, StandardScaler
from pyspark.sql.types import *
from pyspark.ml import Pipeline
from pyspark.sql.functions import udf
import random
import pandas as pd
import os
import subprocess
import glob
import json
from hdfs import InsecureClient
import ray
from concurrent.futures import ThreadPoolExecutor
def line_count(filename, hdfs_used=False, client=None):
if hdfs_used:
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
@ray.remote(num_cpus=1, resources={'CustomResource': 1})
def line_count_remote(filename, hdfs_used=False, client=None):
if hdfs_used:
if client is None:
client = InsecureClient('http://namenode:9870')
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
def check_hdfs_path(path):
return path.startswith('hdfs')
class Metadata(object):
def __init__(self, num_groups, data_type, dimension):
self.num_groups = num_groups
self.data_type = data_type
self.dimension = dimension
self.groups = dict()
def add_group(self, name, path, val_frac):
print('group', name)
self.groups[name] = dict()
all_partitions = []
hdfs_used = check_hdfs_path(path)
client = None
if hdfs_used:
client = InsecureClient('http://namenode:9870')
path = path[len('hdfs://'):]
def fetch_info(filename):
if filename.endswith('.csv'):
file = path + filename
return {'file_path': file, 'num_examples': ray.get(
line_count_remote.remote(file, hdfs_used))}
if filename.endswith('.png'):
file = path + filename
return {'file_path': file, 'num_examples': 1}
if hdfs_used:
files = client.list(path)
executor = ThreadPoolExecutor(max_workers=160)
all_partitions += [i for i in executor.map(fetch_info, files) if
i is not None]
else:
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith('.csv'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': line_count(file)})
if filename.endswith('.png'):
file = path + filename
all_partitions.append({'file_path': file,
'num_examples': 1})
num_files = len(all_partitions)
if val_frac * num_files < 1:
df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(
path + '*.csv')], ignore_index=True)
num_examples = df.shape[0]
val_examples = int(num_examples * val_frac)
val = df[:val_examples if val_examples != 0 else 1]
train = df[val_examples:]
for f in glob.glob(path + '*'):
os.remove(f)
train.to_csv(path + 'train.csv', index=False)
val.to_csv(path + 'val.csv', index=False)
self.groups[name]['train_files'] = [{'file_path': path +
'train.csv', 'num_examples': train.shape[0]}]
self.groups[name]['val_files'] = [{'file_path': path +
'val.csv', 'num_examples': val.shape[0]}]
self.groups[name]['total_examples'] = train.shape[0]
else:
num_val_files = int(val_frac * num_files)
self.groups[name]['train_files'] = all_partitions[:num_files -
num_val_files]
self.groups[name]['val_files'] = all_partitions[num_files -
num_val_files:]
self.groups[name]['total_examples'] = sum([p['num_examples'] for
p in self.groups[name]['train_files']])
def to_json(self):
return {'num_groups': self.num_groups, 'type': self.data_type,
'dimension': self.dimension, 'groups': self.groups}
def criteo_etl(base_path, in_file, block_size, val_frac):
spark = SparkSession.builder.appName('PreprocessCriteoData').config(
'spark.sql.files.maxRecordsPerFile', int(block_size / (4 * (4096 *
2 + 4) / 1024 / 1024))).getOrCreate()
sc = spark.sparkContext
field = [StructField('Sale', IntegerType(), True), StructField(
'SalesAmount', FloatType(), True), StructField('ConversionDelay',
FloatType(), True), StructField('ClickTimestamp', StringType(),
True), StructField('NumClicksPerWeek', FloatType(), True),
StructField('ProductPrice', FloatType(), True), StructField(
'ProductAgeGroup', StringType(), True), StructField('DeviceType',
StringType(), True), StructField('AudienceId', StringType(), True),
StructField('ProductGender', StringType(), True), StructField(
'ProductBrand', StringType(), True), StructField('ProductCategory1',
StringType(), True), StructField('ProductCategory2', StringType(),
True), StructField('ProductCategory3', StringType(), True),
StructField('ProductCategory4', StringType(), True), StructField(
'ProductCategory5', StringType(), True), StructField(
'ProductCategory6', StringType(), True), StructField(
'ProductCategory7', StringType(), True), StructField(
'ProductCountry', StringType(), True), StructField('ProductId',
StringType(), True), StructField('ProductTitle', StringType(), True
), StructField('PartnerId', StringType(), True), StructField(
'UserId', StringType(), True)]
schema = StructType(field)
df = spark.read.format('csv').option('delimiter', '\t').schema(schema
).load(base_path + in_file)
hasher = FeatureHasher(numFeatures=4096 * 2, inputCols=[
'ProductAgeGroup', 'DeviceType', 'AudienceId', 'ProductGender',
'ProductBrand', 'ProductCategory1', 'ProductCategory2',
'ProductCategory3', 'ProductCategory4', 'ProductCategory5',
'ProductCategory6', 'ProductCategory7', 'ProductId', 'UserId'],
outputCol='CatFeatures')
product_indexer = StringIndexer(inputCol='ProductCountry', outputCol=
'ProductCountryIndexed')
partner_indexer = StringIndexer(inputCol='PartnerId', outputCol=
'PartnerIdIndexed')
vector_assembler = VectorAssembler(inputCols=['NumClicksPerWeek',
'ProductPrice'], outputCol='NumFeatures')
scaler = StandardScaler(inputCol='NumFeatures', outputCol=
'ScaledNumFeatures')
final_assembler = VectorAssembler(inputCols=['ScaledNumFeatures',
'CatFeatures'], outputCol='Features')
pipeline = Pipeline().setStages([product_indexer, partner_indexer,
hasher, vector_assembler, scaler, final_assembler])
transformedDf = pipeline.fit(df).transform(df).select('Sale',
'ProductCountryIndexed', 'PartnerIdIndexed', 'Features')
asDense = udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))
transformedDf = transformedDf.withColumn('Features', asDense('Features'))
def extract(row):
return (row.Sale, row.ProductCountryIndexed, row.PartnerIdIndexed
) + tuple(row.Features)
transformedDf = transformedDf.rdd.map(extract).toDF(['Sale',
'ProductCountryIndexed', 'PartnerIdIndexed'])
transformedDf.write.partitionBy('ProductCountryIndexed').csv(base_path +
'country/')
| #Copyright 2020 Side Li, Arun Kumar
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from pyspark.sql import SparkSession
from pyspark.ml.feature import FeatureHasher, StringIndexer, VectorAssembler, StandardScaler
from pyspark.sql.types import *
from pyspark.ml import Pipeline
from pyspark.sql.functions import udf
import random
import pandas as pd
import os
import subprocess
import glob
import json
from hdfs import InsecureClient
import ray
from concurrent.futures import ThreadPoolExecutor
def line_count(filename, hdfs_used=False, client=None):
if hdfs_used:
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
@ray.remote(num_cpus=1, resources={"CustomResource":1})
def line_count_remote(filename, hdfs_used=False, client=None):
if hdfs_used:
if client is None:
client = InsecureClient('http://namenode:9870')
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
def check_hdfs_path(path):
return path.startswith("hdfs")
class Metadata(object):
def __init__(self, num_groups, data_type, dimension):
self.num_groups = num_groups
self.data_type = data_type
self.dimension = dimension
self.groups = dict()
def add_group(self, name, path, val_frac):
print("group", name)
self.groups[name] = dict()
all_partitions = []
hdfs_used = check_hdfs_path(path)
client = None
if hdfs_used:
client = InsecureClient('http://namenode:9870')
path = path[len("hdfs://"):]
def fetch_info(filename):
if filename.endswith(".csv"):
file = path + filename
return {
"file_path": file,
"num_examples": ray.get(line_count_remote.remote(file, hdfs_used))
}
if filename.endswith(".png"):
file = path + filename
return {
"file_path": file,
"num_examples": 1
}
if hdfs_used:
files = client.list(path)
executor = ThreadPoolExecutor(max_workers=160)
all_partitions += [i for i in executor.map(fetch_info, files) if i is not None]
else:
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".csv"):
file = path + filename
all_partitions.append({
"file_path": file,
"num_examples": line_count(file)
})
if filename.endswith(".png"):
file = path + filename
all_partitions.append({
"file_path": file,
"num_examples": 1
})
num_files = len(all_partitions)
if val_frac * num_files < 1:
df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(path + "*.csv")], ignore_index=True)
num_examples = df.shape[0]
val_examples = int(num_examples * val_frac)
val = df[:(val_examples if val_examples != 0 else 1)]
train = df[val_examples:]
for f in glob.glob(path + "*"):
os.remove(f)
train.to_csv(path + "train.csv", index=False)
val.to_csv(path + "val.csv", index=False)
self.groups[name]["train_files"] = [{
"file_path": path + "train.csv",
"num_examples": train.shape[0]
}]
self.groups[name]["val_files"] = [{
"file_path": path + "val.csv",
"num_examples": val.shape[0]
}]
self.groups[name]["total_examples"] = train.shape[0]
else:
num_val_files = int(val_frac * num_files)
self.groups[name]["train_files"] = all_partitions[:num_files - num_val_files]
self.groups[name]["val_files"] = all_partitions[num_files - num_val_files:]
self.groups[name]["total_examples"] = sum([p["num_examples"] for p in self.groups[name]["train_files"]])
def to_json(self):
return {
"num_groups": self.num_groups,
"type": self.data_type,
"dimension": self.dimension,
"groups": self.groups
}
def criteo_etl(base_path, in_file, block_size, val_frac):
spark = SparkSession\
.builder\
.appName("PreprocessCriteoData") \
.config("spark.sql.files.maxRecordsPerFile", int(block_size/(4 * (4096*2 + 4) / 1024 / 1024)))\
.getOrCreate()
sc = spark.sparkContext
field = [StructField("Sale", IntegerType(), True),
StructField("SalesAmount", FloatType(), True),
StructField("ConversionDelay", FloatType(), True),
StructField("ClickTimestamp", StringType(), True),
StructField("NumClicksPerWeek", FloatType(), True),
StructField("ProductPrice", FloatType(), True),
StructField("ProductAgeGroup", StringType(), True),
StructField("DeviceType", StringType(), True),
StructField("AudienceId", StringType(), True),
StructField("ProductGender", StringType(), True),
StructField("ProductBrand", StringType(), True),
StructField("ProductCategory1", StringType(), True),
StructField("ProductCategory2", StringType(), True),
StructField("ProductCategory3", StringType(), True),
StructField("ProductCategory4", StringType(), True),
StructField("ProductCategory5", StringType(), True),
StructField("ProductCategory6", StringType(), True),
StructField("ProductCategory7", StringType(), True),
StructField("ProductCountry", StringType(), True),
StructField("ProductId", StringType(), True),
StructField("ProductTitle", StringType(), True),
StructField("PartnerId", StringType(), True),
StructField("UserId", StringType(), True)]
schema = StructType(field)
df = spark.read.format("csv").option("delimiter", "\t").schema(schema).load(base_path + in_file)
hasher = FeatureHasher(numFeatures=4096 * 2,
inputCols=["ProductAgeGroup", "DeviceType", "AudienceId",
"ProductGender", "ProductBrand", "ProductCategory1", "ProductCategory2",
"ProductCategory3", "ProductCategory4", "ProductCategory5",
"ProductCategory6", "ProductCategory7", "ProductId", "UserId"],
outputCol="CatFeatures")
product_indexer = StringIndexer(inputCol="ProductCountry", outputCol="ProductCountryIndexed")
partner_indexer = StringIndexer(inputCol="PartnerId", outputCol="PartnerIdIndexed")
vector_assembler = VectorAssembler(inputCols=["NumClicksPerWeek", "ProductPrice"], outputCol="NumFeatures")
scaler = StandardScaler(inputCol="NumFeatures", outputCol="ScaledNumFeatures")
final_assembler = VectorAssembler(inputCols=["ScaledNumFeatures", "CatFeatures"], outputCol="Features")
pipeline = Pipeline().setStages([product_indexer, partner_indexer, hasher,
vector_assembler, scaler, final_assembler])
transformedDf = pipeline.fit(df).transform(df).select("Sale", "ProductCountryIndexed", "PartnerIdIndexed",
"Features")
asDense = udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))
transformedDf = transformedDf.withColumn("Features", asDense("Features"))
def extract(row):
return (row.Sale, row.ProductCountryIndexed, row.PartnerIdIndexed,) + tuple(row.Features)
transformedDf = transformedDf.rdd.map(extract).toDF(["Sale", "ProductCountryIndexed", "PartnerIdIndexed"])
transformedDf.write.partitionBy("ProductCountryIndexed").csv(base_path + "country/")
| [
4,
6,
8,
9,
10
] |
120 | bf5422792533f85967a5573d9e6f370a7967a914 | <mask token>
| <mask token>
for i in range(2, N + 1):
s.add(i)
for num in sorted(s):
k = num + num
while k <= N:
if k in s:
s.remove(k)
k += num
print('Primes:', end=' ')
for num in sorted(s):
print(num, end=' ')
| N = int(input('Max value N? '))
s = set()
for i in range(2, N + 1):
s.add(i)
for num in sorted(s):
k = num + num
while k <= N:
if k in s:
s.remove(k)
k += num
print('Primes:', end=' ')
for num in sorted(s):
print(num, end=' ')
| N = int(input("Max value N? "))
s = set()
for i in range(2, N + 1):
s.add(i)
for num in sorted(s):
k = num + num
while k <= N:
if k in s:
s.remove(k)
k += num
print("Primes:", end = " ")
for num in sorted(s):
print(num, end = " ")
| null | [
0,
1,
2,
3
] |
121 | 9c4676edbeef3748a4947f827fefa29e95674bfa | <mask token>
def Set_Date():
global end_date
global start_date
end_date = datetime.datetime(2017, 1, 30)
start_date = end_date
print(end_date)
return
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
<mask token>
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
<mask token>
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
def Set_Model():
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
<mask token>
| <mask token>
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
print('Possible options: ONGC.NS, ')
return
def Set_Date():
global end_date
global start_date
end_date = datetime.datetime(2017, 1, 30)
start_date = end_date
print(end_date)
return
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
def Add_Features_x():
global df
df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100
df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
def Add_Features_y():
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
def Set_Model():
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
return
<mask token>
| <mask token>
style.use('ggplot')
<mask token>
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
print('Possible options: ONGC.NS, ')
return
def Set_Date():
global end_date
global start_date
end_date = datetime.datetime(2017, 1, 30)
start_date = end_date
print(end_date)
return
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
def Add_Features_x():
global df
df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100
df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
def Add_Features_y():
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
def Set_Model():
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
return
Set_Ticker()
Actual_Value()
Set_Date()
start_date += datetime.timedelta(weeks=-100)
Data_frame_Create()
Add_Features_x()
Forcast_Values()
Add_Features_y()
Setup_Validate_data()
Set_Model()
get_Accuracy()
Prediction()
print(stockTicker.partition('.')[0])
print('Accuracy: ' + str(accuracy * 100))
print('Next day value: ' + str(forecast_set[0]))
print(forecast_set)
print('3rd day value: ' + str(forecast_set[1]))
print('5th day value: ' + str(forecast_set[2]))
print('7th day value: ' + str(forecast_set[3]))
print('10th day value: ' + str(forecast_set[4]))
<mask token>
with open('mycsvfile.csv', 'wb') as f:
w = csv.writer(f)
w.writerows(somedict.items())
| import pandas as pd
import math, datetime
import time
import numpy as np
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import cross_validation, preprocessing, svm
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib import style
style.use('ggplot')
import datetime
from pandas_datareader import data
import csv
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
print('Possible options: ONGC.NS, ')
return
def Set_Date():
global end_date
global start_date
end_date = datetime.datetime(2017, 1, 30)
start_date = end_date
print(end_date)
return
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
def Add_Features_x():
global df
df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100
df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
def Add_Features_y():
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
def Set_Model():
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
return
Set_Ticker()
Actual_Value()
Set_Date()
start_date += datetime.timedelta(weeks=-100)
Data_frame_Create()
Add_Features_x()
Forcast_Values()
Add_Features_y()
Setup_Validate_data()
Set_Model()
get_Accuracy()
Prediction()
print(stockTicker.partition('.')[0])
print('Accuracy: ' + str(accuracy * 100))
print('Next day value: ' + str(forecast_set[0]))
print(forecast_set)
print('3rd day value: ' + str(forecast_set[1]))
print('5th day value: ' + str(forecast_set[2]))
print('7th day value: ' + str(forecast_set[3]))
print('10th day value: ' + str(forecast_set[4]))
somedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay
=forecast_set[2])
with open('mycsvfile.csv', 'wb') as f:
w = csv.writer(f)
w.writerows(somedict.items())
| #https://www.youtube.com/watch?v=CQ5kc_j4RjA
import pandas as pd
#import quandl
import math, datetime
import time
import numpy as np
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import cross_validation, preprocessing, svm
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib import style
style.use ('ggplot')
import datetime
from pandas_datareader import data
import csv
#Setting Companies
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
## stockTicker = input("Enter the Ticker: ")
print ("Possible options: ONGC.NS, ")
return
def Set_Date():
#Setting Date
global end_date
global start_date
## end_date = input("Enter prediction date(YYYY-MM-DD):")
end_date = datetime.datetime(2017,1,30)
start_date = end_date
print (end_date)
return
def Actual_Value():
#Actual Value
global df
print("The Actual Closing Value is Displayed below")
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao=df['Close']
print (str(ao))
return
def Add_Features_x():
#Create Features - X
global df
df ['OC_Change'] = (df['Close']-df['Open']/df['Open']*100)
df ['HL_Change'] = (df['High']-df['Low']/df['Low']*100)
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
#Forecast
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01*len(df)))
return
def Add_Features_y():
#Label - y
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
#Set X and y
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'],1))
y = np.array(df['label'])
#Split Training and Testing Data
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)
return
def Set_Model():
#Set Model for ML
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
#Accuracy of Test Data
global accuracy
accuracy = clf.score(X_test, y_test)
return()
def Prediction():
#Predict Next Values
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
#Creat a DataFrame
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
## df.plot(kind="box", subplots=True, layout=(1,6), sharex=False, sharey=False)
## plt.show()
## df.hist()
## plt.show()
## scatter_matrix(df)
## plt.show()
return
Set_Ticker()
Actual_Value()
#Setting Date
Set_Date()
#Gap of 1 month in time
#n = int(input("Enter the No. of Years in Months:"))
start_date += datetime.timedelta(weeks=-100)
#Creat a DataFrame
Data_frame_Create()
#Create Features - X
Add_Features_x()
#Forecast
Forcast_Values()
#Label - y
Add_Features_y()
#Split Training and Testing Data
Setup_Validate_data()
#Set Model for ML
Set_Model()
#Accuracy of Test Data
get_Accuracy()
#Predict Next Values
Prediction()
print (stockTicker.partition('.')[0])
##print ("Start Date:" + str(start_date))
print ("Accuracy: " + str(accuracy*100))
print ("Next day value: "+ str(forecast_set[0]))
print (forecast_set)
print ("3rd day value: "+ str(forecast_set[1]))
print ("5th day value: "+ str(forecast_set[2]))
print ("7th day value: "+ str(forecast_set[3]))
print ("10th day value: "+ str(forecast_set[4]))
##dict = {'Next Day':forecast_set[0],'3rd Day':forecast_set[1],'5th Day':forecast_set[2]}
##print (dict)
somedict = dict(NextDay=forecast_set[0],ThirdDay=forecast_set[1],FifthDay=forecast_set[2])
with open('mycsvfile.csv','wb') as f:
w = csv.writer(f)
w.writerows(somedict.items())
| [
7,
11,
12,
14,
15
] |
122 | 4e61f9fefe8e6b5203ba05ac9bd626db1102df36 | #!/usr/bin/python
"""
Created on Aug 1 2014
"""
import rospy
def my_callback(event):
print 'Timer called at ' + str(event.current_real)
if __name__ == '__main__':
rospy.init_node('timer')
rospy.Timer(rospy.Duration(2), my_callback)
rospy.spin() | null | null | null | null | [
0
] |
123 | 53573a21364e9dfef9ed1164185ab441dbc29601 | <mask token>
class BostaForm(forms.Form):
maxPrice = forms.IntegerField()
livingArea = forms.IntegerField()
room = forms.IntegerField()
class BostaIdForm(forms.Form):
bostaId = forms.IntegerField()
class SearchBosta(forms.Form):
search_query = forms.CharField()
<mask token>
| <mask token>
class BostaForm(forms.Form):
maxPrice = forms.IntegerField()
livingArea = forms.IntegerField()
room = forms.IntegerField()
class BostaIdForm(forms.Form):
bostaId = forms.IntegerField()
class SearchBosta(forms.Form):
search_query = forms.CharField()
def show(request):
if request.method == 'POST':
form = BostaForm(request.POST)
if form.is_valid():
maxPrice = form.cleaned_data['maxPrice']
livingArea = form.cleaned_data['livingArea']
room = form.cleaned_data['room']
bostas = Bosta.objects.filter(listPrice__lte=maxPrice).filter(
livingArea__gte=livingArea).filter(rooms__gte=room).exclude(
listPrice=0).order_by('soldDate')
else:
form = BostaForm()
bostas = get_all_bostas()
for bosta in bostas:
if bosta.livingArea == 0:
bosta.sek_m2 = 0
elif bosta.soldPrice == 0:
bosta.sek_m2 = bosta.listPrice / bosta.livingArea
else:
bosta.sek_m2 = bosta.soldPrice / bosta.livingArea
data = {'bostas': bostas, 'form': form}
return render(request, 'main.html', data)
def update(request):
totalListing = 0
totalSold = 0
form = SearchBosta()
data = {'totalListing': totalListing, 'totalSold': totalSold,
'countListing': 0, 'countSold': 0, 'form': form}
if request.method == 'POST':
form = SearchBosta(request.POST)
if form.is_valid():
q = form.cleaned_data['search_query'].encode('utf8')
d1 = search('listings', q)
if d1:
data['totalListing'] = d1['total']
data['countListing'] = d1['count']
d1 = search('sold', q)
if d1:
data['totalSold'] = d1['total']
data['countSold'] = d1['count']
return render(request, 'update.html', data)
<mask token>
| <mask token>
class BostaForm(forms.Form):
maxPrice = forms.IntegerField()
livingArea = forms.IntegerField()
room = forms.IntegerField()
class BostaIdForm(forms.Form):
bostaId = forms.IntegerField()
class SearchBosta(forms.Form):
search_query = forms.CharField()
def show(request):
if request.method == 'POST':
form = BostaForm(request.POST)
if form.is_valid():
maxPrice = form.cleaned_data['maxPrice']
livingArea = form.cleaned_data['livingArea']
room = form.cleaned_data['room']
bostas = Bosta.objects.filter(listPrice__lte=maxPrice).filter(
livingArea__gte=livingArea).filter(rooms__gte=room).exclude(
listPrice=0).order_by('soldDate')
else:
form = BostaForm()
bostas = get_all_bostas()
for bosta in bostas:
if bosta.livingArea == 0:
bosta.sek_m2 = 0
elif bosta.soldPrice == 0:
bosta.sek_m2 = bosta.listPrice / bosta.livingArea
else:
bosta.sek_m2 = bosta.soldPrice / bosta.livingArea
data = {'bostas': bostas, 'form': form}
return render(request, 'main.html', data)
def update(request):
totalListing = 0
totalSold = 0
form = SearchBosta()
data = {'totalListing': totalListing, 'totalSold': totalSold,
'countListing': 0, 'countSold': 0, 'form': form}
if request.method == 'POST':
form = SearchBosta(request.POST)
if form.is_valid():
q = form.cleaned_data['search_query'].encode('utf8')
d1 = search('listings', q)
if d1:
data['totalListing'] = d1['total']
data['countListing'] = d1['count']
d1 = search('sold', q)
if d1:
data['totalSold'] = d1['total']
data['countSold'] = d1['count']
return render(request, 'update.html', data)
def search(type_search, q):
total = 0
while True:
result = booliwood(q, total, 50, type_search)
for listing in result[type_search]:
add_bosta(listing)
total = total + result['count']
if total >= result['totalCount']:
break
time.sleep(1)
data = {'total': total, 'count': result['totalCount']}
return data
| from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotAllowed
from booli import booliwood
from models import add_bosta, get_all_bostas, Bosta
from django import forms
import time
class BostaForm(forms.Form):
maxPrice = forms.IntegerField()
livingArea = forms.IntegerField()
room = forms.IntegerField()
class BostaIdForm(forms.Form):
bostaId = forms.IntegerField()
class SearchBosta(forms.Form):
search_query = forms.CharField()
def show(request):
if request.method == 'POST':
form = BostaForm(request.POST)
if form.is_valid():
maxPrice = form.cleaned_data['maxPrice']
livingArea = form.cleaned_data['livingArea']
room = form.cleaned_data['room']
bostas = Bosta.objects.filter(listPrice__lte=maxPrice).filter(
livingArea__gte=livingArea).filter(rooms__gte=room).exclude(
listPrice=0).order_by('soldDate')
else:
form = BostaForm()
bostas = get_all_bostas()
for bosta in bostas:
if bosta.livingArea == 0:
bosta.sek_m2 = 0
elif bosta.soldPrice == 0:
bosta.sek_m2 = bosta.listPrice / bosta.livingArea
else:
bosta.sek_m2 = bosta.soldPrice / bosta.livingArea
data = {'bostas': bostas, 'form': form}
return render(request, 'main.html', data)
def update(request):
totalListing = 0
totalSold = 0
form = SearchBosta()
data = {'totalListing': totalListing, 'totalSold': totalSold,
'countListing': 0, 'countSold': 0, 'form': form}
if request.method == 'POST':
form = SearchBosta(request.POST)
if form.is_valid():
q = form.cleaned_data['search_query'].encode('utf8')
d1 = search('listings', q)
if d1:
data['totalListing'] = d1['total']
data['countListing'] = d1['count']
d1 = search('sold', q)
if d1:
data['totalSold'] = d1['total']
data['countSold'] = d1['count']
return render(request, 'update.html', data)
def search(type_search, q):
total = 0
while True:
result = booliwood(q, total, 50, type_search)
for listing in result[type_search]:
add_bosta(listing)
total = total + result['count']
if total >= result['totalCount']:
break
time.sleep(1)
data = {'total': total, 'count': result['totalCount']}
return data
| from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotAllowed
from booli import booliwood
from models import add_bosta, get_all_bostas, Bosta
from django import forms
import time
class BostaForm(forms.Form):
maxPrice = forms.IntegerField()
livingArea = forms.IntegerField()
room = forms.IntegerField()
class BostaIdForm(forms.Form):
bostaId = forms.IntegerField()
class SearchBosta(forms.Form):
search_query = forms.CharField()
def show(request):
if request.method == 'POST':
form = BostaForm(request.POST)
if form.is_valid():
maxPrice = form.cleaned_data['maxPrice']
livingArea = form.cleaned_data['livingArea']
room = form.cleaned_data['room']
bostas = Bosta.objects \
.filter(listPrice__lte=maxPrice) \
.filter(livingArea__gte=livingArea) \
.filter(rooms__gte=room) \
.exclude(listPrice=0) \
.order_by('soldDate')
else:
form = BostaForm()
bostas = get_all_bostas()
for bosta in bostas:
if bosta.livingArea == 0:
bosta.sek_m2 = 0
elif bosta.soldPrice == 0:
bosta.sek_m2 = bosta.listPrice / bosta.livingArea
else:
bosta.sek_m2 = bosta.soldPrice / bosta.livingArea
data = {
'bostas': bostas,
'form': form,
}
return render(request, 'main.html', data)
def update(request):
totalListing = 0
totalSold = 0
form = SearchBosta()
data = {
'totalListing': totalListing,
'totalSold': totalSold,
'countListing': 0,
'countSold': 0,
'form': form
}
if request.method == 'POST':
form = SearchBosta(request.POST)
if form.is_valid():
q = form.cleaned_data['search_query'].encode('utf8')
d1 = search("listings", q)
if d1:
data['totalListing'] = d1['total']
data['countListing'] = d1['count']
d1 = search("sold", q)
if d1:
data['totalSold'] = d1['total']
data['countSold'] = d1['count']
return render(request, 'update.html', data)
def search(type_search, q):
total = 0
while True:
result = booliwood(q, total, 50, type_search)
for listing in result[type_search]:
add_bosta(listing)
total = total + result['count']
if total >= result['totalCount']:
break
time.sleep(1)
data = {
'total': total,
'count': result['totalCount'],
}
return data
| [
6,
8,
9,
10,
11
] |
124 | 4642537f8af1f060f5ee43cc9e98bd07be6a558c | # import libraries
import sys
import pandas as pd
import numpy as n
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
This function loads the message and categories files and
merge them and return the new dataframe for the project
"""
# Read messages and categories data
messaging = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
# Merge the two dataframes
dataframe = messaging.merge(categories, how='inner', on= 'id')
return dataframe
def clean_data(dataframe):
"""
Cleaning the merged dataframe to make it ready to analyze
"""
# split categories into seperate
categories = dataframe.categories.str.split(';', expand=True)
# select the first row&col of the categories dataframe
row&col = categories.iloc[0]
cate_col = row&col.apply(lambda x: x[:-2])
cate.columns = cate_colnames
#convert categories values to numeric instead of strings
for column in categories:
categories[column] = categories[column].str[-1]
categories[column] = categories[column].astype(int)
# replace categories column in dataframe
dataframe.drop(columns = ['categories'], inplace=True)
# concatenate the original dataframe with the new `categories` dataframe
dataframe = dataframe.join(categories)
#drop duplicates
dataframe.drop_duplicates(inplace=True)
return dataframe
def save_data(dataframe, database_filename):
"""
Take the input dataframe and save it into sqlite database
"""
# Creating sqlite engine and save the dataframe with the name message
engine_process = create_engine('sqlite:///Messages.db')
dataframe.to_sql('messaging', engine_process, index=False,if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
dataframe = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
dataframe = clean_data(dataframe)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(dataframe, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| null | null | null | null | [
0
] |
125 | a57059927a7bd3311c1d104bfc80877912c7d995 | <mask token>
| <mask token>
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break
| <mask token>
src_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'
dst_bmp_dir = 'D:/Temp/'
jpg_files = glob.glob(src_jpg_dir + '*.jpg')
cnt = 0
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break
| <mask token>
import os
from PIL import Image
import glob
import shutil
src_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'
dst_bmp_dir = 'D:/Temp/'
jpg_files = glob.glob(src_jpg_dir + '*.jpg')
cnt = 0
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break
| # -*- coding: utf-8 -*-
"""
Created on Sat Oct 20 07:48:47 2018
@author: hfuji
"""
import os
from PIL import Image
import glob
import shutil
src_jpg_dir = 'D:/Develop/data/VOCdevkit/VOC2007/JPEGImages/'
dst_bmp_dir = 'D:/Temp/'
jpg_files = glob.glob(src_jpg_dir + '*.jpg')
cnt = 0
for jpg_file in jpg_files:
basename = os.path.basename(jpg_file)
if int(basename[:-4]) % 10 == 0:
cnt += 1
dirname = os.path.dirname(jpg_file)
dirs = dirname.split('/')
new_fname = dirs[-2] + '_' + basename[:-4] + '.bmp'
dst_bmp_path = dst_bmp_dir + new_fname
print(dst_bmp_path)
# pil_img = Image.open(jpg_file)
# pil_img.save(dst_bmp_path, "bmp")
shutil.copyfile(jpg_file, dst_bmp_path[:-4] + '.jpg')
if cnt > 3:
break | [
0,
1,
2,
3,
4
] |
126 | 7dc99d33023dbb13938ac413af7d3e9471fdbc3d | <mask token>
| <mask token>
print('the original DNA sequence is', dnaSequence)
print('the first fragment is', firstFragment, 'and is', firstFragmentLen,
'letters long')
print('the second fragment is', secondFragment, 'and is', secondFragmentLen,
'letters long')
| dnaSequence = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'
firstFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'[0:22]
secondFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'[
23:100]
firstFragmentLen = len(firstFragment)
secondFragmentLen = len(secondFragment)
print('the original DNA sequence is', dnaSequence)
print('the first fragment is', firstFragment, 'and is', firstFragmentLen,
'letters long')
print('the second fragment is', secondFragment, 'and is', secondFragmentLen,
'letters long')
| #the initial DNA sequence
dnaSequence = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'
#seperating the DNA sequence at the specified location
firstFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT' [0:22]
secondFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT' [23:100]
#finsing the length of the 2 fragments
firstFragmentLen = len(firstFragment)
secondFragmentLen = len(secondFragment)
#printing the original and the split DNA sequence
print("the original DNA sequence is", dnaSequence)
print("the first fragment is", firstFragment, "and is", firstFragmentLen ,"letters long")
print("the second fragment is", secondFragment, "and is", secondFragmentLen,"letters long")
| null | [
0,
1,
2,
3
] |
127 | 000dd63089fd0c6184fd032fe75ccc920beee7a8 | <mask token>
def visualizeClusters(clusters):
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],
clusters[1][:, 1], 'bs')
plt.show()
return
<mask token>
def kmeans(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
visualizeClusters(clusters)
return clusters
<mask token>
def Func(clusters):
center = []
for i in range(len(clusters)):
center.append(clusters[i][0])
distSum = 0
for i in range(len(clusters)):
for j in range(1, len(clusters[i])):
distSum += np.linalg.norm(center[i] - clusters[i][j])
return distSum
<mask token>
def purity(X, clusters):
purities = []
for i in range(2):
count = 0
for idx in range(len(clusters[i])):
if int(clusters[i][idx][2]) == 1:
count += 1
purity = count * 1.0 / len(clusters[i])
if purity > 0.5:
purities.append(purity)
else:
purities.append(1 - purity)
return purities
<mask token>
| <mask token>
def getInitialCentroids(X, k):
initialCentroids = []
for i in range(k):
index = random.randint(0, len(X))
initialCentroids.append(X[index])
return initialCentroids
def visualizeClusters(clusters):
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],
clusters[1][:, 1], 'bs')
plt.show()
return
<mask token>
def euclidean_dist(data, centroids, clusters):
centroids = np.array(centroids)
for instance in data:
instance = np.array(instance)
mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for
i in enumerate(centroids)], key=lambda t: t[1])[0]
try:
clusters[mu_index].append(instance)
except KeyError:
clusters[mu_index] = [instance]
for cluster in clusters:
if not cluster:
cluster.append(data[np.random.randint(0, len(data), size=1)].
flatten().tolist())
return clusters
def kmeans(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
visualizeClusters(clusters)
return clusters
def kmeans_(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
return clusters
def Func(clusters):
center = []
for i in range(len(clusters)):
center.append(clusters[i][0])
distSum = 0
for i in range(len(clusters)):
for j in range(1, len(clusters[i])):
distSum += np.linalg.norm(center[i] - clusters[i][j])
return distSum
def kneeFinding(X, kList):
obj = []
for i in kList:
obj.append(Func(kmeans_(X, i)))
plt.plot(range(1, 7), obj)
plt.show()
return
def purity(X, clusters):
purities = []
for i in range(2):
count = 0
for idx in range(len(clusters[i])):
if int(clusters[i][idx][2]) == 1:
count += 1
purity = count * 1.0 / len(clusters[i])
if purity > 0.5:
purities.append(purity)
else:
purities.append(1 - purity)
return purities
<mask token>
| <mask token>
def loadData(fileDj):
data = []
fid = open(fileDj)
for line in fid:
line = line.strip()
m = [float(x) for x in line.split(' ')]
data.append(m)
return data
def getInitialCentroids(X, k):
initialCentroids = []
for i in range(k):
index = random.randint(0, len(X))
initialCentroids.append(X[index])
return initialCentroids
def visualizeClusters(clusters):
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],
clusters[1][:, 1], 'bs')
plt.show()
return
<mask token>
def euclidean_dist(data, centroids, clusters):
centroids = np.array(centroids)
for instance in data:
instance = np.array(instance)
mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for
i in enumerate(centroids)], key=lambda t: t[1])[0]
try:
clusters[mu_index].append(instance)
except KeyError:
clusters[mu_index] = [instance]
for cluster in clusters:
if not cluster:
cluster.append(data[np.random.randint(0, len(data), size=1)].
flatten().tolist())
return clusters
def kmeans(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
visualizeClusters(clusters)
return clusters
def kmeans_(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
return clusters
def Func(clusters):
center = []
for i in range(len(clusters)):
center.append(clusters[i][0])
distSum = 0
for i in range(len(clusters)):
for j in range(1, len(clusters[i])):
distSum += np.linalg.norm(center[i] - clusters[i][j])
return distSum
def kneeFinding(X, kList):
obj = []
for i in kList:
obj.append(Func(kmeans_(X, i)))
plt.plot(range(1, 7), obj)
plt.show()
return
def purity(X, clusters):
purities = []
for i in range(2):
count = 0
for idx in range(len(clusters[i])):
if int(clusters[i][idx][2]) == 1:
count += 1
purity = count * 1.0 / len(clusters[i])
if purity > 0.5:
purities.append(purity)
else:
purities.append(1 - purity)
return purities
<mask token>
| <mask token>
def loadData(fileDj):
data = []
fid = open(fileDj)
for line in fid:
line = line.strip()
m = [float(x) for x in line.split(' ')]
data.append(m)
return data
def getInitialCentroids(X, k):
initialCentroids = []
for i in range(k):
index = random.randint(0, len(X))
initialCentroids.append(X[index])
return initialCentroids
def visualizeClusters(clusters):
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],
clusters[1][:, 1], 'bs')
plt.show()
return
def has_converged(centroids, old_centroids, iterations):
MAX_ITERATIONS = 100
if iterations > MAX_ITERATIONS:
return True
return old_centroids == centroids
def euclidean_dist(data, centroids, clusters):
centroids = np.array(centroids)
for instance in data:
instance = np.array(instance)
mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for
i in enumerate(centroids)], key=lambda t: t[1])[0]
try:
clusters[mu_index].append(instance)
except KeyError:
clusters[mu_index] = [instance]
for cluster in clusters:
if not cluster:
cluster.append(data[np.random.randint(0, len(data), size=1)].
flatten().tolist())
return clusters
def kmeans(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
visualizeClusters(clusters)
return clusters
def kmeans_(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
return clusters
def Func(clusters):
center = []
for i in range(len(clusters)):
center.append(clusters[i][0])
distSum = 0
for i in range(len(clusters)):
for j in range(1, len(clusters[i])):
distSum += np.linalg.norm(center[i] - clusters[i][j])
return distSum
def kneeFinding(X, kList):
obj = []
for i in kList:
obj.append(Func(kmeans_(X, i)))
plt.plot(range(1, 7), obj)
plt.show()
return
def purity(X, clusters):
purities = []
for i in range(2):
count = 0
for idx in range(len(clusters[i])):
if int(clusters[i][idx][2]) == 1:
count += 1
purity = count * 1.0 / len(clusters[i])
if purity > 0.5:
purities.append(purity)
else:
purities.append(1 - purity)
return purities
<mask token>
def main():
datadir = ''
pathDataset1 = datadir + 'humanData.txt'
dataset1 = loadData(pathDataset1)
kneeFinding(dataset1, range(1, 7))
clusters = kmeans(dataset1, 2, maxIter=1000)
purity(dataset1, clusters)
<mask token>
| #!/usr/bin/python
import sys
import numpy as np
import random
import matplotlib.pyplot as plt
#Your code here
def loadData(fileDj):
data = []
fid = open(fileDj)
for line in fid:
line = line.strip()
m = [float(x) for x in line.split(' ')]
data.append(m)
return data
## K-means functions
def getInitialCentroids(X, k):
initialCentroids = []
for i in range(k):
index = random.randint(0, len(X))
initialCentroids.append(X[index])
#Your code here
return initialCentroids
def visualizeClusters(clusters):
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
plt.plot(clusters[0][:,0], clusters[0][:,1], 'rs', clusters[1][:,0], clusters[1][:,1], 'bs')
plt.show()
return
def has_converged(centroids, old_centroids, iterations):
MAX_ITERATIONS = 100
if iterations > MAX_ITERATIONS:
return True
return old_centroids == centroids
def euclidean_dist(data, centroids, clusters):
centroids = np.array(centroids)
for instance in data:
instance = np.array(instance)
mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) \
for i in enumerate(centroids)], key=lambda t: t[1])[0]
try:
clusters[mu_index].append(instance)
except KeyError:
clusters[mu_index] = [instance]
for cluster in clusters:
if not cluster:
cluster.append(data[np.random.randint(0, len(data), size=1)].flatten().tolist())
return clusters
def kmeans(X, k, maxIter=1000):
centroids = getInitialCentroids(X,k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not (has_converged(centroids, old_centroids, iterations)):
iterations += 1
clusters = [[] for i in range(k)]
# assign data points to clusters
clusters = euclidean_dist(X, centroids, clusters)
# recalculate centroids
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
visualizeClusters(clusters)
return clusters
def kmeans_(X, k, maxIter=1000):
centroids = getInitialCentroids(X,k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not (has_converged(centroids, old_centroids, iterations)):
iterations += 1
clusters = [[] for i in range(k)]
# assign data points to clusters
clusters = euclidean_dist(X, centroids, clusters)
# recalculate centroids
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
#visualizeClusters(clusters)
return clusters
def Func(clusters):
center = []
for i in range(len(clusters)):
center.append(clusters[i][0])
distSum = 0
for i in range(len(clusters)):
for j in range(1, len(clusters[i])):
distSum += np.linalg.norm(center[i] - clusters[i][j])
return distSum
def kneeFinding(X,kList):
obj = []
for i in kList:
obj.append(Func(kmeans_(X, i)))
plt.plot(range(1,7), obj)
plt.show()
return
def purity(X, clusters):
purities = []
#Your code
for i in range(2):
count = 0
for idx in range(len(clusters[i])):
if(int(clusters[i][idx][2]) == 1):
count += 1
purity = count*1.0 / len(clusters[i])
if purity > 0.5:
purities.append(purity)
else:
purities.append(1-purity)
#<type 'list'>: [0.9724249797242498, 0.999000999000999]
return purities
'''
## GMM functions
#calculate the initial covariance matrix
#covType: diag, full
def getInitialsGMM(X,k,covType):
if covType == 'full':
dataArray = np.transpose(np.array([pt[0:-1] for pt in X]))
covMat = np.cov(dataArray)
else:
covMatList = []
for i in range(len(X[0])-1):
data = [pt[i] for pt in X]
cov = np.asscalar(np.cov(data))
covMatList.append(cov)
covMat = np.diag(covMatList)
initialClusters = {}
#Your code here
return initialClusters
def calcLogLikelihood(X,clusters,k):
loglikelihood = 0
#Your code here
return loglikelihood
#E-step
def updateEStep(X,clusters,k):
EMatrix = []
#Your code here
return EMatrix
#M-step
def updateMStep(X,clusters,EMatrix):
#Your code here
return clusters
def visualizeClustersGMM(X,labels,clusters,covType):
#Your code here
def gmmCluster(X, k, covType, maxIter=1000):
#initial clusters
clustersGMM = getInitialsGMM(X,k,covType)
labels = []
#Your code here
visualizeClustersGMM(X,labels,clustersGMM,covType)
return labels,clustersGMM
def purityGMM(X, clusters, labels):
purities = []
#Your code here
return purities
'''
def main():
#######dataset path
#datadir = sys.argv[1]
datadir = ''
pathDataset1 = datadir+'humanData.txt'
#pathDataset2 = datadir+'/audioData.txt'
dataset1 = loadData(pathDataset1)
#dataset2 = loadData(pathDataset2)
#Q4
kneeFinding(dataset1,range(1,7))
#Q5
clusters = kmeans(dataset1, 2, maxIter=1000)
purity(dataset1,clusters)
'''
#Q7
labels11,clustersGMM11 = gmmCluster(dataset1, 2, 'diag')
labels12,clustersGMM12 = gmmCluster(dataset1, 2, 'full')
#Q8
labels21,clustersGMM21 = gmmCluster(dataset2, 2, 'diag')
labels22,clustersGMM22 = gmmCluster(dataset2, 2, 'full')
#Q9
purities11 = purityGMM(dataset1, clustersGMM11, labels11)
purities12 = purityGMM(dataset1, clustersGMM12, labels12)
purities21 = purityGMM(dataset2, clustersGMM21, labels21)
purities22 = purityGMM(dataset2, clustersGMM22, labels22)
'''
if __name__ == "__main__":
main() | [
4,
8,
9,
11,
14
] |
128 | 1980fb4d6e7d3c6fe51f4a242610b5489e553859 | <mask token>
class Zaojiaopage(Crazy):
<mask token>
<mask token>
def click_zao(self):
self.click(self.zao_btn_loc)
<mask token>
<mask token>
def click_find(self):
self.click(self.find_loc)
<mask token>
def click_title_btn(self):
self.click(self.title_btn_loc)
<mask token>
<mask token>
def click_helper(self):
self.click(self.helper_loc)
<mask token>
<mask token>
<mask token>
def element_small_name(self):
return self.find_element(self.small_name_loc)
def click_small_name(self):
self.click(self.small_name_loc)
<mask token>
def click_switching_applet_btn(self):
self.click(self.switching_applet_btn_loc)
<mask token>
def click_delete_small_btn(self):
self.click(self.delete_small_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def clicks_experience_version_btn(self):
self.clicks(self.experience_version_btn_loc, -1)
<mask token>
def element_audition_class_btn(self):
return self.find_element(self.audition_class_btn_loc)
def click_audition_class_btn(self):
self.click(self.audition_class_btn_loc)
<mask token>
def click_wechat_grant_btn(self):
self.click(self.wechat_grant_btn_loc)
def double_click_wechat_grant(self):
self.double_click(self.wechat_grant_btn_loc)
def element_wechat_grant_btn(self):
return self.find_element(self.wechat_grant_btn_loc)
<mask token>
<mask token>
<mask token>
def click_mouth_btn(self):
self.click(self.month_btn_loc)
<mask token>
def click_sure_btn(self):
self.click(self.sure_btn_loc)
<mask token>
def class_info_btn(self):
self.click(self.class_info_loc)
<mask token>
def element_attend_lectures_btn(self):
return self.find_element(self.attend_lectures_btn_loc)
<mask token>
<mask token>
def element_class_btn(self):
return self.find_element(self.class_btn_loc)
<mask token>
<mask token>
def element_get_to_know_btn(self):
return self.find_element(self.get_to_know_btn_loc)
<mask token>
<mask token>
<mask token>
def input_buy_password(self, paw):
self.send_keys(self.buy_password_loc, paw)
<mask token>
<mask token>
<mask token>
def click_success_btn(self):
self.click(self.success_btn_loc)
<mask token>
def click_check_address_btn(self):
self.click(self.check_address_btn_loc)
<mask token>
<mask token>
def click_add_address_btn(self):
self.click(self.add_address_btn_loc)
<mask token>
def input_name_btn(self, name):
self.send_keys(self.name_loc, name)
<mask token>
def input_phone_btn(self, phone):
self.send_keys(self.phone_btn_loc, phone)
<mask token>
<mask token>
<mask token>
def input_detailed_address_btn(self, address):
self.send_keys(self.detailed_address_btn_loc, address)
<mask token>
def click_save_btn(self):
self.click(self.save_btn_loc)
<mask token>
def click_receive_btn(self):
self.click(self.receive_btn_loc)
<mask token>
<mask token>
def clicks_addressee(self):
self.clicks(self.addressee_loc, 0)
<mask token>
<mask token>
def click_know(self):
self.click(self.know_btn_loc)
<mask token>
def element_all_curriculum_btn(self):
return self.find_element(self.all_curriculum_btn_loc)
def click_all_curriculum_btn(self):
self.click(self.all_curriculum_btn_loc)
<mask token>
<mask token>
<mask token>
def element_my_btn(self):
return self.find_element(self.my_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
def text_my_baby_title(self):
return self.get_text(self.my_baby_title_loc)
<mask token>
<mask token>
def element_new_baby_btn(self):
return self.find_element(self.new_baby_btn_loc)
<mask token>
def clicks_new_baby_btn(self, n):
self.clicks(self.new_baby_btn_loc, n)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def click_baby_bir_btn(self):
self.click(self.baby_bir_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
def click_my_home(self):
self.click(self.my_home_loc)
def element_my_home(self):
return self.find_element(self.my_home_loc)
<mask token>
def click_switch_btn(self):
self.click(self.switch_btn_loc)
<mask token>
def click_baby_bri(self):
self.click(self.baby_bri_loc)
<mask token>
def clicks_class_img(self):
self.clicks(self.class_img_btn_loc, 0)
<mask token>
def click_collection_btn(self):
self.click(self.collection_btn_loc)
def clicks_collection_btn(self, n):
self.clicks(self.collection_btn_loc, n)
<mask token>
<mask token>
def click_write_record_btn(self):
self.click(self.write_record_btn_loc)
<mask token>
<mask token>
<mask token>
def element_album_btn(self):
return self.find_element(self.album_btn_loc)
<mask token>
<mask token>
def element_small_video_btn(self):
return self.find_element(self.small_video_btn_loc)
<mask token>
<mask token>
def clicks_release_btn(self, n):
self.clicks(self.release_btn_loc, n)
def element_record_info(self, data):
record_info_loc = 'xpath', '//*[contains(@text, "{}")]'.format(data)
record_info = self.find_element(record_info_loc)
if record_info:
return True
else:
return False
<mask token>
<mask token>
<mask token>
def elements_class_name(self):
return self.find_elements(self.class_name_loc)
<mask token>
def click_class2_name(self):
self.click(self.class_name2_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def clicks_choice_album(self, n):
self.clicks(self.choice_album_loc, n)
def elements_choice_album(self):
return self.find_elements(self.choice_album_loc)
<mask token>
def click_complete_btn(self):
self.click(self.complete_btn_loc)
<mask token>
def click_my_collection_btn(self):
self.click(self.my_collection_btn_loc)
<mask token>
def elements_my_collection_english_course_btn(self):
return self.find_elements(self.my_collection_english_course_btn_loc)
<mask token>
<mask token>
<mask token>
def click_my_course_btn(self):
self.click(self.my_course_btn_loc)
<mask token>
def elements_my_course_buy_btn(self):
return self.find_elements(self.my_course_buy_btn_loc)
<mask token>
def click_my_order_btn(self):
self.click(self.my_order_btn_loc)
<mask token>
def elements_my_order_card_btn(self):
return self.find_elements(self.my_order_card_btn_loc)
<mask token>
<mask token>
<mask token>
def elements_my_record_class_btn(self):
return self.find_elements(self.my_record_class_btn_loc)
<mask token>
<mask token>
def click_back_btn(self):
self.click(self.back_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def click_send(self):
self.click(self.send_5_loc)
<mask token>
<mask token>
def clicks_reply_code(self, n):
self.clicks(self.reply_code_loc, n)
<mask token>
def element_long_code(self):
return self.find_element(self.long_code_loc)
def click_long_code(self):
self.click(self.long_code_loc)
<mask token>
def click_discern_code(self):
self.click(self.discern_code_loc)
<mask token>
def text_class_group(self):
return self.get_text(self.class_group_loc)
<mask token>
def element_add_group_chat(self):
return self.find_element(self.add_group_chat_loc)
<mask token>
def elements_reply_8(self):
return self.find_elements(self.reply_8_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def click_more_games_btn(self):
self.click(self.more_games_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
def text_start_fingerprint_buy(self):
return self.get_text(self.start_fingerprint_buy_loc)
<mask token>
def click_no_more_reminder_btn(self):
self.click(self.no_more_reminder_btn_loc)
<mask token>
def click_cancel_btn(self):
self.click(self.cancel_btn_loc)
<mask token>
def element_usd_password(self):
return self.find_element(self.usd_password_loc)
<mask token>
<mask token>
def element_password_error(self):
return self.find_element(self.password_error_loc)
<mask token>
def click_again_btn(self):
self.click(self.again_btn_loc)
<mask token>
def text_payment(self):
return self.get_text(self.payment_loc)
<mask token>
def element_typewriting_finish_btn(self):
return self.find_element(self.typewriting_finish_btn_loc)
<mask token>
<mask token>
<mask token>
def element_clock_btn(self):
return self.find_element(self.clock_btn_loc)
<mask token>
def element_no_clock_btn(self):
return self.find_element(self.no_clock_btn_loc)
<mask token>
<mask token>
<mask token>
def click_upload_card_btn(self):
self.click(self.upload_card_btn_loc)
<mask token>
def click_again_upload_card_btn(self):
self.click(self.again_upload_card_btn_loc)
<mask token>
<mask token>
<mask token>
def click_copy_text_btn(self):
self.click(self.copy_text_btn_loc)
<mask token>
def element_copy_format_btn(self):
return self.find_element(self.copy_format_btn_loc)
<mask token>
<mask token>
<mask token>
def click_upload_btn(self):
self.click(self.upload_btn_loc)
<mask token>
<mask token>
<mask token>
def click_reset_img_btn(self):
self.click(self.reset_img_btn_loc)
<mask token>
def element_generated_loading(self):
return self.find_element(self.generated_loading_loc)
<mask token>
def element_reminder_btn(self):
return self.find_element(self.reminder_btn_loc)
<mask token>
def element_page_expired(self):
return self.find_element(self.page_expired_loc)
<mask token>
def click_x_btn(self):
self.click(self.x_btn_loc)
| <mask token>
class Zaojiaopage(Crazy):
<mask token>
<mask token>
def click_zao(self):
self.click(self.zao_btn_loc)
<mask token>
<mask token>
def click_find(self):
self.click(self.find_loc)
<mask token>
def click_title_btn(self):
self.click(self.title_btn_loc)
<mask token>
<mask token>
def click_helper(self):
self.click(self.helper_loc)
<mask token>
def click_small_help_btn(self):
self.click(self.small_help_btn_loc)
<mask token>
def element_small_name(self):
return self.find_element(self.small_name_loc)
def click_small_name(self):
self.click(self.small_name_loc)
<mask token>
def click_switching_applet_btn(self):
self.click(self.switching_applet_btn_loc)
<mask token>
def click_delete_small_btn(self):
self.click(self.delete_small_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def clicks_experience_version_btn(self):
self.clicks(self.experience_version_btn_loc, -1)
<mask token>
def element_audition_class_btn(self):
return self.find_element(self.audition_class_btn_loc)
def click_audition_class_btn(self):
self.click(self.audition_class_btn_loc)
<mask token>
def click_wechat_grant_btn(self):
self.click(self.wechat_grant_btn_loc)
def double_click_wechat_grant(self):
self.double_click(self.wechat_grant_btn_loc)
def element_wechat_grant_btn(self):
return self.find_element(self.wechat_grant_btn_loc)
<mask token>
<mask token>
<mask token>
def click_mouth_btn(self):
self.click(self.month_btn_loc)
<mask token>
def click_sure_btn(self):
self.click(self.sure_btn_loc)
<mask token>
def class_info_btn(self):
self.click(self.class_info_loc)
<mask token>
def element_attend_lectures_btn(self):
return self.find_element(self.attend_lectures_btn_loc)
<mask token>
<mask token>
def element_class_btn(self):
return self.find_element(self.class_btn_loc)
<mask token>
<mask token>
def element_get_to_know_btn(self):
return self.find_element(self.get_to_know_btn_loc)
<mask token>
<mask token>
<mask token>
def input_buy_password(self, paw):
self.send_keys(self.buy_password_loc, paw)
<mask token>
<mask token>
<mask token>
def click_success_btn(self):
self.click(self.success_btn_loc)
<mask token>
def click_check_address_btn(self):
self.click(self.check_address_btn_loc)
<mask token>
<mask token>
def click_add_address_btn(self):
self.click(self.add_address_btn_loc)
<mask token>
def input_name_btn(self, name):
self.send_keys(self.name_loc, name)
<mask token>
def input_phone_btn(self, phone):
self.send_keys(self.phone_btn_loc, phone)
<mask token>
<mask token>
<mask token>
def input_detailed_address_btn(self, address):
self.send_keys(self.detailed_address_btn_loc, address)
<mask token>
def click_save_btn(self):
self.click(self.save_btn_loc)
<mask token>
def click_receive_btn(self):
self.click(self.receive_btn_loc)
<mask token>
def elements_addressee(self):
return self.find_elements(self.addressee_loc)
def clicks_addressee(self):
self.clicks(self.addressee_loc, 0)
<mask token>
<mask token>
def click_know(self):
self.click(self.know_btn_loc)
<mask token>
def element_all_curriculum_btn(self):
return self.find_element(self.all_curriculum_btn_loc)
def click_all_curriculum_btn(self):
self.click(self.all_curriculum_btn_loc)
<mask token>
<mask token>
<mask token>
def element_my_btn(self):
return self.find_element(self.my_btn_loc)
<mask token>
<mask token>
def click_my_baby(self):
self.click(self.my_baby_btn_loc)
<mask token>
def text_my_baby_title(self):
return self.get_text(self.my_baby_title_loc)
<mask token>
<mask token>
def element_new_baby_btn(self):
return self.find_element(self.new_baby_btn_loc)
<mask token>
def clicks_new_baby_btn(self, n):
self.clicks(self.new_baby_btn_loc, n)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def click_baby_bir_btn(self):
self.click(self.baby_bir_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
def click_my_home(self):
self.click(self.my_home_loc)
def element_my_home(self):
return self.find_element(self.my_home_loc)
<mask token>
def click_switch_btn(self):
self.click(self.switch_btn_loc)
<mask token>
def click_baby_bri(self):
self.click(self.baby_bri_loc)
<mask token>
def clicks_class_img(self):
self.clicks(self.class_img_btn_loc, 0)
<mask token>
def click_collection_btn(self):
self.click(self.collection_btn_loc)
def clicks_collection_btn(self, n):
self.clicks(self.collection_btn_loc, n)
<mask token>
<mask token>
def click_write_record_btn(self):
self.click(self.write_record_btn_loc)
<mask token>
<mask token>
<mask token>
def element_album_btn(self):
return self.find_element(self.album_btn_loc)
<mask token>
<mask token>
def element_small_video_btn(self):
return self.find_element(self.small_video_btn_loc)
<mask token>
<mask token>
def clicks_release_btn(self, n):
self.clicks(self.release_btn_loc, n)
def element_record_info(self, data):
record_info_loc = 'xpath', '//*[contains(@text, "{}")]'.format(data)
record_info = self.find_element(record_info_loc)
if record_info:
return True
else:
return False
<mask token>
<mask token>
<mask token>
def elements_class_name(self):
return self.find_elements(self.class_name_loc)
<mask token>
def click_class2_name(self):
self.click(self.class_name2_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def clicks_choice_album(self, n):
self.clicks(self.choice_album_loc, n)
def elements_choice_album(self):
return self.find_elements(self.choice_album_loc)
<mask token>
def click_complete_btn(self):
self.click(self.complete_btn_loc)
<mask token>
def click_my_collection_btn(self):
self.click(self.my_collection_btn_loc)
<mask token>
def elements_my_collection_english_course_btn(self):
return self.find_elements(self.my_collection_english_course_btn_loc)
<mask token>
<mask token>
<mask token>
def click_my_course_btn(self):
self.click(self.my_course_btn_loc)
<mask token>
def elements_my_course_buy_btn(self):
return self.find_elements(self.my_course_buy_btn_loc)
<mask token>
def click_my_order_btn(self):
self.click(self.my_order_btn_loc)
<mask token>
def elements_my_order_card_btn(self):
return self.find_elements(self.my_order_card_btn_loc)
<mask token>
def click_my_record_btn(self):
self.click(self.my_record_btn_loc)
<mask token>
def elements_my_record_class_btn(self):
return self.find_elements(self.my_record_class_btn_loc)
<mask token>
def element_back_btn(self):
return self.find_element(self.back_btn_loc)
def click_back_btn(self):
self.click(self.back_btn_loc)
<mask token>
def click_reply_5(self):
self.click(self.reply_5_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def click_send(self):
self.click(self.send_5_loc)
<mask token>
<mask token>
def clicks_reply_code(self, n):
self.clicks(self.reply_code_loc, n)
<mask token>
def element_long_code(self):
return self.find_element(self.long_code_loc)
def click_long_code(self):
self.click(self.long_code_loc)
<mask token>
def click_discern_code(self):
self.click(self.discern_code_loc)
<mask token>
def text_class_group(self):
return self.get_text(self.class_group_loc)
<mask token>
def element_add_group_chat(self):
return self.find_element(self.add_group_chat_loc)
<mask token>
def elements_reply_8(self):
return self.find_elements(self.reply_8_loc)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def click_more_games_btn(self):
self.click(self.more_games_btn_loc)
<mask token>
def click_look_all_btn(self):
self.click(self.look_all_btn_loc)
<mask token>
<mask token>
def text_start_fingerprint_buy(self):
return self.get_text(self.start_fingerprint_buy_loc)
<mask token>
def click_no_more_reminder_btn(self):
self.click(self.no_more_reminder_btn_loc)
<mask token>
def click_cancel_btn(self):
self.click(self.cancel_btn_loc)
<mask token>
def element_usd_password(self):
return self.find_element(self.usd_password_loc)
<mask token>
<mask token>
def element_password_error(self):
return self.find_element(self.password_error_loc)
<mask token>
def click_again_btn(self):
self.click(self.again_btn_loc)
<mask token>
def text_payment(self):
return self.get_text(self.payment_loc)
<mask token>
def element_typewriting_finish_btn(self):
return self.find_element(self.typewriting_finish_btn_loc)
<mask token>
<mask token>
<mask token>
def element_clock_btn(self):
return self.find_element(self.clock_btn_loc)
<mask token>
def element_no_clock_btn(self):
return self.find_element(self.no_clock_btn_loc)
<mask token>
<mask token>
<mask token>
def click_upload_card_btn(self):
self.click(self.upload_card_btn_loc)
<mask token>
def click_again_upload_card_btn(self):
self.click(self.again_upload_card_btn_loc)
<mask token>
def click_save_img_btn(self):
self.click(self.save_img_btn_loc)
<mask token>
def click_copy_text_btn(self):
self.click(self.copy_text_btn_loc)
<mask token>
def element_copy_format_btn(self):
return self.find_element(self.copy_format_btn_loc)
<mask token>
def click_card_go_btn(self):
self.click(self.card_go_btn_loc)
<mask token>
def click_upload_btn(self):
self.click(self.upload_btn_loc)
<mask token>
<mask token>
<mask token>
def click_reset_img_btn(self):
self.click(self.reset_img_btn_loc)
<mask token>
def element_generated_loading(self):
return self.find_element(self.generated_loading_loc)
<mask token>
def element_reminder_btn(self):
return self.find_element(self.reminder_btn_loc)
<mask token>
def element_page_expired(self):
return self.find_element(self.page_expired_loc)
<mask token>
def click_x_btn(self):
self.click(self.x_btn_loc)
| <mask token>
class Zaojiaopage(Crazy):
<mask token>
<mask token>
def click_zao(self):
self.click(self.zao_btn_loc)
def element_zao(self):
return self.find_element(self.zao_btn_loc)
<mask token>
def click_find(self):
self.click(self.find_loc)
<mask token>
def click_title_btn(self):
self.click(self.title_btn_loc)
<mask token>
<mask token>
def click_helper(self):
self.click(self.helper_loc)
<mask token>
def click_small_help_btn(self):
self.click(self.small_help_btn_loc)
<mask token>
def element_small_name(self):
return self.find_element(self.small_name_loc)
def click_small_name(self):
self.click(self.small_name_loc)
<mask token>
def click_switching_applet_btn(self):
self.click(self.switching_applet_btn_loc)
<mask token>
def click_delete_small_btn(self):
self.click(self.delete_small_btn_loc)
<mask token>
def element_edition_btn(self):
return self.find_element(self.edition_btn_loc)
<mask token>
def element_delete_small1_btn(self):
return self.find_element(self.delete_small1_btn_loc)
<mask token>
<mask token>
<mask token>
def clicks_experience_version_btn(self):
self.clicks(self.experience_version_btn_loc, -1)
<mask token>
def element_audition_class_btn(self):
return self.find_element(self.audition_class_btn_loc)
def click_audition_class_btn(self):
self.click(self.audition_class_btn_loc)
<mask token>
def click_wechat_grant_btn(self):
self.click(self.wechat_grant_btn_loc)
def double_click_wechat_grant(self):
self.double_click(self.wechat_grant_btn_loc)
def element_wechat_grant_btn(self):
return self.find_element(self.wechat_grant_btn_loc)
<mask token>
def click_allow_btn(self):
self.click(self.allow_btn_loc)
<mask token>
def click_mouth_btn(self):
self.click(self.month_btn_loc)
<mask token>
def click_sure_btn(self):
self.click(self.sure_btn_loc)
<mask token>
def class_info_btn(self):
self.click(self.class_info_loc)
<mask token>
def element_attend_lectures_btn(self):
return self.find_element(self.attend_lectures_btn_loc)
<mask token>
<mask token>
def element_class_btn(self):
return self.find_element(self.class_btn_loc)
<mask token>
def click_get_to_know_btn(self):
self.click(self.get_to_know_btn_loc)
def element_get_to_know_btn(self):
return self.find_element(self.get_to_know_btn_loc)
<mask token>
def click_sure_buy_btn(self):
self.click(self.sure_buy_btn_loc)
<mask token>
def input_buy_password(self, paw):
self.send_keys(self.buy_password_loc, paw)
<mask token>
def text_buy_money(self):
return self.get_text(self.check_buy_money_loc)
<mask token>
def click_success_btn(self):
self.click(self.success_btn_loc)
<mask token>
def click_check_address_btn(self):
self.click(self.check_address_btn_loc)
<mask token>
<mask token>
def click_add_address_btn(self):
self.click(self.add_address_btn_loc)
<mask token>
def input_name_btn(self, name):
self.send_keys(self.name_loc, name)
<mask token>
def input_phone_btn(self, phone):
self.send_keys(self.phone_btn_loc, phone)
<mask token>
def click_region_btn(self):
self.click(self.region_btn_loc)
<mask token>
def input_detailed_address_btn(self, address):
self.send_keys(self.detailed_address_btn_loc, address)
<mask token>
def click_save_btn(self):
self.click(self.save_btn_loc)
<mask token>
def click_receive_btn(self):
self.click(self.receive_btn_loc)
<mask token>
def elements_addressee(self):
return self.find_elements(self.addressee_loc)
def clicks_addressee(self):
self.clicks(self.addressee_loc, 0)
<mask token>
def element_know(self):
return self.find_element(self.know_btn_loc)
def click_know(self):
self.click(self.know_btn_loc)
<mask token>
def element_all_curriculum_btn(self):
return self.find_element(self.all_curriculum_btn_loc)
def click_all_curriculum_btn(self):
self.click(self.all_curriculum_btn_loc)
<mask token>
<mask token>
<mask token>
def element_my_btn(self):
return self.find_element(self.my_btn_loc)
def click_my(self):
self.click(self.my_btn_loc)
<mask token>
def click_my_baby(self):
self.click(self.my_baby_btn_loc)
<mask token>
def text_my_baby_title(self):
return self.get_text(self.my_baby_title_loc)
<mask token>
<mask token>
def element_new_baby_btn(self):
return self.find_element(self.new_baby_btn_loc)
def click_new_baby_btn(self):
self.click(self.new_baby_btn_loc)
def clicks_new_baby_btn(self, n):
self.clicks(self.new_baby_btn_loc, n)
<mask token>
def element_get_set(self):
return self.find_element(self.get_set_loc)
<mask token>
<mask token>
<mask token>
def inputs_baby_name(self, name, n):
self.sends_keys(self.baby_name_loc, name, n)
<mask token>
def click_baby_bir_btn(self):
self.click(self.baby_bir_btn_loc)
<mask token>
def click_finish_btn(self):
self.click(self.finish_btn_loc)
<mask token>
<mask token>
def click_my_home(self):
self.click(self.my_home_loc)
def element_my_home(self):
return self.find_element(self.my_home_loc)
<mask token>
def click_switch_btn(self):
self.click(self.switch_btn_loc)
<mask token>
def click_baby_bri(self):
self.click(self.baby_bri_loc)
<mask token>
def clicks_class_img(self):
self.clicks(self.class_img_btn_loc, 0)
<mask token>
def click_collection_btn(self):
self.click(self.collection_btn_loc)
def clicks_collection_btn(self, n):
self.clicks(self.collection_btn_loc, n)
<mask token>
<mask token>
def click_write_record_btn(self):
self.click(self.write_record_btn_loc)
def clicks_write_record_btn(self, n):
self.clicks(self.write_record_btn_loc, n)
<mask token>
def click_album_btn(self):
self.click(self.album_btn_loc)
def element_album_btn(self):
return self.find_element(self.album_btn_loc)
<mask token>
def click_small_video_btn(self):
self.click(self.small_video_btn_loc)
def element_small_video_btn(self):
return self.find_element(self.small_video_btn_loc)
<mask token>
<mask token>
def clicks_release_btn(self, n):
self.clicks(self.release_btn_loc, n)
def element_record_info(self, data):
record_info_loc = 'xpath', '//*[contains(@text, "{}")]'.format(data)
record_info = self.find_element(record_info_loc)
if record_info:
return True
else:
return False
<mask token>
<mask token>
def clicks_class_name(self, n):
self.clicks(self.class_name_loc, n)
def elements_class_name(self):
return self.find_elements(self.class_name_loc)
<mask token>
def click_class2_name(self):
self.click(self.class_name2_loc)
<mask token>
<mask token>
def input_write_text(self, text):
self.send_keys(self.write_text_loc, text)
<mask token>
<mask token>
def clicks_choice_album(self, n):
self.clicks(self.choice_album_loc, n)
def elements_choice_album(self):
return self.find_elements(self.choice_album_loc)
<mask token>
def click_complete_btn(self):
self.click(self.complete_btn_loc)
<mask token>
def click_my_collection_btn(self):
self.click(self.my_collection_btn_loc)
<mask token>
def elements_my_collection_english_course_btn(self):
return self.find_elements(self.my_collection_english_course_btn_loc)
<mask token>
<mask token>
<mask token>
def click_my_course_btn(self):
self.click(self.my_course_btn_loc)
<mask token>
def elements_my_course_buy_btn(self):
return self.find_elements(self.my_course_buy_btn_loc)
<mask token>
def click_my_order_btn(self):
self.click(self.my_order_btn_loc)
<mask token>
def elements_my_order_card_btn(self):
return self.find_elements(self.my_order_card_btn_loc)
<mask token>
def click_my_record_btn(self):
self.click(self.my_record_btn_loc)
<mask token>
def elements_my_record_class_btn(self):
return self.find_elements(self.my_record_class_btn_loc)
<mask token>
def element_back_btn(self):
return self.find_element(self.back_btn_loc)
def click_back_btn(self):
self.click(self.back_btn_loc)
<mask token>
def click_reply_5(self):
self.click(self.reply_5_loc)
<mask token>
<mask token>
<mask token>
<mask token>
def input_reply_5(self, num):
self.send_keys(self.reply_input_5_loc, num)
<mask token>
def click_send(self):
self.click(self.send_5_loc)
<mask token>
<mask token>
def clicks_reply_code(self, n):
self.clicks(self.reply_code_loc, n)
<mask token>
def element_long_code(self):
return self.find_element(self.long_code_loc)
def click_long_code(self):
self.click(self.long_code_loc)
<mask token>
def click_discern_code(self):
self.click(self.discern_code_loc)
<mask token>
def text_class_group(self):
return self.get_text(self.class_group_loc)
<mask token>
def element_add_group_chat(self):
return self.find_element(self.add_group_chat_loc)
<mask token>
def elements_reply_8(self):
return self.find_elements(self.reply_8_loc)
<mask token>
def element_parent_btn(self):
return self.find_element(self.parent_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
def click_more_games_btn(self):
self.click(self.more_games_btn_loc)
<mask token>
def click_look_all_btn(self):
self.click(self.look_all_btn_loc)
def element_look_all_btn(self):
return self.find_elements(self.look_all_btn_loc)
<mask token>
def text_start_fingerprint_buy(self):
return self.get_text(self.start_fingerprint_buy_loc)
<mask token>
def click_no_more_reminder_btn(self):
self.click(self.no_more_reminder_btn_loc)
<mask token>
def click_cancel_btn(self):
self.click(self.cancel_btn_loc)
<mask token>
def element_usd_password(self):
return self.find_element(self.usd_password_loc)
<mask token>
<mask token>
def element_password_error(self):
return self.find_element(self.password_error_loc)
<mask token>
def click_again_btn(self):
self.click(self.again_btn_loc)
<mask token>
def text_payment(self):
return self.get_text(self.payment_loc)
<mask token>
def element_typewriting_finish_btn(self):
return self.find_element(self.typewriting_finish_btn_loc)
def click_typewriting_finish_btn(self):
self.click(self.typewriting_finish_btn_loc)
<mask token>
<mask token>
def element_clock_btn(self):
return self.find_element(self.clock_btn_loc)
<mask token>
def element_no_clock_btn(self):
return self.find_element(self.no_clock_btn_loc)
<mask token>
<mask token>
<mask token>
def click_upload_card_btn(self):
self.click(self.upload_card_btn_loc)
<mask token>
def click_again_upload_card_btn(self):
self.click(self.again_upload_card_btn_loc)
<mask token>
def click_save_img_btn(self):
self.click(self.save_img_btn_loc)
<mask token>
def click_copy_text_btn(self):
self.click(self.copy_text_btn_loc)
<mask token>
def element_copy_format_btn(self):
return self.find_element(self.copy_format_btn_loc)
<mask token>
def click_card_go_btn(self):
self.click(self.card_go_btn_loc)
<mask token>
def click_upload_btn(self):
self.click(self.upload_btn_loc)
<mask token>
def element_today_card_btn(self):
return self.find_element(self.today_card_btn_loc)
<mask token>
def click_reset_img_btn(self):
self.click(self.reset_img_btn_loc)
<mask token>
def element_generated_loading(self):
return self.find_element(self.generated_loading_loc)
<mask token>
def element_reminder_btn(self):
return self.find_element(self.reminder_btn_loc)
<mask token>
def element_page_expired(self):
return self.find_element(self.page_expired_loc)
<mask token>
def click_x_btn(self):
self.click(self.x_btn_loc)
| <mask token>
class Zaojiaopage(Crazy):
<mask token>
<mask token>
def click_zao(self):
self.click(self.zao_btn_loc)
def element_zao(self):
return self.find_element(self.zao_btn_loc)
<mask token>
def click_find(self):
self.click(self.find_loc)
<mask token>
def click_title_btn(self):
self.click(self.title_btn_loc)
<mask token>
<mask token>
def click_helper(self):
self.click(self.helper_loc)
<mask token>
def click_small_help_btn(self):
self.click(self.small_help_btn_loc)
<mask token>
def element_small_name(self):
return self.find_element(self.small_name_loc)
def click_small_name(self):
self.click(self.small_name_loc)
<mask token>
def click_switching_applet_btn(self):
self.click(self.switching_applet_btn_loc)
<mask token>
def click_delete_small_btn(self):
self.click(self.delete_small_btn_loc)
<mask token>
def element_edition_btn(self):
return self.find_element(self.edition_btn_loc)
<mask token>
def element_delete_small1_btn(self):
return self.find_element(self.delete_small1_btn_loc)
<mask token>
def click_version_btn(self):
self.click(self.version_btn_loc)
<mask token>
def clicks_experience_version_btn(self):
self.clicks(self.experience_version_btn_loc, -1)
<mask token>
def element_audition_class_btn(self):
return self.find_element(self.audition_class_btn_loc)
def click_audition_class_btn(self):
self.click(self.audition_class_btn_loc)
<mask token>
def click_wechat_grant_btn(self):
self.click(self.wechat_grant_btn_loc)
def double_click_wechat_grant(self):
self.double_click(self.wechat_grant_btn_loc)
def element_wechat_grant_btn(self):
return self.find_element(self.wechat_grant_btn_loc)
<mask token>
def click_allow_btn(self):
self.click(self.allow_btn_loc)
<mask token>
def click_mouth_btn(self):
self.click(self.month_btn_loc)
<mask token>
def click_sure_btn(self):
self.click(self.sure_btn_loc)
<mask token>
def class_info_btn(self):
self.click(self.class_info_loc)
<mask token>
def element_attend_lectures_btn(self):
return self.find_element(self.attend_lectures_btn_loc)
<mask token>
<mask token>
def element_class_btn(self):
return self.find_element(self.class_btn_loc)
<mask token>
def click_get_to_know_btn(self):
self.click(self.get_to_know_btn_loc)
def element_get_to_know_btn(self):
return self.find_element(self.get_to_know_btn_loc)
<mask token>
def click_sure_buy_btn(self):
self.click(self.sure_buy_btn_loc)
<mask token>
def input_buy_password(self, paw):
self.send_keys(self.buy_password_loc, paw)
<mask token>
def text_buy_money(self):
return self.get_text(self.check_buy_money_loc)
<mask token>
def click_success_btn(self):
self.click(self.success_btn_loc)
<mask token>
def click_check_address_btn(self):
self.click(self.check_address_btn_loc)
<mask token>
<mask token>
def click_add_address_btn(self):
self.click(self.add_address_btn_loc)
<mask token>
def input_name_btn(self, name):
self.send_keys(self.name_loc, name)
<mask token>
def input_phone_btn(self, phone):
self.send_keys(self.phone_btn_loc, phone)
<mask token>
def click_region_btn(self):
self.click(self.region_btn_loc)
<mask token>
def input_detailed_address_btn(self, address):
self.send_keys(self.detailed_address_btn_loc, address)
<mask token>
def click_save_btn(self):
self.click(self.save_btn_loc)
<mask token>
def click_receive_btn(self):
self.click(self.receive_btn_loc)
<mask token>
def elements_addressee(self):
return self.find_elements(self.addressee_loc)
def clicks_addressee(self):
self.clicks(self.addressee_loc, 0)
<mask token>
def element_know(self):
return self.find_element(self.know_btn_loc)
def click_know(self):
self.click(self.know_btn_loc)
<mask token>
def element_all_curriculum_btn(self):
return self.find_element(self.all_curriculum_btn_loc)
def click_all_curriculum_btn(self):
self.click(self.all_curriculum_btn_loc)
<mask token>
<mask token>
<mask token>
def element_my_btn(self):
return self.find_element(self.my_btn_loc)
def click_my(self):
self.click(self.my_btn_loc)
<mask token>
def click_my_baby(self):
self.click(self.my_baby_btn_loc)
<mask token>
def text_my_baby_title(self):
return self.get_text(self.my_baby_title_loc)
def elements_title(self):
return self.find_elements(self.my_baby_title_loc)
<mask token>
def element_new_baby_btn(self):
return self.find_element(self.new_baby_btn_loc)
def click_new_baby_btn(self):
self.click(self.new_baby_btn_loc)
def clicks_new_baby_btn(self, n):
self.clicks(self.new_baby_btn_loc, n)
<mask token>
def element_get_set(self):
return self.find_element(self.get_set_loc)
<mask token>
<mask token>
<mask token>
def inputs_baby_name(self, name, n):
self.sends_keys(self.baby_name_loc, name, n)
<mask token>
def click_baby_bir_btn(self):
self.click(self.baby_bir_btn_loc)
<mask token>
def click_finish_btn(self):
self.click(self.finish_btn_loc)
<mask token>
<mask token>
def click_my_home(self):
self.click(self.my_home_loc)
def element_my_home(self):
return self.find_element(self.my_home_loc)
<mask token>
def click_switch_btn(self):
self.click(self.switch_btn_loc)
<mask token>
def click_baby_bri(self):
self.click(self.baby_bri_loc)
<mask token>
def clicks_class_img(self):
self.clicks(self.class_img_btn_loc, 0)
<mask token>
def click_collection_btn(self):
self.click(self.collection_btn_loc)
def clicks_collection_btn(self, n):
self.clicks(self.collection_btn_loc, n)
<mask token>
<mask token>
def click_write_record_btn(self):
self.click(self.write_record_btn_loc)
def clicks_write_record_btn(self, n):
self.clicks(self.write_record_btn_loc, n)
<mask token>
def click_album_btn(self):
self.click(self.album_btn_loc)
def element_album_btn(self):
return self.find_element(self.album_btn_loc)
<mask token>
def click_small_video_btn(self):
self.click(self.small_video_btn_loc)
def element_small_video_btn(self):
return self.find_element(self.small_video_btn_loc)
<mask token>
<mask token>
def clicks_release_btn(self, n):
self.clicks(self.release_btn_loc, n)
def element_record_info(self, data):
record_info_loc = 'xpath', '//*[contains(@text, "{}")]'.format(data)
record_info = self.find_element(record_info_loc)
if record_info:
return True
else:
return False
<mask token>
<mask token>
def clicks_class_name(self, n):
self.clicks(self.class_name_loc, n)
def elements_class_name(self):
return self.find_elements(self.class_name_loc)
<mask token>
def click_class2_name(self):
self.click(self.class_name2_loc)
<mask token>
<mask token>
def input_write_text(self, text):
self.send_keys(self.write_text_loc, text)
<mask token>
<mask token>
def clicks_choice_album(self, n):
self.clicks(self.choice_album_loc, n)
def elements_choice_album(self):
return self.find_elements(self.choice_album_loc)
<mask token>
def click_complete_btn(self):
self.click(self.complete_btn_loc)
<mask token>
def click_my_collection_btn(self):
self.click(self.my_collection_btn_loc)
<mask token>
def elements_my_collection_english_course_btn(self):
return self.find_elements(self.my_collection_english_course_btn_loc)
<mask token>
<mask token>
<mask token>
def click_my_course_btn(self):
self.click(self.my_course_btn_loc)
<mask token>
def elements_my_course_buy_btn(self):
return self.find_elements(self.my_course_buy_btn_loc)
<mask token>
def click_my_order_btn(self):
self.click(self.my_order_btn_loc)
<mask token>
def elements_my_order_card_btn(self):
return self.find_elements(self.my_order_card_btn_loc)
<mask token>
def click_my_record_btn(self):
self.click(self.my_record_btn_loc)
<mask token>
def elements_my_record_class_btn(self):
return self.find_elements(self.my_record_class_btn_loc)
<mask token>
def element_back_btn(self):
return self.find_element(self.back_btn_loc)
def click_back_btn(self):
self.click(self.back_btn_loc)
<mask token>
def click_reply_5(self):
self.click(self.reply_5_loc)
def elements_reply_5(self):
return self.find_elements(self.reply_5_loc)
<mask token>
<mask token>
<mask token>
def input_reply_5(self, num):
self.send_keys(self.reply_input_5_loc, num)
<mask token>
def click_send(self):
self.click(self.send_5_loc)
<mask token>
def elements_reply_code(self):
return self.find_elements(self.reply_code_loc)
def clicks_reply_code(self, n):
self.clicks(self.reply_code_loc, n)
<mask token>
def element_long_code(self):
return self.find_element(self.long_code_loc)
def click_long_code(self):
self.click(self.long_code_loc)
<mask token>
def click_discern_code(self):
self.click(self.discern_code_loc)
<mask token>
def text_class_group(self):
return self.get_text(self.class_group_loc)
<mask token>
def element_add_group_chat(self):
return self.find_element(self.add_group_chat_loc)
<mask token>
def elements_reply_8(self):
return self.find_elements(self.reply_8_loc)
<mask token>
def element_parent_btn(self):
return self.find_element(self.parent_btn_loc)
<mask token>
<mask token>
<mask token>
<mask token>
def click_more_games_btn(self):
self.click(self.more_games_btn_loc)
<mask token>
def click_look_all_btn(self):
self.click(self.look_all_btn_loc)
def element_look_all_btn(self):
return self.find_elements(self.look_all_btn_loc)
<mask token>
def text_start_fingerprint_buy(self):
return self.get_text(self.start_fingerprint_buy_loc)
<mask token>
def click_no_more_reminder_btn(self):
self.click(self.no_more_reminder_btn_loc)
<mask token>
def click_cancel_btn(self):
self.click(self.cancel_btn_loc)
<mask token>
def element_usd_password(self):
return self.find_element(self.usd_password_loc)
def click_usd_password(self):
self.click(self.usd_password_loc)
<mask token>
def element_password_error(self):
return self.find_element(self.password_error_loc)
<mask token>
def click_again_btn(self):
self.click(self.again_btn_loc)
<mask token>
def text_payment(self):
return self.get_text(self.payment_loc)
<mask token>
def element_typewriting_finish_btn(self):
return self.find_element(self.typewriting_finish_btn_loc)
def click_typewriting_finish_btn(self):
self.click(self.typewriting_finish_btn_loc)
<mask token>
<mask token>
def element_clock_btn(self):
return self.find_element(self.clock_btn_loc)
<mask token>
def element_no_clock_btn(self):
return self.find_element(self.no_clock_btn_loc)
<mask token>
<mask token>
<mask token>
def click_upload_card_btn(self):
self.click(self.upload_card_btn_loc)
<mask token>
def click_again_upload_card_btn(self):
self.click(self.again_upload_card_btn_loc)
<mask token>
def click_save_img_btn(self):
self.click(self.save_img_btn_loc)
<mask token>
def click_copy_text_btn(self):
self.click(self.copy_text_btn_loc)
<mask token>
def element_copy_format_btn(self):
return self.find_element(self.copy_format_btn_loc)
<mask token>
def click_card_go_btn(self):
self.click(self.card_go_btn_loc)
<mask token>
def click_upload_btn(self):
self.click(self.upload_btn_loc)
<mask token>
def element_today_card_btn(self):
return self.find_element(self.today_card_btn_loc)
<mask token>
def click_reset_img_btn(self):
self.click(self.reset_img_btn_loc)
<mask token>
def element_generated_loading(self):
return self.find_element(self.generated_loading_loc)
<mask token>
def element_reminder_btn(self):
return self.find_element(self.reminder_btn_loc)
<mask token>
def element_page_expired(self):
return self.find_element(self.page_expired_loc)
<mask token>
def click_x_btn(self):
self.click(self.x_btn_loc)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/14 14:31
# @Author : lixiaofeng
# @File : page_zaojiao.py
# @Software: PyCharm
# @desc :
from common.basics import Crazy
class Zaojiaopage(Crazy):
"""早教小程序"""
zao_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="包妈优选"]')
# zao_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="小小包早教"]')
def click_zao(self):
self.click(self.zao_btn_loc)
def element_zao(self):
return self.find_element(self.zao_btn_loc)
find_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/d7b" and @text="发现"]') # 发现按钮
def click_find(self):
self.click(self.find_loc)
title_btn_loc = ('xpath', '//*[@resource-id="android:id/title" and @text="小程序"]') # 发现页小程序按钮
def click_title_btn(self):
self.click(self.title_btn_loc)
helper_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/c5" and @text="小程序助手"]') # 小程序助手
def element_helper(self):
return self.find_element(self.helper_loc)
def click_helper(self):
self.click(self.helper_loc)
small_help_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="小程序助手"]') # 小程序助手
def click_small_help_btn(self):
self.click(self.small_help_btn_loc)
small_name_loc = ('xpath', '//*[contains(@text, "包妈优选")]') # 包妈优选
def element_small_name(self):
return self.find_element(self.small_name_loc)
def click_small_name(self):
self.click(self.small_name_loc)
switching_applet_btn_loc = ('xpath', '//*[contains(@text, "切换小程序")]') # 切换小程序
def click_switching_applet_btn(self):
self.click(self.switching_applet_btn_loc)
delete_small_btn_loc = ('xpath', '//*[contains(@text, "删除")]') # 删除小程序按钮
def click_delete_small_btn(self):
self.click(self.delete_small_btn_loc)
edition_btn_loc = ('xpath', '//*[contains(@text, "百宝福利Buy")]')
def element_edition_btn(self):
return self.find_element(self.edition_btn_loc)
delete_small1_btn_loc = ('xpath', '//*[contains(@text, "拖动到此处删除")]')
def element_delete_small1_btn(self):
return self.find_element(self.delete_small1_btn_loc)
version_btn_loc = ('xpath', '//*[contains(@text, "版本查看")]') # 版本查看按钮
def click_version_btn(self):
self.click(self.version_btn_loc)
experience_version_btn_loc = ('xpath', '//*[contains(@text, "6.0.09")]') # 体验版
def clicks_experience_version_btn(self):
self.clicks(self.experience_version_btn_loc, -1)
audition_class_btn_loc = ('xpath', '//*[contains(@text, "0元领取10节试听课")]') # 领取试听课
def element_audition_class_btn(self):
return self.find_element(self.audition_class_btn_loc)
def click_audition_class_btn(self):
self.click(self.audition_class_btn_loc)
wechat_grant_btn_loc = (('xpath', '//*[contains(@text, "微信授权") and @class="android.widget.Button" ]')) # 微信授权
def click_wechat_grant_btn(self):
self.click(self.wechat_grant_btn_loc)
def double_click_wechat_grant(self):
self.double_click(self.wechat_grant_btn_loc)
def element_wechat_grant_btn(self):
return self.find_element(self.wechat_grant_btn_loc)
allow_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/st" and @text="允许"]') # 完成按钮
def click_allow_btn(self):
self.click(self.allow_btn_loc)
month_btn_loc = ('xpath', '//*[contains(@text, "2018")]') # 选择月份
def click_mouth_btn(self):
self.click(self.month_btn_loc)
sure_btn_loc = ('xpath', '//*[contains(@text, "确定")]') # 确定按钮
def click_sure_btn(self):
self.click(self.sure_btn_loc)
class_info_loc = ('xpath', '//*[contains(@text, "课程介绍")]') # 课程介绍
# class_info_loc = ('xpath', '//android.widget.FrameLayout/android.view.ViewGroup[0]') # 课程介绍
def class_info_btn(self):
self.click(self.class_info_loc)
attend_lectures_btn_loc = ('xpath', '//*[contains(@text, "立即听课")]') # 立即听课
def element_attend_lectures_btn(self):
return self.find_element(self.attend_lectures_btn_loc)
def click_attend_lectures_btn(self):
self.click(self.attend_lectures_btn_loc)
class_btn_loc = ('xpath', '//*[contains(@text, "预备课 预备课")]') # 预备课 预备课
def element_class_btn(self):
return self.find_element(self.class_btn_loc)
get_to_know_btn_loc = ('xpath', '//*[contains(@text, "立即了解正式课 ")]') # 立即了解正式课
def click_get_to_know_btn(self):
self.click(self.get_to_know_btn_loc)
def element_get_to_know_btn(self):
return self.find_element(self.get_to_know_btn_loc)
sure_buy_btn_loc = ('xpath', '//*[contains(@text, "立即购买")]') # 立即购买
def click_sure_buy_btn(self):
self.click(self.sure_buy_btn_loc)
buy_password_loc = ('id', 'com.tencent.mm:id/cfs') # 输入支付密码
def input_buy_password(self, paw):
self.send_keys(self.buy_password_loc, paw)
check_buy_money_loc = ('id', 'com.tencent.mm:id/dlh') # 获取支付金额
def text_buy_money(self):
return self.get_text(self.check_buy_money_loc)
success_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/f8o" and @text="完成"]') # 完成按钮
def click_success_btn(self):
self.click(self.success_btn_loc)
check_address_btn_loc = ('xpath', '//*[contains(@text, "收货地址:请选择地址")]') # 选择收货地址
def click_check_address_btn(self):
self.click(self.check_address_btn_loc)
def element_check_address_btn(self):
return self.find_element(self.check_address_btn_loc)
add_address_btn_loc = ('xpath', '//*[contains(@text, "添加地址")]') # 添加地址
def click_add_address_btn(self):
self.click(self.add_address_btn_loc)
name_loc = ('xpath', '//*[contains(@text, "请输入你的姓名")]') # 请输入你的姓名
def input_name_btn(self, name):
self.send_keys(self.name_loc, name)
phone_btn_loc = ('xpath', '//*[contains(@text, "请填写收件人电话")]') # 请填写收件人电话
def input_phone_btn(self, phone):
self.send_keys(self.phone_btn_loc, phone)
region_btn_loc = ('xpath', '//*[contains(@text, "请输入你所在地区")]') # 请输入你所在地区
def click_region_btn(self):
self.click(self.region_btn_loc)
detailed_address_btn_loc = ('xpath', '//*[contains(@text, "请输入你的详细地址")]') # 请输入你的详细地址
def input_detailed_address_btn(self, address):
self.send_keys(self.detailed_address_btn_loc, address)
save_btn_loc = ('xpath', '//*[contains(@text, "保存")]') # 保存
def click_save_btn(self):
self.click(self.save_btn_loc)
receive_btn_loc = ('xpath', '//*[contains(@text, "立即领取")]') # 立即领取
def click_receive_btn(self):
self.click(self.receive_btn_loc)
addressee_loc = ('xpath', '//*[contains(@text, "收件人:")]') # 地址列表是否有地址信息
def elements_addressee(self):
return self.find_elements(self.addressee_loc)
def clicks_addressee(self):
self.clicks(self.addressee_loc, 0)
know_btn_loc = ('xpath', '//*[contains(@text, "知道了")]') # 地址列表是否有地址信息
def element_know(self):
return self.find_element(self.know_btn_loc)
def click_know(self):
self.click(self.know_btn_loc)
all_curriculum_btn_loc = ('xpath', '//*[contains(@text, "查看全部课程")]') # 查看全部课程
def element_all_curriculum_btn(self):
return self.find_element(self.all_curriculum_btn_loc)
def click_all_curriculum_btn(self):
self.click(self.all_curriculum_btn_loc)
curriculum_date_btn_loc = ('xpath', '//*[contains(@text, "2019-0")]') # 历史推送
def element_curriculum_date_btn(self):
return self.find_element(self.curriculum_date_btn_loc)
my_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/ct" and @text="我的"]') # 我的
def element_my_btn(self):
return self.find_element(self.my_btn_loc)
def click_my(self):
self.click(self.my_btn_loc)
my_baby_btn_loc = ('xpath', '//*[contains(@text, "我的宝宝")]') # 我的宝宝
def click_my_baby(self):
self.click(self.my_baby_btn_loc)
my_baby_title_loc = ('id', 'com.tencent.mm:id/ox')
def text_my_baby_title(self):
return self.get_text(self.my_baby_title_loc)
def elements_title(self):
return self.find_elements(self.my_baby_title_loc)
new_baby_btn_loc = ('xpath', '//*[contains(@text, "新建宝宝")]') # 新建宝宝
def element_new_baby_btn(self):
return self.find_element(self.new_baby_btn_loc)
def click_new_baby_btn(self):
self.click(self.new_baby_btn_loc)
def clicks_new_baby_btn(self, n):
self.clicks(self.new_baby_btn_loc, n)
get_set_loc = ('xpath', '//*[contains(@text, "预备课 预备课")]') # 新建宝宝
def element_get_set(self):
return self.find_element(self.get_set_loc)
next_btn_loc = ('xpath', '//*[contains(@text, "下一步")]') # 我的宝宝
def click_next(self):
self.click(self.next_btn_loc)
baby_name_loc = ('xpath', '//*[contains(@text, "请输入宝宝姓名")]') # 请输入宝宝姓名
def inputs_baby_name(self, name, n):
self.sends_keys(self.baby_name_loc, name, n)
baby_bir_btn_loc = ('xpath', '//*[contains(@text, "宝宝的生日:")]') # 宝宝的生日
def click_baby_bir_btn(self):
self.click(self.baby_bir_btn_loc)
finish_btn_loc = ('xpath', '//*[contains(@text, "完成")]') # 完成按钮
def click_finish_btn(self):
self.click(self.finish_btn_loc)
def clicks_finish_btn(self, n):
self.clicks(self.finish_btn_loc, n)
my_home_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/ct" and @text="首页"]') # 首页
def click_my_home(self):
self.click(self.my_home_loc)
def element_my_home(self):
return self.find_element(self.my_home_loc)
switch_btn_loc = ('xpath', '//*[contains(@text, "切换")]') # 切换
def click_switch_btn(self):
self.click(self.switch_btn_loc)
baby_bri_loc = ('xpath', '//*[contains(@text, "宝宝生日:")]') # 宝宝生日:
def click_baby_bri(self):
self.click(self.baby_bri_loc)
class_img_btn_loc = ('xpath', 'android.widget.Image')
def clicks_class_img(self):
self.clicks(self.class_img_btn_loc, 0)
collection_btn_loc = ('xpath', '//*[contains(@text, "收藏")]') # 收藏
def click_collection_btn(self):
self.click(self.collection_btn_loc)
def clicks_collection_btn(self, n):
self.clicks(self.collection_btn_loc, n)
def element_collection_btn(self):
return self.find_element(self.collection_btn_loc)
write_record_btn_loc = ('xpath', '//*[contains(@text, "写记录") and @class="android.widget.Button" ]') # 写记录按钮
def click_write_record_btn(self):
self.click(self.write_record_btn_loc)
def clicks_write_record_btn(self, n):
self.clicks(self.write_record_btn_loc, n)
album_btn_loc = ('xpath', '//*[contains(@text, "相册")]') # 相册
def click_album_btn(self):
self.click(self.album_btn_loc)
def element_album_btn(self):
return self.find_element(self.album_btn_loc)
small_video_btn_loc = ('xpath', '//*[contains(@text, "小视频")]') # 小视频
def click_small_video_btn(self):
self.click(self.small_video_btn_loc)
def element_small_video_btn(self):
return self.find_element(self.small_video_btn_loc)
release_btn_loc = ('xpath', '//*[contains(@text, "发布")]') # 发布
def click_release_btn(self):
self.click(self.release_btn_loc)
def clicks_release_btn(self, n):
self.clicks(self.release_btn_loc, n)
def element_record_info(self, data): # 判断是否定位到包含text的元素
record_info_loc = ('xpath', '//*[contains(@text, "{}")]'.format(data))
record_info = self.find_element(record_info_loc)
if record_info:
return True
else:
return False
class_name_loc = ('xpath', '//*[contains(@text, "歌曲")]') # 课程名称
# class_name_loc = ('xpath', '//*[contains(@text, "歌曲:Head and shoulders")]') # 课程名称
def click_class_name(self):
self.click(self.class_name_loc)
def clicks_class_name(self, n):
self.clicks(self.class_name_loc, n)
def elements_class_name(self):
return self.find_elements(self.class_name_loc)
class_name2_loc = ('xpath', '//*[contains(@text, "一起走")]') # 课程名称
# class_name2_loc = ('xpath', '//*[contains(@text, "弹出来的画")]') # 课程名称
def click_class2_name(self):
self.click(self.class_name2_loc)
def clicks_class2_name(self, n):
self.clicks(self.class_name2_loc, n)
write_text_loc = ('xpath', '//*[contains(@text, "0/1000")]') # 写记录
def input_write_text(self, text):
self.send_keys(self.write_text_loc, text)
def inputs_write_text(self, text, n):
self.sends_keys(self.write_text_loc, text, n)
choice_album_loc = ('id', 'com.tencent.mm:id/bpy')
def clicks_choice_album(self, n):
self.clicks(self.choice_album_loc, n)
def elements_choice_album(self):
return self.find_elements(self.choice_album_loc)
complete_btn_loc = ('id', 'com.tencent.mm:id/ki') # 完成
def click_complete_btn(self):
self.click(self.complete_btn_loc)
my_collection_btn_loc = ('xpath', '//*[contains(@text, "我的收藏")]') # 我的收藏
def click_my_collection_btn(self):
self.click(self.my_collection_btn_loc)
my_collection_english_course_btn_loc = ('xpath', '//*[contains(@text, "早教")]') # 早教英语课
def elements_my_collection_english_course_btn(self):
return self.find_elements(self.my_collection_english_course_btn_loc)
my_collection_game_course_btn_loc = ('xpath', '//*[contains(@text, "宝宝游戏馆")]') # 宝宝游戏馆
def elements_my_collection_game_course_btn(self):
return self.find_elements(self.my_collection_game_course_btn_loc)
my_course_btn_loc = ('xpath', '//*[contains(@text, "我的课程")]') # 我的课程
def click_my_course_btn(self):
self.click(self.my_course_btn_loc)
my_course_buy_btn_loc = ('xpath', '//*[contains(@text, "早教核心课年卡")]') # 早教核心课年卡
def elements_my_course_buy_btn(self):
return self.find_elements(self.my_course_buy_btn_loc)
my_order_btn_loc = ('xpath', '//*[contains(@text, "我的订单")]') # 我的订单
def click_my_order_btn(self):
self.click(self.my_order_btn_loc)
my_order_card_btn_loc = ('xpath', '//*[contains(@text, "订单编号:")]') # 订单编号:
def elements_my_order_card_btn(self):
return self.find_elements(self.my_order_card_btn_loc)
my_record_btn_loc = ('xpath', '//*[contains(@text, "成长记录")]') # 成长记录
def click_my_record_btn(self):
self.click(self.my_record_btn_loc)
my_record_class_btn_loc = ('xpath', '//*[contains(@text, "#")]') # # 测试英语课程组
def elements_my_record_class_btn(self):
return self.find_elements(self.my_record_class_btn_loc)
back_btn_loc = (
'xpath', '//*[@resource-id="com.tencent.mm:id/on" and @class="android.widget.LinearLayout"]') # 返回按钮
def element_back_btn(self):
return self.find_element(self.back_btn_loc)
def click_back_btn(self):
self.click(self.back_btn_loc)
reply_5_loc = ('xpath', '//android.widget.Image') # 回复5
def click_reply_5(self):
self.click(self.reply_5_loc)
def elements_reply_5(self):
return self.find_elements(self.reply_5_loc)
add_to_btn_loc = ('xpath', '//*[contains(@text, "立即添加")]') # 立即添加
def click_add_to_btn(self):
self.click(self.add_to_btn_loc)
reply_input_5_loc = ('id', 'com.tencent.mm:id/ami')
def input_reply_5(self, num):
self.send_keys(self.reply_input_5_loc, num)
send_5_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/amp" and @text="发送"]') # 发送
def click_send(self):
self.click(self.send_5_loc)
reply_code_loc = ('id', 'com.tencent.mm:id/ap9') # 获取回复的二维码
def elements_reply_code(self):
return self.find_elements(self.reply_code_loc)
def clicks_reply_code(self, n):
self.clicks(self.reply_code_loc, n)
long_code_loc = ('id', 'com.tencent.mm:id/adi') # 长按二维码
def element_long_code(self):
return self.find_element(self.long_code_loc)
def click_long_code(self):
self.click(self.long_code_loc)
discern_code_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="识别图中二维码"]') # 识别图中二维码
def click_discern_code(self):
self.click(self.discern_code_loc)
class_group_loc = ('id', 'android:id/text1') # 群名称
def text_class_group(self):
return self.get_text(self.class_group_loc)
add_group_chat_loc = ('xpath', '//*[contains(@text, "加入该群聊")]') # 加入该群聊
def element_add_group_chat(self):
return self.find_element(self.add_group_chat_loc)
reply_8_loc = ('xpath', '//android.widget.Image') # 回复8的banner 回复8->进公众号->点击推送 看到的二维码
def elements_reply_8(self):
return self.find_elements(self.reply_8_loc)
parent_btn_loc = ('xpath', '//*[contains(@text, "亲爱的家长:")]') # 亲爱的家长:
def element_parent_btn(self):
return self.find_element(self.parent_btn_loc)
info_btn_loc = ('id', 'com.tencent.mm:id/a8q') # 详情
def elements_info_btn(self):
return self.find_elements(self.info_btn_loc)
def clicks_info_btn(self, n):
self.clicks(self.info_btn_loc, n)
more_games_btn_loc = ('xpath', '//*[contains(@text, "更多亲子游戏")]') # 更多亲子游戏
def click_more_games_btn(self):
self.click(self.more_games_btn_loc)
look_all_btn_loc = ('xpath', '//*[contains(@text, "查看全部")]') # 查看全部
def click_look_all_btn(self):
self.click(self.look_all_btn_loc)
def element_look_all_btn(self):
return self.find_elements(self.look_all_btn_loc)
start_fingerprint_buy_loc = ('id', 'com.tencent.mm:id/btp') # 开启指纹支付弹窗文本 开启指纹支付,支付时可通过验证指纹快速完成付款。
def text_start_fingerprint_buy(self):
return self.get_text(self.start_fingerprint_buy_loc)
no_more_reminder_btn_loc = ('id', 'com.tencent.mm:id/btq') # 不再提醒
def click_no_more_reminder_btn(self):
self.click(self.no_more_reminder_btn_loc)
cancel_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/azz" and @text="取消"]') # 取消
def click_cancel_btn(self):
self.click(self.cancel_btn_loc)
usd_password_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/fg4" and @text="使用密码"]') # 使用密码
def element_usd_password(self):
return self.find_element(self.usd_password_loc)
def click_usd_password(self):
self.click(self.usd_password_loc)
password_error_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/d8x" and @text="支付密码错误,请重试"]') # 支付密码错误,请重试
def element_password_error(self):
return self.find_element(self.password_error_loc)
again_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/azz" and @text="重试"]') # 重试
def click_again_btn(self):
self.click(self.again_btn_loc)
payment_loc = ('id', 'com.tencent.mm:id/fg3') # 请输入支付密码 文本
def text_payment(self):
return self.get_text(self.payment_loc)
typewriting_finish_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/z2" and @text="完成"]') # 输入法上的完成按钮
def element_typewriting_finish_btn(self):
return self.find_element(self.typewriting_finish_btn_loc)
def click_typewriting_finish_btn(self):
self.click(self.typewriting_finish_btn_loc)
# 打卡
clock_btn_loc = ('xpath', '//*[contains(@text, "打卡")]') # 打卡
def click_clock_btn(self):
self.click(self.clock_btn_loc)
def element_clock_btn(self):
return self.find_element(self.clock_btn_loc)
# com.tencent.mm:id/ox
no_clock_btn_loc = ('xpath', '//*[contains(@text, "你还未开启打卡")]') # 你还未开启打卡
def element_no_clock_btn(self):
return self.find_element(self.no_clock_btn_loc)
get_card_btn_loc = ('xpath', '//*[@text="获取打卡海报" and @class="android.widget.Button"]') # 获取打卡海报
def click_get_card_btn(self):
self.click(self.get_card_btn_loc)
upload_card_btn_loc = ('xpath', '//*[@text="上传截图" and @class="android.widget.Button"]') # 上传截图
def click_upload_card_btn(self):
self.click(self.upload_card_btn_loc)
again_upload_card_btn_loc = ('xpath', '//*[@text="重新上传截图" and @class="android.widget.Button"]') # 重新上传截图
def click_again_upload_card_btn(self):
self.click(self.again_upload_card_btn_loc)
save_img_btn_loc = ('xpath', '//*[@text="保存图片" and @class="android.widget.Button"]') # 保存图片
def click_save_img_btn(self):
self.click(self.save_img_btn_loc)
copy_text_btn_loc = ('xpath', '//*[@text="复制发圈文案" and @class="android.widget.Button"]') # 复制发圈文案
def click_copy_text_btn(self):
self.click(self.copy_text_btn_loc)
copy_format_btn_loc = ('xpath', '//*[contains(@text, "发布朋友圈截图规范")]') # 发布朋友圈截图规范
def element_copy_format_btn(self):
return self.find_element(self.copy_format_btn_loc)
card_go_btn_loc = ('xpath', '//*[contains(@text, "关闭小程序,去朋友圈打卡截图")]') # 关闭小程序,去朋友圈打卡截图
def click_card_go_btn(self):
self.click(self.card_go_btn_loc)
upload_btn_loc = ('xpath', '//*[@text="上传" and @class="android.widget.Button"]') # 上传
def click_upload_btn(self):
self.click(self.upload_btn_loc)
today_card_btn_loc = ('xpath', '//*[contains(@text, "今日已提交打卡")]') # 今日已提交打卡
def element_today_card_btn(self):
return self.find_element(self.today_card_btn_loc)
reset_img_btn_loc = ('xpath', '//*[@text="重新选择截图" and @class="android.widget.Button"]') # 重新选择截图
def click_reset_img_btn(self):
self.click(self.reset_img_btn_loc)
generated_loading_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cx" and @text="正在生成..."]') # 正在生成...
def element_generated_loading(self):
return self.find_element(self.generated_loading_loc)
reminder_btn_loc = ('xpath', '//*[contains(@text, "温馨提示")]') # 温馨提示
def element_reminder_btn(self):
return self.find_element(self.reminder_btn_loc)
page_expired_loc = ('xpath', '//*[contains(@text, "页面已经过期")]') # 页面已经过期
def element_page_expired(self):
return self.find_element(self.page_expired_loc)
x_btn_loc = ('id', 'com.tencent.mm:id/kx')
def click_x_btn(self):
self.click(self.x_btn_loc)
| [
93,
102,
126,
131,
152
] |
129 | fa8d603fbea287161d31499f96a7fe7e56e8eaa1 | def filtra_acima(wires, origem):
return [wire for wire in wires if wire[0] > origem ]
def filtra_abaixo(wires, destino):
return [wire for wire in wires if wire[1] < destino ]
def calculate(wires):
count = 0
for i in xrange(len(wires)):
wires_acima = filtra_acima(wires, wires[i][0])
wires_abaixo = filtra_abaixo(wires_acima, wires[i][1])
count += len(wires_abaixo)
return count
#print calculate([(1,3), (2,5), (4,1), (6,7)])
#print calculate([(1,10), (5,5), (7,7)])
#print calculate([(1,1), (2,2)])
def read_input(n):
wires = []
for i in xrange(n):
o, d = map(int, raw_input().split())
wires.append( (o,d) )
return wires
for case_number in xrange(int(raw_input())):
n, = map(int, raw_input().split())
wires = read_input(n)
result = calculate(wires)
print 'Case #%d: %s' % (case_number+1, result) | null | null | null | null | [
0
] |
130 | fa3cec0781b9ca5c1d99a7500748104d7cdce631 | <mask token>
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
<mask token>
<mask token>
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
<mask token>
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
| <mask token>
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
<mask token>
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
| <mask token>
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
def storeToFileSystem(self, filename, append):
file = open(filename, ('w+', 'a+')[append], encoding='utf8')
json.dump(self.__dict__, file, indent=4, ensure_ascii=False)
file.close()
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
| import json
import jieba
import util
from pypinyin import pinyin, Style
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
def storeToFileSystem(self, filename, append):
file = open(filename, ('w+', 'a+')[append], encoding='utf8')
json.dump(self.__dict__, file, indent=4, ensure_ascii=False)
file.close()
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
| import json
import jieba
import util
from pypinyin import pinyin, Style
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName)+"-"+ util.sanitizeName(self.songName)
def storeToFileSystem(self, filename, append):
file = open(filename, ("w+","a+")[append],encoding="utf8")
json.dump(self.__dict__, file, indent=4, ensure_ascii=False)
file.close()
def write(self):
file = open(self.getSongName(), "w+")
file.write(self.getLyric())
file.close()
| [
5,
7,
8,
9,
10
] |
131 | 089bdd6d68a69aff6f3c11f7f5ffb75aed73cd24 | <mask token>
def draw_area(dna, room):
area_center, area = DNA_Object.get_room_area(room)
plt.plot(area['x'], area['y'], linewidth='0.5', color='k')
plt.xlim((-15000, 20000))
plt.ylim((-15000, 20000))
title = 'Id' + str(dna['solutionId'])
plt.title(title)
def draw_bounds(minx, miny, maxx, maxy):
x = [minx, maxx, maxx, minx, minx]
y = [miny, miny, maxy, maxy, miny]
plt.plot(x, y, linewidth='0.8', color='r')
def draw_house_area(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
draw_area(dna, room)
"""if room['roomUsageName'] == '主卧':
print('主卧area:',area)"""
return True
else:
return False
def draw_room_area(dna, room_name):
plt.figure()
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
if room['roomName'] == room_name:
draw_area(dna, room)
return True
else:
return False
def draw_house_wall(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'walls' in cur_dict:
wall_num, wall_pos = DNA_Object.get_wall_from_dna(cur_dict)
for i in range(wall_num):
plt.plot(wall_pos['x'][i], wall_pos['y'][i], alpha=0.7, color=
'b', linewidth=1, solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_window(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'windows' in cur_dict:
window_num, window_pos = DNA_Object.get_window_info_from_dna(cur_dict)
for i in range(window_num):
plt.plot(window_pos['x'][i], window_pos['y'][i], alpha=0.7,
color='c', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_door(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'doors' in cur_dict:
door_num, door_pos = DNA_Object.get_door_from_dna(cur_dict)
for i in range(door_num):
plt.plot(door_pos['x'][i], door_pos['y'][i], alpha=0.7, color=
'r', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
<mask token>
def draw_house_obj(dna, obj_category, close_flag):
room_num, room = DNA_Object.get_room_list_from_dna(dna)
count = 0
for i in range(room_num):
flag = draw_room_obj(room[i], obj_category)
if flag == True:
count += 1
if count == 0 and close_flag == True:
plt.close()
<mask token>
| <mask token>
def draw_area(dna, room):
area_center, area = DNA_Object.get_room_area(room)
plt.plot(area['x'], area['y'], linewidth='0.5', color='k')
plt.xlim((-15000, 20000))
plt.ylim((-15000, 20000))
title = 'Id' + str(dna['solutionId'])
plt.title(title)
def draw_bounds(minx, miny, maxx, maxy):
x = [minx, maxx, maxx, minx, minx]
y = [miny, miny, maxy, maxy, miny]
plt.plot(x, y, linewidth='0.8', color='r')
def draw_house_area(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
draw_area(dna, room)
"""if room['roomUsageName'] == '主卧':
print('主卧area:',area)"""
return True
else:
return False
def draw_room_area(dna, room_name):
plt.figure()
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
if room['roomName'] == room_name:
draw_area(dna, room)
return True
else:
return False
def draw_house_wall(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'walls' in cur_dict:
wall_num, wall_pos = DNA_Object.get_wall_from_dna(cur_dict)
for i in range(wall_num):
plt.plot(wall_pos['x'][i], wall_pos['y'][i], alpha=0.7, color=
'b', linewidth=1, solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_window(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'windows' in cur_dict:
window_num, window_pos = DNA_Object.get_window_info_from_dna(cur_dict)
for i in range(window_num):
plt.plot(window_pos['x'][i], window_pos['y'][i], alpha=0.7,
color='c', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_door(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'doors' in cur_dict:
door_num, door_pos = DNA_Object.get_door_from_dna(cur_dict)
for i in range(door_num):
plt.plot(door_pos['x'][i], door_pos['y'][i], alpha=0.7, color=
'r', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
<mask token>
def draw_house_obj(dna, obj_category, close_flag):
room_num, room = DNA_Object.get_room_list_from_dna(dna)
count = 0
for i in range(room_num):
flag = draw_room_obj(room[i], obj_category)
if flag == True:
count += 1
if count == 0 and close_flag == True:
plt.close()
def draw_relative_info(room2bed):
plt.figure()
plt.plot(room2bed['x'], room2bed['y'], '*')
def draw_scatter_distribution(data, title_name, xlabel, ylabel):
plt.figure()
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.plot(data['x'], data['y'], '*')
plt.title(title_name)
plt.plot([0, 10000], [0, 10000], '-')
plt.plot([0, 10000], [3000, 3000], '-')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
| <mask token>
def draw_area(dna, room):
area_center, area = DNA_Object.get_room_area(room)
plt.plot(area['x'], area['y'], linewidth='0.5', color='k')
plt.xlim((-15000, 20000))
plt.ylim((-15000, 20000))
title = 'Id' + str(dna['solutionId'])
plt.title(title)
def draw_bounds(minx, miny, maxx, maxy):
x = [minx, maxx, maxx, minx, minx]
y = [miny, miny, maxy, maxy, miny]
plt.plot(x, y, linewidth='0.8', color='r')
def draw_house_area(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
draw_area(dna, room)
"""if room['roomUsageName'] == '主卧':
print('主卧area:',area)"""
return True
else:
return False
def draw_room_area(dna, room_name):
plt.figure()
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
if room['roomName'] == room_name:
draw_area(dna, room)
return True
else:
return False
def draw_house_wall(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'walls' in cur_dict:
wall_num, wall_pos = DNA_Object.get_wall_from_dna(cur_dict)
for i in range(wall_num):
plt.plot(wall_pos['x'][i], wall_pos['y'][i], alpha=0.7, color=
'b', linewidth=1, solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_window(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'windows' in cur_dict:
window_num, window_pos = DNA_Object.get_window_info_from_dna(cur_dict)
for i in range(window_num):
plt.plot(window_pos['x'][i], window_pos['y'][i], alpha=0.7,
color='c', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_door(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'doors' in cur_dict:
door_num, door_pos = DNA_Object.get_door_from_dna(cur_dict)
for i in range(door_num):
plt.plot(door_pos['x'][i], door_pos['y'][i], alpha=0.7, color=
'r', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_room_obj(room, obj_category):
obj_num, obj_center, obj_size, obj_point = (DNA_Object.
get_obj_info_from_room(room, obj_category))
if obj_num > 0:
plt.plot(obj_point['x'], obj_point['y'], linewidth='0.5')
plt.plot(obj_center[0], obj_center[1], 'o')
return True
else:
return False
def draw_house_obj(dna, obj_category, close_flag):
room_num, room = DNA_Object.get_room_list_from_dna(dna)
count = 0
for i in range(room_num):
flag = draw_room_obj(room[i], obj_category)
if flag == True:
count += 1
if count == 0 and close_flag == True:
plt.close()
def draw_relative_info(room2bed):
plt.figure()
plt.plot(room2bed['x'], room2bed['y'], '*')
def draw_scatter_distribution(data, title_name, xlabel, ylabel):
plt.figure()
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.plot(data['x'], data['y'], '*')
plt.title(title_name)
plt.plot([0, 10000], [0, 10000], '-')
plt.plot([0, 10000], [3000, 3000], '-')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
| <mask token>
import matplotlib.pyplot as plt
import DNA_Object
import json
def draw_area(dna, room):
area_center, area = DNA_Object.get_room_area(room)
plt.plot(area['x'], area['y'], linewidth='0.5', color='k')
plt.xlim((-15000, 20000))
plt.ylim((-15000, 20000))
title = 'Id' + str(dna['solutionId'])
plt.title(title)
def draw_bounds(minx, miny, maxx, maxy):
x = [minx, maxx, maxx, minx, minx]
y = [miny, miny, maxy, maxy, miny]
plt.plot(x, y, linewidth='0.8', color='r')
def draw_house_area(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
draw_area(dna, room)
"""if room['roomUsageName'] == '主卧':
print('主卧area:',area)"""
return True
else:
return False
def draw_room_area(dna, room_name):
plt.figure()
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
if room['roomName'] == room_name:
draw_area(dna, room)
return True
else:
return False
def draw_house_wall(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'walls' in cur_dict:
wall_num, wall_pos = DNA_Object.get_wall_from_dna(cur_dict)
for i in range(wall_num):
plt.plot(wall_pos['x'][i], wall_pos['y'][i], alpha=0.7, color=
'b', linewidth=1, solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_window(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'windows' in cur_dict:
window_num, window_pos = DNA_Object.get_window_info_from_dna(cur_dict)
for i in range(window_num):
plt.plot(window_pos['x'][i], window_pos['y'][i], alpha=0.7,
color='c', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_door(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'doors' in cur_dict:
door_num, door_pos = DNA_Object.get_door_from_dna(cur_dict)
for i in range(door_num):
plt.plot(door_pos['x'][i], door_pos['y'][i], alpha=0.7, color=
'r', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_room_obj(room, obj_category):
obj_num, obj_center, obj_size, obj_point = (DNA_Object.
get_obj_info_from_room(room, obj_category))
if obj_num > 0:
plt.plot(obj_point['x'], obj_point['y'], linewidth='0.5')
plt.plot(obj_center[0], obj_center[1], 'o')
return True
else:
return False
def draw_house_obj(dna, obj_category, close_flag):
room_num, room = DNA_Object.get_room_list_from_dna(dna)
count = 0
for i in range(room_num):
flag = draw_room_obj(room[i], obj_category)
if flag == True:
count += 1
if count == 0 and close_flag == True:
plt.close()
def draw_relative_info(room2bed):
plt.figure()
plt.plot(room2bed['x'], room2bed['y'], '*')
def draw_scatter_distribution(data, title_name, xlabel, ylabel):
plt.figure()
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.plot(data['x'], data['y'], '*')
plt.title(title_name)
plt.plot([0, 10000], [0, 10000], '-')
plt.plot([0, 10000], [3000, 3000], '-')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
| # -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 14:52:03 2018
@author: mayn
"""
import matplotlib.pyplot as plt
import DNA_Object
import json
def draw_area(dna, room):
area_center, area = DNA_Object.get_room_area(room)
plt.plot(area['x'], area['y'], linewidth='0.5', color='k',)
plt.xlim((-15000, 20000))
plt.ylim((-15000, 20000))
#plt.plot(area_center[0], area_center[1], '*', linewidth='0.5')
title = 'Id'+str(dna['solutionId'])
plt.title(title)
def draw_bounds(minx, miny, maxx, maxy):
x = [minx, maxx, maxx, minx, minx]
y = [miny, miny, maxy, maxy, miny]
plt.plot(x, y, linewidth = '0.8', color='r')
def draw_house_area(dna):
#plt.figure()
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
draw_area(dna, room)
#bed = 318
#draw_room_obj(room, bed)
'''if room['roomUsageName'] == '主卧':
print('主卧area:',area)'''
#plt.show()
return True
else:
return False
def draw_room_area(dna, room_name):
plt.figure()
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if cur_dict:
room_num = len(cur_dict['roomList'])
for i in range(room_num):
room = cur_dict['roomList'][i]
if room['roomName'] == room_name:
draw_area(dna, room)
#bed = 318
#draw_room_obj(room, bed)
return True
else:
return False
def draw_house_wall(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'walls' in cur_dict:
wall_num, wall_pos = DNA_Object.get_wall_from_dna(cur_dict)
for i in range(wall_num):
plt.plot(wall_pos['x'][i], wall_pos['y'][i], alpha=0.7, color='b', linewidth=1, solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_window(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'windows' in cur_dict:
window_num, window_pos = DNA_Object.get_window_info_from_dna(cur_dict)
for i in range(window_num):
plt.plot(window_pos['x'][i], window_pos['y'][i], alpha=0.7, color='c', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_house_door(dna):
cur_dict = {}
if 'roomList' in dna:
cur_dict = dna
elif 'request' in dna and 'feedback' not in dna:
cur_dict = json.loads(dna['request'])
elif 'feedback' in dna:
cur_dict = json.loads(dna['feedback'])
if 'doors' in cur_dict:
door_num, door_pos = DNA_Object.get_door_from_dna(cur_dict)
for i in range(door_num):
#print('【door',i,'pos】', door_pos['x'][i], door_pos['y'][i])
plt.plot(door_pos['x'][i], door_pos['y'][i], alpha=0.7, color='r', linewidth='0.5', solid_capstyle='round', zorder=2)
return True
else:
return False
def draw_room_obj(room, obj_category):
obj_num, obj_center, obj_size, obj_point= DNA_Object.get_obj_info_from_room(room, obj_category)
if obj_num > 0:
plt.plot(obj_point['x'], obj_point['y'], linewidth='0.5')
#print(bed_center)
plt.plot(obj_center[0], obj_center[1], 'o')
return True
else:
return False
def draw_house_obj(dna, obj_category, close_flag):
room_num, room = DNA_Object.get_room_list_from_dna(dna)
count = 0
for i in range(room_num):
flag = draw_room_obj(room[i], obj_category)
if flag == True:
count += 1
if count == 0 and close_flag == True:
plt.close()
def draw_relative_info(room2bed):
plt.figure()
plt.plot(room2bed['x'], room2bed['y'], '*')
def draw_scatter_distribution(data, title_name, xlabel, ylabel):
plt.figure()
#解决中文显示问题
plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签
plt.rcParams['axes.unicode_minus']=False #用来正常显示负号
plt.plot(data['x'], data['y'], '*')
plt.title(title_name)
#plt.grid(True, linestyle="-.", color='k', linewidth='0.5')
plt.plot([0, 10000], [0, 10000], '-')
plt.plot([0, 10000], [3000, 3000], '-')
plt.xlabel(xlabel)
plt.ylabel(ylabel) | [
8,
10,
11,
12,
13
] |
132 | 50a4084dd3028acc2e6788e77794c100efcb3fac | <mask token>
class Agent:
<mask token>
<mask token>
<mask token>
| <mask token>
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + '/agent.pkl')
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
<mask token>
| <mask token>
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + '/agent.pkl')
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
def reset(self):
pass
| import random
import numpy as np
import os
import torch
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + '/agent.pkl')
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
def reset(self):
pass
| import random
import numpy as np
import os
import torch
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + "/agent.pkl")
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
def reset(self):
pass
| [
1,
3,
4,
5,
6
] |
133 | de704bffe2e23a8a83d34204e325b7fb2454ef66 | <mask token>
def build_recursive_traversal_spec(client_factory):
rp_to_rp = client_factory.create('ns0:TraversalSpec')
rp_to_rp.name = 'rpToRp'
rp_to_rp.type = 'ResourcePool'
rp_to_rp.path = 'resourcePool'
rp_to_rp.skip = False
rp_to_vm = client_factory.create('ns0:TraversalSpec')
rp_to_vm.name = 'rpToVm'
rp_to_vm.type = 'ResourcePool'
rp_to_vm.path = 'vm'
rp_to_vm.skip = False
spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_resource_pool[0].name = 'rpToRp'
spec_array_resource_pool[1].name = 'rpToVm'
rp_to_rp.selectSet = spec_array_resource_pool
cr_to_rp = client_factory.create('ns0:TraversalSpec')
cr_to_rp.name = 'crToRp'
cr_to_rp.type = 'ComputeResource'
cr_to_rp.path = 'resourcePool'
cr_to_rp.skip = False
spec_array_compute_resource = [client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]
spec_array_compute_resource[0].name = 'rpToRp'
spec_array_compute_resource[1].name = 'rpToVm'
cr_to_rp.selectSet = spec_array_compute_resource
cr_to_h = client_factory.create('ns0:TraversalSpec')
cr_to_h.name = 'crToH'
cr_to_h.type = 'ComputeResource'
cr_to_h.path = 'host'
cr_to_h.skip = False
dc_to_hf = client_factory.create('ns0:TraversalSpec')
dc_to_hf.name = 'dcToHf'
dc_to_hf.type = 'Datacenter'
dc_to_hf.path = 'hostFolder'
dc_to_hf.skip = False
spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_host[0].name = 'visitFolders'
dc_to_hf.selectSet = spec_array_datacenter_host
dc_to_vmf = client_factory.create('ns0:TraversalSpec')
dc_to_vmf.name = 'dcToVmf'
dc_to_vmf.type = 'Datacenter'
dc_to_vmf.path = 'vmFolder'
dc_to_vmf.skip = False
spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_vm[0].name = 'visitFolders'
dc_to_vmf.selectSet = spec_array_datacenter_vm
dc_to_ds = client_factory.create('ns0:TraversalSpec')
dc_to_ds.name = 'dcToDs'
dc_to_ds.type = 'Datacenter'
dc_to_ds.path = 'datastore'
dc_to_ds.skip = False
spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_ds[0].name = 'visitFolders'
dc_to_ds.selectSet = spec_array_datacenter_ds
h_to_vm = client_factory.create('ns0:TraversalSpec')
h_to_vm.name = 'hToVm'
h_to_vm.type = 'HostSystem'
h_to_vm.path = 'vm'
h_to_vm.skip = False
spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_host_vm[0].name = 'visitFolders'
h_to_vm.selectSet = spec_array_host_vm
ds_to_vm = client_factory.create('ns0:TraversalSpec')
ds_to_vm.name = 'dsToVm'
ds_to_vm.type = 'Datastore'
ds_to_vm.path = 'vm'
ds_to_vm.skip = False
spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datastore_vm[0].name = 'visitFolders'
ds_to_vm.selectSet = spec_array_datastore_vm
visit_folders = client_factory.create('ns0:TraversalSpec')
visit_folders.name = 'visitFolders'
visit_folders.type = 'Folder'
visit_folders.path = 'childEntity'
visit_folders.skip = False
spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec')]
spec_array_visit_folders[0].name = 'visitFolders'
spec_array_visit_folders[1].name = 'dcToHf'
spec_array_visit_folders[2].name = 'dcToVmf'
spec_array_visit_folders[3].name = 'crToH'
spec_array_visit_folders[4].name = 'crToRp'
spec_array_visit_folders[5].name = 'dcToDs'
spec_array_visit_folders[6].name = 'hToVm'
spec_array_visit_folders[7].name = 'dsToVm'
spec_array_visit_folders[8].name = 'rpToVm'
visit_folders.selectSet = spec_array_visit_folders
spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,
cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]
return spec_array
def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = properties is None or len(properties) == 0
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return retrieve_properties_ex(vim, usecoll, [property_filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
properties = get_dynamic_properties(vim, mobj, [property_name], type)
property_value = None
if property_name in properties:
property_value = properties.get(property_name)
return property_value
<mask token>
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_property_filter_specs(vim, property_dict, objects=None):
client_factory = vim.client.factory
object_specs = []
if not objects:
objects = [vim.service_content.rootFolder]
for obj in objects:
if obj.value == get_root_folder_id(vim):
traversal_spec = [vim_util.build_recursive_traversal_spec(
client_factory)]
else:
traversal_spec = build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory, obj,
traversal_spec)
object_specs.append(object_spec)
property_specs = []
for obj_type in property_dict:
props = property_dict[obj_type]
property_spec = vim_util.build_property_spec(client_factory, type_=
obj_type, properties_to_collect=props)
property_specs.append(property_spec)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
property_specs, object_specs)
return property_filter_spec
<mask token>
def create_property_collector(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreatePropertyCollector(collector)
<mask token>
def cancel_wait_for_updates(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CancelWaitForUpdates(collector)
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties
):
"""Gets the list of properties for the collection of objects."""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,
[prop_spec])
return retrieve_properties_ex(vim, vim.service_content.
propertyCollector, [prop_filter_spec])
<mask token>
def find_by_inventory_path(vim, search_index, path):
return vim.FindByInventoryPath(search_index, inventoryPath=path)
def get_root_folder_id(vim):
return vim.service_content.rootFolder.value
def get_dv_switch_manager(vim):
"""Get reference of DistributedVirtualSwitchManager."""
return vim.service_content.dvSwitchManager
def get_dvs_mor_by_uuid(vim, uuid):
"""Query DVS by UUID."""
dvs_mgr = get_dv_switch_manager(vim)
return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)
| <mask token>
def build_recursive_traversal_spec(client_factory):
rp_to_rp = client_factory.create('ns0:TraversalSpec')
rp_to_rp.name = 'rpToRp'
rp_to_rp.type = 'ResourcePool'
rp_to_rp.path = 'resourcePool'
rp_to_rp.skip = False
rp_to_vm = client_factory.create('ns0:TraversalSpec')
rp_to_vm.name = 'rpToVm'
rp_to_vm.type = 'ResourcePool'
rp_to_vm.path = 'vm'
rp_to_vm.skip = False
spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_resource_pool[0].name = 'rpToRp'
spec_array_resource_pool[1].name = 'rpToVm'
rp_to_rp.selectSet = spec_array_resource_pool
cr_to_rp = client_factory.create('ns0:TraversalSpec')
cr_to_rp.name = 'crToRp'
cr_to_rp.type = 'ComputeResource'
cr_to_rp.path = 'resourcePool'
cr_to_rp.skip = False
spec_array_compute_resource = [client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]
spec_array_compute_resource[0].name = 'rpToRp'
spec_array_compute_resource[1].name = 'rpToVm'
cr_to_rp.selectSet = spec_array_compute_resource
cr_to_h = client_factory.create('ns0:TraversalSpec')
cr_to_h.name = 'crToH'
cr_to_h.type = 'ComputeResource'
cr_to_h.path = 'host'
cr_to_h.skip = False
dc_to_hf = client_factory.create('ns0:TraversalSpec')
dc_to_hf.name = 'dcToHf'
dc_to_hf.type = 'Datacenter'
dc_to_hf.path = 'hostFolder'
dc_to_hf.skip = False
spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_host[0].name = 'visitFolders'
dc_to_hf.selectSet = spec_array_datacenter_host
dc_to_vmf = client_factory.create('ns0:TraversalSpec')
dc_to_vmf.name = 'dcToVmf'
dc_to_vmf.type = 'Datacenter'
dc_to_vmf.path = 'vmFolder'
dc_to_vmf.skip = False
spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_vm[0].name = 'visitFolders'
dc_to_vmf.selectSet = spec_array_datacenter_vm
dc_to_ds = client_factory.create('ns0:TraversalSpec')
dc_to_ds.name = 'dcToDs'
dc_to_ds.type = 'Datacenter'
dc_to_ds.path = 'datastore'
dc_to_ds.skip = False
spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_ds[0].name = 'visitFolders'
dc_to_ds.selectSet = spec_array_datacenter_ds
h_to_vm = client_factory.create('ns0:TraversalSpec')
h_to_vm.name = 'hToVm'
h_to_vm.type = 'HostSystem'
h_to_vm.path = 'vm'
h_to_vm.skip = False
spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_host_vm[0].name = 'visitFolders'
h_to_vm.selectSet = spec_array_host_vm
ds_to_vm = client_factory.create('ns0:TraversalSpec')
ds_to_vm.name = 'dsToVm'
ds_to_vm.type = 'Datastore'
ds_to_vm.path = 'vm'
ds_to_vm.skip = False
spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datastore_vm[0].name = 'visitFolders'
ds_to_vm.selectSet = spec_array_datastore_vm
visit_folders = client_factory.create('ns0:TraversalSpec')
visit_folders.name = 'visitFolders'
visit_folders.type = 'Folder'
visit_folders.path = 'childEntity'
visit_folders.skip = False
spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec')]
spec_array_visit_folders[0].name = 'visitFolders'
spec_array_visit_folders[1].name = 'dcToHf'
spec_array_visit_folders[2].name = 'dcToVmf'
spec_array_visit_folders[3].name = 'crToH'
spec_array_visit_folders[4].name = 'crToRp'
spec_array_visit_folders[5].name = 'dcToDs'
spec_array_visit_folders[6].name = 'hToVm'
spec_array_visit_folders[7].name = 'dsToVm'
spec_array_visit_folders[8].name = 'rpToVm'
visit_folders.selectSet = spec_array_visit_folders
spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,
cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]
return spec_array
def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = properties is None or len(properties) == 0
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return retrieve_properties_ex(vim, usecoll, [property_filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
properties = get_dynamic_properties(vim, mobj, [property_name], type)
property_value = None
if property_name in properties:
property_value = properties.get(property_name)
return property_value
<mask token>
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_property_filter_specs(vim, property_dict, objects=None):
client_factory = vim.client.factory
object_specs = []
if not objects:
objects = [vim.service_content.rootFolder]
for obj in objects:
if obj.value == get_root_folder_id(vim):
traversal_spec = [vim_util.build_recursive_traversal_spec(
client_factory)]
else:
traversal_spec = build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory, obj,
traversal_spec)
object_specs.append(object_spec)
property_specs = []
for obj_type in property_dict:
props = property_dict[obj_type]
property_spec = vim_util.build_property_spec(client_factory, type_=
obj_type, properties_to_collect=props)
property_specs.append(property_spec)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
property_specs, object_specs)
return property_filter_spec
def create_filter(vim, prop_filter_spec, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreateFilter(collector, spec=prop_filter_spec,
partialUpdates=False)
def create_property_collector(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreatePropertyCollector(collector)
<mask token>
def wait_for_updates_ex(vim, version, collector=None, max_wait=85,
max_update_count=-1):
"""Polling mechanism for property collection
args:
:param vim: Vim object
:param version: version string
:param collector: PropertyCollector MOR
:param max_wait: Max time in seconds before the call returns
(Default set to 85 as 90 is the http socket timeout)
:param max_update_count: Max num of ObjectUpdates returned
in a single call. Not set if <= 0
"""
client_factory = vim.client.factory
waitopts = client_factory.create('ns0:WaitOptions')
waitopts.maxWaitSeconds = max_wait
if max_update_count > 0:
waitopts.maxObjectUpdates = max_update_count
if not collector:
collector = vim.service_content.propertyCollector
return vim.WaitForUpdatesEx(collector, version=version, options=waitopts)
def cancel_wait_for_updates(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CancelWaitForUpdates(collector)
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties
):
"""Gets the list of properties for the collection of objects."""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,
[prop_spec])
return retrieve_properties_ex(vim, vim.service_content.
propertyCollector, [prop_filter_spec])
<mask token>
def find_by_inventory_path(vim, search_index, path):
return vim.FindByInventoryPath(search_index, inventoryPath=path)
def get_root_folder_id(vim):
return vim.service_content.rootFolder.value
def get_dv_switch_manager(vim):
"""Get reference of DistributedVirtualSwitchManager."""
return vim.service_content.dvSwitchManager
def get_dvs_mor_by_uuid(vim, uuid):
"""Query DVS by UUID."""
dvs_mgr = get_dv_switch_manager(vim)
return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)
| <mask token>
def build_recursive_traversal_spec(client_factory):
rp_to_rp = client_factory.create('ns0:TraversalSpec')
rp_to_rp.name = 'rpToRp'
rp_to_rp.type = 'ResourcePool'
rp_to_rp.path = 'resourcePool'
rp_to_rp.skip = False
rp_to_vm = client_factory.create('ns0:TraversalSpec')
rp_to_vm.name = 'rpToVm'
rp_to_vm.type = 'ResourcePool'
rp_to_vm.path = 'vm'
rp_to_vm.skip = False
spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_resource_pool[0].name = 'rpToRp'
spec_array_resource_pool[1].name = 'rpToVm'
rp_to_rp.selectSet = spec_array_resource_pool
cr_to_rp = client_factory.create('ns0:TraversalSpec')
cr_to_rp.name = 'crToRp'
cr_to_rp.type = 'ComputeResource'
cr_to_rp.path = 'resourcePool'
cr_to_rp.skip = False
spec_array_compute_resource = [client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]
spec_array_compute_resource[0].name = 'rpToRp'
spec_array_compute_resource[1].name = 'rpToVm'
cr_to_rp.selectSet = spec_array_compute_resource
cr_to_h = client_factory.create('ns0:TraversalSpec')
cr_to_h.name = 'crToH'
cr_to_h.type = 'ComputeResource'
cr_to_h.path = 'host'
cr_to_h.skip = False
dc_to_hf = client_factory.create('ns0:TraversalSpec')
dc_to_hf.name = 'dcToHf'
dc_to_hf.type = 'Datacenter'
dc_to_hf.path = 'hostFolder'
dc_to_hf.skip = False
spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_host[0].name = 'visitFolders'
dc_to_hf.selectSet = spec_array_datacenter_host
dc_to_vmf = client_factory.create('ns0:TraversalSpec')
dc_to_vmf.name = 'dcToVmf'
dc_to_vmf.type = 'Datacenter'
dc_to_vmf.path = 'vmFolder'
dc_to_vmf.skip = False
spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_vm[0].name = 'visitFolders'
dc_to_vmf.selectSet = spec_array_datacenter_vm
dc_to_ds = client_factory.create('ns0:TraversalSpec')
dc_to_ds.name = 'dcToDs'
dc_to_ds.type = 'Datacenter'
dc_to_ds.path = 'datastore'
dc_to_ds.skip = False
spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_ds[0].name = 'visitFolders'
dc_to_ds.selectSet = spec_array_datacenter_ds
h_to_vm = client_factory.create('ns0:TraversalSpec')
h_to_vm.name = 'hToVm'
h_to_vm.type = 'HostSystem'
h_to_vm.path = 'vm'
h_to_vm.skip = False
spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_host_vm[0].name = 'visitFolders'
h_to_vm.selectSet = spec_array_host_vm
ds_to_vm = client_factory.create('ns0:TraversalSpec')
ds_to_vm.name = 'dsToVm'
ds_to_vm.type = 'Datastore'
ds_to_vm.path = 'vm'
ds_to_vm.skip = False
spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datastore_vm[0].name = 'visitFolders'
ds_to_vm.selectSet = spec_array_datastore_vm
visit_folders = client_factory.create('ns0:TraversalSpec')
visit_folders.name = 'visitFolders'
visit_folders.type = 'Folder'
visit_folders.path = 'childEntity'
visit_folders.skip = False
spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec')]
spec_array_visit_folders[0].name = 'visitFolders'
spec_array_visit_folders[1].name = 'dcToHf'
spec_array_visit_folders[2].name = 'dcToVmf'
spec_array_visit_folders[3].name = 'crToH'
spec_array_visit_folders[4].name = 'crToRp'
spec_array_visit_folders[5].name = 'dcToDs'
spec_array_visit_folders[6].name = 'hToVm'
spec_array_visit_folders[7].name = 'dsToVm'
spec_array_visit_folders[8].name = 'rpToVm'
visit_folders.selectSet = spec_array_visit_folders
spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,
cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]
return spec_array
def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = properties is None or len(properties) == 0
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return retrieve_properties_ex(vim, usecoll, [property_filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
properties = get_dynamic_properties(vim, mobj, [property_name], type)
property_value = None
if property_name in properties:
property_value = properties.get(property_name)
return property_value
<mask token>
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_property_filter_specs(vim, property_dict, objects=None):
client_factory = vim.client.factory
object_specs = []
if not objects:
objects = [vim.service_content.rootFolder]
for obj in objects:
if obj.value == get_root_folder_id(vim):
traversal_spec = [vim_util.build_recursive_traversal_spec(
client_factory)]
else:
traversal_spec = build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory, obj,
traversal_spec)
object_specs.append(object_spec)
property_specs = []
for obj_type in property_dict:
props = property_dict[obj_type]
property_spec = vim_util.build_property_spec(client_factory, type_=
obj_type, properties_to_collect=props)
property_specs.append(property_spec)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
property_specs, object_specs)
return property_filter_spec
def create_filter(vim, prop_filter_spec, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreateFilter(collector, spec=prop_filter_spec,
partialUpdates=False)
def create_property_collector(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreatePropertyCollector(collector)
def destroy_property_collector(vim, collector):
if collector:
return vim.DestroyPropertyCollector(collector)
def wait_for_updates_ex(vim, version, collector=None, max_wait=85,
max_update_count=-1):
"""Polling mechanism for property collection
args:
:param vim: Vim object
:param version: version string
:param collector: PropertyCollector MOR
:param max_wait: Max time in seconds before the call returns
(Default set to 85 as 90 is the http socket timeout)
:param max_update_count: Max num of ObjectUpdates returned
in a single call. Not set if <= 0
"""
client_factory = vim.client.factory
waitopts = client_factory.create('ns0:WaitOptions')
waitopts.maxWaitSeconds = max_wait
if max_update_count > 0:
waitopts.maxObjectUpdates = max_update_count
if not collector:
collector = vim.service_content.propertyCollector
return vim.WaitForUpdatesEx(collector, version=version, options=waitopts)
def cancel_wait_for_updates(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CancelWaitForUpdates(collector)
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties
):
"""Gets the list of properties for the collection of objects."""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,
[prop_spec])
return retrieve_properties_ex(vim, vim.service_content.
propertyCollector, [prop_filter_spec])
<mask token>
def find_by_inventory_path(vim, search_index, path):
return vim.FindByInventoryPath(search_index, inventoryPath=path)
def get_root_folder_id(vim):
return vim.service_content.rootFolder.value
def get_dv_switch_manager(vim):
"""Get reference of DistributedVirtualSwitchManager."""
return vim.service_content.dvSwitchManager
def get_dvs_mor_by_uuid(vim, uuid):
"""Query DVS by UUID."""
dvs_mgr = get_dv_switch_manager(vim)
return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)
| <mask token>
def build_recursive_traversal_spec(client_factory):
rp_to_rp = client_factory.create('ns0:TraversalSpec')
rp_to_rp.name = 'rpToRp'
rp_to_rp.type = 'ResourcePool'
rp_to_rp.path = 'resourcePool'
rp_to_rp.skip = False
rp_to_vm = client_factory.create('ns0:TraversalSpec')
rp_to_vm.name = 'rpToVm'
rp_to_vm.type = 'ResourcePool'
rp_to_vm.path = 'vm'
rp_to_vm.skip = False
spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_resource_pool[0].name = 'rpToRp'
spec_array_resource_pool[1].name = 'rpToVm'
rp_to_rp.selectSet = spec_array_resource_pool
cr_to_rp = client_factory.create('ns0:TraversalSpec')
cr_to_rp.name = 'crToRp'
cr_to_rp.type = 'ComputeResource'
cr_to_rp.path = 'resourcePool'
cr_to_rp.skip = False
spec_array_compute_resource = [client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]
spec_array_compute_resource[0].name = 'rpToRp'
spec_array_compute_resource[1].name = 'rpToVm'
cr_to_rp.selectSet = spec_array_compute_resource
cr_to_h = client_factory.create('ns0:TraversalSpec')
cr_to_h.name = 'crToH'
cr_to_h.type = 'ComputeResource'
cr_to_h.path = 'host'
cr_to_h.skip = False
dc_to_hf = client_factory.create('ns0:TraversalSpec')
dc_to_hf.name = 'dcToHf'
dc_to_hf.type = 'Datacenter'
dc_to_hf.path = 'hostFolder'
dc_to_hf.skip = False
spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_host[0].name = 'visitFolders'
dc_to_hf.selectSet = spec_array_datacenter_host
dc_to_vmf = client_factory.create('ns0:TraversalSpec')
dc_to_vmf.name = 'dcToVmf'
dc_to_vmf.type = 'Datacenter'
dc_to_vmf.path = 'vmFolder'
dc_to_vmf.skip = False
spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_vm[0].name = 'visitFolders'
dc_to_vmf.selectSet = spec_array_datacenter_vm
dc_to_ds = client_factory.create('ns0:TraversalSpec')
dc_to_ds.name = 'dcToDs'
dc_to_ds.type = 'Datacenter'
dc_to_ds.path = 'datastore'
dc_to_ds.skip = False
spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_ds[0].name = 'visitFolders'
dc_to_ds.selectSet = spec_array_datacenter_ds
h_to_vm = client_factory.create('ns0:TraversalSpec')
h_to_vm.name = 'hToVm'
h_to_vm.type = 'HostSystem'
h_to_vm.path = 'vm'
h_to_vm.skip = False
spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_host_vm[0].name = 'visitFolders'
h_to_vm.selectSet = spec_array_host_vm
ds_to_vm = client_factory.create('ns0:TraversalSpec')
ds_to_vm.name = 'dsToVm'
ds_to_vm.type = 'Datastore'
ds_to_vm.path = 'vm'
ds_to_vm.skip = False
spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datastore_vm[0].name = 'visitFolders'
ds_to_vm.selectSet = spec_array_datastore_vm
visit_folders = client_factory.create('ns0:TraversalSpec')
visit_folders.name = 'visitFolders'
visit_folders.type = 'Folder'
visit_folders.path = 'childEntity'
visit_folders.skip = False
spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'), client_factory.create(
'ns0:SelectionSpec')]
spec_array_visit_folders[0].name = 'visitFolders'
spec_array_visit_folders[1].name = 'dcToHf'
spec_array_visit_folders[2].name = 'dcToVmf'
spec_array_visit_folders[3].name = 'crToH'
spec_array_visit_folders[4].name = 'crToRp'
spec_array_visit_folders[5].name = 'dcToDs'
spec_array_visit_folders[6].name = 'hToVm'
spec_array_visit_folders[7].name = 'dsToVm'
spec_array_visit_folders[8].name = 'rpToVm'
visit_folders.selectSet = spec_array_visit_folders
spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,
cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]
return spec_array
def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = properties is None or len(properties) == 0
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return retrieve_properties_ex(vim, usecoll, [property_filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
properties = get_dynamic_properties(vim, mobj, [property_name], type)
property_value = None
if property_name in properties:
property_value = properties.get(property_name)
return property_value
<mask token>
def get_prop_spec(client_factory, spec_type, properties):
"""Builds the Property Spec Object."""
prop_spec = client_factory.create('ns0:PropertySpec')
prop_spec.type = spec_type
prop_spec.pathSet = properties
return prop_spec
def get_obj_spec(client_factory, obj, select_set=None):
"""Builds the Object Spec object."""
obj_spec = client_factory.create('ns0:ObjectSpec')
obj_spec.obj = obj
obj_spec.skip = False
if select_set is not None:
obj_spec.selectSet = select_set
return obj_spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_property_filter_specs(vim, property_dict, objects=None):
client_factory = vim.client.factory
object_specs = []
if not objects:
objects = [vim.service_content.rootFolder]
for obj in objects:
if obj.value == get_root_folder_id(vim):
traversal_spec = [vim_util.build_recursive_traversal_spec(
client_factory)]
else:
traversal_spec = build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory, obj,
traversal_spec)
object_specs.append(object_spec)
property_specs = []
for obj_type in property_dict:
props = property_dict[obj_type]
property_spec = vim_util.build_property_spec(client_factory, type_=
obj_type, properties_to_collect=props)
property_specs.append(property_spec)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
property_specs, object_specs)
return property_filter_spec
def create_filter(vim, prop_filter_spec, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreateFilter(collector, spec=prop_filter_spec,
partialUpdates=False)
def create_property_collector(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreatePropertyCollector(collector)
def destroy_property_collector(vim, collector):
if collector:
return vim.DestroyPropertyCollector(collector)
def wait_for_updates_ex(vim, version, collector=None, max_wait=85,
max_update_count=-1):
"""Polling mechanism for property collection
args:
:param vim: Vim object
:param version: version string
:param collector: PropertyCollector MOR
:param max_wait: Max time in seconds before the call returns
(Default set to 85 as 90 is the http socket timeout)
:param max_update_count: Max num of ObjectUpdates returned
in a single call. Not set if <= 0
"""
client_factory = vim.client.factory
waitopts = client_factory.create('ns0:WaitOptions')
waitopts.maxWaitSeconds = max_wait
if max_update_count > 0:
waitopts.maxObjectUpdates = max_update_count
if not collector:
collector = vim.service_content.propertyCollector
return vim.WaitForUpdatesEx(collector, version=version, options=waitopts)
def cancel_wait_for_updates(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CancelWaitForUpdates(collector)
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties
):
"""Gets the list of properties for the collection of objects."""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,
[prop_spec])
return retrieve_properties_ex(vim, vim.service_content.
propertyCollector, [prop_filter_spec])
def get_search_index(vim):
return vim.service_content.searchIndex
def find_by_inventory_path(vim, search_index, path):
return vim.FindByInventoryPath(search_index, inventoryPath=path)
def get_root_folder_id(vim):
return vim.service_content.rootFolder.value
def get_dv_switch_manager(vim):
"""Get reference of DistributedVirtualSwitchManager."""
return vim.service_content.dvSwitchManager
def get_dvs_mor_by_uuid(vim, uuid):
"""Query DVS by UUID."""
dvs_mgr = get_dv_switch_manager(vim)
return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)
| # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_vmware import vim_util
def build_recursive_traversal_spec(client_factory):
# Recurse through all ResourcePools
rp_to_rp = client_factory.create('ns0:TraversalSpec')
rp_to_rp.name = 'rpToRp'
rp_to_rp.type = 'ResourcePool'
rp_to_rp.path = 'resourcePool'
rp_to_rp.skip = False
rp_to_vm = client_factory.create('ns0:TraversalSpec')
rp_to_vm.name = 'rpToVm'
rp_to_vm.type = 'ResourcePool'
rp_to_vm.path = 'vm'
rp_to_vm.skip = False
spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_resource_pool[0].name = 'rpToRp'
spec_array_resource_pool[1].name = 'rpToVm'
rp_to_rp.selectSet = spec_array_resource_pool
# Traversal through resource pool branch
cr_to_rp = client_factory.create('ns0:TraversalSpec')
cr_to_rp.name = 'crToRp'
cr_to_rp.type = 'ComputeResource'
cr_to_rp.path = 'resourcePool'
cr_to_rp.skip = False
spec_array_compute_resource = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_compute_resource[0].name = 'rpToRp'
spec_array_compute_resource[1].name = 'rpToVm'
cr_to_rp.selectSet = spec_array_compute_resource
# Traversal through host branch
cr_to_h = client_factory.create('ns0:TraversalSpec')
cr_to_h.name = 'crToH'
cr_to_h.type = 'ComputeResource'
cr_to_h.path = 'host'
cr_to_h.skip = False
# Traversal through hostFolder branch
dc_to_hf = client_factory.create('ns0:TraversalSpec')
dc_to_hf.name = 'dcToHf'
dc_to_hf.type = 'Datacenter'
dc_to_hf.path = 'hostFolder'
dc_to_hf.skip = False
spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_host[0].name = 'visitFolders'
dc_to_hf.selectSet = spec_array_datacenter_host
# Traversal through vmFolder branch
dc_to_vmf = client_factory.create('ns0:TraversalSpec')
dc_to_vmf.name = 'dcToVmf'
dc_to_vmf.type = 'Datacenter'
dc_to_vmf.path = 'vmFolder'
dc_to_vmf.skip = False
spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_vm[0].name = 'visitFolders'
dc_to_vmf.selectSet = spec_array_datacenter_vm
# Traversal through datastore branch
dc_to_ds = client_factory.create('ns0:TraversalSpec')
dc_to_ds.name = 'dcToDs'
dc_to_ds.type = 'Datacenter'
dc_to_ds.path = 'datastore'
dc_to_ds.skip = False
spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_ds[0].name = 'visitFolders'
dc_to_ds.selectSet = spec_array_datacenter_ds
# Recurse through all hosts
h_to_vm = client_factory.create('ns0:TraversalSpec')
h_to_vm.name = 'hToVm'
h_to_vm.type = 'HostSystem'
h_to_vm.path = 'vm'
h_to_vm.skip = False
spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_host_vm[0].name = 'visitFolders'
h_to_vm.selectSet = spec_array_host_vm
# Recurse through all datastores
ds_to_vm = client_factory.create('ns0:TraversalSpec')
ds_to_vm.name = 'dsToVm'
ds_to_vm.type = 'Datastore'
ds_to_vm.path = 'vm'
ds_to_vm.skip = False
spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datastore_vm[0].name = 'visitFolders'
ds_to_vm.selectSet = spec_array_datastore_vm
# Recurse through the folders
visit_folders = client_factory.create('ns0:TraversalSpec')
visit_folders.name = 'visitFolders'
visit_folders.type = 'Folder'
visit_folders.path = 'childEntity'
visit_folders.skip = False
spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_visit_folders[0].name = 'visitFolders'
spec_array_visit_folders[1].name = 'dcToHf'
spec_array_visit_folders[2].name = 'dcToVmf'
spec_array_visit_folders[3].name = 'crToH'
spec_array_visit_folders[4].name = 'crToRp'
spec_array_visit_folders[5].name = 'dcToDs'
spec_array_visit_folders[6].name = 'hToVm'
spec_array_visit_folders[7].name = 'dsToVm'
spec_array_visit_folders[8].name = 'rpToVm'
visit_folders.selectSet = spec_array_visit_folders
# Add all of them here
spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,
cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]
return spec_array
def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = (properties is None or len(properties) == 0)
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return retrieve_properties_ex(vim,
usecoll,
[property_filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
properties = get_dynamic_properties(vim, mobj, [property_name], type)
property_value = None
if property_name in properties:
property_value = properties.get(property_name)
return property_value
def get_dynamic_properties(vim, mobj, property_names, obj_type=None):
"""Gets specific properties of the Managed Object."""
if not obj_type:
obj_type = mobj._type
obj_content = get_object_properties(
vim, None, mobj, obj_type, property_names)
properties = {}
if obj_content:
dynamic_properties = obj_content[0].propSet
for dynamic_property in dynamic_properties:
property_name = dynamic_property.name
property_value = dynamic_property.val
properties[property_name] = property_value
return properties
def retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):
"""Retrieve properties.
Retrieve properties using PropertyCollector.RetrievePropertiesEx
and PropertyCollector.ContinueRetrievePropertiesEx
args:
:param vim: Vim object
:param prop_coll: PropertyCollector MOR
:param max_count: Max num of objects returned in a single call.
"""
objcont = []
client_factory = vim.client.factory
opts = client_factory.create('ns0:RetrieveOptions')
opts.maxObjects = max_count
res = vim.RetrievePropertiesEx(prop_coll,
specSet=spec_set,
options=opts)
while True:
if res and res.objects:
objcont.extend(res.objects)
if hasattr(res, "token") and res.token:
res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)
else:
break
return objcont
def get_objects(vim, type, properties_to_collect=None, all=False):
"""Gets the list of objects of the type specified."""
if not properties_to_collect:
properties_to_collect = ["name"]
client_factory = vim.client.factory
trav_spec = vim_util.build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory,
vim.service_content.rootFolder,
[trav_spec])
property_spec = vim_util.build_property_spec(
client_factory, type_=type,
properties_to_collect=properties_to_collect,
all_properties=all)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
[property_spec],
[object_spec])
property_collector = vim.service_content.propertyCollector
return retrieve_properties_ex(vim,
property_collector,
[property_filter_spec])
def get_prop_spec(client_factory, spec_type, properties):
"""Builds the Property Spec Object."""
prop_spec = client_factory.create('ns0:PropertySpec')
prop_spec.type = spec_type
prop_spec.pathSet = properties
return prop_spec
def get_obj_spec(client_factory, obj, select_set=None):
"""Builds the Object Spec object."""
obj_spec = client_factory.create('ns0:ObjectSpec')
obj_spec.obj = obj
obj_spec.skip = False
if select_set is not None:
obj_spec.selectSet = select_set
return obj_spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_property_filter_specs(vim, property_dict, objects=None):
client_factory = vim.client.factory
object_specs = []
if not objects:
objects = [vim.service_content.rootFolder]
for obj in objects:
if obj.value == get_root_folder_id(vim):
traversal_spec = [
vim_util.build_recursive_traversal_spec(client_factory)]
else:
traversal_spec = build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory,
obj,
traversal_spec)
object_specs.append(object_spec)
property_specs = []
for obj_type in property_dict:
props = property_dict[obj_type]
property_spec = vim_util.build_property_spec(
client_factory, type_=obj_type, properties_to_collect=props)
property_specs.append(property_spec)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
property_specs,
object_specs)
return property_filter_spec
def create_filter(vim, prop_filter_spec, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreateFilter(collector,
spec=prop_filter_spec,
partialUpdates=False)
def create_property_collector(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreatePropertyCollector(collector)
def destroy_property_collector(vim, collector):
if collector:
return vim.DestroyPropertyCollector(collector)
def wait_for_updates_ex(vim, version, collector=None,
max_wait=85, max_update_count=-1):
"""Polling mechanism for property collection
args:
:param vim: Vim object
:param version: version string
:param collector: PropertyCollector MOR
:param max_wait: Max time in seconds before the call returns
(Default set to 85 as 90 is the http socket timeout)
:param max_update_count: Max num of ObjectUpdates returned
in a single call. Not set if <= 0
"""
client_factory = vim.client.factory
waitopts = client_factory.create('ns0:WaitOptions')
waitopts.maxWaitSeconds = max_wait
if max_update_count > 0:
waitopts.maxObjectUpdates = max_update_count
if not collector:
collector = vim.service_content.propertyCollector
return vim.WaitForUpdatesEx(collector,
version=version,
options=waitopts)
def cancel_wait_for_updates(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CancelWaitForUpdates(collector)
def get_properties_for_a_collection_of_objects(vim, type,
obj_list, properties):
"""Gets the list of properties for the collection of objects."""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory,
lst_obj_specs, [prop_spec])
return retrieve_properties_ex(vim,
vim.service_content.propertyCollector,
[prop_filter_spec])
def get_search_index(vim):
return vim.service_content.searchIndex
def find_by_inventory_path(vim, search_index, path):
return vim.FindByInventoryPath(search_index, inventoryPath=path)
def get_root_folder_id(vim):
return vim.service_content.rootFolder.value
def get_dv_switch_manager(vim):
"""Get reference of DistributedVirtualSwitchManager."""
return vim.service_content.dvSwitchManager
def get_dvs_mor_by_uuid(vim, uuid):
"""Query DVS by UUID."""
dvs_mgr = get_dv_switch_manager(vim)
return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)
| [
12,
14,
15,
18,
23
] |
134 | 0b0eebd31d822ff5c1b951c3ee213f58a3a13aa0 | <mask token>
def print_error(err_code, err_msg):
"""格式化打印错误信息
Args:
err_code: 错误码
err_msg: 错误信息
"""
print(u'[{0}]: {1}'.format(err_code, err_msg))
def get_image_base64_content(image_file):
"""获取图片base64编码信息
Args:
image_file: 图片
Returns:
base64编码的图片信息
"""
with open(image_file, 'rb') as fp:
return str(base64.b64encode(fp.read()), 'utf-8')
| <mask token>
def print_json(obj):
"""json格式打印信息
Args:
obj 待打印的对象信息
"""
print(json.dumps(obj, ensure_ascii=False))
def print_error(err_code, err_msg):
"""格式化打印错误信息
Args:
err_code: 错误码
err_msg: 错误信息
"""
print(u'[{0}]: {1}'.format(err_code, err_msg))
def get_image_base64_content(image_file):
"""获取图片base64编码信息
Args:
image_file: 图片
Returns:
base64编码的图片信息
"""
with open(image_file, 'rb') as fp:
return str(base64.b64encode(fp.read()), 'utf-8')
| <mask token>
APP_ID = '10676432'
API_KEY = 'Hy1D1urUTdXzTOzqr9LeN3gc'
SECRET_KEY = 'foS4GMg2w3QZtO9XNoSQF17Kkk007xWk'
def print_json(obj):
"""json格式打印信息
Args:
obj 待打印的对象信息
"""
print(json.dumps(obj, ensure_ascii=False))
def print_error(err_code, err_msg):
"""格式化打印错误信息
Args:
err_code: 错误码
err_msg: 错误信息
"""
print(u'[{0}]: {1}'.format(err_code, err_msg))
def get_image_base64_content(image_file):
"""获取图片base64编码信息
Args:
image_file: 图片
Returns:
base64编码的图片信息
"""
with open(image_file, 'rb') as fp:
return str(base64.b64encode(fp.read()), 'utf-8')
| <mask token>
import json
import base64
APP_ID = '10676432'
API_KEY = 'Hy1D1urUTdXzTOzqr9LeN3gc'
SECRET_KEY = 'foS4GMg2w3QZtO9XNoSQF17Kkk007xWk'
def print_json(obj):
"""json格式打印信息
Args:
obj 待打印的对象信息
"""
print(json.dumps(obj, ensure_ascii=False))
def print_error(err_code, err_msg):
"""格式化打印错误信息
Args:
err_code: 错误码
err_msg: 错误信息
"""
print(u'[{0}]: {1}'.format(err_code, err_msg))
def get_image_base64_content(image_file):
"""获取图片base64编码信息
Args:
image_file: 图片
Returns:
base64编码的图片信息
"""
with open(image_file, 'rb') as fp:
return str(base64.b64encode(fp.read()), 'utf-8')
| #coding=utf-8
#
"""
my custom common module
"""
import json
import base64
# sdk账号信息
APP_ID = '10676432'
API_KEY = 'Hy1D1urUTdXzTOzqr9LeN3gc'
SECRET_KEY = 'foS4GMg2w3QZtO9XNoSQF17Kkk007xWk'
def print_json(obj):
"""json格式打印信息
Args:
obj 待打印的对象信息
"""
print(json.dumps(obj, ensure_ascii=False))
def print_error(err_code, err_msg):
"""格式化打印错误信息
Args:
err_code: 错误码
err_msg: 错误信息
"""
print(u"[{0}]: {1}".format(err_code, err_msg))
def get_image_base64_content(image_file):
"""获取图片base64编码信息
Args:
image_file: 图片
Returns:
base64编码的图片信息
"""
with open(image_file, 'rb') as fp:
return str(base64.b64encode(fp.read()), 'utf-8')
| [
2,
3,
4,
5,
6
] |
135 | 68e09f72e8338efbef108ffd0c93eff067bf7b07 | <mask token>
| <mask token>
http1.post('http://' + ip + '/music_download/api/login',
'username=admin&password=123456')
http1.upload('http://' + ip + '/music_download/api/song/upload',
'speed=0&styleId=c0a4bd86-a09b-43ac-8169-14bb69630ac0&file=G:\\music_data\\1.mp3'
)
| <mask token>
http1 = HTTP()
ip = '10.68.170.184:8080'
http1.post('http://' + ip + '/music_download/api/login',
'username=admin&password=123456')
http1.upload('http://' + ip + '/music_download/api/song/upload',
'speed=0&styleId=c0a4bd86-a09b-43ac-8169-14bb69630ac0&file=G:\\music_data\\1.mp3'
)
| from keywords.httpkeys1 import HTTP
http1 = HTTP()
ip = '10.68.170.184:8080'
http1.post('http://' + ip + '/music_download/api/login',
'username=admin&password=123456')
http1.upload('http://' + ip + '/music_download/api/song/upload',
'speed=0&styleId=c0a4bd86-a09b-43ac-8169-14bb69630ac0&file=G:\\music_data\\1.mp3'
)
| # -*- coding: UTF-8 -*-
from keywords.httpkeys1 import HTTP
http1 = HTTP()
# ip = '10.68.170.184:8080'
ip = '10.68.170.184:8080'
http1.post('http://'+ip+'/music_download/api/login','username=admin&password=123456')
# http1.savejson('result','id')
# http1.get('http://47.101.197.102:8080/music/api/user','{id}')
# data = {'username':'admin','password':'123456'}
# # json方式传递数据
# http1.postjson('http://47.101.197.102:8080/music/api/login',data=data)
# http1.savejson('result','id')
# http1.get('http://47.101.197.102:8080/music/api/user','{id}')
# http1.addheader('Content-type','multipart/form-data')
http1.upload('http://'+ip+'/music_download/api/song/upload','speed=0&styleId=c0a4bd86-a09b-43ac-8169-14bb69630ac0&file=G:\\music_data\\1.mp3')
# http1.upload('http://10.68.170.184:8080/music/api/song/upload','filename=1.mp3&speed=0&styleId=c0a4bd86-a09b-43ac-8169-14bb69630ac0&file1=G:/music_data/1.mp3')
| [
0,
1,
2,
3,
4
] |
136 | 2af8677e76b77b9bfa579012a85ea331c0c7f390 | <mask token>
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
<mask token>
| <mask token>
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
<mask token>
| <mask token>
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
if __name__ == '__main__':
display = floatlayoutApp()
display.run()
| from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
if __name__ == '__main__':
display = floatlayoutApp()
display.run()
| from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
if __name__== "__main__":
display = floatlayoutApp()
display.run() | [
2,
3,
4,
5,
6
] |
137 | 0d20b75bcc87db8f3e4bdd9d6448cc44c979de1d | <mask token>
| <mask token>
print('输入:')
while True:
s = input()
if s == '0 0 0 0 0 0':
break
S.append(s)
print('\n输出:')
<mask token>
for k in range(len(S)):
p = [int(i) for i in S[k].split()]
_sum = sum(i * j for i, j in zip(w, p))
if _sum % 2 != 0:
print(f'集合{k + 1}:\n不能被分割')
continue
V = _sum // 2
n = len(w)
dp = [False] * (V + 1)
dp[0] = True
for i in range(n):
num, total = 1, p[i]
while total > 0:
if num > total:
num = total
group_w = w[i] * num
for j in range(V, group_w - 1, -1):
dp[j] = dp[j - group_w]
total -= num
num <<= 1
if dp[V]:
print(f'集合{k + 1}:\n可以被分割')
else:
print(f'集合{k + 1}:\n不能被分割')
| <mask token>
S = []
print('输入:')
while True:
s = input()
if s == '0 0 0 0 0 0':
break
S.append(s)
print('\n输出:')
w = [1, 2, 3, 4, 5, 6]
for k in range(len(S)):
p = [int(i) for i in S[k].split()]
_sum = sum(i * j for i, j in zip(w, p))
if _sum % 2 != 0:
print(f'集合{k + 1}:\n不能被分割')
continue
V = _sum // 2
n = len(w)
dp = [False] * (V + 1)
dp[0] = True
for i in range(n):
num, total = 1, p[i]
while total > 0:
if num > total:
num = total
group_w = w[i] * num
for j in range(V, group_w - 1, -1):
dp[j] = dp[j - group_w]
total -= num
num <<= 1
if dp[V]:
print(f'集合{k + 1}:\n可以被分割')
else:
print(f'集合{k + 1}:\n不能被分割')
| """
问题描述
玛莎(Marsha)和比尔(Bill)拥有一系列大理石。他们希望将藏品分开,以使两者获得相等的份额。如果所有的大理石都具有相同的价值,这将很容易,因为那样他们就可以将收藏品分成两半。
但不幸的是,有些大理石比其他大理石更大或更漂亮。因此,玛莎(Marsha)和比尔(Bill)首先为每个大理石分配一个值,即一个介于1到6之间的自然数。
现在,他们希望对大理石进行分割,以使每个大理石都获得相同的总价值。不幸的是,他们意识到以这种方式分割大理石可能是不可能的(即使所有大理石的总价值是均匀的)。
例如,如果存在一个值为1的大理石,值为3的一个,值为4的两个,则不能将它们拆分为相等值的集合。因此,他们要求您编写一个程序来检查大理石是否存在合理的分区。
输入
输入中的每一行都描述了一组要分割的大理石。每一行由六个非负整数n1,n2,...,n6组成,其中ni是值i的大理石数。因此,上面的示例将由输入行``1 0 1 2 0 0''描述。大理石的最大总数为20000。
输入文件的最后一行将为“ 0 0 0 0 0 0”;不要处理此行。
输出
对于每个集合,输出"集合k:",其中k是测试用例的编号,然后是``可以被分割"或``不能被分割''。
在每个测试用例之后输出空白行。
样本输入
1 0 1 2 0 0
1 0 0 0 1 1
0 0 0 0 0 0
样本输出
集合1:
不能被分割
集合2:
可以被分割
"""
S = []
print('输入:')
while True:
s = input()
if s == '0 0 0 0 0 0':
break
S.append(s)
print('\n输出:')
w = [1, 2, 3, 4, 5, 6]
for k in range(len(S)):
p = [int(i) for i in S[k].split()]
_sum = sum(i * j for i, j in zip(w, p))
if _sum % 2 != 0:
print(f'集合{k + 1}:\n不能被分割')
continue
V = _sum // 2
n = len(w)
dp = [False] * (V + 1)
dp[0] = True # 只有0件物品能达到0价值
for i in range(n):
num, total = 1, p[i]
while total > 0:
if num > total:
num = total
group_w = w[i] * num
for j in range(V, group_w - 1, -1):
dp[j] = dp[j - group_w]
total -= num
num <<= 1
if dp[V]:
print(f'集合{k + 1}:\n可以被分割')
else:
print(f'集合{k + 1}:\n不能被分割')
| null | [
0,
1,
2,
3
] |
138 | 264896da4d92797b9f31e28c19a2e315efff815a | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('exchange', '0004_auto_20170826_2120')]
operations = [migrations.AlterModelOptions(name='type', options={
'verbose_name': 'тип задания', 'verbose_name_plural':
'Типы задания'}), migrations.AlterField(model_name='task', name=
'count', field=models.IntegerField(default=0, verbose_name=
'Количество выполненных действий')), migrations.AlterField(
model_name='task', name='max_count', field=models.IntegerField(
default=1, verbose_name='Количество запланированных действий')),
migrations.AlterField(model_name='task', name='status', field=
models.CharField(choices=[('NEW', 'Новая'), ('CNF', 'Подтверждена'),
('Y', 'Активна'), ('BLC', 'Заблокирована модератором'), ('DEL',
'Удалено'), ('DON', 'Завершено')], default='NEW', max_length=3))]
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('exchange', '0004_auto_20170826_2120')]
operations = [migrations.AlterModelOptions(name='type', options={
'verbose_name': 'тип задания', 'verbose_name_plural':
'Типы задания'}), migrations.AlterField(model_name='task', name=
'count', field=models.IntegerField(default=0, verbose_name=
'Количество выполненных действий')), migrations.AlterField(
model_name='task', name='max_count', field=models.IntegerField(
default=1, verbose_name='Количество запланированных действий')),
migrations.AlterField(model_name='task', name='status', field=
models.CharField(choices=[('NEW', 'Новая'), ('CNF', 'Подтверждена'),
('Y', 'Активна'), ('BLC', 'Заблокирована модератором'), ('DEL',
'Удалено'), ('DON', 'Завершено')], default='NEW', max_length=3))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-26 21:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exchange', '0004_auto_20170826_2120'),
]
operations = [
migrations.AlterModelOptions(
name='type',
options={'verbose_name': '\u0442\u0438\u043f \u0437\u0430\u0434\u0430\u043d\u0438\u044f', 'verbose_name_plural': '\u0422\u0438\u043f\u044b \u0437\u0430\u0434\u0430\u043d\u0438\u044f'},
),
migrations.AlterField(
model_name='task',
name='count',
field=models.IntegerField(default=0, verbose_name='\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0432\u044b\u043f\u043e\u043b\u043d\u0435\u043d\u043d\u044b\u0445 \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u0439'),
),
migrations.AlterField(
model_name='task',
name='max_count',
field=models.IntegerField(default=1, verbose_name='\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0437\u0430\u043f\u043b\u0430\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0445 \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u0439'),
),
migrations.AlterField(
model_name='task',
name='status',
field=models.CharField(choices=[('NEW', '\u041d\u043e\u0432\u0430\u044f'), ('CNF', '\u041f\u043e\u0434\u0442\u0432\u0435\u0440\u0436\u0434\u0435\u043d\u0430'), ('Y', '\u0410\u043a\u0442\u0438\u0432\u043d\u0430'), ('BLC', '\u0417\u0430\u0431\u043b\u043e\u043a\u0438\u0440\u043e\u0432\u0430\u043d\u0430 \u043c\u043e\u0434\u0435\u0440\u0430\u0442\u043e\u0440\u043e\u043c'), ('DEL', '\u0423\u0434\u0430\u043b\u0435\u043d\u043e'), ('DON', '\u0417\u0430\u0432\u0435\u0440\u0448\u0435\u043d\u043e')], default='NEW', max_length=3),
),
]
| [
0,
1,
2,
3,
4
] |
139 | 63a2258bf0ed779254b68a683e3d30e9fb356b1f | <mask token>
| from django.contrib import admin
| from django.contrib import admin
# from .models import Usuario
# from .models import Lote
# from .models import Fornecedor
# from .models import Cliente
# from .models import Medicamento
# from .models import Medicamento_Entrada
# from .models import Medicamento_Saida
# Register your models here.
#
# class UsuarioAdmin(admin.ModelAdmin):
# list_display = ['nome','login','senha']
# class FornecedorAdmin(admin.ModelAdmin):
# list_display = ['nome','contato']
# class LoteAdmin(admin.ModelAdmin):
# list_display = ['numero','fornecedor','fabricacao','vencimento']
# class ClienteAdmin(admin.ModelAdmin):
# list_display = ['nome','contato']
# class MedicamentoAdmin(admin.ModelAdmin):
# list_display = ['nome','data_insercao','descricao']
# class Medicamento_EntradaAdmin(admin.ModelAdmin):
# list_display = ['medicamento','lote','quantidade','data_entrada','usuario']
# class Medicamento_SaidaAdmin(admin.ModelAdmin):
# list_display = ['medicamento','quantidade','data_saida','usuario']
# admin.site.register(Usuario,UsuarioAdmin)
# admin.site.register(Lote,LoteAdmin)
# admin.site.register(Fornecedor,FornecedorAdmin)
# admin.site.register(Cliente,ClienteAdmin)
# admin.site.register(Medicamento,MedicamentoAdmin)
# admin.site.register(Medicamento_Entrada,Medicamento_EntradaAdmin)
# admin.site.register(Medicamento_Saida,Medicamento_SaidaAdmin) | null | null | [
0,
1,
2
] |
140 | 0e4c82d6eb77d2b6357925c9aab516bcc3310a4c | <mask token>
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
<mask token>
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
| <mask token>
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
| <mask token>
class House:
<mask token>
<mask token>
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
| <mask token>
class House:
def __init__(self, wallDynamic):
self.__wall = wallDynamic
house = House(1)
print(house._House__wall)
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
| # 4 Pillars of OOP:
# 1. Encapsulation: Encapsulation in Python is the process of wrapping up variables and methods into a single entity.In programming, a class is an example that wraps all the variables and methods defined inside it.
# 2. Abstraction: Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only on usage of it.
# 3. Inheritance: It is the process of creating a class that can derive or inherit the properties and methods from another class(parent/base).
# 4. Polymorphism: Polymorphism means the ability to take various forms.
# Encapsulation:
# Encapsulation is a process of protecting the data and functionality of a class in a single unit, called an object.
# This mechanism is often used to protect the data of an object from other objects.
# It’s one of the fundamental principles in any programming language that supports object-oriented programming.
# We can protect the variables in the class by marking them as private. We need to add two underscores as a prefix to make a variable private.
# Once we make a variable as private, we can’t access them directly from the objects of that class.
# Now, let’s see how to create private variables:
# eg:
from abc import abstractmethod, ABC
class House:
def __init__(self, wallDynamic):
self.__wall = wallDynamic
# In the above example, wall is a private variable.
# Once a variable is declared as private, the only way to access those variables is through name mangling.
# In the name mangling process, an identifier with two leading underscores and one trailing underscore is
# textually replaced with _classname__identifier , where class-name is the name of the current class and identifier is the private variable.
house = House(1)
# Using name mangling to access private variables
print(house._House__wall) # OutPut - 1
# To implement proper encapsulation in Python, we need to use setters and getters, as shown below:
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
# Abstraction:
# Abstraction in OOP is a process of hiding the real implementation of the method by only showing a method signature.
# In Python, we can achieve abstraction using ABC(abstraction class) or abstract method.
# ABC is a class from the abc module in Python.
# If we extend any class with ABC and include any abstraction methods,
# then the classes inherited from this class will have to mandatorily implement those abstract methods.
# When we annotate any method with an abstractmethod keyword, then it is an abstract method in Python(it won’t have any method implementation).
# If the parent class has abstractmethod and not inherited from an abstract class, then it is optional to implement the abstractmethod .
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print("Starting engine")
def stop(self):
print("Stopping engine")
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print("Car is in drive mode")
# Here, Vehicle is a parent inherited from ABC class. It has an abstraction method drive.
# Car is another class that is inherited from Vehicle, so it had to implement the drive method.
| [
10,
11,
12,
15,
17
] |
141 | 26d14bc74d893f6f14ee7405280f4af41854c544 | <mask token>
| <mask token>
def run(path, output):
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file + '.txt')
l = []
for member in root.findall('object'):
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
l.append([iclass, int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text), int(member.
find('bndbox').find('xmax').text) - int(member.find(
'bndbox').find('xmin').text), int(member.find('bndbox').
find('ymax').text) - int(member.find('bndbox').find('ymin')
.text), int(root.find('size')[0].text), int(root.find(
'size')[1].text)])
np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',
newline='\n')
print('Successfully converted xml to txt.')
<mask token>
ap.add_argument('-p', '--path', required=True, help='annotations path')
ap.add_argument('-o', '--output', required=True, help='txt output path')
<mask token>
print()
print()
print()
print(
'=========================================================================='
)
print(
' ATENTION '
)
print()
print(
' ATENTION '
)
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print(
'=========================================================================='
)
print()
print()
print()
run(args['path'], args['output'])
| <mask token>
def run(path, output):
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file + '.txt')
l = []
for member in root.findall('object'):
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
l.append([iclass, int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text), int(member.
find('bndbox').find('xmax').text) - int(member.find(
'bndbox').find('xmin').text), int(member.find('bndbox').
find('ymax').text) - int(member.find('bndbox').find('ymin')
.text), int(root.find('size')[0].text), int(root.find(
'size')[1].text)])
np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',
newline='\n')
print('Successfully converted xml to txt.')
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--path', required=True, help='annotations path')
ap.add_argument('-o', '--output', required=True, help='txt output path')
args = vars(ap.parse_args())
print()
print()
print()
print(
'=========================================================================='
)
print(
' ATENTION '
)
print()
print(
' ATENTION '
)
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print(
'=========================================================================='
)
print()
print()
print()
run(args['path'], args['output'])
| import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import argparse
import numpy as np
def run(path, output):
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file + '.txt')
l = []
for member in root.findall('object'):
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
l.append([iclass, int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text), int(member.
find('bndbox').find('xmax').text) - int(member.find(
'bndbox').find('xmin').text), int(member.find('bndbox').
find('ymax').text) - int(member.find('bndbox').find('ymin')
.text), int(root.find('size')[0].text), int(root.find(
'size')[1].text)])
np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',
newline='\n')
print('Successfully converted xml to txt.')
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--path', required=True, help='annotations path')
ap.add_argument('-o', '--output', required=True, help='txt output path')
args = vars(ap.parse_args())
print()
print()
print()
print(
'=========================================================================='
)
print(
' ATENTION '
)
print()
print(
' ATENTION '
)
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print(
'=========================================================================='
)
print()
print()
print()
run(args['path'], args['output'])
| import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import argparse
import numpy as np
def run(path, output):
#xml_df = xml_to_csv(path)
#xml_df.to_csv(output, index=None)
# for filename in os.listdir(path):
# base_file, ext = os.path.splitext(filename)
# print(base_file, ext)
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file+".txt")
l = []
for member in root.findall('object'):
#================ CLASS NAMES =======================
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
#class_number x1 y1 width height image_width image_height
l.append([iclass,
int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text),
int(member.find('bndbox').find('xmax').text)-int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymax').text)-int(member.find('bndbox').find('ymin').text),
int(root.find('size')[0].text),
int(root.find('size')[1].text) ])
np.savetxt(txtFileName, np.asarray(l),fmt='%d', delimiter =' ',newline='\n')
print('Successfully converted xml to txt.')
#=============================================================================
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="annotations path")
ap.add_argument("-o", "--output", required=True, help="txt output path")
args = vars(ap.parse_args())
print()
print()
print()
print('==========================================================================')
print(' ATENTION ')
print()
print(' ATENTION ')
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print('==========================================================================')
print()
print()
print()
run(args["path"], args["output"])
| [
0,
2,
3,
4,
5
] |
142 | 3b1426e0f29093e1e462765bcf1d351a064b9639 | <mask token>
def SetCu2Wave():
"""Set the parameters to the two-line Cu K alpha 1+2 spectrum
"""
parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}
parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}
parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}
<mask token>
def XferFPAsettings(InpParms):
"""convert Topas-type parameters to SI units for NIST and place in a dict sorted
according to use in each convoluter
:param dict InpParms: a dict with Topas-like parameters, as set in
:func:`MakeTopasFPASizer`
:returns: a nested dict with global parameters and those for each convolution
"""
wavenums = range(InpParms['numWave'])
source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
wavenums])
la = [InpParms['int'][i] for i in wavenums]
source_intensities = np.array(la) / max(la)
source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
i in wavenums])
source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
wavenums])
NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
'emiss_intensities': source_intensities, 'emiss_gauss_widths':
source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
if InpParms['filament_length'] == InpParms['receiving_slit_length']:
InpParms['receiving_slit_length'] *= 1.00001
NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
'soller_angle'], 'angD_deg': InpParms['soller_angle']}
if InpParms.get('LAC_cm', 0) > 0:
NISTparms['absorption'] = {'absorption_coefficient': InpParms[
'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
'sample_thickness']}
elif 'absorption' in NISTparms:
del NISTparms['absorption']
if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
'lpsd_th2_angular_range', 0) > 0:
PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
PSDdetector_length_mm / 1000.0)}
elif 'si_psd' in NISTparms:
del NISTparms['si_psd']
if InpParms.get('Specimen_Displacement'):
NISTparms['displacement'] = {'specimen_displacement': 0.001 *
InpParms['Specimen_Displacement']}
elif 'displacement' in NISTparms:
del NISTparms['displacement']
if InpParms.get('receiving_slit_width'):
NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
'receiving_slit_width']}
elif 'receiver_slit' in NISTparms:
del NISTparms['receiver_slit']
if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
'tube-tails_rel-I', 0) > 0:
NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
'tube-tails_rel-I', 0.0)}
elif 'tube_tails' in NISTparms:
del NISTparms['tube_tails']
max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
'dominant_wavelength': max_wavelength, 'diffractometer_radius':
0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
"""Create a peak profile object using the NIST XRD Fundamental
Parameters Code.
:returns: a profile object that can provide information on
each convolution or compute the composite peak shape.
"""
p = FP.FP_profile(anglemode='twotheta',
output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get
('oversampling', 10))
p.debug_cache = False
for key in NISTparms:
if key:
p.set_parameters(convolver=key, **NISTparms[key])
else:
p.set_parameters(**NISTparms[key])
return p
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
"""Compute a single peak using a NIST profile object
:param object NISTpk: a peak profile computational object from the
NIST XRD Fundamental Parameters Code, typically established from
a call to :func:`SetupFPAcalc`
:param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
:param float twotheta: nominal center of peak (degrees)
:param float calcwid: width to perform convolution (degrees)
:param float step: step size
"""
center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)
NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,
twotheta_window_center_deg=ttArr[center_bin_idx],
twotheta_approx_window_fullwidth_deg=calcwid)
NISTpk.set_parameters(twotheta0_deg=twotheta)
return center_bin_idx, NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
"""Create a GUI to get simulation with parameters for Fundamental
Parameters fitting.
:param wx.Window dlg: Frame or Dialog where GUI will appear
:returns: a sizer with the GUI controls
"""
def _onOK(event):
msg = ''
if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
msg += 'First peak minus half the calc width is too low'
if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
if msg:
msg += '\n'
msg += 'Last peak plus half the calc width is too high'
if simParms['npeaks'] < 8:
if msg:
msg += '\n'
msg += 'At least 8 peaks are needed'
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
return
ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
'step'])
intArr = np.zeros_like(ttArr)
peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
simParms['npeaks'], endpoint=True)
peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
NISTpk = setupFPAcalc()
minPtsHM = len(intArr)
maxPtsHM = 0
for num, twoth_peak in enumerate(peaklist):
try:
center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
twoth_peak, simParms['calcwid'], simParms['step'])
except:
if msg:
msg += '\n'
msg = 'Error computing convolution, revise input'
continue
if num == 0:
G2plt.PlotFPAconvolutors(G2frame, NISTpk)
pkMax = peakObj.peak.max()
pkPts = len(peakObj.peak)
minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
startInd = center_bin_idx - pkPts // 2
if startInd < 0:
intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
] / pkMax
elif startInd > len(intArr):
break
elif startInd + pkPts >= len(intArr):
offset = pkPts - len(intArr[startInd:])
intArr[startInd:startInd + pkPts - offset
] += 10000 * peakObj.peak[:-offset] / pkMax
else:
intArr[startInd:startInd + pkPts
] += 10000 * peakObj.peak / pkMax
if maxPtsHM * simParms['step'] > peakSpacing / 4:
if msg:
msg += '\n'
msg += (
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
.format(maxPtsHM * simParms['step'], peakSpacing))
if minPtsHM < 10:
if msg:
msg += '\n'
msg += (
'There are only {} points above the half-max. 10 are needed. Dropping step size.'
.format(minPtsHM))
simParms['step'] *= 0.5
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
wx.CallAfter(MakeSimSizer, G2frame, dlg)
return
dlg.Destroy()
wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)
def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
"""Perform a peak fit to the FP simulated pattern
"""
plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
RESIZE_BORDER)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
txt = wx.StaticText(plswait, wx.ID_ANY,
'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
plswait.SetSizer(vbox)
plswait.Layout()
plswait.CenterOnParent()
plswait.Show()
wx.BeginBusyCursor()
ints = list(NISTparms['emission']['emiss_intensities'])
Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
if len(ints) > 1:
ints[np.argmax(ints)] = -1
Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
else:
Lam2 = None
histId = G2frame.AddSimulatedPowder(ttArr, intArr,
'NIST Fundamental Parameters simulation', Lam1, Lam2)
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, G2frame.root, 'Controls'))
controldat = controls.get('data', {'deriv type': 'analytic',
'min dM/M': 0.001})
Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
(G2frame, histId, 'Instrument Parameters'))
peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Peak List'))
bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
GetGPXtreeItemId(G2frame, histId, 'Background'))
bkg1[1] = False
bkg1[2] = 0
bkg1[3] = 0.0
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Limits'))
try:
Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
NISTparms['axial']['slit_length_source']) / NISTparms[''][
'diffractometer_radius']
except:
pass
for pos in peaklist:
i = ttArr.searchsorted(pos)
area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
maxPtsHM)])
peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
area))
histData = G2frame.GPXtree.GetItemPyData(histId)
bxye = np.zeros(len(histData[1][1]))
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False,
controldat, None)[0]
for pk in peakData['peaks']:
pk[1] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in ('U', 'V', 'W', 'X', 'Y'):
Parms[p][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
Parms['SH/L'][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in Parms:
if len(Parms[p]) == 3:
Parms[p][0] = Parms[p][1]
Parms[p][2] = False
wx.EndBusyCursor()
plswait.Destroy()
pth = G2G.GetExportPath(G2frame)
fldlg = wx.FileDialog(G2frame,
'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
wx.FD_OVERWRITE_PROMPT)
try:
if fldlg.ShowModal() == wx.ID_OK:
filename = fldlg.GetPath()
filename = os.path.splitext(filename)[0] + '.instprm'
File = open(filename, 'w')
File.write(
'#GSAS-II instrument parameter file; do not add/delete items!\n'
)
for item in Parms:
File.write(item + ':' + str(Parms[item][1]) + '\n')
File.close()
print('Instrument parameters saved to: ' + filename)
finally:
fldlg.Destroy()
def _onClose(event):
dlg.Destroy()
def SetButtonStatus(done=False):
OKbtn.Enable(bool(NISTparms))
saveBtn.Enable(bool(NISTparms))
if done:
_onOK(None)
def _onSetFPA(event):
FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
FPdlg.CenterOnParent()
FPdlg.Raise()
FPdlg.Show()
def _onSaveFPA(event):
filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
'dict of NIST FPA values', dlg)
if not filename:
return
fp = open(filename, 'w')
fp.write(
'# parameters to be used in the NIST XRD Fundamental Parameters program\n'
)
fp.write('{\n')
for key in sorted(NISTparms):
fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
if not key:
fp.write(' # global parameters')
fp.write('\n')
fp.write('}\n')
fp.close()
def _onReadFPA(event):
filename = G2G.GetImportFile(G2frame, message=
'Read file with dict of values for NIST Fundamental Parameters',
parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
if not filename:
return
if not filename[0]:
return
try:
txt = open(filename[0], 'r').read()
NISTparms.clear()
array = np.array
d = eval(txt)
NISTparms.update(d)
except Exception as err:
G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
filename, err), 'Bad dict input')
SetButtonStatus()
if dlg.GetSizer():
dlg.GetSizer().Clear(True)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
'Fit Profile Parameters to Peaks from Fundamental Parameters',
style=wx.ALIGN_CENTER), 0, wx.EXPAND)
MainSizer.Add((-1, 5))
prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
for key, defVal, text in (('minTT', 3.0,
'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
'Location of last peak in 2theta (deg)'), ('step', 0.01,
'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
'Number of peaks'), ('calcwid', 2.0,
'Range to compute each peak (deg 2theta)')):
if key not in simParms:
simParms[key] = defVal
ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
txt.Wrap(280)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetFPA)
saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
btnsizer.Add(saveBtn)
saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
btnsizer.Add(readBtn)
readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
Citation, size=(350, -1))
txt.Wrap(340)
MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(dlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
OKbtn.Bind(wx.EVT_BUTTON, _onOK)
Cbtn.Bind(wx.EVT_BUTTON, _onClose)
SetButtonStatus()
dlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(dlg)
dlg.SetMinSize(dlg.GetSize())
dlg.SendSizeEvent()
dlg.Raise()
def GetFPAInput(G2frame):
dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.
DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
MakeSimSizer(G2frame, dlg)
dlg.CenterOnParent()
dlg.Show()
return
| <mask token>
def SetCu2Wave():
"""Set the parameters to the two-line Cu K alpha 1+2 spectrum
"""
parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}
parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}
parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}
<mask token>
def MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):
    """Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
    Parameter input is modeled after Topas input parameters.
    :param wx.Window FPdlg: Frame or Dialog where GUI will appear
    :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
      (linear) position sensitive detector
    :param dict parmDict: dict to place parameters. If empty, default values from
      globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in
      the array.
    :returns: a sizer with the GUI controls
    """
    def _onOK(event):
        # transfer Topas-style values into module-level NISTparms, then close
        XferFPAsettings(parmDict)
        SetButtonStatus(done=True)
        FPdlg.Destroy()
    def _onClose(event):
        SetButtonStatus()
        FPdlg.Destroy()
    def _onAddWave(event):
        # add one wavelength column and rebuild the dialog in place
        parmDict['numWave'] += 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onRemWave(event):
        parmDict['numWave'] -= 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu5Wave(event):
        # five-line Cu K-alpha emission description
        parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,
            1.541058, 1.54441, 1.544721))}
        parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791,
            0.0762, 0.2417, 0.0871))}
        parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437,
            0.6, 0.52, 0.62))}
        parmDict['numWave'] = 5
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu2Wave(event):
        SetCu2Wave()
        parmDict['numWave'] = 2
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetPoint(event):
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',
            SetButtonStatus)
    def _onSetPSD(event):
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',
            SetButtonStatus)
    def PlotTopasFPA(event):
        # simulate and plot a single peak centered at simParms['plotpos']
        XferFPAsettings(parmDict)
        ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid']),
            simParms['plotpos'] + simParms['calcwid'], simParms['step'])
        intArr = np.zeros_like(ttArr)
        NISTpk = setupFPAcalc()
        try:
            center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[
                'plotpos'], simParms['calcwid'], simParms['step'])
        except Exception as err:
            msg = 'Error computing convolution, revise input'
            print(msg)
            print(err)
            return
        G2plt.PlotFPAconvolutors(G2frame, NISTpk)
        pkPts = len(peakObj.peak)
        pkMax = peakObj.peak.max()
        startInd = center_bin_idx - pkPts // 2
        # add the normalized peak into intArr, clipping at the array bounds
        if startInd < 0:
            intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:] / pkMax
        elif startInd > len(intArr):
            return
        elif startInd + pkPts >= len(intArr):
            offset = pkPts - len(intArr[startInd:])
            intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[
                :-offset] / pkMax
        else:
            intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax
        G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\theta, deg$',
            labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,
            lines=True)
    # rebuild the dialog contents from scratch each time
    if FPdlg.GetSizer():
        FPdlg.GetSizer().Clear(True)
    numWave = parmDict['numWave']
    if mode == 'BBpoint':
        itemList = BraggBrentanoParms + BBPointDetector
    elif mode == 'BBPSD':
        itemList = BraggBrentanoParms + BBPSDDetector
    else:
        raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add((-1, 5))
    # one column per wavelength plus a label column
    waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)
    for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',
            u'Lorentz Width\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)
            ):
        text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)
        text.SetBackgroundColour(wx.WHITE)
        waveSizer.Add(text, 0, wx.EXPAND)
        if prm not in parmDict:
            parmDict[prm] = {}
        for i in range(numWave):
            if i not in parmDict[prm]:
                parmDict[prm][i] = defVal  # fill in defaults for new columns
            ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))
            waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)
    MainSizer.Add(waveSizer)
    MainSizer.Add((-1, 5))
    # buttons to add/remove wavelength columns or load canned Cu spectra
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onAddWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onRemWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # detector-type selector; the active mode's button is disabled
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')
    btn.Enable(not mode == 'BBpoint')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPoint)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')
    btn.Enable(not mode == 'BBPSD')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPSD)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # table of (label, value, explanation) rows for the instrument parameters
    prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER
        )
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for lbl, defVal, text in itemList:
        prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.
            ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)
        if lbl not in parmDict:
            parmDict[lbl] = defVal
        ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))
        txt.Wrap(380)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # single-peak plotting controls
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))
    if 'plotpos' not in simParms:
        simParms['plotpos'] = simParms['minTT']
    ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))
    btnsizer.Add(ctrl)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(FPdlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    FPdlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(FPdlg)
    FPdlg.SetMinSize(FPdlg.GetSize())
    FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    """convert Topas-type parameters to SI units for NIST and place in a dict sorted
    according to use in each convoluter
    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`
    :returns: a nested dict with global parameters and those for each convolution
    """
    wavenums = range(InpParms['numWave'])
    # wavelengths entered in Angstroms -> meters
    source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
        wavenums])
    la = [InpParms['int'][i] for i in wavenums]
    # normalize so the strongest emission line has intensity 1.0
    source_intensities = np.array(la) / max(la)
    # widths entered in milli-Angstroms -> meters
    source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
        i in wavenums])
    source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
        wavenums])
    NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
        'emiss_intensities': source_intensities, 'emiss_gauss_widths':
        source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
        'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
        'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
    # NOTE(review): the axial convolver presumably cannot handle exactly
    # equal lengths, so one is nudged slightly -- confirm against FP code
    if InpParms['filament_length'] == InpParms['receiving_slit_length']:
        InpParms['receiving_slit_length'] *= 1.00001
    NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
        InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
        ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
        'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
        'soller_angle'], 'angD_deg': InpParms['soller_angle']}
    # optional convolvers below: each is added when its inputs are set,
    # otherwise any stale entry from a previous transfer is removed
    if InpParms.get('LAC_cm', 0) > 0:
        NISTparms['absorption'] = {'absorption_coefficient': InpParms[
            'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
            'sample_thickness']}
    elif 'absorption' in NISTparms:
        del NISTparms['absorption']
    if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
            'lpsd_th2_angular_range', 0) > 0:
        PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
            'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
        NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
            'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
            PSDdetector_length_mm / 1000.0)}
    elif 'si_psd' in NISTparms:
        del NISTparms['si_psd']
    if InpParms.get('Specimen_Displacement'):
        NISTparms['displacement'] = {'specimen_displacement': 0.001 *
            InpParms['Specimen_Displacement']}
    elif 'displacement' in NISTparms:
        del NISTparms['displacement']
    if InpParms.get('receiving_slit_width'):
        NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
            'receiving_slit_width']}
    elif 'receiver_slit' in NISTparms:
        del NISTparms['receiver_slit']
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
            'tube-tails_rel-I', 0) > 0:
        NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
            'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
            'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
            'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
            'tube-tails_rel-I', 0.0)}
    elif 'tube_tails' in NISTparms:
        del NISTparms['tube_tails']
    # global settings are stored under the empty-string key
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
        'dominant_wavelength': max_wavelength, 'diffractometer_radius':
        0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
    '''Build and configure a peak-profile object from the NIST XRD
    Fundamental Parameters code, using the settings stored in the
    module-level NISTparms dict.

    :returns: a profile object that can report on each convolution or
      compute the composite peak shape.
    '''
    profile = FP.FP_profile(
        anglemode='twotheta',
        output_gaussian_smoother_bins_sigma=1.0,
        oversampling=NISTparms.get('oversampling', 10))
    profile.debug_cache = False
    # the empty-string key holds global settings; every other key names
    # the convolver its settings belong to
    for key, settings in NISTparms.items():
        if not key:
            profile.set_parameters(**settings)
        else:
            profile.set_parameters(convolver=key, **settings)
    return profile
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
    '''Compute a single diffraction peak with a NIST FPA profile object.

    :param object NISTpk: profile object from the NIST XRD Fundamental
      Parameters Code, typically created by :func:`setupFPAcalc`
    :param np.Array ttArr: evenly-spaced two-theta grid (degrees)
    :param float twotheta: nominal peak center (degrees)
    :param float calcwid: two-theta width over which to convolute (degrees)
    :param float step: grid spacing (degrees)
    :returns: (center_bin_idx, line_profile) where center_bin_idx is the
      index into ttArr nearest the peak center
    '''
    # insertion point for the peak center, clipped to stay inside the grid
    insertAt = ttArr.searchsorted(twotheta)
    center_bin_idx = min(insertAt, len(ttArr) - 1)
    NISTpk.set_optimized_window(
        twotheta_exact_bin_spacing_deg=step,
        twotheta_window_center_deg=ttArr[center_bin_idx],
        twotheta_approx_window_fullwidth_deg=calcwid)
    NISTpk.set_parameters(twotheta0_deg=twotheta)
    lineProfile = NISTpk.compute_line_profile()
    return center_bin_idx, lineProfile
def MakeSimSizer(G2frame, dlg):
    """Create a GUI to get simulation with parameters for Fundamental
    Parameters fitting.

    :param wx.Window dlg: Frame or Dialog where GUI will appear
    :returns: a sizer with the GUI controls

    Fixes relative to the previous version: bare ``except:`` clauses no
    longer swallow KeyboardInterrupt/SystemExit; a convolution error no
    longer overwrites previously accumulated warning text; file handles
    are managed with ``with`` so they are closed on error paths.
    """
    def _onOK(event):
        '''Validate the simulation inputs, build the simulated pattern and,
        when everything checks out, hand off to FitFPApeaks.'''
        msg = ''
        # sanity-check the requested two-theta range and peak count
        if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
            msg += 'First peak minus half the calc width is too low'
        if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
            if msg:
                msg += '\n'
            msg += 'Last peak plus half the calc width is too high'
        if simParms['npeaks'] < 8:
            if msg:
                msg += '\n'
            msg += 'At least 8 peaks are needed'
        if msg:
            G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
            return
        # compute the pattern on an even two-theta grid
        ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
            1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
            'step'])
        intArr = np.zeros_like(ttArr)
        peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
            simParms['npeaks'], endpoint=True)
        peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
        NISTpk = setupFPAcalc()
        minPtsHM = len(intArr)  # smallest/largest number of points above
        maxPtsHM = 0            # half-maximum seen over all peaks
        for num, twoth_peak in enumerate(peaklist):
            try:
                center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
                    twoth_peak, simParms['calcwid'], simParms['step'])
            except Exception:
                # was: bare except + ``msg = ...`` which discarded any text
                # already accumulated; record the error once and keep going
                if 'Error computing convolution' not in msg:
                    if msg:
                        msg += '\n'
                    msg += 'Error computing convolution, revise input'
                continue
            if num == 0:
                # show the individual convolutions for the first peak only
                G2plt.PlotFPAconvolutors(G2frame, NISTpk)
            pkMax = peakObj.peak.max()
            pkPts = len(peakObj.peak)
            minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
            maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
            startInd = center_bin_idx - pkPts // 2
            # add the normalized peak into the pattern, clipping at bounds
            if startInd < 0:
                intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
                    ] / pkMax
            elif startInd > len(intArr):
                break
            elif startInd + pkPts >= len(intArr):
                offset = pkPts - len(intArr[startInd:])
                intArr[startInd:startInd + pkPts - offset
                    ] += 10000 * peakObj.peak[:-offset] / pkMax
            else:
                intArr[startInd:startInd + pkPts
                    ] += 10000 * peakObj.peak / pkMax
        # require well-separated, adequately sampled peaks
        if maxPtsHM * simParms['step'] > peakSpacing / 4:
            if msg:
                msg += '\n'
            msg += (
                'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
                .format(maxPtsHM * simParms['step'], peakSpacing))
        if minPtsHM < 10:
            if msg:
                msg += '\n'
            msg += (
                'There are only {} points above the half-max. 10 are needed. Dropping step size.'
                .format(minPtsHM))
            simParms['step'] *= 0.5
        if msg:
            G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
            wx.CallAfter(MakeSimSizer, G2frame, dlg)
            return
        dlg.Destroy()
        wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)
    def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
        """Perform a peak fit to the FP simulated pattern
        """
        # "please wait" message while the sequence of fits runs
        plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
            RESIZE_BORDER)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
        txt = wx.StaticText(plswait, wx.ID_ANY,
            'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
        vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
        vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
        plswait.SetSizer(vbox)
        plswait.Layout()
        plswait.CenterOnParent()
        plswait.Show()
        wx.BeginBusyCursor()
        # find the two most intense wavelengths for the simulated histogram
        ints = list(NISTparms['emission']['emiss_intensities'])
        Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
            ] * 10000000000.0
        if len(ints) > 1:
            ints[np.argmax(ints)] = -1
            Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
                ] * 10000000000.0
        else:
            Lam2 = None
        histId = G2frame.AddSimulatedPowder(ttArr, intArr,
            'NIST Fundamental Parameters simulation', Lam1, Lam2)
        controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, G2frame.root, 'Controls'))
        controldat = controls.get('data', {'deriv type': 'analytic',
            'min dM/M': 0.001})
        Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
            (G2frame, histId, 'Instrument Parameters'))
        peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, histId, 'Peak List'))
        # turn off the background refinement
        bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
            GetGPXtreeItemId(G2frame, histId, 'Background'))
        bkg1[1] = False
        bkg1[2] = 0
        bkg1[3] = 0.0
        limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, histId, 'Limits'))
        # seed SH/L from the axial-divergence geometry, if available
        try:
            Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
                NISTparms['axial']['slit_length_source']) / NISTparms[''][
                'diffractometer_radius']
        except Exception:  # was a bare except:
            pass
        for pos in peaklist:
            i = ttArr.searchsorted(pos)
            area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
                maxPtsHM)])
            peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
                area))
        histData = G2frame.GPXtree.GetItemPyData(histId)
        bxye = np.zeros(len(histData[1][1]))
        # staged refinement: positions fixed, then peaks, then U-V-W-X-Y,
        # then SH/L
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False,
            controldat, None)[0]
        for pk in peakData['peaks']:
            pk[1] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        for p in ('U', 'V', 'W', 'X', 'Y'):
            Parms[p][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        Parms['SH/L'][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        # accept the fitted values and freeze further refinement
        for p in Parms:
            if len(Parms[p]) == 3:
                Parms[p][0] = Parms[p][1]
                Parms[p][2] = False
        wx.EndBusyCursor()
        plswait.Destroy()
        # offer to save the fitted instrument parameters
        pth = G2G.GetExportPath(G2frame)
        fldlg = wx.FileDialog(G2frame,
            'Set name to save GSAS-II instrument parameters file', pth, '',
            'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
            wx.FD_OVERWRITE_PROMPT)
        try:
            if fldlg.ShowModal() == wx.ID_OK:
                filename = fldlg.GetPath()
                filename = os.path.splitext(filename)[0] + '.instprm'
                # ``with`` guarantees the file is closed even if a write fails
                with open(filename, 'w') as File:
                    File.write(
                        '#GSAS-II instrument parameter file; do not add/delete items!\n'
                        )
                    for item in Parms:
                        File.write(item + ':' + str(Parms[item][1]) + '\n')
                print('Instrument parameters saved to: ' + filename)
        finally:
            fldlg.Destroy()
    def _onClose(event):
        dlg.Destroy()
    def SetButtonStatus(done=False):
        # fitting/saving only makes sense after FPA values have been entered
        OKbtn.Enable(bool(NISTparms))
        saveBtn.Enable(bool(NISTparms))
        if done:
            _onOK(None)
    def _onSetFPA(event):
        FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
            DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
        FPdlg.CenterOnParent()
        FPdlg.Raise()
        FPdlg.Show()
    def _onSaveFPA(event):
        filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
            'dict of NIST FPA values', dlg)
        if not filename:
            return
        # write NISTparms as an eval-able Python dict literal
        with open(filename, 'w') as fp:
            fp.write(
                '# parameters to be used in the NIST XRD Fundamental Parameters program\n'
                )
            fp.write('{\n')
            for key in sorted(NISTparms):
                fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
                if not key:
                    fp.write(' # global parameters')
                fp.write('\n')
            fp.write('}\n')
    def _onReadFPA(event):
        filename = G2G.GetImportFile(G2frame, message=
            'Read file with dict of values for NIST Fundamental Parameters',
            parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
        if not filename:
            return
        if not filename[0]:
            return
        try:
            with open(filename[0], 'r') as fp:
                txt = fp.read()
            NISTparms.clear()
            array = np.array  # provides 'array' for eval of saved numpy reprs
            # WARNING: eval of a user-supplied file can execute arbitrary
            # code; retained because the save format stores numpy reprs
            d = eval(txt)
            NISTparms.update(d)
        except Exception as err:
            G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
                filename, err), 'Bad dict input')
        SetButtonStatus()
    # (re)build the dialog contents
    if dlg.GetSizer():
        dlg.GetSizer().Clear(True)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
        'Fit Profile Parameters to Peaks from Fundamental Parameters',
        style=wx.ALIGN_CENTER), 0, wx.EXPAND)
    MainSizer.Add((-1, 5))
    prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
    text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for key, defVal, text in (('minTT', 3.0,
            'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
            'Location of last peak in 2theta (deg)'), ('step', 0.01,
            'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
            'Number of peaks'), ('calcwid', 2.0,
            'Range to compute each peak (deg 2theta)')):
        if key not in simParms:
            simParms[key] = defVal
        ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
        txt.Wrap(280)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetFPA)
    saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
    btnsizer.Add(saveBtn)
    saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
    readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
    btnsizer.Add(readBtn)
    readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
        Citation, size=(350, -1))
    txt.Wrap(340)
    MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(dlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    SetButtonStatus()
    dlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(dlg)
    dlg.SetMinSize(dlg.GetSize())
    dlg.SendSizeEvent()
    dlg.Raise()
def GetFPAInput(G2frame):
    '''Open the (modeless) Fundamental-Parameters input dialog and fill it
    with the simulation controls built by :func:`MakeSimSizer`.
    '''
    dlgStyle = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
    fpaDlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=dlgStyle)
    MakeSimSizer(G2frame, fpaDlg)
    fpaDlg.CenterOnParent()
    fpaDlg.Show()
    return
| <mask token>
# GUI values controlling the simulation: 2theta range, step size,
# number of peaks, etc. (filled in by MakeSimSizer)
simParms = {}
<mask token>
# Topas-style FPA parameter values keyed by parameter label;
# 'numWave' is the number of emission-spectrum lines currently in use
parmDict = {'numWave': 2}
<mask token>
# parameters in SI units for the NIST FPA code, grouped by convolver name
# (the empty-string key holds the global settings); see XferFPAsettings
NISTparms = {}
<mask token>
# (label, default value, explanation) triples common to all
# Bragg-Brentano instrument configurations
BraggBrentanoParms = [('divergence', 0.5,
    'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,
    'Soller slit axial divergence (degrees)'), ('Rs', 220,
    'Diffractometer radius (mm)'), ('filament_length', 12.0,
    'X-ray tube line focus length (mm)'), ('sample_length', 12.0,
    'Illuminated sample length in axial direction (mm)'), (
    'receiving_slit_length', 12.0,
    'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,
    'Linear absorption coef. adjusted for packing density (cm-1)'), (
    'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',
    8, 'Number of Fourier-space bins per two-theta step'), (
    'tube-tails_width', 0.04,
    'Tube filament width, in projection at takeoff angle (mm)'), (
    'tube-tails_L-tail', -1.0,
    'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',
    1.0, 'Right-side tube tails width, in projection (mm)'), (
    'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]
<mask token>
# additional parameters needed when a point detector is used
BBPointDetector = [('receiving_slit_width', 0.2,
    'Width of receiving slit (mm)')]
<mask token>
# additional parameters needed when a linear PSD is used
BBPSDDetector = [('lpsd_th2_angular_range', 3.0,
    'Angular range observed by PSD (degrees 2Theta)'), (
    'lpsd_equitorial_divergence', 0.1,
    'Equatorial divergence of the primary beam (degrees)')]
<mask token>
# reference users of this module are asked to cite
Citation = """MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.
"""
def SetCu2Wave():
    '''Load the two-line Cu K-alpha (Ka1 + Ka2) emission description into
    the module-level parmDict.
    '''
    # wavelength (A), relative intensity and Lorentz width (A/1000)
    # for the Ka1 and Ka2 lines, in that order
    spectrum = {
        'wave':   (1.540596, 1.544493),
        'int':    (0.653817, 0.346183),
        'lwidth': (0.501844, 0.626579),
    }
    for name, pair in spectrum.items():
        parmDict[name] = dict(enumerate(pair))
# initialize the emission parameters to the Cu Ka1+2 doublet at import time
SetCu2Wave()
def MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):
    """Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
    Parameter input is modeled after Topas input parameters.
    :param wx.Window FPdlg: Frame or Dialog where GUI will appear
    :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
      (linear) position sensitive detector
    :param dict parmDict: dict to place parameters. If empty, default values from
      globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in
      the array.
    :returns: a sizer with the GUI controls
    """
    def _onOK(event):
        # transfer Topas-style values into module-level NISTparms, then close
        XferFPAsettings(parmDict)
        SetButtonStatus(done=True)
        FPdlg.Destroy()
    def _onClose(event):
        SetButtonStatus()
        FPdlg.Destroy()
    def _onAddWave(event):
        # add one wavelength column and rebuild the dialog in place
        parmDict['numWave'] += 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onRemWave(event):
        parmDict['numWave'] -= 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu5Wave(event):
        # five-line Cu K-alpha emission description
        parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,
            1.541058, 1.54441, 1.544721))}
        parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791,
            0.0762, 0.2417, 0.0871))}
        parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437,
            0.6, 0.52, 0.62))}
        parmDict['numWave'] = 5
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu2Wave(event):
        SetCu2Wave()
        parmDict['numWave'] = 2
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetPoint(event):
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',
            SetButtonStatus)
    def _onSetPSD(event):
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',
            SetButtonStatus)
    def PlotTopasFPA(event):
        # simulate and plot a single peak centered at simParms['plotpos']
        XferFPAsettings(parmDict)
        ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid']),
            simParms['plotpos'] + simParms['calcwid'], simParms['step'])
        intArr = np.zeros_like(ttArr)
        NISTpk = setupFPAcalc()
        try:
            center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[
                'plotpos'], simParms['calcwid'], simParms['step'])
        except Exception as err:
            msg = 'Error computing convolution, revise input'
            print(msg)
            print(err)
            return
        G2plt.PlotFPAconvolutors(G2frame, NISTpk)
        pkPts = len(peakObj.peak)
        pkMax = peakObj.peak.max()
        startInd = center_bin_idx - pkPts // 2
        # add the normalized peak into intArr, clipping at the array bounds
        if startInd < 0:
            intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:] / pkMax
        elif startInd > len(intArr):
            return
        elif startInd + pkPts >= len(intArr):
            offset = pkPts - len(intArr[startInd:])
            intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[
                :-offset] / pkMax
        else:
            intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax
        G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\theta, deg$',
            labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,
            lines=True)
    # rebuild the dialog contents from scratch each time
    if FPdlg.GetSizer():
        FPdlg.GetSizer().Clear(True)
    numWave = parmDict['numWave']
    if mode == 'BBpoint':
        itemList = BraggBrentanoParms + BBPointDetector
    elif mode == 'BBPSD':
        itemList = BraggBrentanoParms + BBPSDDetector
    else:
        raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add((-1, 5))
    # one column per wavelength plus a label column
    waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)
    for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',
            u'Lorentz Width\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)
            ):
        text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)
        text.SetBackgroundColour(wx.WHITE)
        waveSizer.Add(text, 0, wx.EXPAND)
        if prm not in parmDict:
            parmDict[prm] = {}
        for i in range(numWave):
            if i not in parmDict[prm]:
                parmDict[prm][i] = defVal  # fill in defaults for new columns
            ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))
            waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)
    MainSizer.Add(waveSizer)
    MainSizer.Add((-1, 5))
    # buttons to add/remove wavelength columns or load canned Cu spectra
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onAddWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onRemWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # detector-type selector; the active mode's button is disabled
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')
    btn.Enable(not mode == 'BBpoint')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPoint)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')
    btn.Enable(not mode == 'BBPSD')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPSD)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # table of (label, value, explanation) rows for the instrument parameters
    prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER
        )
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for lbl, defVal, text in itemList:
        prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.
            ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)
        if lbl not in parmDict:
            parmDict[lbl] = defVal
        ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))
        txt.Wrap(380)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # single-peak plotting controls
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))
    if 'plotpos' not in simParms:
        simParms['plotpos'] = simParms['minTT']
    ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))
    btnsizer.Add(ctrl)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(FPdlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    FPdlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(FPdlg)
    FPdlg.SetMinSize(FPdlg.GetSize())
    FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    """convert Topas-type parameters to SI units for NIST and place in a dict sorted
    according to use in each convoluter
    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`
    :returns: a nested dict with global parameters and those for each convolution
    """
    wavenums = range(InpParms['numWave'])
    # wavelengths entered in Angstroms -> meters
    source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
        wavenums])
    la = [InpParms['int'][i] for i in wavenums]
    # normalize so the strongest emission line has intensity 1.0
    source_intensities = np.array(la) / max(la)
    # widths entered in milli-Angstroms -> meters
    source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
        i in wavenums])
    source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
        wavenums])
    NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
        'emiss_intensities': source_intensities, 'emiss_gauss_widths':
        source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
        'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
        'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
    # NOTE(review): the axial convolver presumably cannot handle exactly
    # equal lengths, so one is nudged slightly -- confirm against FP code
    if InpParms['filament_length'] == InpParms['receiving_slit_length']:
        InpParms['receiving_slit_length'] *= 1.00001
    NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
        InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
        ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
        'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
        'soller_angle'], 'angD_deg': InpParms['soller_angle']}
    # optional convolvers below: each is added when its inputs are set,
    # otherwise any stale entry from a previous transfer is removed
    if InpParms.get('LAC_cm', 0) > 0:
        NISTparms['absorption'] = {'absorption_coefficient': InpParms[
            'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
            'sample_thickness']}
    elif 'absorption' in NISTparms:
        del NISTparms['absorption']
    if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
            'lpsd_th2_angular_range', 0) > 0:
        PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
            'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
        NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
            'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
            PSDdetector_length_mm / 1000.0)}
    elif 'si_psd' in NISTparms:
        del NISTparms['si_psd']
    if InpParms.get('Specimen_Displacement'):
        NISTparms['displacement'] = {'specimen_displacement': 0.001 *
            InpParms['Specimen_Displacement']}
    elif 'displacement' in NISTparms:
        del NISTparms['displacement']
    if InpParms.get('receiving_slit_width'):
        NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
            'receiving_slit_width']}
    elif 'receiver_slit' in NISTparms:
        del NISTparms['receiver_slit']
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
            'tube-tails_rel-I', 0) > 0:
        NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
            'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
            'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
            'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
            'tube-tails_rel-I', 0.0)}
    elif 'tube_tails' in NISTparms:
        del NISTparms['tube_tails']
    # global settings are stored under the empty-string key
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
        'dominant_wavelength': max_wavelength, 'diffractometer_radius':
        0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
    '''Build and configure a peak-profile object from the NIST XRD
    Fundamental Parameters code, using the settings stored in the
    module-level NISTparms dict.

    :returns: a profile object that can report on each convolution or
      compute the composite peak shape.
    '''
    profile = FP.FP_profile(
        anglemode='twotheta',
        output_gaussian_smoother_bins_sigma=1.0,
        oversampling=NISTparms.get('oversampling', 10))
    profile.debug_cache = False
    # the empty-string key holds global settings; every other key names
    # the convolver its settings belong to
    for key, settings in NISTparms.items():
        if not key:
            profile.set_parameters(**settings)
        else:
            profile.set_parameters(convolver=key, **settings)
    return profile
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
    """Compute a single peak using a NIST profile object.

    :param object NISTpk: a peak profile computational object from the
      NIST XRD Fundamental Parameters Code, typically established from
      a call to :func:`setupFPAcalc`
    :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
    :param float twotheta: nominal center of peak (degrees)
    :param float calcwid: width to perform convolution (degrees)
    :param float step: step size (degrees)
    :returns: ``(center_bin_idx, profile)`` -- the index of the grid bin
      nearest the peak, and the computed line-profile result
    """
    # clamp so a peak beyond the end of the grid still maps onto a valid bin
    insert_idx = ttArr.searchsorted(twotheta)
    center_bin_idx = min(insert_idx, len(ttArr) - 1)
    NISTpk.set_optimized_window(
        twotheta_exact_bin_spacing_deg=step,
        twotheta_window_center_deg=ttArr[center_bin_idx],
        twotheta_approx_window_fullwidth_deg=calcwid)
    NISTpk.set_parameters(twotheta0_deg=twotheta)
    return center_bin_idx, NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
"""Create a GUI to get simulation with parameters for Fundamental
Parameters fitting.
:param wx.Window dlg: Frame or Dialog where GUI will appear
:returns: a sizer with the GUI controls
"""
def _onOK(event):
msg = ''
if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
msg += 'First peak minus half the calc width is too low'
if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
if msg:
msg += '\n'
msg += 'Last peak plus half the calc width is too high'
if simParms['npeaks'] < 8:
if msg:
msg += '\n'
msg += 'At least 8 peaks are needed'
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
return
ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
'step'])
intArr = np.zeros_like(ttArr)
peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
simParms['npeaks'], endpoint=True)
peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
NISTpk = setupFPAcalc()
minPtsHM = len(intArr)
maxPtsHM = 0
for num, twoth_peak in enumerate(peaklist):
try:
center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
twoth_peak, simParms['calcwid'], simParms['step'])
except:
if msg:
msg += '\n'
msg = 'Error computing convolution, revise input'
continue
if num == 0:
G2plt.PlotFPAconvolutors(G2frame, NISTpk)
pkMax = peakObj.peak.max()
pkPts = len(peakObj.peak)
minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
startInd = center_bin_idx - pkPts // 2
if startInd < 0:
intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
] / pkMax
elif startInd > len(intArr):
break
elif startInd + pkPts >= len(intArr):
offset = pkPts - len(intArr[startInd:])
intArr[startInd:startInd + pkPts - offset
] += 10000 * peakObj.peak[:-offset] / pkMax
else:
intArr[startInd:startInd + pkPts
] += 10000 * peakObj.peak / pkMax
if maxPtsHM * simParms['step'] > peakSpacing / 4:
if msg:
msg += '\n'
msg += (
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
.format(maxPtsHM * simParms['step'], peakSpacing))
if minPtsHM < 10:
if msg:
msg += '\n'
msg += (
'There are only {} points above the half-max. 10 are needed. Dropping step size.'
.format(minPtsHM))
simParms['step'] *= 0.5
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
wx.CallAfter(MakeSimSizer, G2frame, dlg)
return
dlg.Destroy()
wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)
def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
"""Perform a peak fit to the FP simulated pattern
"""
plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
RESIZE_BORDER)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
txt = wx.StaticText(plswait, wx.ID_ANY,
'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
plswait.SetSizer(vbox)
plswait.Layout()
plswait.CenterOnParent()
plswait.Show()
wx.BeginBusyCursor()
ints = list(NISTparms['emission']['emiss_intensities'])
Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
if len(ints) > 1:
ints[np.argmax(ints)] = -1
Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
else:
Lam2 = None
histId = G2frame.AddSimulatedPowder(ttArr, intArr,
'NIST Fundamental Parameters simulation', Lam1, Lam2)
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, G2frame.root, 'Controls'))
controldat = controls.get('data', {'deriv type': 'analytic',
'min dM/M': 0.001})
Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
(G2frame, histId, 'Instrument Parameters'))
peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Peak List'))
bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
GetGPXtreeItemId(G2frame, histId, 'Background'))
bkg1[1] = False
bkg1[2] = 0
bkg1[3] = 0.0
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Limits'))
try:
Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
NISTparms['axial']['slit_length_source']) / NISTparms[''][
'diffractometer_radius']
except:
pass
for pos in peaklist:
i = ttArr.searchsorted(pos)
area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
maxPtsHM)])
peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
area))
histData = G2frame.GPXtree.GetItemPyData(histId)
bxye = np.zeros(len(histData[1][1]))
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False,
controldat, None)[0]
for pk in peakData['peaks']:
pk[1] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in ('U', 'V', 'W', 'X', 'Y'):
Parms[p][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
Parms['SH/L'][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in Parms:
if len(Parms[p]) == 3:
Parms[p][0] = Parms[p][1]
Parms[p][2] = False
wx.EndBusyCursor()
plswait.Destroy()
pth = G2G.GetExportPath(G2frame)
fldlg = wx.FileDialog(G2frame,
'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
wx.FD_OVERWRITE_PROMPT)
try:
if fldlg.ShowModal() == wx.ID_OK:
filename = fldlg.GetPath()
filename = os.path.splitext(filename)[0] + '.instprm'
File = open(filename, 'w')
File.write(
'#GSAS-II instrument parameter file; do not add/delete items!\n'
)
for item in Parms:
File.write(item + ':' + str(Parms[item][1]) + '\n')
File.close()
print('Instrument parameters saved to: ' + filename)
finally:
fldlg.Destroy()
def _onClose(event):
dlg.Destroy()
def SetButtonStatus(done=False):
OKbtn.Enable(bool(NISTparms))
saveBtn.Enable(bool(NISTparms))
if done:
_onOK(None)
def _onSetFPA(event):
FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
FPdlg.CenterOnParent()
FPdlg.Raise()
FPdlg.Show()
def _onSaveFPA(event):
filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
'dict of NIST FPA values', dlg)
if not filename:
return
fp = open(filename, 'w')
fp.write(
'# parameters to be used in the NIST XRD Fundamental Parameters program\n'
)
fp.write('{\n')
for key in sorted(NISTparms):
fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
if not key:
fp.write(' # global parameters')
fp.write('\n')
fp.write('}\n')
fp.close()
def _onReadFPA(event):
filename = G2G.GetImportFile(G2frame, message=
'Read file with dict of values for NIST Fundamental Parameters',
parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
if not filename:
return
if not filename[0]:
return
try:
txt = open(filename[0], 'r').read()
NISTparms.clear()
array = np.array
d = eval(txt)
NISTparms.update(d)
except Exception as err:
G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
filename, err), 'Bad dict input')
SetButtonStatus()
if dlg.GetSizer():
dlg.GetSizer().Clear(True)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
'Fit Profile Parameters to Peaks from Fundamental Parameters',
style=wx.ALIGN_CENTER), 0, wx.EXPAND)
MainSizer.Add((-1, 5))
prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
for key, defVal, text in (('minTT', 3.0,
'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
'Location of last peak in 2theta (deg)'), ('step', 0.01,
'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
'Number of peaks'), ('calcwid', 2.0,
'Range to compute each peak (deg 2theta)')):
if key not in simParms:
simParms[key] = defVal
ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
txt.Wrap(280)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetFPA)
saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
btnsizer.Add(saveBtn)
saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
btnsizer.Add(readBtn)
readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
Citation, size=(350, -1))
txt.Wrap(340)
MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(dlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
OKbtn.Bind(wx.EVT_BUTTON, _onOK)
Cbtn.Bind(wx.EVT_BUTTON, _onClose)
SetButtonStatus()
dlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(dlg)
dlg.SetMinSize(dlg.GetSize())
dlg.SendSizeEvent()
dlg.Raise()
def GetFPAInput(G2frame):
    """Create and display the (non-modal) dialog used to collect
    Fundamental Parameters simulation input.

    :param wx.Frame G2frame: the main GSAS-II data tree window, used as
      the parent for the dialog
    """
    dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.
        DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
    # MakeSimSizer builds all of the dialog's controls and sizers
    MakeSimSizer(G2frame, dlg)
    dlg.CenterOnParent()
    dlg.Show()
    return
| <mask token>
from __future__ import division, print_function
import wx
import os.path
import numpy as np
import NIST_profile as FP
import GSASIIpath
import GSASIIctrlGUI as G2G
import GSASIIdataGUI as G2gd
import GSASIIplot as G2plt
import GSASIImath as G2mth
import GSASIIpwd as G2pwd
simParms = {}
<mask token>
parmDict = {'numWave': 2}
<mask token>
NISTparms = {}
<mask token>
BraggBrentanoParms = [('divergence', 0.5,
'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,
'Soller slit axial divergence (degrees)'), ('Rs', 220,
'Diffractometer radius (mm)'), ('filament_length', 12.0,
'X-ray tube line focus length (mm)'), ('sample_length', 12.0,
'Illuminated sample length in axial direction (mm)'), (
'receiving_slit_length', 12.0,
'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,
'Linear absorption coef. adjusted for packing density (cm-1)'), (
'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',
8, 'Number of Fourier-space bins per two-theta step'), (
'tube-tails_width', 0.04,
'Tube filament width, in projection at takeoff angle (mm)'), (
'tube-tails_L-tail', -1.0,
'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',
1.0, 'Right-side tube tails width, in projection (mm)'), (
'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]
<mask token>
BBPointDetector = [('receiving_slit_width', 0.2,
'Width of receiving slit (mm)')]
<mask token>
BBPSDDetector = [('lpsd_th2_angular_range', 3.0,
'Angular range observed by PSD (degrees 2Theta)'), (
'lpsd_equitorial_divergence', 0.1,
'Equatorial divergence of the primary beam (degrees)')]
<mask token>
# Reference shown to users, who are asked to cite it when using the
# NIST FPA code (fixed typo: "&&" -> "&" in the author list)
Citation = """MH Mendenhall, K Mullen & JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.
"""
def SetCu2Wave():
    """Load the two-line Cu K-alpha 1+2 spectrum into :data:`parmDict`.

    Fills the 'wave' (wavelengths, Angstroms), 'int' (relative
    intensities) and 'lwidth' (Lorentzian widths, Angstroms/1000)
    entries, each as a dict indexed by wavelength number (0 and 1).
    """
    spectrum = {
        'wave': (1.540596, 1.544493),
        'int': (0.653817, 0.346183),
        'lwidth': (0.501844, 0.626579),
    }
    for key, values in spectrum.items():
        parmDict[key] = dict(enumerate(values))
SetCu2Wave()
def MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):
"""Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
Parameter input is modeled after Topas input parameters.
:param wx.Window FPdlg: Frame or Dialog where GUI will appear
:param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
(linear) position sensitive detector
:param dict parmDict: dict to place parameters. If empty, default values from
globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in
the array.
:returns: a sizer with the GUI controls
"""
def _onOK(event):
XferFPAsettings(parmDict)
SetButtonStatus(done=True)
FPdlg.Destroy()
def _onClose(event):
SetButtonStatus()
FPdlg.Destroy()
def _onAddWave(event):
parmDict['numWave'] += 1
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
def _onRemWave(event):
parmDict['numWave'] -= 1
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
def _onSetCu5Wave(event):
parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,
1.541058, 1.54441, 1.544721))}
parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791,
0.0762, 0.2417, 0.0871))}
parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437,
0.6, 0.52, 0.62))}
parmDict['numWave'] = 5
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
def _onSetCu2Wave(event):
SetCu2Wave()
parmDict['numWave'] = 2
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
def _onSetPoint(event):
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',
SetButtonStatus)
def _onSetPSD(event):
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',
SetButtonStatus)
def PlotTopasFPA(event):
XferFPAsettings(parmDict)
ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'
]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])
intArr = np.zeros_like(ttArr)
NISTpk = setupFPAcalc()
try:
center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[
'plotpos'], simParms['calcwid'], simParms['step'])
except Exception as err:
msg = 'Error computing convolution, revise input'
print(msg)
print(err)
return
G2plt.PlotFPAconvolutors(G2frame, NISTpk)
pkPts = len(peakObj.peak)
pkMax = peakObj.peak.max()
startInd = center_bin_idx - pkPts // 2
if startInd < 0:
intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
] / pkMax
elif startInd > len(intArr):
return
elif startInd + pkPts >= len(intArr):
offset = pkPts - len(intArr[startInd:])
intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[
:-offset] / pkMax
else:
intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax
G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\theta, deg$',
labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,
lines=True)
if FPdlg.GetSizer():
FPdlg.GetSizer().Clear(True)
numWave = parmDict['numWave']
if mode == 'BBpoint':
itemList = BraggBrentanoParms + BBPointDetector
elif mode == 'BBPSD':
itemList = BraggBrentanoParms + BBPSDDetector
else:
raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add((-1, 5))
waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)
for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',
u'Lorentz Width\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)
):
text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
waveSizer.Add(text, 0, wx.EXPAND)
if prm not in parmDict:
parmDict[prm] = {}
for i in range(numWave):
if i not in parmDict[prm]:
parmDict[prm][i] = defVal
ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))
waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)
MainSizer.Add(waveSizer)
MainSizer.Add((-1, 5))
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onAddWave)
btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onRemWave)
btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)
btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 5))
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')
btn.Enable(not mode == 'BBpoint')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetPoint)
btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')
btn.Enable(not mode == 'BBPSD')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetPSD)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 5))
prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)
text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER
)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
for lbl, defVal, text in itemList:
prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.
ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)
if lbl not in parmDict:
parmDict[lbl] = defVal
ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))
prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))
txt.Wrap(380)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)
btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))
if 'plotpos' not in simParms:
simParms['plotpos'] = simParms['minTT']
ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))
btnsizer.Add(ctrl)
btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(FPdlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
OKbtn.Bind(wx.EVT_BUTTON, _onOK)
Cbtn.Bind(wx.EVT_BUTTON, _onClose)
FPdlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(FPdlg)
FPdlg.SetMinSize(FPdlg.GetSize())
FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    """Convert Topas-style parameters to SI units and store them in the
    module-level :data:`NISTparms` dict, grouped by the convolver that
    uses each of them.

    Note that this also modifies *InpParms* in one case: if the filament
    and receiving-slit lengths are exactly equal, the slit length is
    scaled by 1.00001 (presumably to avoid a degenerate axial-divergence
    computation -- TODO confirm against the NIST code). Convolver entries
    whose controlling inputs are zero or absent are deleted from
    :data:`NISTparms` so stale settings do not persist between calls.

    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`
    :returns: None; results are placed in :data:`NISTparms` (the entry
      with the empty-string key holds the global parameters)
    """
    # emission spectrum: wavelengths/widths converted from Angstroms
    # (widths are given in Angstroms/1000) to meters; intensities are
    # normalized so the strongest line is 1.0
    wavenums = range(InpParms['numWave'])
    source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
        wavenums])
    la = [InpParms['int'][i] for i in wavenums]
    source_intensities = np.array(la) / max(la)
    source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
        i in wavenums])
    # Gaussian source widths are fixed at 0.001 (A/1000) for every line
    source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
        wavenums])
    NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
        'emiss_intensities': source_intensities, 'emiss_gauss_widths':
        source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
        'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
        'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
    # axial divergence: mm -> m; equal source/target slit lengths are
    # nudged apart (this mutates the caller's dict)
    if InpParms['filament_length'] == InpParms['receiving_slit_length']:
        InpParms['receiving_slit_length'] *= 1.00001
    NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
        InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
        ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
        'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
        'soller_angle'], 'angD_deg': InpParms['soller_angle']}
    # sample absorption: cm-1 -> m-1, mm -> m; removed when LAC is zero
    if InpParms.get('LAC_cm', 0) > 0:
        NISTparms['absorption'] = {'absorption_coefficient': InpParms[
            'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
            'sample_thickness']}
    elif 'absorption' in NISTparms:
        del NISTparms['absorption']
    # Si PSD detector window, only when both PSD inputs are positive
    if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
        'lpsd_th2_angular_range', 0) > 0:
        # NOTE(review): np.arcsin of an angle already in radians looks
        # suspect here -- np.sin would be expected when converting an
        # angular range to a chord/arc length; confirm against the NIST
        # FPA reference implementation
        PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
            'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
        NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
            'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
            PSDdetector_length_mm / 1000.0)}
    elif 'si_psd' in NISTparms:
        del NISTparms['si_psd']
    # specimen displacement: mm -> m
    if InpParms.get('Specimen_Displacement'):
        NISTparms['displacement'] = {'specimen_displacement': 0.001 *
            InpParms['Specimen_Displacement']}
    elif 'displacement' in NISTparms:
        del NISTparms['displacement']
    # receiving slit (point detector only): mm -> m
    if InpParms.get('receiving_slit_width'):
        NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
            'receiving_slit_width']}
    elif 'receiver_slit' in NISTparms:
        del NISTparms['receiver_slit']
    # tube tails, only when both width and relative intensity are positive;
    # left tail is negated so both tails are signed offsets from center
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
        'tube-tails_rel-I', 0) > 0:
        NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
            'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
            'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
            'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
            'tube-tails_rel-I', 0.0)}
    elif 'tube_tails' in NISTparms:
        del NISTparms['tube_tails']
    # global settings, keyed by the empty string
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
        'dominant_wavelength': max_wavelength, 'diffractometer_radius':
        0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
"""Create a peak profile object using the NIST XRD Fundamental
Parameters Code.
:returns: a profile object that can provide information on
each convolution or compute the composite peak shape.
"""
p = FP.FP_profile(anglemode='twotheta',
output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get
('oversampling', 10))
p.debug_cache = False
for key in NISTparms:
if key:
p.set_parameters(convolver=key, **NISTparms[key])
else:
p.set_parameters(**NISTparms[key])
return p
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
"""Compute a single peak using a NIST profile object
:param object NISTpk: a peak profile computational object from the
NIST XRD Fundamental Parameters Code, typically established from
a call to :func:`SetupFPAcalc`
:param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
:param float twotheta: nominal center of peak (degrees)
:param float calcwid: width to perform convolution (degrees)
:param float step: step size
"""
center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)
NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,
twotheta_window_center_deg=ttArr[center_bin_idx],
twotheta_approx_window_fullwidth_deg=calcwid)
NISTpk.set_parameters(twotheta0_deg=twotheta)
return center_bin_idx, NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
"""Create a GUI to get simulation with parameters for Fundamental
Parameters fitting.
:param wx.Window dlg: Frame or Dialog where GUI will appear
:returns: a sizer with the GUI controls
"""
def _onOK(event):
msg = ''
if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
msg += 'First peak minus half the calc width is too low'
if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
if msg:
msg += '\n'
msg += 'Last peak plus half the calc width is too high'
if simParms['npeaks'] < 8:
if msg:
msg += '\n'
msg += 'At least 8 peaks are needed'
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
return
ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
'step'])
intArr = np.zeros_like(ttArr)
peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
simParms['npeaks'], endpoint=True)
peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
NISTpk = setupFPAcalc()
minPtsHM = len(intArr)
maxPtsHM = 0
for num, twoth_peak in enumerate(peaklist):
try:
center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
twoth_peak, simParms['calcwid'], simParms['step'])
except:
if msg:
msg += '\n'
msg = 'Error computing convolution, revise input'
continue
if num == 0:
G2plt.PlotFPAconvolutors(G2frame, NISTpk)
pkMax = peakObj.peak.max()
pkPts = len(peakObj.peak)
minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
startInd = center_bin_idx - pkPts // 2
if startInd < 0:
intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
] / pkMax
elif startInd > len(intArr):
break
elif startInd + pkPts >= len(intArr):
offset = pkPts - len(intArr[startInd:])
intArr[startInd:startInd + pkPts - offset
] += 10000 * peakObj.peak[:-offset] / pkMax
else:
intArr[startInd:startInd + pkPts
] += 10000 * peakObj.peak / pkMax
if maxPtsHM * simParms['step'] > peakSpacing / 4:
if msg:
msg += '\n'
msg += (
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
.format(maxPtsHM * simParms['step'], peakSpacing))
if minPtsHM < 10:
if msg:
msg += '\n'
msg += (
'There are only {} points above the half-max. 10 are needed. Dropping step size.'
.format(minPtsHM))
simParms['step'] *= 0.5
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
wx.CallAfter(MakeSimSizer, G2frame, dlg)
return
dlg.Destroy()
wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)
def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
"""Perform a peak fit to the FP simulated pattern
"""
plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
RESIZE_BORDER)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
txt = wx.StaticText(plswait, wx.ID_ANY,
'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
plswait.SetSizer(vbox)
plswait.Layout()
plswait.CenterOnParent()
plswait.Show()
wx.BeginBusyCursor()
ints = list(NISTparms['emission']['emiss_intensities'])
Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
if len(ints) > 1:
ints[np.argmax(ints)] = -1
Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
else:
Lam2 = None
histId = G2frame.AddSimulatedPowder(ttArr, intArr,
'NIST Fundamental Parameters simulation', Lam1, Lam2)
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, G2frame.root, 'Controls'))
controldat = controls.get('data', {'deriv type': 'analytic',
'min dM/M': 0.001})
Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
(G2frame, histId, 'Instrument Parameters'))
peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Peak List'))
bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
GetGPXtreeItemId(G2frame, histId, 'Background'))
bkg1[1] = False
bkg1[2] = 0
bkg1[3] = 0.0
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Limits'))
try:
Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
NISTparms['axial']['slit_length_source']) / NISTparms[''][
'diffractometer_radius']
except:
pass
for pos in peaklist:
i = ttArr.searchsorted(pos)
area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
maxPtsHM)])
peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
area))
histData = G2frame.GPXtree.GetItemPyData(histId)
bxye = np.zeros(len(histData[1][1]))
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False,
controldat, None)[0]
for pk in peakData['peaks']:
pk[1] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in ('U', 'V', 'W', 'X', 'Y'):
Parms[p][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
Parms['SH/L'][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in Parms:
if len(Parms[p]) == 3:
Parms[p][0] = Parms[p][1]
Parms[p][2] = False
wx.EndBusyCursor()
plswait.Destroy()
pth = G2G.GetExportPath(G2frame)
fldlg = wx.FileDialog(G2frame,
'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
wx.FD_OVERWRITE_PROMPT)
try:
if fldlg.ShowModal() == wx.ID_OK:
filename = fldlg.GetPath()
filename = os.path.splitext(filename)[0] + '.instprm'
File = open(filename, 'w')
File.write(
'#GSAS-II instrument parameter file; do not add/delete items!\n'
)
for item in Parms:
File.write(item + ':' + str(Parms[item][1]) + '\n')
File.close()
print('Instrument parameters saved to: ' + filename)
finally:
fldlg.Destroy()
def _onClose(event):
dlg.Destroy()
def SetButtonStatus(done=False):
OKbtn.Enable(bool(NISTparms))
saveBtn.Enable(bool(NISTparms))
if done:
_onOK(None)
def _onSetFPA(event):
FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
FPdlg.CenterOnParent()
FPdlg.Raise()
FPdlg.Show()
def _onSaveFPA(event):
filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
'dict of NIST FPA values', dlg)
if not filename:
return
fp = open(filename, 'w')
fp.write(
'# parameters to be used in the NIST XRD Fundamental Parameters program\n'
)
fp.write('{\n')
for key in sorted(NISTparms):
fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
if not key:
fp.write(' # global parameters')
fp.write('\n')
fp.write('}\n')
fp.close()
def _onReadFPA(event):
filename = G2G.GetImportFile(G2frame, message=
'Read file with dict of values for NIST Fundamental Parameters',
parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
if not filename:
return
if not filename[0]:
return
try:
txt = open(filename[0], 'r').read()
NISTparms.clear()
array = np.array
d = eval(txt)
NISTparms.update(d)
except Exception as err:
G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
filename, err), 'Bad dict input')
SetButtonStatus()
if dlg.GetSizer():
dlg.GetSizer().Clear(True)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
'Fit Profile Parameters to Peaks from Fundamental Parameters',
style=wx.ALIGN_CENTER), 0, wx.EXPAND)
MainSizer.Add((-1, 5))
prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
for key, defVal, text in (('minTT', 3.0,
'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
'Location of last peak in 2theta (deg)'), ('step', 0.01,
'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
'Number of peaks'), ('calcwid', 2.0,
'Range to compute each peak (deg 2theta)')):
if key not in simParms:
simParms[key] = defVal
ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
txt.Wrap(280)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetFPA)
saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
btnsizer.Add(saveBtn)
saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
btnsizer.Add(readBtn)
readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
Citation, size=(350, -1))
txt.Wrap(340)
MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(dlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
OKbtn.Bind(wx.EVT_BUTTON, _onOK)
Cbtn.Bind(wx.EVT_BUTTON, _onClose)
SetButtonStatus()
dlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(dlg)
dlg.SetMinSize(dlg.GetSize())
dlg.SendSizeEvent()
dlg.Raise()
def GetFPAInput(G2frame):
dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.
DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
MakeSimSizer(G2frame, dlg)
dlg.CenterOnParent()
dlg.Show()
return
| # -*- coding: utf-8 -*-
########### SVN repository information ###################
# $Date: $
# $Author: $
# $Revision: $
# $URL: $
# $Id: $
########### SVN repository information ###################
'''
*GSASIIfpaGUI: Fundamental Parameters Routines*
===============================================
This module contains routines for getting Fundamental Parameters
Approach (FPA) input, setting up for running the NIST XRD Fundamental
Parameters Code, plotting the convolutors and computing a set of peaks
generated by that code.
'''
from __future__ import division, print_function
import wx
import os.path
import numpy as np
import NIST_profile as FP
import GSASIIpath
import GSASIIctrlGUI as G2G
import GSASIIdataGUI as G2gd
import GSASIIplot as G2plt
import GSASIImath as G2mth
import GSASIIpwd as G2pwd
simParms = {}
'''Parameters to set range for pattern simulation
'''
parmDict = {'numWave':2}
'''Parameter dict used for reading Topas-style values. These are
converted to SI units and placed into :data:`NISTparms`
'''
NISTparms = {}
'''Parameters in a nested dict, with an entry for each concolutor. Entries in
those dicts have values in SI units (of course). NISTparms can be
can be input directly or can be from created from :data:`parmDict`
by :func:`XferFPAsettings`
'''
BraggBrentanoParms = [
('divergence', 0.5, 'Bragg-Brentano divergence angle (degrees)'),
('soller_angle', 2.0, 'Soller slit axial divergence (degrees)'),
('Rs', 220, 'Diffractometer radius (mm)'),
('filament_length', 12., 'X-ray tube line focus length (mm)'),
('sample_length', 12., 'Illuminated sample length in axial direction (mm)'),
('receiving_slit_length', 12., 'Length of receiving slit in axial direction (mm)'),
('LAC_cm', 0.,'Linear absorption coef. adjusted for packing density (cm-1)'),
('sample_thickness', 1., 'Depth of sample (mm)'),
('convolution_steps', 8, 'Number of Fourier-space bins per two-theta step'),
('tube-tails_width', 0.04,'Tube filament width, in projection at takeoff angle (mm)'),
('tube-tails_L-tail', -1.,'Left-side tube tails width, in projection (mm)'),
('tube-tails_R-tail', 1.,'Right-side tube tails width, in projection (mm)'),
('tube-tails_rel-I', 0.001,'Tube tails fractional intensity (no units)'),
]
'''FPA dict entries used in :func:`MakeTopasFPASizer`. Tuple contains
a dict key, a default value and a description. These are the parameters
needed for all Bragg Brentano instruments
'''
BBPointDetector = [
('receiving_slit_width', 0.2, 'Width of receiving slit (mm)'),]
'''Additional FPA dict entries used in :func:`MakeTopasFPASizer`
needed for Bragg Brentano instruments with point detectors.
'''
BBPSDDetector = [
('lpsd_th2_angular_range', 3.0, 'Angular range observed by PSD (degrees 2Theta)'),
('lpsd_equitorial_divergence', 0.1, 'Equatorial divergence of the primary beam (degrees)'),]
'''Additional FPA dict entries used in :func:`MakeTopasFPASizer`
needed for Bragg Brentano instruments with linear (1-D) PSD detectors.
'''
Citation = '''MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.
'''
def SetCu2Wave():
'''Set the parameters to the two-line Cu K alpha 1+2 spectrum
'''
parmDict['wave'] = {i:v for i,v in enumerate((1.540596,1.544493))}
parmDict['int'] = {i:v for i,v in enumerate((0.653817, 0.346183))}
parmDict['lwidth'] = {i:v for i,v in enumerate((0.501844,0.626579))}
SetCu2Wave() # use these as default
def MakeTopasFPASizer(G2frame,FPdlg,mode,SetButtonStatus):
'''Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
Parameter input is modeled after Topas input parameters.
:param wx.Window FPdlg: Frame or Dialog where GUI will appear
:param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
(linear) position sensitive detector
:param dict parmDict: dict to place parameters. If empty, default values from
globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in
the array.
:returns: a sizer with the GUI controls
'''
def _onOK(event):
XferFPAsettings(parmDict)
SetButtonStatus(done=True) # done=True triggers the simulation
FPdlg.Destroy()
def _onClose(event):
SetButtonStatus()
FPdlg.Destroy()
def _onAddWave(event):
parmDict['numWave'] += 1
wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
def _onRemWave(event):
parmDict['numWave'] -= 1
wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
def _onSetCu5Wave(event):
parmDict['wave'] = {i:v for i,v in enumerate((1.534753,1.540596,1.541058,1.54441,1.544721))}
parmDict['int'] = {i:v for i,v in enumerate((0.0159, 0.5791, 0.0762, 0.2417, 0.0871))}
parmDict['lwidth'] = {i:v for i,v in enumerate((3.6854, 0.437, 0.6, 0.52, 0.62))}
parmDict['numWave'] = 5
wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
def _onSetCu2Wave(event):
SetCu2Wave()
parmDict['numWave'] = 2
wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
def _onSetPoint(event):
wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBpoint',SetButtonStatus)
def _onSetPSD(event):
wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBPSD',SetButtonStatus)
def PlotTopasFPA(event):
XferFPAsettings(parmDict)
ttArr = np.arange(max(0.5,
simParms['plotpos']-simParms['calcwid']),
simParms['plotpos']+simParms['calcwid'],
simParms['step'])
intArr = np.zeros_like(ttArr)
NISTpk = setupFPAcalc()
try:
center_bin_idx,peakObj = doFPAcalc(
NISTpk,ttArr,simParms['plotpos'],simParms['calcwid'],
simParms['step'])
except Exception as err:
msg = "Error computing convolution, revise input"
print(msg)
print(err)
return
G2plt.PlotFPAconvolutors(G2frame,NISTpk)
pkPts = len(peakObj.peak)
pkMax = peakObj.peak.max()
startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data
# scale peak so max I=10,000 and add into intensity array
if startInd < 0:
intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax
elif startInd > len(intArr):
return
elif startInd+pkPts >= len(intArr):
offset = pkPts - len( intArr[startInd:] )
intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax
else:
intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax
G2plt.PlotXY(G2frame, [(ttArr, intArr)],
labelX=r'$2\theta, deg$',
labelY=r'Intensity (arbitrary)',
Title='FPA peak', newPlot=True, lines=True)
if FPdlg.GetSizer(): FPdlg.GetSizer().Clear(True)
numWave = parmDict['numWave']
if mode == 'BBpoint':
itemList = BraggBrentanoParms+BBPointDetector
elif mode == 'BBPSD':
itemList = BraggBrentanoParms+BBPSDDetector
else:
raise Exception('Unknown mode in MakeTopasFPASizer: '+mode)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add((-1,5))
waveSizer = wx.FlexGridSizer(cols=numWave+1,hgap=3,vgap=5)
for lbl,prm,defVal in zip(
(u'Wavelength (\u212b)','Rel. Intensity',u'Lorentz Width\n(\u212b/1000)'),
('wave','int','lwidth'),
(0.0, 1.0, 0.1),
):
text = wx.StaticText(FPdlg,wx.ID_ANY,lbl,style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
waveSizer.Add(text,0,wx.EXPAND)
if prm not in parmDict: parmDict[prm] = {}
for i in range(numWave):
if i not in parmDict[prm]: parmDict[prm][i] = defVal
ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict[prm],i,size=(90,-1))
waveSizer.Add(ctrl,1,wx.ALIGN_CENTER_VERTICAL,1)
MainSizer.Add(waveSizer)
MainSizer.Add((-1,5))
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(FPdlg, wx.ID_ANY,'Add col')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,_onAddWave)
btn = wx.Button(FPdlg, wx.ID_ANY,'Remove col')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,_onRemWave)
btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa1+2')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,_onSetCu2Wave)
btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa-5wave')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,_onSetCu5Wave)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1,5))
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(FPdlg, wx.ID_ANY,'Point Dect.')
btn.Enable(not mode == 'BBpoint')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,_onSetPoint)
btn = wx.Button(FPdlg, wx.ID_ANY,'PSD')
btn.Enable(not mode == 'BBPSD')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,_onSetPSD)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1,5))
prmSizer = wx.FlexGridSizer(cols=3,hgap=3,vgap=5)
text = wx.StaticText(FPdlg,wx.ID_ANY,'label',style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text,0,wx.EXPAND)
text = wx.StaticText(FPdlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text,0,wx.EXPAND)
text = wx.StaticText(FPdlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text,0,wx.EXPAND)
for lbl,defVal,text in itemList:
prmSizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,lbl),1,wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL,1)
if lbl not in parmDict: parmDict[lbl] = defVal
ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict,lbl,size=(70,-1))
prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)
txt = wx.StaticText(FPdlg,wx.ID_ANY,text,size=(400,-1))
txt.Wrap(380)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
MainSizer.Add((-1,4),1,wx.EXPAND,1)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,PlotTopasFPA)
btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' at '))
if 'plotpos' not in simParms: simParms['plotpos'] = simParms['minTT']
ctrl = G2G.ValidatedTxtCtrl(FPdlg,simParms,'plotpos',size=(70,-1))
btnsizer.Add(ctrl)
btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' deg.'))
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1,4),1,wx.EXPAND,1)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(FPdlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(FPdlg, wx.ID_CLOSE,"Cancel")
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1,4),1,wx.EXPAND,1)
# bindings for close of window
OKbtn.Bind(wx.EVT_BUTTON,_onOK)
Cbtn.Bind(wx.EVT_BUTTON,_onClose)
FPdlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(FPdlg)
FPdlg.SetMinSize(FPdlg.GetSize())
FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
'''convert Topas-type parameters to SI units for NIST and place in a dict sorted
according to use in each convoluter
:param dict InpParms: a dict with Topas-like parameters, as set in
:func:`MakeTopasFPASizer`
:returns: a nested dict with global parameters and those for each convolution
'''
wavenums = range(InpParms['numWave'])
source_wavelengths_m = 1.e-10 * np.array([InpParms['wave'][i] for i in wavenums])
la = [InpParms['int'][i] for i in wavenums]
source_intensities = np.array(la)/max(la)
source_lor_widths_m = 1.e-10 * 1.e-3 * np.array([InpParms['lwidth'][i] for i in wavenums])
source_gauss_widths_m = 1.e-10 * 1.e-3 * np.array([0.001 for i in wavenums])
NISTparms["emission"] = {'emiss_wavelengths' : source_wavelengths_m,
'emiss_intensities' : source_intensities,
'emiss_gauss_widths' : source_gauss_widths_m,
'emiss_lor_widths' : source_lor_widths_m,
'crystallite_size_gauss' : 1.e-9 * InpParms.get('Size_G',1e6),
'crystallite_size_lor' : 1.e-9 * InpParms.get('Size_L',1e6)}
if InpParms['filament_length'] == InpParms['receiving_slit_length']: # workaround:
InpParms['receiving_slit_length'] *= 1.00001 # avoid bug when slit lengths are identical
NISTparms["axial"] = {
'axDiv':"full", 'slit_length_source' : 1e-3*InpParms['filament_length'],
'slit_length_target' : 1e-3*InpParms['receiving_slit_length'],
'length_sample' : 1e-3 * InpParms['sample_length'],
'n_integral_points' : 10,
'angI_deg' : InpParms['soller_angle'],
'angD_deg': InpParms['soller_angle']
}
if InpParms.get('LAC_cm',0) > 0:
NISTparms["absorption"] = {
'absorption_coefficient': InpParms['LAC_cm']*100, #like LaB6, in m^(-1)
'sample_thickness': 1e-3 * InpParms['sample_thickness'],
}
elif "absorption" in NISTparms:
del NISTparms["absorption"]
if InpParms.get('lpsd_equitorial_divergence',0) > 0 and InpParms.get(
'lpsd_th2_angular_range',0) > 0:
PSDdetector_length_mm=np.arcsin(np.pi*InpParms['lpsd_th2_angular_range']/180.
)*InpParms['Rs'] # mm
NISTparms["si_psd"] = {
'equatorial_divergence_deg': InpParms['lpsd_equitorial_divergence'],
'si_psd_window_bounds': (0.,PSDdetector_length_mm/1000.)
}
elif "si_psd" in NISTparms:
del NISTparms["si_psd"]
if InpParms.get('Specimen_Displacement'):
NISTparms["displacement"] = {'specimen_displacement': 1e-3 * InpParms['Specimen_Displacement']}
elif "displacement" in NISTparms:
del NISTparms["displacement"]
if InpParms.get('receiving_slit_width'):
NISTparms["receiver_slit"] = {'slit_width':1e-3*InpParms['receiving_slit_width']}
elif "receiver_slit" in NISTparms:
del NISTparms["receiver_slit"]
if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
'tube-tails_rel-I',0) > 0:
NISTparms["tube_tails"] = {
'main_width' : 1e-3 * InpParms.get('tube-tails_width', 0.),
'tail_left' : -1e-3 * InpParms.get('tube-tails_L-tail',0.),
'tail_right' : 1e-3 * InpParms.get('tube-tails_R-tail',0.),
'tail_intens' : InpParms.get('tube-tails_rel-I',0.),}
elif "tube_tails" in NISTparms:
del NISTparms["tube_tails"]
# set Global parameters
max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
NISTparms[""] = {
'equatorial_divergence_deg' : InpParms['divergence'],
'dominant_wavelength' : max_wavelength,
'diffractometer_radius' : 1e-3* InpParms['Rs'],
'oversampling' : InpParms['convolution_steps'],
}
def setupFPAcalc():
'''Create a peak profile object using the NIST XRD Fundamental
Parameters Code.
:returns: a profile object that can provide information on
each convolution or compute the composite peak shape.
'''
p=FP.FP_profile(anglemode="twotheta",
output_gaussian_smoother_bins_sigma=1.0,
oversampling=NISTparms.get('oversampling',10))
p.debug_cache=False
#set parameters for each convolver
for key in NISTparms:
if key:
p.set_parameters(convolver=key,**NISTparms[key])
else:
p.set_parameters(**NISTparms[key])
return p
def doFPAcalc(NISTpk,ttArr,twotheta,calcwid,step):
'''Compute a single peak using a NIST profile object
:param object NISTpk: a peak profile computational object from the
NIST XRD Fundamental Parameters Code, typically established from
a call to :func:`SetupFPAcalc`
:param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
:param float twotheta: nominal center of peak (degrees)
:param float calcwid: width to perform convolution (degrees)
:param float step: step size
'''
# find closest point to twotheta (may be outside limits of the array)
center_bin_idx=min(ttArr.searchsorted(twotheta),len(ttArr)-1)
NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,
twotheta_window_center_deg=ttArr[center_bin_idx],
twotheta_approx_window_fullwidth_deg=calcwid,
)
NISTpk.set_parameters(twotheta0_deg=twotheta)
return center_bin_idx,NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
'''Create a GUI to get simulation with parameters for Fundamental
Parameters fitting.
:param wx.Window dlg: Frame or Dialog where GUI will appear
:returns: a sizer with the GUI controls
'''
def _onOK(event):
msg = ''
if simParms['minTT']-simParms['calcwid']/1.5 < 0.1:
msg += 'First peak minus half the calc width is too low'
if simParms['maxTT']+simParms['calcwid']/1.5 > 175:
if msg: msg += '\n'
msg += 'Last peak plus half the calc width is too high'
if simParms['npeaks'] < 8:
if msg: msg += '\n'
msg += 'At least 8 peaks are needed'
if msg:
G2G.G2MessageBox(dlg,msg,'Bad input, try again')
return
# compute "obs" pattern
ttArr = np.arange(max(0.5,
simParms['minTT']-simParms['calcwid']/1.5),
simParms['maxTT']+simParms['calcwid']/1.5,
simParms['step'])
intArr = np.zeros_like(ttArr)
peaklist = np.linspace(simParms['minTT'],simParms['maxTT'],
simParms['npeaks'],endpoint=True)
peakSpacing = (peaklist[-1]-peaklist[0])/(len(peaklist)-1)
NISTpk = setupFPAcalc()
minPtsHM = len(intArr) # initialize points above half-max
maxPtsHM = 0
for num,twoth_peak in enumerate(peaklist):
try:
center_bin_idx,peakObj = doFPAcalc(
NISTpk,ttArr,twoth_peak,simParms['calcwid'],
simParms['step'])
except:
if msg: msg += '\n'
msg = "Error computing convolution, revise input"
continue
if num == 0: G2plt.PlotFPAconvolutors(G2frame,NISTpk)
pkMax = peakObj.peak.max()
pkPts = len(peakObj.peak)
minPtsHM = min(minPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max
maxPtsHM = max(maxPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max
startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data
# scale peak so max I=10,000 and add into intensity array
if startInd < 0:
intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax
elif startInd > len(intArr):
break
elif startInd+pkPts >= len(intArr):
offset = pkPts - len( intArr[startInd:] )
intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax
else:
intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax
# check if peaks are too closely spaced
if maxPtsHM*simParms['step'] > peakSpacing/4:
if msg: msg += '\n'
msg += 'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'.format(
maxPtsHM*simParms['step'], peakSpacing)
# check if too few points across Hmax
if minPtsHM < 10:
if msg: msg += '\n'
msg += 'There are only {} points above the half-max. 10 are needed. Dropping step size.'.format(minPtsHM)
simParms['step'] *= 0.5
if msg:
G2G.G2MessageBox(dlg,msg,'Bad input, try again')
wx.CallAfter(MakeSimSizer,G2frame, dlg)
return
# pattern has been computed successfully
dlg.Destroy()
wx.CallAfter(FitFPApeaks,ttArr, intArr, peaklist, maxPtsHM) # do peakfit outside event callback
def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
'''Perform a peak fit to the FP simulated pattern
'''
plswait = wx.Dialog(G2frame,style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)
txt = wx.StaticText(plswait,wx.ID_ANY,
'Fitting peaks...\nPlease wait...',
style=wx.ALIGN_CENTER)
vbox.Add(txt,0,wx.ALL|wx.EXPAND)
vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)
plswait.SetSizer(vbox)
plswait.Layout()
plswait.CenterOnParent()
plswait.Show() # post "please wait"
wx.BeginBusyCursor()
# pick out one or two most intense wavelengths
ints = list(NISTparms['emission']['emiss_intensities'])
Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10
if len(ints) > 1:
ints[np.argmax(ints)] = -1
Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10
else:
Lam2 = None
histId = G2frame.AddSimulatedPowder(ttArr,intArr,
'NIST Fundamental Parameters simulation',
Lam1,Lam2)
controls = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls'))
controldat = controls.get('data',
{'deriv type':'analytic','min dM/M':0.001,}) #fil
Parms,Parms2 = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,histId,'Instrument Parameters'))
peakData = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,histId,'Peak List'))
# set background to 0 with one term = 0; disable refinement
bkg1,bkg2 = bkg = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,histId,'Background'))
bkg1[1]=False
bkg1[2]=0
bkg1[3]=0.0
limits = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,histId,'Limits'))
# approximate asym correction
try:
Parms['SH/L'][1] = 0.25 * (
NISTparms['axial']['length_sample']+
NISTparms['axial']['slit_length_source']
) / NISTparms['']['diffractometer_radius']
except:
pass
for pos in peaklist:
i = ttArr.searchsorted(pos)
area = sum(intArr[max(0,i-maxPtsHM):min(len(intArr),i+maxPtsHM)])
peakData['peaks'].append(G2mth.setPeakparms(Parms,Parms2,pos,area))
histData = G2frame.GPXtree.GetItemPyData(histId)
# refine peak positions only
bxye = np.zeros(len(histData[1][1]))
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
bkg,limits[1],
Parms,Parms2,histData[1],bxye,[],
False,controldat,None)[0]
# refine peak areas as well
for pk in peakData['peaks']:
pk[1] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
bkg,limits[1],
Parms,Parms2,histData[1],bxye,[],
False,controldat)[0]
# refine profile function
for p in ('U', 'V', 'W', 'X', 'Y'):
Parms[p][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
bkg,limits[1],
Parms,Parms2,histData[1],bxye,[],
False,controldat)[0]
# add in asymmetry
Parms['SH/L'][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
bkg,limits[1],
Parms,Parms2,histData[1],bxye,[],
False,controldat)[0]
# reset "initial" profile
for p in Parms:
if len(Parms[p]) == 3:
Parms[p][0] = Parms[p][1]
Parms[p][2] = False
wx.EndBusyCursor()
plswait.Destroy() # remove "please wait"
# save Iparms
pth = G2G.GetExportPath(G2frame)
fldlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if fldlg.ShowModal() == wx.ID_OK:
filename = fldlg.GetPath()
# make sure extension is .instprm
filename = os.path.splitext(filename)[0]+'.instprm'
File = open(filename,'w')
File.write("#GSAS-II instrument parameter file; do not add/delete items!\n")
for item in Parms:
File.write(item+':'+str(Parms[item][1])+'\n')
File.close()
print ('Instrument parameters saved to: '+filename)
finally:
fldlg.Destroy()
#GSASIIpath.IPyBreak()
def _onClose(event):
dlg.Destroy()
def SetButtonStatus(done=False):
OKbtn.Enable(bool(NISTparms))
saveBtn.Enable(bool(NISTparms))
if done: _onOK(None)
def _onSetFPA(event):
# Create a non-modal dialog for Topas-style FP input.
FPdlg = wx.Dialog(dlg,wx.ID_ANY,'FPA parameters',
style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
MakeTopasFPASizer(G2frame,FPdlg,'BBpoint',SetButtonStatus)
FPdlg.CenterOnParent()
FPdlg.Raise()
FPdlg.Show()
def _onSaveFPA(event):
filename = G2G.askSaveFile(G2frame,'','.NISTfpa',
'dict of NIST FPA values',dlg)
if not filename: return
fp = open(filename,'w')
fp.write('# parameters to be used in the NIST XRD Fundamental Parameters program\n')
fp.write('{\n')
for key in sorted(NISTparms):
fp.write(" '"+key+"' : "+str(NISTparms[key])+",")
if not key: fp.write(' # global parameters')
fp.write('\n')
fp.write('}\n')
fp.close()
def _onReadFPA(event):
filename = G2G.GetImportFile(G2frame,
message='Read file with dict of values for NIST Fundamental Parameters',
parent=dlg,
wildcard='dict of NIST FPA values|*.NISTfpa')
if not filename: return
if not filename[0]: return
try:
txt = open(filename[0],'r').read()
NISTparms.clear()
array = np.array
d = eval(txt)
NISTparms.update(d)
except Exception as err:
G2G.G2MessageBox(dlg,
u'Error reading file {}:{}\n'.format(filename,err),
'Bad dict input')
#GSASIIpath.IPyBreak()
SetButtonStatus()
if dlg.GetSizer(): dlg.GetSizer().Clear(True)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add(wx.StaticText(dlg,wx.ID_ANY,
'Fit Profile Parameters to Peaks from Fundamental Parameters',
style=wx.ALIGN_CENTER),0,wx.EXPAND)
MainSizer.Add((-1,5))
prmSizer = wx.FlexGridSizer(cols=2,hgap=3,vgap=5)
text = wx.StaticText(dlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text,0,wx.EXPAND)
text = wx.StaticText(dlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text,0,wx.EXPAND)
for key,defVal,text in (
('minTT',3.,'Location of first peak in 2theta (deg)'),
('maxTT',123.,'Location of last peak in 2theta (deg)'),
('step',0.01,'Pattern step size (deg 2theta)'),
('npeaks',13.,'Number of peaks'),
('calcwid',2.,'Range to compute each peak (deg 2theta)'),
):
if key not in simParms: simParms[key] = defVal
ctrl = G2G.ValidatedTxtCtrl(dlg,simParms,key,size=(70,-1))
prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)
txt = wx.StaticText(dlg,wx.ID_ANY,text,size=(300,-1))
txt.Wrap(280)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(dlg, wx.ID_ANY,'Input FP vals')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON,_onSetFPA)
saveBtn = wx.Button(dlg, wx.ID_ANY,'Save FPA dict')
btnsizer.Add(saveBtn)
saveBtn.Bind(wx.EVT_BUTTON,_onSaveFPA)
readBtn = wx.Button(dlg, wx.ID_ANY,'Read FPA dict')
btnsizer.Add(readBtn)
readBtn.Bind(wx.EVT_BUTTON,_onReadFPA)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1,4),1,wx.EXPAND,1)
txt = wx.StaticText(dlg,wx.ID_ANY,
'If you use this, please cite: '+Citation,
size=(350,-1))
txt.Wrap(340)
MainSizer.Add(txt,0,wx.ALIGN_CENTER)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(dlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(dlg, wx.ID_CLOSE,"Cancel")
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1,4),1,wx.EXPAND,1)
# bindings for close of window
OKbtn.Bind(wx.EVT_BUTTON,_onOK)
Cbtn.Bind(wx.EVT_BUTTON,_onClose)
SetButtonStatus()
dlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(dlg)
dlg.SetMinSize(dlg.GetSize())
dlg.SendSizeEvent()
dlg.Raise()
def GetFPAInput(G2frame):
dlg = wx.Dialog(G2frame,wx.ID_ANY,'FPA input',
style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
MakeSimSizer(G2frame,dlg)
dlg.CenterOnParent()
dlg.Show()
return
| [
6,
7,
9,
10,
11
] |
143 | 4652cd5548b550cc21d126fc4fbe3e316ecb71b2 | <mask token>
| <mask token>
@app.route('/', methods=['POST'])
def hello_world():
if request.method == 'POST':
json_data = request.get_data().decode('utf-8')
_data = json.loads(json_data)
orderNo = _data['orderNo']
name = _data['name']
idcard = _data['idcard']
mobile = _data['mobile']
json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})
param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')),
encoding='utf-8')
parameter = 'param=%s' % param
parameterXY = 'name=%s,idCard=%s,mobile=%s' % (name, idcard, mobile)
XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY,
TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard':
idcard, 'mobile': mobile}
WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,
TCWJNeedleUrl), 'appId': appId, 'param': param}
ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,
TCApplyNeedleUrl), 'appId': appId, 'param': param}
r1 = requests.post(TCCreditNeedleUrl, XYTZparams)
TCdata = r1.text
print(TCdata)
r2 = requests.post(TCWJNeedleUrl, WJTZparams)
print(r2.text)
rep = json.loads(r2.text)
if rep['status'] == 0:
data = rep['data']
TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print('TCdata1解密后', TCdata1)
r3 = requests.post(TCApplyNeedleUrl, ANparams)
print(r3.text)
rep = json.loads(r3.text)
if rep['status'] == 0:
data = rep['data']
TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print('TCdata2解密后', TCdata2)
return json.dumps(TCdata2)
if __name__ == '__main__':
app.run()
| <mask token>
app = Flask(__name__)
@app.route('/', methods=['POST'])
def hello_world():
if request.method == 'POST':
json_data = request.get_data().decode('utf-8')
_data = json.loads(json_data)
orderNo = _data['orderNo']
name = _data['name']
idcard = _data['idcard']
mobile = _data['mobile']
json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})
param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')),
encoding='utf-8')
parameter = 'param=%s' % param
parameterXY = 'name=%s,idCard=%s,mobile=%s' % (name, idcard, mobile)
XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY,
TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard':
idcard, 'mobile': mobile}
WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,
TCWJNeedleUrl), 'appId': appId, 'param': param}
ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,
TCApplyNeedleUrl), 'appId': appId, 'param': param}
r1 = requests.post(TCCreditNeedleUrl, XYTZparams)
TCdata = r1.text
print(TCdata)
r2 = requests.post(TCWJNeedleUrl, WJTZparams)
print(r2.text)
rep = json.loads(r2.text)
if rep['status'] == 0:
data = rep['data']
TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print('TCdata1解密后', TCdata1)
r3 = requests.post(TCApplyNeedleUrl, ANparams)
print(r3.text)
rep = json.loads(r3.text)
if rep['status'] == 0:
data = rep['data']
TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print('TCdata2解密后', TCdata2)
return json.dumps(TCdata2)
if __name__ == '__main__':
app.run()
| import json
import requests as requests
from flask import Flask
from flask import request
from tools import AESCipher, tokenId, TokenKey, appId
from tools import TCApplyNeedleUrl, TCCreditNeedleUrl, TCWJNeedleUrl
app = Flask(__name__)
@app.route('/', methods=['POST'])
def hello_world():
if request.method == 'POST':
json_data = request.get_data().decode('utf-8')
_data = json.loads(json_data)
orderNo = _data['orderNo']
name = _data['name']
idcard = _data['idcard']
mobile = _data['mobile']
json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})
param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')),
encoding='utf-8')
parameter = 'param=%s' % param
parameterXY = 'name=%s,idCard=%s,mobile=%s' % (name, idcard, mobile)
XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY,
TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard':
idcard, 'mobile': mobile}
WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,
TCWJNeedleUrl), 'appId': appId, 'param': param}
ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,
TCApplyNeedleUrl), 'appId': appId, 'param': param}
r1 = requests.post(TCCreditNeedleUrl, XYTZparams)
TCdata = r1.text
print(TCdata)
r2 = requests.post(TCWJNeedleUrl, WJTZparams)
print(r2.text)
rep = json.loads(r2.text)
if rep['status'] == 0:
data = rep['data']
TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print('TCdata1解密后', TCdata1)
r3 = requests.post(TCApplyNeedleUrl, ANparams)
print(r3.text)
rep = json.loads(r3.text)
if rep['status'] == 0:
data = rep['data']
TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print('TCdata2解密后', TCdata2)
return json.dumps(TCdata2)
if __name__ == '__main__':
app.run()
| import json
import requests as requests
from flask import Flask
from flask import request
from tools import AESCipher, tokenId, TokenKey, appId
from tools import TCApplyNeedleUrl, TCCreditNeedleUrl, TCWJNeedleUrl
app = Flask(__name__)
@app.route('/', methods=['POST'])
def hello_world():
if request.method == "POST":
json_data = request.get_data().decode('utf-8')
_data = json.loads(json_data)
orderNo = _data['orderNo']
name = _data['name']
idcard = _data['idcard']
mobile = _data['mobile']
json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})
param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')), encoding="utf-8")
parameter = ("param=%s" % (param))
parameterXY = ("name=%s,idCard=%s,mobile=%s" % (name, idcard, mobile))
XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY, TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard': idcard,
'mobile': mobile}
WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,TCWJNeedleUrl), 'appId': appId, 'param': param}
ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,TCApplyNeedleUrl), 'appId': appId, 'param': param}
r1 = requests.post(TCCreditNeedleUrl, XYTZparams)
TCdata = r1.text
print(TCdata)
r2 = requests.post(TCWJNeedleUrl,WJTZparams)
print(r2.text)
rep = json.loads(r2.text)
if rep["status"] == 0:
data = rep["data"]
TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print("TCdata1解密后", TCdata1)
r3 = requests.post(TCApplyNeedleUrl,ANparams)
print(r3.text)
rep = json.loads(r3.text)
if rep["status"] == 0:
data = rep["data"]
TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print("TCdata2解密后", TCdata2)
return json.dumps(TCdata2)
if __name__ == '__main__':
app.run()
| [
0,
2,
3,
4,
5
] |
144 | b88af16693eca10d0bd78fd706389f5468c9b99b | <mask token>
| <mask token>
app_name = 'jobs'
urlpatterns = [path('', job_view, name='job-index'), path('applicants/',
job_applicants_view, name='job-applicants'), path('posted/',
posted_job_view, name='job-posted'), path('business/',
bussiness_list_view, name='business'), path('upload/', job_upload_view,
name='job-upload')]
| from django.urls import path
from .views import job_upload_view, job_view, job_applicants_view, posted_job_view, bussiness_list_view
app_name = 'jobs'
urlpatterns = [path('', job_view, name='job-index'), path('applicants/',
job_applicants_view, name='job-applicants'), path('posted/',
posted_job_view, name='job-posted'), path('business/',
bussiness_list_view, name='business'), path('upload/', job_upload_view,
name='job-upload')]
| from django.urls import path
from .views import job_upload_view, job_view, job_applicants_view, posted_job_view, bussiness_list_view
app_name = 'jobs'
urlpatterns = [
path('', job_view, name='job-index'),
path('applicants/', job_applicants_view, name='job-applicants'),
path('posted/', posted_job_view, name='job-posted'),
path('business/', bussiness_list_view, name='business'),
path('upload/', job_upload_view, name='job-upload'),
] | null | [
0,
1,
2,
3
] |
145 | 43bad38d209b5c326cb9f17ba1ae135d06320e97 | <mask token>
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = IsAuthenticated,
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
| <mask token>
class AddressViewSet(ModelViewSet):
<mask token>
<mask token>
<mask token>
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = IsAuthenticated,
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
| <mask token>
class ContactViewSet(ModelViewSet):
<mask token>
<mask token>
<mask token>
<mask token>
class AddressViewSet(ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
permission_classes = IsAuthenticated,
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = IsAuthenticated,
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
| from rest_framework import filters
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ModelViewSet
from apis.models import Contact, Address, InvoicePosition, Country, Invoice
from apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, InvoiceSerializer
class ContactViewSet(ModelViewSet):
queryset = Contact.objects.all()
serializer_class = ContactSerializer
filterset_fields = ['type']
permission_classes = IsAuthenticated,
class AddressViewSet(ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
permission_classes = IsAuthenticated,
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = IsAuthenticated,
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
| from rest_framework import filters
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ModelViewSet
from apis.models import Contact, Address, InvoicePosition, Country, Invoice
from apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, \
InvoiceSerializer
class ContactViewSet(ModelViewSet):
queryset = Contact.objects.all()
serializer_class = ContactSerializer
filterset_fields = ['type']
permission_classes = (IsAuthenticated,)
class AddressViewSet(ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
permission_classes = (IsAuthenticated,)
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = (IsAuthenticated,)
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = (IsAuthenticated,)
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = (IsAuthenticated,)
| [
6,
7,
9,
11,
12
] |
146 | 1429524b0ae3b679bc3d4386dd17ed50b0fff381 | <mask token>
| <mask token>
for i in range(12):
mp = monthlyPaymentRate * rb
rb = rb - mp
rb = rb + rb * monthlyir
print('remaining balance: ', round(rb, 2))
| balance = 42
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
monthlyir = annualInterestRate / 12
rb = balance
for i in range(12):
mp = monthlyPaymentRate * rb
rb = rb - mp
rb = rb + rb * monthlyir
print('remaining balance: ', round(rb, 2))
| balance=42
annualInterestRate=0.20
monthlyPaymentRate=0.04
monthlyir = annualInterestRate/12
rb=balance
for i in range(12):
mp = monthlyPaymentRate * rb
rb=rb-mp
rb=rb+rb*monthlyir
print('remaining balance: ',round(rb,2))
| null | [
0,
1,
2,
3
] |
147 | cef4568b4568bceeedca6d57c0ccacfaae67c061 | <mask token>
class TimerBackground(QtCore.QThread):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Ui_Form1(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setupUi(self)
if os.path.exists(os.getcwd() + '\\settings.ini') and os.path.getsize(
os.getcwd() + '\\settings.ini') > 0:
with open(os.getcwd() + '\\settings.ini', 'r') as var:
global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
movie1Time = var.readline().strip()
self.updateGUITimers(movie1Time, self.textBrowser_6)
movie2Time = var.readline().strip()
self.updateGUITimers(movie2Time, self.textBrowser_2)
movie3Time = var.readline().strip()
self.updateGUITimers(movie3Time, self.textBrowser_5)
movie4Time = var.readline().strip()
self.updateGUITimers(movie4Time, self.textBrowser_3)
movie5Time = var.readline().strip()
self.updateGUITimers(movie5Time, self.textBrowser_4)
def setupUi(self, Form):
Form.setObjectName(_fromUtf8('Form'))
Form.resize(611, 289)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.
QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMinimumSize(QtCore.QSize(611, 289))
Form.setMaximumSize(QtCore.QSize(611, 289))
self.verticalLayoutWidget = QtGui.QWidget(Form)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 20, 61, 261))
self.verticalLayoutWidget.setObjectName(_fromUtf8(
'verticalLayoutWidget'))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
self.movieOne = QtGui.QLabel(self.verticalLayoutWidget)
self.movieOne.setObjectName(_fromUtf8('movieOne'))
self.verticalLayout.addWidget(self.movieOne)
self.movieTwo = QtGui.QLabel(self.verticalLayoutWidget)
self.movieTwo.setObjectName(_fromUtf8('movieTwo'))
self.verticalLayout.addWidget(self.movieTwo)
self.movieThree = QtGui.QLabel(self.verticalLayoutWidget)
self.movieThree.setObjectName(_fromUtf8('movieThree'))
self.verticalLayout.addWidget(self.movieThree)
self.movieFour = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFour.setObjectName(_fromUtf8('movieFour'))
self.verticalLayout.addWidget(self.movieFour)
self.movieFive = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFive.setObjectName(_fromUtf8('movieFive'))
self.verticalLayout.addWidget(self.movieFive)
self.DesignedBy = QtGui.QLabel(Form)
self.DesignedBy.setGeometry(QtCore.QRect(440, 40, 111, 31))
self.DesignedBy.setAlignment(QtCore.Qt.AlignCenter)
self.DesignedBy.setObjectName(_fromUtf8('DesignedBy'))
self.sourceAt = QtGui.QLabel(Form)
self.sourceAt.setGeometry(QtCore.QRect(440, 170, 111, 20))
self.sourceAt.setObjectName(_fromUtf8('sourceAt'))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(580, 270, 31, 16))
self.label.setObjectName(_fromUtf8('label'))
self.verticalLayoutWidget_2 = QtGui.QWidget(Form)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(210, 40, 101, 261)
)
self.verticalLayoutWidget_2.setObjectName(_fromUtf8(
'verticalLayoutWidget_2'))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8('verticalLayout_2'))
self.startTwo = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startTwo.setObjectName(_fromUtf8('startTwo'))
self.verticalLayout_2.addWidget(self.startTwo)
self.startOne = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startOne.setObjectName(_fromUtf8('startOne'))
self.verticalLayout_2.addWidget(self.startOne)
self.startThree = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startThree.setObjectName(_fromUtf8('startThree'))
self.verticalLayout_2.addWidget(self.startThree)
self.startFour = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFour.setObjectName(_fromUtf8('startFour'))
self.verticalLayout_2.addWidget(self.startFour)
self.startFive = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFive.setObjectName(_fromUtf8('startFive'))
self.verticalLayout_2.addWidget(self.startFive)
self.horizontalLayoutWidget = QtGui.QWidget(Form)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(400, 230, 160, 80)
)
self.horizontalLayoutWidget.setObjectName(_fromUtf8(
'horizontalLayoutWidget'))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
self.save = QtGui.QPushButton(self.horizontalLayoutWidget)
self.save.setObjectName(_fromUtf8('save'))
self.horizontalLayout.addWidget(self.save)
self.settings = QtGui.QPushButton(self.horizontalLayoutWidget)
self.settings.setObjectName(_fromUtf8('settings'))
self.horizontalLayout.addWidget(self.settings)
self.textBrowser_2 = QtGui.QTextBrowser(Form)
self.textBrowser_2.setGeometry(QtCore.QRect(90, 110, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.
QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_2.sizePolicy().
hasHeightForWidth())
self.textBrowser_2.setSizePolicy(sizePolicy)
self.textBrowser_2.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_2.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_2.setReadOnly(False)
self.textBrowser_2.setUndoRedoEnabled(True)
self.textBrowser_2.setObjectName(_fromUtf8('textBrowser_2'))
self.textBrowser_5 = QtGui.QTextBrowser(Form)
self.textBrowser_5.setGeometry(QtCore.QRect(90, 160, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_5.sizePolicy().
hasHeightForWidth())
self.textBrowser_5.setSizePolicy(sizePolicy)
self.textBrowser_5.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_5.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_5.setReadOnly(False)
self.textBrowser_5.setUndoRedoEnabled(True)
self.textBrowser_5.setObjectName(_fromUtf8('textBrowser_5'))
self.textBrowser_4 = QtGui.QTextBrowser(Form)
self.textBrowser_4.setGeometry(QtCore.QRect(90, 260, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_4.sizePolicy().
hasHeightForWidth())
self.textBrowser_4.setSizePolicy(sizePolicy)
self.textBrowser_4.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_4.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_4.setReadOnly(False)
self.textBrowser_4.setUndoRedoEnabled(True)
self.textBrowser_4.setObjectName(_fromUtf8('textBrowser_4'))
self.textBrowser_3 = QtGui.QTextBrowser(Form)
self.textBrowser_3.setGeometry(QtCore.QRect(90, 210, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_3.sizePolicy().
hasHeightForWidth())
self.textBrowser_3.setSizePolicy(sizePolicy)
self.textBrowser_3.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_3.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_3.setReadOnly(False)
self.textBrowser_3.setUndoRedoEnabled(True)
self.textBrowser_3.setObjectName(_fromUtf8('textBrowser_3'))
self.textBrowser_6 = QtGui.QTextBrowser(Form)
self.textBrowser_6.setGeometry(QtCore.QRect(90, 60, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.
QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_6.sizePolicy().
hasHeightForWidth())
self.textBrowser_6.setSizePolicy(sizePolicy)
self.textBrowser_6.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_6.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_6.setReadOnly(False)
self.textBrowser_6.setUndoRedoEnabled(True)
self.textBrowser_6.setObjectName(_fromUtf8('textBrowser_6'))
self.line = QtGui.QFrame(Form)
self.line.setGeometry(QtCore.QRect(340, 50, 20, 211))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8('line'))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(430, 190, 151, 20))
self.label_2.setOpenExternalLinks(True)
self.label_2.setObjectName(_fromUtf8('label_2'))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(420, 80, 161, 91))
self.label_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_3.setText(_fromUtf8(''))
self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8('logo.jpg')))
self.label_3.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop |
QtCore.Qt.AlignTrailing)
self.label_3.setObjectName(_fromUtf8('label_3'))
self.retranslateUi(Form)
QtCore.QObject.connect(self.textBrowser_6, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie1)
QtCore.QObject.connect(self.textBrowser_2, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie2)
QtCore.QObject.connect(self.textBrowser_5, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie3)
QtCore.QObject.connect(self.textBrowser_3, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie4)
QtCore.QObject.connect(self.textBrowser_4, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie5)
QtCore.QObject.connect(self.startTwo, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer1State)
QtCore.QObject.connect(self.startOne, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer2State)
QtCore.QObject.connect(self.startThree, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer3State)
QtCore.QObject.connect(self.startFour, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer4State)
QtCore.QObject.connect(self.startFive, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer5State)
QtCore.QObject.connect(self.save, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.saveChanges)
QtCore.QObject.connect(self.settings, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.reset)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate('Form', 'Multiple Movie Timer', None))
self.movieOne.setText(_translate('Form', 'Movie 1', None))
self.movieTwo.setText(_translate('Form', 'Movie 2', None))
self.movieThree.setText(_translate('Form', 'Movie 3', None))
self.movieFour.setText(_translate('Form', 'Movie 4', None))
self.movieFive.setText(_translate('Form', 'Movie 5', None))
self.DesignedBy.setText(_translate('Form',
'This program was\ndesigned by:', None))
self.sourceAt.setText(_translate('Form', ' Source is available at:',
None))
self.label.setText(_translate('Form', 'V 1.2', None))
self.startTwo.setText(_translate('Form', 'Start / Stop', None))
self.startOne.setText(_translate('Form', 'Start / Stop', None))
self.startThree.setText(_translate('Form', 'Start / Stop', None))
self.startFour.setText(_translate('Form', 'Start / Stop', None))
self.startFive.setText(_translate('Form', 'Start / Stop', None))
self.save.setToolTip(_translate('Form',
'<html><head/><body><p>Save all the current times</p></body></html>'
, None))
self.save.setText(_translate('Form', 'Save', None))
self.settings.setText(_translate('Form', 'Reset timers', None))
self.textBrowser_2.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_5.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_4.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_3.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_6.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.label_2.setText(_translate('Form',
'<html><head/><body><p><a href="https://github.com/tmwbook/small-projects/tree/Master/MultipleMovieTimer"><span style=" text-decoration: underline; color:#0000ff;">https://github.com/tmwbook</span></a></p></body></html>'
, None))
def changeMovie1(self):
pass
def changeMovie2(self):
pass
def changeMovie3(self):
pass
def changeMovie4(self):
pass
def changeMovie5(self):
pass
def changeTimer1State(self):
global movie1Time, timer1Running, timer1Start, timer1Time
if not timer1Running:
timer1Running = True
timer1Start = time()
self.thread1 = TimerBackground(timer1Start, timer1Running, 1,
movie1Time, self.textBrowser_6)
self.thread1.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer1Running:
self.thread1.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer1Running:
timer1Running = False
movie1Time = timer1Time
def changeTimer2State(self):
global movie2Time, timer2Running, timer2Start, timer2Time
if not timer2Running:
timer2Running = True
timer2Start = time()
self.thread2 = TimerBackground(timer2Start, timer2Running, 2,
movie2Time, self.textBrowser_2)
self.thread2.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer2Running:
self.thread2.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer2Running:
timer2Running = False
movie2Time = timer2Time
def changeTimer3State(self):
global movie3Time, timer3Running, timer3Start, timer3Time
if not timer3Running:
timer3Running = True
timer3Start = time()
self.thread3 = TimerBackground(timer3Start, timer3Running, 3,
movie3Time, self.textBrowser_5)
self.thread3.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer3Running:
self.thread3.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer3Running:
timer3Running = False
movie3Time = timer3Time
def changeTimer4State(self):
global movie4Time, timer4Running, timer4Start, timer4Time
if not timer4Running:
timer4Running = True
timer4Start = time()
self.thread4 = TimerBackground(timer4Start, timer4Running, 4,
movie4Time, self.textBrowser_3)
self.thread4.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer4Running:
self.thread4.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer4Running:
timer4Running = False
movie4Time = timer4Time
def changeTimer5State(self):
global movie5Time, timer5Running, timer5Start, timer5Time
if not timer5Running:
timer5Running = True
timer5Start = time()
self.thread5 = TimerBackground(timer5Start, timer5Running, 5,
movie5Time, self.textBrowser_4)
self.thread5.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer5Running:
self.thread5.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer5Running:
timer5Running = False
movie5Time = timer5Time
def reset(self):
global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
self.updateGUITimers('00:00:00', self.textBrowser_2)
self.updateGUITimers('00:00:00', self.textBrowser_3)
self.updateGUITimers('00:00:00', self.textBrowser_4)
self.updateGUITimers('00:00:00', self.textBrowser_5)
self.updateGUITimers('00:00:00', self.textBrowser_6)
timerStartingValue = '00:00:00'
movie1Time = timerStartingValue
movie2Time = timerStartingValue
movie3Time = timerStartingValue
movie4Time = timerStartingValue
movie5Time = timerStartingValue
timer1Time = timerStartingValue
timer2Time = timerStartingValue
timer3Time = timerStartingValue
timer4Time = timerStartingValue
timer5time = timerStartingValue
def saveChanges(self):
cwd = os.getcwd()
with open(cwd + '\\settings.ini', 'w') as var:
toWrite = [movie1Time, movie2Time, movie3Time, movie4Time,
movie5Time]
for i in toWrite:
var.write(i + '\n')
def updateGUITimers(self, time, textBrowser):
if time != 'none':
textBrowser.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">"""
+ str(time) + '</span></p></body></html>', None))
<mask token>
| <mask token>
class TimerBackground(QtCore.QThread):
<mask token>
def __init__(self, timerStart, timerRunning, timerNumber, movieTime,
textBrowser, parent=None):
QtCore.QThread.__init__(self, parent)
self.timerStart = timerStart
self.timerRunning = timerRunning
self.timerNumber = timerNumber
self.textBrowser = textBrowser
self.movieTime = movieTime
<mask token>
<mask token>
def deformatTime(self, time):
timeInSecs = 0
timeInSecs += int(time[0:2]) * 3600
timeInSecs += int(time[3:5]) * 60
timeInSecs += int(time[6:8])
return timeInSecs
<mask token>
class Ui_Form1(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setupUi(self)
if os.path.exists(os.getcwd() + '\\settings.ini') and os.path.getsize(
os.getcwd() + '\\settings.ini') > 0:
with open(os.getcwd() + '\\settings.ini', 'r') as var:
global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
movie1Time = var.readline().strip()
self.updateGUITimers(movie1Time, self.textBrowser_6)
movie2Time = var.readline().strip()
self.updateGUITimers(movie2Time, self.textBrowser_2)
movie3Time = var.readline().strip()
self.updateGUITimers(movie3Time, self.textBrowser_5)
movie4Time = var.readline().strip()
self.updateGUITimers(movie4Time, self.textBrowser_3)
movie5Time = var.readline().strip()
self.updateGUITimers(movie5Time, self.textBrowser_4)
def setupUi(self, Form):
Form.setObjectName(_fromUtf8('Form'))
Form.resize(611, 289)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.
QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMinimumSize(QtCore.QSize(611, 289))
Form.setMaximumSize(QtCore.QSize(611, 289))
self.verticalLayoutWidget = QtGui.QWidget(Form)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 20, 61, 261))
self.verticalLayoutWidget.setObjectName(_fromUtf8(
'verticalLayoutWidget'))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
self.movieOne = QtGui.QLabel(self.verticalLayoutWidget)
self.movieOne.setObjectName(_fromUtf8('movieOne'))
self.verticalLayout.addWidget(self.movieOne)
self.movieTwo = QtGui.QLabel(self.verticalLayoutWidget)
self.movieTwo.setObjectName(_fromUtf8('movieTwo'))
self.verticalLayout.addWidget(self.movieTwo)
self.movieThree = QtGui.QLabel(self.verticalLayoutWidget)
self.movieThree.setObjectName(_fromUtf8('movieThree'))
self.verticalLayout.addWidget(self.movieThree)
self.movieFour = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFour.setObjectName(_fromUtf8('movieFour'))
self.verticalLayout.addWidget(self.movieFour)
self.movieFive = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFive.setObjectName(_fromUtf8('movieFive'))
self.verticalLayout.addWidget(self.movieFive)
self.DesignedBy = QtGui.QLabel(Form)
self.DesignedBy.setGeometry(QtCore.QRect(440, 40, 111, 31))
self.DesignedBy.setAlignment(QtCore.Qt.AlignCenter)
self.DesignedBy.setObjectName(_fromUtf8('DesignedBy'))
self.sourceAt = QtGui.QLabel(Form)
self.sourceAt.setGeometry(QtCore.QRect(440, 170, 111, 20))
self.sourceAt.setObjectName(_fromUtf8('sourceAt'))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(580, 270, 31, 16))
self.label.setObjectName(_fromUtf8('label'))
self.verticalLayoutWidget_2 = QtGui.QWidget(Form)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(210, 40, 101, 261)
)
self.verticalLayoutWidget_2.setObjectName(_fromUtf8(
'verticalLayoutWidget_2'))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8('verticalLayout_2'))
self.startTwo = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startTwo.setObjectName(_fromUtf8('startTwo'))
self.verticalLayout_2.addWidget(self.startTwo)
self.startOne = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startOne.setObjectName(_fromUtf8('startOne'))
self.verticalLayout_2.addWidget(self.startOne)
self.startThree = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startThree.setObjectName(_fromUtf8('startThree'))
self.verticalLayout_2.addWidget(self.startThree)
self.startFour = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFour.setObjectName(_fromUtf8('startFour'))
self.verticalLayout_2.addWidget(self.startFour)
self.startFive = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFive.setObjectName(_fromUtf8('startFive'))
self.verticalLayout_2.addWidget(self.startFive)
self.horizontalLayoutWidget = QtGui.QWidget(Form)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(400, 230, 160, 80)
)
self.horizontalLayoutWidget.setObjectName(_fromUtf8(
'horizontalLayoutWidget'))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
self.save = QtGui.QPushButton(self.horizontalLayoutWidget)
self.save.setObjectName(_fromUtf8('save'))
self.horizontalLayout.addWidget(self.save)
self.settings = QtGui.QPushButton(self.horizontalLayoutWidget)
self.settings.setObjectName(_fromUtf8('settings'))
self.horizontalLayout.addWidget(self.settings)
self.textBrowser_2 = QtGui.QTextBrowser(Form)
self.textBrowser_2.setGeometry(QtCore.QRect(90, 110, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.
QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_2.sizePolicy().
hasHeightForWidth())
self.textBrowser_2.setSizePolicy(sizePolicy)
self.textBrowser_2.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_2.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_2.setReadOnly(False)
self.textBrowser_2.setUndoRedoEnabled(True)
self.textBrowser_2.setObjectName(_fromUtf8('textBrowser_2'))
self.textBrowser_5 = QtGui.QTextBrowser(Form)
self.textBrowser_5.setGeometry(QtCore.QRect(90, 160, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_5.sizePolicy().
hasHeightForWidth())
self.textBrowser_5.setSizePolicy(sizePolicy)
self.textBrowser_5.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_5.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_5.setReadOnly(False)
self.textBrowser_5.setUndoRedoEnabled(True)
self.textBrowser_5.setObjectName(_fromUtf8('textBrowser_5'))
self.textBrowser_4 = QtGui.QTextBrowser(Form)
self.textBrowser_4.setGeometry(QtCore.QRect(90, 260, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_4.sizePolicy().
hasHeightForWidth())
self.textBrowser_4.setSizePolicy(sizePolicy)
self.textBrowser_4.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_4.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_4.setReadOnly(False)
self.textBrowser_4.setUndoRedoEnabled(True)
self.textBrowser_4.setObjectName(_fromUtf8('textBrowser_4'))
self.textBrowser_3 = QtGui.QTextBrowser(Form)
self.textBrowser_3.setGeometry(QtCore.QRect(90, 210, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_3.sizePolicy().
hasHeightForWidth())
self.textBrowser_3.setSizePolicy(sizePolicy)
self.textBrowser_3.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_3.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_3.setReadOnly(False)
self.textBrowser_3.setUndoRedoEnabled(True)
self.textBrowser_3.setObjectName(_fromUtf8('textBrowser_3'))
self.textBrowser_6 = QtGui.QTextBrowser(Form)
self.textBrowser_6.setGeometry(QtCore.QRect(90, 60, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.
QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_6.sizePolicy().
hasHeightForWidth())
self.textBrowser_6.setSizePolicy(sizePolicy)
self.textBrowser_6.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_6.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_6.setReadOnly(False)
self.textBrowser_6.setUndoRedoEnabled(True)
self.textBrowser_6.setObjectName(_fromUtf8('textBrowser_6'))
self.line = QtGui.QFrame(Form)
self.line.setGeometry(QtCore.QRect(340, 50, 20, 211))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8('line'))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(430, 190, 151, 20))
self.label_2.setOpenExternalLinks(True)
self.label_2.setObjectName(_fromUtf8('label_2'))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(420, 80, 161, 91))
self.label_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_3.setText(_fromUtf8(''))
self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8('logo.jpg')))
self.label_3.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop |
QtCore.Qt.AlignTrailing)
self.label_3.setObjectName(_fromUtf8('label_3'))
self.retranslateUi(Form)
QtCore.QObject.connect(self.textBrowser_6, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie1)
QtCore.QObject.connect(self.textBrowser_2, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie2)
QtCore.QObject.connect(self.textBrowser_5, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie3)
QtCore.QObject.connect(self.textBrowser_3, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie4)
QtCore.QObject.connect(self.textBrowser_4, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie5)
QtCore.QObject.connect(self.startTwo, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer1State)
QtCore.QObject.connect(self.startOne, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer2State)
QtCore.QObject.connect(self.startThree, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer3State)
QtCore.QObject.connect(self.startFour, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer4State)
QtCore.QObject.connect(self.startFive, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer5State)
QtCore.QObject.connect(self.save, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.saveChanges)
QtCore.QObject.connect(self.settings, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.reset)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
    """Install every user-visible string: the window title, the movie row
    labels, button captions, tooltips and the initial 00:00:00 rich-text
    timer documents."""
    Form.setWindowTitle(_translate('Form', 'Multiple Movie Timer', None))
    row_labels = ((self.movieOne, 'Movie 1'),
                  (self.movieTwo, 'Movie 2'),
                  (self.movieThree, 'Movie 3'),
                  (self.movieFour, 'Movie 4'),
                  (self.movieFive, 'Movie 5'))
    for widget, caption in row_labels:
        widget.setText(_translate('Form', caption, None))
    self.DesignedBy.setText(_translate('Form',
                                       'This program was\ndesigned by:', None))
    self.sourceAt.setText(_translate('Form', ' Source is available at:', None))
    self.label.setText(_translate('Form', 'V 1.2', None))
    # Every start/stop button shares one caption.
    for button in (self.startTwo, self.startOne, self.startThree,
                   self.startFour, self.startFive):
        button.setText(_translate('Form', 'Start / Stop', None))
    self.save.setToolTip(_translate('Form',
        '<html><head/><body><p>Save all the current times</p></body></html>',
        None))
    self.save.setText(_translate('Form', 'Save', None))
    self.settings.setText(_translate('Form', 'Reset timers', None))
    # All five timer displays start out showing the same 00:00:00 document.
    zero_document = _translate('Form',
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>""",
        None)
    for browser in (self.textBrowser_2, self.textBrowser_5,
                    self.textBrowser_4, self.textBrowser_3,
                    self.textBrowser_6):
        browser.setHtml(zero_document)
    self.label_2.setText(_translate('Form',
        '<html><head/><body><p><a href="https://github.com/tmwbook/small-projects/tree/Master/MultipleMovieTimer"><span style=" text-decoration: underline; color:#0000ff;">https://github.com/tmwbook</span></a></p></body></html>'
        , None))
def changeMovie1(self):
    # Slot for textBrowser_6's textChanged() signal (movie 1's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie2(self):
    # Slot for textBrowser_2's textChanged() signal (movie 2's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie3(self):
    # Slot for textBrowser_5's textChanged() signal (movie 3's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie4(self):
    # Slot for textBrowser_3's textChanged() signal (movie 4's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie5(self):
    # Slot for textBrowser_4's textChanged() signal (movie 5's time box).
    # Intentionally a no-op placeholder.
    pass
def changeTimer1State(self):
    """Toggle timer 1: start a once-a-second background tick, or stop it
    and bank the elapsed total into movie1Time."""
    global movie1Time, timer1Running, timer1Start, timer1Time
    if not timer1Running:
        timer1Running = True
        timer1Start = time()  # wall-clock moment this run began
        # Worker recomputes the total and emits (formatted_time, browser)
        # back to the GUI thread via updateGUITimers.
        self.thread1 = TimerBackground(timer1Start, timer1Running, 1,
                                       movie1Time, self.textBrowser_6)
        self.thread1.index_finished.connect(self.updateGUITimers)

        def loopThread():
            # Re-arm once per second while the flag stays set; the last
            # pending Timer fires once after a stop and simply does nothing.
            if timer1Running:
                self.thread1.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer1Running:
        timer1Running = False
        movie1Time = timer1Time  # bank the total so a restart resumes here
def changeTimer2State(self):
    """Toggle timer 2: start a once-a-second background tick, or stop it
    and bank the elapsed total into movie2Time."""
    global movie2Time, timer2Running, timer2Start, timer2Time
    if not timer2Running:
        timer2Running = True
        timer2Start = time()  # wall-clock moment this run began
        # Worker recomputes the total and emits (formatted_time, browser)
        # back to the GUI thread via updateGUITimers.
        self.thread2 = TimerBackground(timer2Start, timer2Running, 2,
                                       movie2Time, self.textBrowser_2)
        self.thread2.index_finished.connect(self.updateGUITimers)

        def loopThread():
            # Re-arm once per second while the flag stays set; the last
            # pending Timer fires once after a stop and simply does nothing.
            if timer2Running:
                self.thread2.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer2Running:
        timer2Running = False
        movie2Time = timer2Time  # bank the total so a restart resumes here
def changeTimer3State(self):
    """Toggle timer 3: start a once-a-second background tick, or stop it
    and bank the elapsed total into movie3Time."""
    global movie3Time, timer3Running, timer3Start, timer3Time
    if not timer3Running:
        timer3Running = True
        timer3Start = time()  # wall-clock moment this run began
        # Worker recomputes the total and emits (formatted_time, browser)
        # back to the GUI thread via updateGUITimers.
        self.thread3 = TimerBackground(timer3Start, timer3Running, 3,
                                       movie3Time, self.textBrowser_5)
        self.thread3.index_finished.connect(self.updateGUITimers)

        def loopThread():
            # Re-arm once per second while the flag stays set; the last
            # pending Timer fires once after a stop and simply does nothing.
            if timer3Running:
                self.thread3.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer3Running:
        timer3Running = False
        movie3Time = timer3Time  # bank the total so a restart resumes here
def changeTimer4State(self):
    """Toggle timer 4: start a once-a-second background tick, or stop it
    and bank the elapsed total into movie4Time."""
    global movie4Time, timer4Running, timer4Start, timer4Time
    if not timer4Running:
        timer4Running = True
        timer4Start = time()  # wall-clock moment this run began
        # Worker recomputes the total and emits (formatted_time, browser)
        # back to the GUI thread via updateGUITimers.
        self.thread4 = TimerBackground(timer4Start, timer4Running, 4,
                                       movie4Time, self.textBrowser_3)
        self.thread4.index_finished.connect(self.updateGUITimers)

        def loopThread():
            # Re-arm once per second while the flag stays set; the last
            # pending Timer fires once after a stop and simply does nothing.
            if timer4Running:
                self.thread4.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer4Running:
        timer4Running = False
        movie4Time = timer4Time  # bank the total so a restart resumes here
def changeTimer5State(self):
    """Toggle timer 5: start a once-a-second background tick, or stop it
    and bank the elapsed total into movie5Time."""
    global movie5Time, timer5Running, timer5Start, timer5Time
    if not timer5Running:
        timer5Running = True
        timer5Start = time()  # wall-clock moment this run began
        # Worker recomputes the total and emits (formatted_time, browser)
        # back to the GUI thread via updateGUITimers.
        self.thread5 = TimerBackground(timer5Start, timer5Running, 5,
                                       movie5Time, self.textBrowser_4)
        self.thread5.index_finished.connect(self.updateGUITimers)

        def loopThread():
            # Re-arm once per second while the flag stays set; the last
            # pending Timer fires once after a stop and simply does nothing.
            if timer5Running:
                self.thread5.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer5Running:
        timer5Running = False
        movie5Time = timer5Time  # bank the total so a restart resumes here
def reset(self):
    """Reset every timer display and all saved/running time globals to zero.

    Bug fix: the original's last assignment wrote to a misspelled name
    (``timer5time``), so timer 5's running total was never actually reset.
    """
    global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
    global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
    timerStartingValue = '00:00:00'
    # Blank all five on-screen displays.
    for browser in (self.textBrowser_2, self.textBrowser_3,
                    self.textBrowser_4, self.textBrowser_5,
                    self.textBrowser_6):
        self.updateGUITimers(timerStartingValue, browser)
    movie1Time = movie2Time = movie3Time = movie4Time = movie5Time = \
        timerStartingValue
    timer1Time = timer2Time = timer3Time = timer4Time = timer5Time = \
        timerStartingValue
def saveChanges(self):
    """Persist the five accumulated movie times, one per line, to
    settings.ini in the current working directory."""
    settings_path = os.getcwd() + '\\settings.ini'
    times = (movie1Time, movie2Time, movie3Time, movie4Time, movie5Time)
    with open(settings_path, 'w') as settings_file:
        settings_file.writelines(entry + '\n' for entry in times)
def updateGUITimers(self, time, textBrowser):
    """Slot: render *time* ('HH:MM:SS') into *textBrowser*.

    The sentinel string 'none' means "nothing to show" and leaves the
    widget untouched.
    """
    if time == 'none':
        return
    document = (
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">"""
        + str(time) + '</span></p></body></html>')
    textBrowser.setHtml(_translate('Form', document, None))
<mask token>
| <mask token>
class TimerBackground(QtCore.QThread):
    """Worker thread that recomputes one movie timer's elapsed time.

    Each tick the GUI constructs one of these and calls start(); run()
    computes the new total and publishes it through ``index_finished``
    as a (formatted "HH:MM:SS" string, target QTextBrowser) pair.

    This definition was corrupted in the file (masked-out members);
    reconstructed from the intact duplicate definition later in the file,
    with two bug fixes noted inline.
    """

    # Signal payload: formatted "HH:MM:SS" string + the browser to repaint.
    index_finished = QtCore.pyqtSignal([str, QtCore.QObject])

    def __init__(self, timerStart, timerRunning, timerNumber, movieTime,
                 textBrowser, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.timerStart = timerStart      # time() stamp when this run began
        self.timerRunning = timerRunning  # snapshot of the running flag
        self.timerNumber = timerNumber    # which of the five timers (1..5)
        self.textBrowser = textBrowser    # widget the GUI slot should repaint
        self.movieTime = movieTime        # previously banked "HH:MM:SS" total

    def run(self):
        self.incrememnt(self.timerStart, self.timerRunning,
                        self.timerNumber, self.movieTime)

    def formatTime(self, time):
        """Format a second count as zero-padded "HH:MM:SS".

        Bug fix: the original dropped the ':' separator whenever the hour
        or minute field was two digits (e.g. 600 s rendered as "00:1000"),
        which also broke deformatTime's fixed-position parsing.
        """
        hours, remainder = divmod(int(time), 3600)
        minutes, seconds = divmod(remainder, 60)
        return '%02d:%02d:%02d' % (hours, minutes, seconds)

    def deformatTime(self, time):
        """Parse an "HH:MM:SS" string back into a second count."""
        timeInSecs = int(time[0:2]) * 3600
        timeInSecs += int(time[3:5]) * 60
        timeInSecs += int(time[6:8])
        return timeInSecs

    def incrememnt(self, timerStart, timerRunning, timerNumber, movieTime):
        # (Name keeps the historical misspelling: run() calls it by name.)
        global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
        if timerRunning:
            # New total = banked time + wall-clock seconds since this start.
            convertedTime = self.deformatTime(movieTime)
            timerTime = self.formatTime(int(time()) - int(timerStart) +
                                        convertedTime)
            # Publish into the matching module-level slot, then notify GUI.
            if timerNumber == 1:
                timer1Time = timerTime
            elif timerNumber == 2:
                timer2Time = timerTime
            elif timerNumber == 3:
                timer3Time = timerTime
            elif timerNumber == 4:
                timer4Time = timerTime
            elif timerNumber == 5:
                timer5Time = timerTime
            self.index_finished.emit(timerTime, self.textBrowser)
        else:
            timerStart = None
            # Bug fix: the signal signature is (str, QObject); the original
            # emitted a single argument here, which raises TypeError.
            self.index_finished.emit('none', self.textBrowser)
        return timerStart
class Ui_Form1(QtGui.QWidget):
def __init__(self):
    """Build the widget tree and, when a non-empty settings.ini exists in
    the working directory, restore the five saved movie times into the
    globals and their on-screen displays."""
    QtGui.QWidget.__init__(self)
    self.setupUi(self)
    global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
    settings_path = os.getcwd() + '\\settings.ini'
    if os.path.exists(settings_path) and os.path.getsize(settings_path) > 0:
        with open(settings_path, 'r') as settings_file:
            # Lines are saved in movie order; each display widget is the
            # one wired to that movie in setupUi.
            movie1Time = settings_file.readline().strip()
            self.updateGUITimers(movie1Time, self.textBrowser_6)
            movie2Time = settings_file.readline().strip()
            self.updateGUITimers(movie2Time, self.textBrowser_2)
            movie3Time = settings_file.readline().strip()
            self.updateGUITimers(movie3Time, self.textBrowser_5)
            movie4Time = settings_file.readline().strip()
            self.updateGUITimers(movie4Time, self.textBrowser_3)
            movie5Time = settings_file.readline().strip()
            self.updateGUITimers(movie5Time, self.textBrowser_4)
def setupUi(self, Form):
Form.setObjectName(_fromUtf8('Form'))
Form.resize(611, 289)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.
QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMinimumSize(QtCore.QSize(611, 289))
Form.setMaximumSize(QtCore.QSize(611, 289))
self.verticalLayoutWidget = QtGui.QWidget(Form)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 20, 61, 261))
self.verticalLayoutWidget.setObjectName(_fromUtf8(
'verticalLayoutWidget'))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
self.movieOne = QtGui.QLabel(self.verticalLayoutWidget)
self.movieOne.setObjectName(_fromUtf8('movieOne'))
self.verticalLayout.addWidget(self.movieOne)
self.movieTwo = QtGui.QLabel(self.verticalLayoutWidget)
self.movieTwo.setObjectName(_fromUtf8('movieTwo'))
self.verticalLayout.addWidget(self.movieTwo)
self.movieThree = QtGui.QLabel(self.verticalLayoutWidget)
self.movieThree.setObjectName(_fromUtf8('movieThree'))
self.verticalLayout.addWidget(self.movieThree)
self.movieFour = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFour.setObjectName(_fromUtf8('movieFour'))
self.verticalLayout.addWidget(self.movieFour)
self.movieFive = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFive.setObjectName(_fromUtf8('movieFive'))
self.verticalLayout.addWidget(self.movieFive)
self.DesignedBy = QtGui.QLabel(Form)
self.DesignedBy.setGeometry(QtCore.QRect(440, 40, 111, 31))
self.DesignedBy.setAlignment(QtCore.Qt.AlignCenter)
self.DesignedBy.setObjectName(_fromUtf8('DesignedBy'))
self.sourceAt = QtGui.QLabel(Form)
self.sourceAt.setGeometry(QtCore.QRect(440, 170, 111, 20))
self.sourceAt.setObjectName(_fromUtf8('sourceAt'))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(580, 270, 31, 16))
self.label.setObjectName(_fromUtf8('label'))
self.verticalLayoutWidget_2 = QtGui.QWidget(Form)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(210, 40, 101, 261)
)
self.verticalLayoutWidget_2.setObjectName(_fromUtf8(
'verticalLayoutWidget_2'))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8('verticalLayout_2'))
self.startTwo = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startTwo.setObjectName(_fromUtf8('startTwo'))
self.verticalLayout_2.addWidget(self.startTwo)
self.startOne = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startOne.setObjectName(_fromUtf8('startOne'))
self.verticalLayout_2.addWidget(self.startOne)
self.startThree = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startThree.setObjectName(_fromUtf8('startThree'))
self.verticalLayout_2.addWidget(self.startThree)
self.startFour = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFour.setObjectName(_fromUtf8('startFour'))
self.verticalLayout_2.addWidget(self.startFour)
self.startFive = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFive.setObjectName(_fromUtf8('startFive'))
self.verticalLayout_2.addWidget(self.startFive)
self.horizontalLayoutWidget = QtGui.QWidget(Form)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(400, 230, 160, 80)
)
self.horizontalLayoutWidget.setObjectName(_fromUtf8(
'horizontalLayoutWidget'))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
self.save = QtGui.QPushButton(self.horizontalLayoutWidget)
self.save.setObjectName(_fromUtf8('save'))
self.horizontalLayout.addWidget(self.save)
self.settings = QtGui.QPushButton(self.horizontalLayoutWidget)
self.settings.setObjectName(_fromUtf8('settings'))
self.horizontalLayout.addWidget(self.settings)
self.textBrowser_2 = QtGui.QTextBrowser(Form)
self.textBrowser_2.setGeometry(QtCore.QRect(90, 110, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.
QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_2.sizePolicy().
hasHeightForWidth())
self.textBrowser_2.setSizePolicy(sizePolicy)
self.textBrowser_2.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_2.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_2.setReadOnly(False)
self.textBrowser_2.setUndoRedoEnabled(True)
self.textBrowser_2.setObjectName(_fromUtf8('textBrowser_2'))
self.textBrowser_5 = QtGui.QTextBrowser(Form)
self.textBrowser_5.setGeometry(QtCore.QRect(90, 160, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_5.sizePolicy().
hasHeightForWidth())
self.textBrowser_5.setSizePolicy(sizePolicy)
self.textBrowser_5.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_5.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_5.setReadOnly(False)
self.textBrowser_5.setUndoRedoEnabled(True)
self.textBrowser_5.setObjectName(_fromUtf8('textBrowser_5'))
self.textBrowser_4 = QtGui.QTextBrowser(Form)
self.textBrowser_4.setGeometry(QtCore.QRect(90, 260, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_4.sizePolicy().
hasHeightForWidth())
self.textBrowser_4.setSizePolicy(sizePolicy)
self.textBrowser_4.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_4.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_4.setReadOnly(False)
self.textBrowser_4.setUndoRedoEnabled(True)
self.textBrowser_4.setObjectName(_fromUtf8('textBrowser_4'))
self.textBrowser_3 = QtGui.QTextBrowser(Form)
self.textBrowser_3.setGeometry(QtCore.QRect(90, 210, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.
QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_3.sizePolicy().
hasHeightForWidth())
self.textBrowser_3.setSizePolicy(sizePolicy)
self.textBrowser_3.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_3.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_3.setReadOnly(False)
self.textBrowser_3.setUndoRedoEnabled(True)
self.textBrowser_3.setObjectName(_fromUtf8('textBrowser_3'))
self.textBrowser_6 = QtGui.QTextBrowser(Form)
self.textBrowser_6.setGeometry(QtCore.QRect(90, 60, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.
QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_6.sizePolicy().
hasHeightForWidth())
self.textBrowser_6.setSizePolicy(sizePolicy)
self.textBrowser_6.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_6.setVerticalScrollBarPolicy(QtCore.Qt.
ScrollBarAlwaysOff)
self.textBrowser_6.setReadOnly(False)
self.textBrowser_6.setUndoRedoEnabled(True)
self.textBrowser_6.setObjectName(_fromUtf8('textBrowser_6'))
self.line = QtGui.QFrame(Form)
self.line.setGeometry(QtCore.QRect(340, 50, 20, 211))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8('line'))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(430, 190, 151, 20))
self.label_2.setOpenExternalLinks(True)
self.label_2.setObjectName(_fromUtf8('label_2'))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(420, 80, 161, 91))
self.label_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_3.setText(_fromUtf8(''))
self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8('logo.jpg')))
self.label_3.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop |
QtCore.Qt.AlignTrailing)
self.label_3.setObjectName(_fromUtf8('label_3'))
self.retranslateUi(Form)
QtCore.QObject.connect(self.textBrowser_6, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie1)
QtCore.QObject.connect(self.textBrowser_2, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie2)
QtCore.QObject.connect(self.textBrowser_5, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie3)
QtCore.QObject.connect(self.textBrowser_3, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie4)
QtCore.QObject.connect(self.textBrowser_4, QtCore.SIGNAL(_fromUtf8(
'textChanged()')), Form.changeMovie5)
QtCore.QObject.connect(self.startTwo, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer1State)
QtCore.QObject.connect(self.startOne, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer2State)
QtCore.QObject.connect(self.startThree, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer3State)
QtCore.QObject.connect(self.startFour, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer4State)
QtCore.QObject.connect(self.startFive, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.changeTimer5State)
QtCore.QObject.connect(self.save, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.saveChanges)
QtCore.QObject.connect(self.settings, QtCore.SIGNAL(_fromUtf8(
'pressed()')), Form.reset)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate('Form', 'Multiple Movie Timer', None))
self.movieOne.setText(_translate('Form', 'Movie 1', None))
self.movieTwo.setText(_translate('Form', 'Movie 2', None))
self.movieThree.setText(_translate('Form', 'Movie 3', None))
self.movieFour.setText(_translate('Form', 'Movie 4', None))
self.movieFive.setText(_translate('Form', 'Movie 5', None))
self.DesignedBy.setText(_translate('Form',
'This program was\ndesigned by:', None))
self.sourceAt.setText(_translate('Form', ' Source is available at:',
None))
self.label.setText(_translate('Form', 'V 1.2', None))
self.startTwo.setText(_translate('Form', 'Start / Stop', None))
self.startOne.setText(_translate('Form', 'Start / Stop', None))
self.startThree.setText(_translate('Form', 'Start / Stop', None))
self.startFour.setText(_translate('Form', 'Start / Stop', None))
self.startFive.setText(_translate('Form', 'Start / Stop', None))
self.save.setToolTip(_translate('Form',
'<html><head/><body><p>Save all the current times</p></body></html>'
, None))
self.save.setText(_translate('Form', 'Save', None))
self.settings.setText(_translate('Form', 'Reset timers', None))
self.textBrowser_2.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_5.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_4.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_3.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.textBrowser_6.setHtml(_translate('Form',
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
, None))
self.label_2.setText(_translate('Form',
'<html><head/><body><p><a href="https://github.com/tmwbook/small-projects/tree/Master/MultipleMovieTimer"><span style=" text-decoration: underline; color:#0000ff;">https://github.com/tmwbook</span></a></p></body></html>'
, None))
def changeMovie1(self):
    # Slot for textBrowser_6's textChanged() signal (movie 1's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie2(self):
    # Slot for textBrowser_2's textChanged() signal (movie 2's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie3(self):
    # Slot for textBrowser_5's textChanged() signal (movie 3's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie4(self):
    # Slot for textBrowser_3's textChanged() signal (movie 4's time box).
    # Intentionally a no-op placeholder.
    pass
def changeMovie5(self):
    # Slot for textBrowser_4's textChanged() signal (movie 5's time box).
    # Intentionally a no-op placeholder.
    pass
def changeTimer1State(self):
global movie1Time, timer1Running, timer1Start, timer1Time
if not timer1Running:
timer1Running = True
timer1Start = time()
self.thread1 = TimerBackground(timer1Start, timer1Running, 1,
movie1Time, self.textBrowser_6)
self.thread1.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer1Running:
self.thread1.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer1Running:
timer1Running = False
movie1Time = timer1Time
def changeTimer2State(self):
global movie2Time, timer2Running, timer2Start, timer2Time
if not timer2Running:
timer2Running = True
timer2Start = time()
self.thread2 = TimerBackground(timer2Start, timer2Running, 2,
movie2Time, self.textBrowser_2)
self.thread2.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer2Running:
self.thread2.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer2Running:
timer2Running = False
movie2Time = timer2Time
def changeTimer3State(self):
global movie3Time, timer3Running, timer3Start, timer3Time
if not timer3Running:
timer3Running = True
timer3Start = time()
self.thread3 = TimerBackground(timer3Start, timer3Running, 3,
movie3Time, self.textBrowser_5)
self.thread3.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer3Running:
self.thread3.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer3Running:
timer3Running = False
movie3Time = timer3Time
def changeTimer4State(self):
global movie4Time, timer4Running, timer4Start, timer4Time
if not timer4Running:
timer4Running = True
timer4Start = time()
self.thread4 = TimerBackground(timer4Start, timer4Running, 4,
movie4Time, self.textBrowser_3)
self.thread4.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer4Running:
self.thread4.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer4Running:
timer4Running = False
movie4Time = timer4Time
def changeTimer5State(self):
global movie5Time, timer5Running, timer5Start, timer5Time
if not timer5Running:
timer5Running = True
timer5Start = time()
self.thread5 = TimerBackground(timer5Start, timer5Running, 5,
movie5Time, self.textBrowser_4)
self.thread5.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer5Running:
self.thread5.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer5Running:
timer5Running = False
movie5Time = timer5Time
def reset(self):
    """Reset every timer display and all saved/running time globals to zero.

    Bug fix: the original's last assignment wrote to a misspelled name
    (``timer5time``), so timer 5's running total was never actually reset.
    """
    global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
    global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
    timerStartingValue = '00:00:00'
    # Blank all five on-screen displays.
    for browser in (self.textBrowser_2, self.textBrowser_3,
                    self.textBrowser_4, self.textBrowser_5,
                    self.textBrowser_6):
        self.updateGUITimers(timerStartingValue, browser)
    movie1Time = movie2Time = movie3Time = movie4Time = movie5Time = \
        timerStartingValue
    timer1Time = timer2Time = timer3Time = timer4Time = timer5Time = \
        timerStartingValue
def saveChanges(self):
    """Persist the five accumulated movie times, one per line, to
    settings.ini in the current working directory."""
    settings_path = os.getcwd() + '\\settings.ini'
    times = (movie1Time, movie2Time, movie3Time, movie4Time, movie5Time)
    with open(settings_path, 'w') as settings_file:
        settings_file.writelines(entry + '\n' for entry in times)
def updateGUITimers(self, time, textBrowser):
    """Slot: render *time* ('HH:MM:SS') into *textBrowser*.

    The sentinel string 'none' means "nothing to show" and leaves the
    widget untouched.
    """
    if time == 'none':
        return
    document = (
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">"""
        + str(time) + '</span></p></body></html>')
    textBrowser.setHtml(_translate('Form', document, None))
<mask token>
| <mask token>
class TimerBackground(QtCore.QThread):
    """Worker thread that recomputes one movie timer's elapsed time.

    Each tick the GUI constructs one of these and calls start(); run()
    computes the new total and publishes it through ``index_finished``
    as a (formatted "HH:MM:SS" string, target QTextBrowser) pair.
    """

    # Signal payload: formatted "HH:MM:SS" string + the browser to repaint.
    index_finished = QtCore.pyqtSignal([str, QtCore.QObject])

    def __init__(self, timerStart, timerRunning, timerNumber, movieTime,
                 textBrowser, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.timerStart = timerStart      # time() stamp when this run began
        self.timerRunning = timerRunning  # snapshot of the running flag
        self.timerNumber = timerNumber    # which of the five timers (1..5)
        self.textBrowser = textBrowser    # widget the GUI slot should repaint
        self.movieTime = movieTime        # previously banked "HH:MM:SS" total

    def run(self):
        self.incrememnt(self.timerStart, self.timerRunning,
                        self.timerNumber, self.movieTime)

    def formatTime(self, time):
        """Format a second count as zero-padded "HH:MM:SS".

        Bug fix: the original dropped the ':' separator whenever the hour
        or minute field was two digits (e.g. 600 s rendered as "00:1000"),
        which also broke deformatTime's fixed-position parsing.
        """
        hours, remainder = divmod(int(time), 3600)
        minutes, seconds = divmod(remainder, 60)
        return '%02d:%02d:%02d' % (hours, minutes, seconds)

    def deformatTime(self, time):
        """Parse an "HH:MM:SS" string back into a second count."""
        timeInSecs = int(time[0:2]) * 3600
        timeInSecs += int(time[3:5]) * 60
        timeInSecs += int(time[6:8])
        return timeInSecs

    def incrememnt(self, timerStart, timerRunning, timerNumber, movieTime):
        # (Name keeps the historical misspelling: run() calls it by name.)
        global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
        if timerRunning:
            # New total = banked time + wall-clock seconds since this start.
            convertedTime = self.deformatTime(movieTime)
            timerTime = self.formatTime(int(time()) - int(timerStart) +
                                        convertedTime)
            # Publish into the matching module-level slot, then notify GUI.
            if timerNumber == 1:
                timer1Time = timerTime
            elif timerNumber == 2:
                timer2Time = timerTime
            elif timerNumber == 3:
                timer3Time = timerTime
            elif timerNumber == 4:
                timer4Time = timerTime
            elif timerNumber == 5:
                timer5Time = timerTime
            self.index_finished.emit(timerTime, self.textBrowser)
        else:
            timerStart = None
            # Bug fix: the signal signature is (str, QObject); the original
            # emitted a single argument here, which raises TypeError.
            self.index_finished.emit('none', self.textBrowser)
        return timerStart
class Ui_Form1(QtGui.QWidget):
def __init__(self):
    """Build the widget tree and, when a non-empty settings.ini exists in
    the working directory, restore the five saved movie times into the
    globals and their on-screen displays."""
    QtGui.QWidget.__init__(self)
    self.setupUi(self)
    global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
    settings_path = os.getcwd() + '\\settings.ini'
    if os.path.exists(settings_path) and os.path.getsize(settings_path) > 0:
        with open(settings_path, 'r') as settings_file:
            # Lines are saved in movie order; each display widget is the
            # one wired to that movie in setupUi.
            movie1Time = settings_file.readline().strip()
            self.updateGUITimers(movie1Time, self.textBrowser_6)
            movie2Time = settings_file.readline().strip()
            self.updateGUITimers(movie2Time, self.textBrowser_2)
            movie3Time = settings_file.readline().strip()
            self.updateGUITimers(movie3Time, self.textBrowser_5)
            movie4Time = settings_file.readline().strip()
            self.updateGUITimers(movie4Time, self.textBrowser_3)
            movie5Time = settings_file.readline().strip()
            self.updateGUITimers(movie5Time, self.textBrowser_4)
def setupUi(self, Form):
    """Qt-Designer-generated layout: build every widget and wire the signals.

    Creates the five label/button/read-out rows, the save/reset buttons,
    and connects each widget to its Ui_Form1 slot.  Do not hand-edit the
    geometry values; they come from main.ui.
    """
    Form.setObjectName(_fromUtf8('Form'))
    Form.resize(611, 289)
    # Fixed-size window: min and max both pinned to 611x289.
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred,
        QtGui.QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
    Form.setSizePolicy(sizePolicy)
    Form.setMinimumSize(QtCore.QSize(611, 289))
    Form.setMaximumSize(QtCore.QSize(611, 289))
    # Left column: the five "Movie N" labels.
    self.verticalLayoutWidget = QtGui.QWidget(Form)
    self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 20, 61, 261))
    self.verticalLayoutWidget.setObjectName(_fromUtf8(
        'verticalLayoutWidget'))
    self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
    self.verticalLayout.setMargin(0)
    self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
    self.movieOne = QtGui.QLabel(self.verticalLayoutWidget)
    self.movieOne.setObjectName(_fromUtf8('movieOne'))
    self.verticalLayout.addWidget(self.movieOne)
    self.movieTwo = QtGui.QLabel(self.verticalLayoutWidget)
    self.movieTwo.setObjectName(_fromUtf8('movieTwo'))
    self.verticalLayout.addWidget(self.movieTwo)
    self.movieThree = QtGui.QLabel(self.verticalLayoutWidget)
    self.movieThree.setObjectName(_fromUtf8('movieThree'))
    self.verticalLayout.addWidget(self.movieThree)
    self.movieFour = QtGui.QLabel(self.verticalLayoutWidget)
    self.movieFour.setObjectName(_fromUtf8('movieFour'))
    self.verticalLayout.addWidget(self.movieFour)
    self.movieFive = QtGui.QLabel(self.verticalLayoutWidget)
    self.movieFive.setObjectName(_fromUtf8('movieFive'))
    self.verticalLayout.addWidget(self.movieFive)
    # Right-hand credit / link labels.
    self.DesignedBy = QtGui.QLabel(Form)
    self.DesignedBy.setGeometry(QtCore.QRect(440, 40, 111, 31))
    self.DesignedBy.setAlignment(QtCore.Qt.AlignCenter)
    self.DesignedBy.setObjectName(_fromUtf8('DesignedBy'))
    self.sourceAt = QtGui.QLabel(Form)
    self.sourceAt.setGeometry(QtCore.QRect(440, 170, 111, 20))
    self.sourceAt.setObjectName(_fromUtf8('sourceAt'))
    self.label = QtGui.QLabel(Form)
    self.label.setGeometry(QtCore.QRect(580, 270, 31, 16))
    self.label.setObjectName(_fromUtf8('label'))
    # Middle column: the five Start/Stop push buttons.
    self.verticalLayoutWidget_2 = QtGui.QWidget(Form)
    self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(210, 40, 101, 261))
    self.verticalLayoutWidget_2.setObjectName(_fromUtf8(
        'verticalLayoutWidget_2'))
    self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
    self.verticalLayout_2.setMargin(0)
    self.verticalLayout_2.setObjectName(_fromUtf8('verticalLayout_2'))
    self.startTwo = QtGui.QPushButton(self.verticalLayoutWidget_2)
    self.startTwo.setObjectName(_fromUtf8('startTwo'))
    self.verticalLayout_2.addWidget(self.startTwo)
    self.startOne = QtGui.QPushButton(self.verticalLayoutWidget_2)
    self.startOne.setObjectName(_fromUtf8('startOne'))
    self.verticalLayout_2.addWidget(self.startOne)
    self.startThree = QtGui.QPushButton(self.verticalLayoutWidget_2)
    self.startThree.setObjectName(_fromUtf8('startThree'))
    self.verticalLayout_2.addWidget(self.startThree)
    self.startFour = QtGui.QPushButton(self.verticalLayoutWidget_2)
    self.startFour.setObjectName(_fromUtf8('startFour'))
    self.verticalLayout_2.addWidget(self.startFour)
    self.startFive = QtGui.QPushButton(self.verticalLayoutWidget_2)
    self.startFive.setObjectName(_fromUtf8('startFive'))
    self.verticalLayout_2.addWidget(self.startFive)
    # Bottom-right row: Save / Reset buttons.
    self.horizontalLayoutWidget = QtGui.QWidget(Form)
    self.horizontalLayoutWidget.setGeometry(QtCore.QRect(400, 230, 160, 80))
    self.horizontalLayoutWidget.setObjectName(_fromUtf8(
        'horizontalLayoutWidget'))
    self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
    self.horizontalLayout.setMargin(0)
    self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
    self.save = QtGui.QPushButton(self.horizontalLayoutWidget)
    self.save.setObjectName(_fromUtf8('save'))
    self.horizontalLayout.addWidget(self.save)
    self.settings = QtGui.QPushButton(self.horizontalLayoutWidget)
    self.settings.setObjectName(_fromUtf8('settings'))
    self.horizontalLayout.addWidget(self.settings)
    # The five editable time read-outs (textBrowser_6/_2/_5/_3/_4 are, in
    # that order, movies 1 through 5 -- see the connect() calls below).
    self.textBrowser_2 = QtGui.QTextBrowser(Form)
    self.textBrowser_2.setGeometry(QtCore.QRect(90, 110, 113, 21))
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum,
        QtGui.QSizePolicy.Minimum)
    sizePolicy.setHorizontalStretch(113)
    sizePolicy.setVerticalStretch(20)
    sizePolicy.setHeightForWidth(self.textBrowser_2.sizePolicy().
        hasHeightForWidth())
    self.textBrowser_2.setSizePolicy(sizePolicy)
    self.textBrowser_2.setMinimumSize(QtCore.QSize(113, 20))
    self.textBrowser_2.setVerticalScrollBarPolicy(QtCore.Qt.
        ScrollBarAlwaysOff)
    self.textBrowser_2.setReadOnly(False)
    self.textBrowser_2.setUndoRedoEnabled(True)
    self.textBrowser_2.setObjectName(_fromUtf8('textBrowser_2'))
    self.textBrowser_5 = QtGui.QTextBrowser(Form)
    self.textBrowser_5.setGeometry(QtCore.QRect(90, 160, 113, 21))
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
        QtGui.QSizePolicy.Expanding)
    sizePolicy.setHorizontalStretch(113)
    sizePolicy.setVerticalStretch(20)
    sizePolicy.setHeightForWidth(self.textBrowser_5.sizePolicy().
        hasHeightForWidth())
    self.textBrowser_5.setSizePolicy(sizePolicy)
    self.textBrowser_5.setMinimumSize(QtCore.QSize(113, 20))
    self.textBrowser_5.setVerticalScrollBarPolicy(QtCore.Qt.
        ScrollBarAlwaysOff)
    self.textBrowser_5.setReadOnly(False)
    self.textBrowser_5.setUndoRedoEnabled(True)
    self.textBrowser_5.setObjectName(_fromUtf8('textBrowser_5'))
    self.textBrowser_4 = QtGui.QTextBrowser(Form)
    self.textBrowser_4.setGeometry(QtCore.QRect(90, 260, 113, 21))
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
        QtGui.QSizePolicy.Expanding)
    sizePolicy.setHorizontalStretch(113)
    sizePolicy.setVerticalStretch(20)
    sizePolicy.setHeightForWidth(self.textBrowser_4.sizePolicy().
        hasHeightForWidth())
    self.textBrowser_4.setSizePolicy(sizePolicy)
    self.textBrowser_4.setMinimumSize(QtCore.QSize(113, 20))
    self.textBrowser_4.setVerticalScrollBarPolicy(QtCore.Qt.
        ScrollBarAlwaysOff)
    self.textBrowser_4.setReadOnly(False)
    self.textBrowser_4.setUndoRedoEnabled(True)
    self.textBrowser_4.setObjectName(_fromUtf8('textBrowser_4'))
    self.textBrowser_3 = QtGui.QTextBrowser(Form)
    self.textBrowser_3.setGeometry(QtCore.QRect(90, 210, 113, 21))
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
        QtGui.QSizePolicy.Expanding)
    sizePolicy.setHorizontalStretch(113)
    sizePolicy.setVerticalStretch(20)
    sizePolicy.setHeightForWidth(self.textBrowser_3.sizePolicy().
        hasHeightForWidth())
    self.textBrowser_3.setSizePolicy(sizePolicy)
    self.textBrowser_3.setMinimumSize(QtCore.QSize(113, 20))
    self.textBrowser_3.setVerticalScrollBarPolicy(QtCore.Qt.
        ScrollBarAlwaysOff)
    self.textBrowser_3.setReadOnly(False)
    self.textBrowser_3.setUndoRedoEnabled(True)
    self.textBrowser_3.setObjectName(_fromUtf8('textBrowser_3'))
    self.textBrowser_6 = QtGui.QTextBrowser(Form)
    self.textBrowser_6.setGeometry(QtCore.QRect(90, 60, 113, 21))
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum,
        QtGui.QSizePolicy.Minimum)
    sizePolicy.setHorizontalStretch(113)
    sizePolicy.setVerticalStretch(20)
    sizePolicy.setHeightForWidth(self.textBrowser_6.sizePolicy().
        hasHeightForWidth())
    self.textBrowser_6.setSizePolicy(sizePolicy)
    self.textBrowser_6.setMinimumSize(QtCore.QSize(113, 20))
    self.textBrowser_6.setVerticalScrollBarPolicy(QtCore.Qt.
        ScrollBarAlwaysOff)
    self.textBrowser_6.setReadOnly(False)
    self.textBrowser_6.setUndoRedoEnabled(True)
    self.textBrowser_6.setObjectName(_fromUtf8('textBrowser_6'))
    # Decorative separator and logo / link labels.
    self.line = QtGui.QFrame(Form)
    self.line.setGeometry(QtCore.QRect(340, 50, 20, 211))
    self.line.setFrameShape(QtGui.QFrame.VLine)
    self.line.setFrameShadow(QtGui.QFrame.Sunken)
    self.line.setObjectName(_fromUtf8('line'))
    self.label_2 = QtGui.QLabel(Form)
    self.label_2.setGeometry(QtCore.QRect(430, 190, 151, 20))
    self.label_2.setOpenExternalLinks(True)
    self.label_2.setObjectName(_fromUtf8('label_2'))
    self.label_3 = QtGui.QLabel(Form)
    self.label_3.setGeometry(QtCore.QRect(420, 80, 161, 91))
    self.label_3.setLayoutDirection(QtCore.Qt.LeftToRight)
    self.label_3.setText(_fromUtf8(''))
    self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8('logo.jpg')))
    self.label_3.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop |
        QtCore.Qt.AlignTrailing)
    self.label_3.setObjectName(_fromUtf8('label_3'))
    self.retranslateUi(Form)
    # Signal wiring.  NOTE(review): startTwo drives changeTimer1State and
    # startOne drives changeTimer2State -- this matches the generated .ui
    # but looks swapped; confirm the on-screen button order is intended.
    QtCore.QObject.connect(self.textBrowser_6, QtCore.SIGNAL(_fromUtf8(
        'textChanged()')), Form.changeMovie1)
    QtCore.QObject.connect(self.textBrowser_2, QtCore.SIGNAL(_fromUtf8(
        'textChanged()')), Form.changeMovie2)
    QtCore.QObject.connect(self.textBrowser_5, QtCore.SIGNAL(_fromUtf8(
        'textChanged()')), Form.changeMovie3)
    QtCore.QObject.connect(self.textBrowser_3, QtCore.SIGNAL(_fromUtf8(
        'textChanged()')), Form.changeMovie4)
    QtCore.QObject.connect(self.textBrowser_4, QtCore.SIGNAL(_fromUtf8(
        'textChanged()')), Form.changeMovie5)
    QtCore.QObject.connect(self.startTwo, QtCore.SIGNAL(_fromUtf8(
        'pressed()')), Form.changeTimer1State)
    QtCore.QObject.connect(self.startOne, QtCore.SIGNAL(_fromUtf8(
        'pressed()')), Form.changeTimer2State)
    QtCore.QObject.connect(self.startThree, QtCore.SIGNAL(_fromUtf8(
        'pressed()')), Form.changeTimer3State)
    QtCore.QObject.connect(self.startFour, QtCore.SIGNAL(_fromUtf8(
        'pressed()')), Form.changeTimer4State)
    QtCore.QObject.connect(self.startFive, QtCore.SIGNAL(_fromUtf8(
        'pressed()')), Form.changeTimer5State)
    QtCore.QObject.connect(self.save, QtCore.SIGNAL(_fromUtf8(
        'pressed()')), Form.saveChanges)
    QtCore.QObject.connect(self.settings, QtCore.SIGNAL(_fromUtf8(
        'pressed()')), Form.reset)
    QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
    """Apply every user-visible string, routed through the _translate shim.

    The setHtml payloads are Qt-Designer-generated rich text and must stay
    byte-for-byte intact; only the embedded 00:00:00 digits are meaningful.
    """
    Form.setWindowTitle(_translate('Form', 'Multiple Movie Timer', None))
    self.movieOne.setText(_translate('Form', 'Movie 1', None))
    self.movieTwo.setText(_translate('Form', 'Movie 2', None))
    self.movieThree.setText(_translate('Form', 'Movie 3', None))
    self.movieFour.setText(_translate('Form', 'Movie 4', None))
    self.movieFive.setText(_translate('Form', 'Movie 5', None))
    self.DesignedBy.setText(_translate('Form',
        'This program was\ndesigned by:', None))
    self.sourceAt.setText(_translate('Form', ' Source is available at:',
        None))
    self.label.setText(_translate('Form', 'V 1.2', None))
    self.startTwo.setText(_translate('Form', 'Start / Stop', None))
    self.startOne.setText(_translate('Form', 'Start / Stop', None))
    self.startThree.setText(_translate('Form', 'Start / Stop', None))
    self.startFour.setText(_translate('Form', 'Start / Stop', None))
    self.startFive.setText(_translate('Form', 'Start / Stop', None))
    self.save.setToolTip(_translate('Form',
        '<html><head/><body><p>Save all the current times</p></body></html>'
        , None))
    self.save.setText(_translate('Form', 'Save', None))
    self.settings.setText(_translate('Form', 'Reset timers', None))
    self.textBrowser_2.setHtml(_translate('Form',
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
        , None))
    self.textBrowser_5.setHtml(_translate('Form',
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
        , None))
    self.textBrowser_4.setHtml(_translate('Form',
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
        , None))
    self.textBrowser_3.setHtml(_translate('Form',
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
        , None))
    self.textBrowser_6.setHtml(_translate('Form',
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">00:00:00</span></p></body></html>"""
        , None))
    self.label_2.setText(_translate('Form',
        '<html><head/><body><p><a href="https://github.com/tmwbook/small-projects/tree/Master/MultipleMovieTimer"><span style=" text-decoration: underline; color:#0000ff;">https://github.com/tmwbook</span></a></p></body></html>'
        , None))
def changeMovie1(self):
    # Slot wired to textBrowser_6's textChanged() in setupUi();
    # intentionally a no-op placeholder.
    pass
def changeMovie2(self):
    # Slot wired to textBrowser_2's textChanged() in setupUi();
    # intentionally a no-op placeholder.
    pass
def changeMovie3(self):
    # Slot wired to textBrowser_5's textChanged() in setupUi();
    # intentionally a no-op placeholder.
    pass
def changeMovie4(self):
    # Slot wired to textBrowser_3's textChanged() in setupUi();
    # intentionally a no-op placeholder.
    pass
def changeMovie5(self):
    # Slot wired to textBrowser_4's textChanged() in setupUi();
    # intentionally a no-op placeholder.
    pass
def changeTimer1State(self):
    """Toggle timer 1: start counting from the saved movie1Time, or stop
    and fold the elapsed value back into movie1Time."""
    global movie1Time, timer1Running, timer1Start, timer1Time
    if not timer1Running:
        timer1Running = True
        timer1Start = time()
        self.thread1 = TimerBackground(timer1Start, timer1Running, 1,
            movie1Time, self.textBrowser_6)
        self.thread1.index_finished.connect(self.updateGUITimers)

        # Re-arm the worker once a second until the global flag goes False.
        def loopThread():
            if timer1Running:
                self.thread1.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer1Running:
        timer1Running = False
        movie1Time = timer1Time
def changeTimer2State(self):
    """Toggle timer 2: start counting from the saved movie2Time, or stop
    and fold the elapsed value back into movie2Time."""
    global movie2Time, timer2Running, timer2Start, timer2Time
    if not timer2Running:
        timer2Running = True
        timer2Start = time()
        self.thread2 = TimerBackground(timer2Start, timer2Running, 2,
            movie2Time, self.textBrowser_2)
        self.thread2.index_finished.connect(self.updateGUITimers)

        # Re-arm the worker once a second until the global flag goes False.
        def loopThread():
            if timer2Running:
                self.thread2.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer2Running:
        timer2Running = False
        movie2Time = timer2Time
def changeTimer3State(self):
    """Toggle timer 3: start counting from the saved movie3Time, or stop
    and fold the elapsed value back into movie3Time."""
    global movie3Time, timer3Running, timer3Start, timer3Time
    if not timer3Running:
        timer3Running = True
        timer3Start = time()
        self.thread3 = TimerBackground(timer3Start, timer3Running, 3,
            movie3Time, self.textBrowser_5)
        self.thread3.index_finished.connect(self.updateGUITimers)

        # Re-arm the worker once a second until the global flag goes False.
        def loopThread():
            if timer3Running:
                self.thread3.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer3Running:
        timer3Running = False
        movie3Time = timer3Time
def changeTimer4State(self):
    """Toggle timer 4: start counting from the saved movie4Time, or stop
    and fold the elapsed value back into movie4Time."""
    global movie4Time, timer4Running, timer4Start, timer4Time
    if not timer4Running:
        timer4Running = True
        timer4Start = time()
        self.thread4 = TimerBackground(timer4Start, timer4Running, 4,
            movie4Time, self.textBrowser_3)
        self.thread4.index_finished.connect(self.updateGUITimers)

        # Re-arm the worker once a second until the global flag goes False.
        def loopThread():
            if timer4Running:
                self.thread4.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer4Running:
        timer4Running = False
        movie4Time = timer4Time
def changeTimer5State(self):
    """Toggle timer 5: start counting from the saved movie5Time, or stop
    and fold the elapsed value back into movie5Time."""
    global movie5Time, timer5Running, timer5Start, timer5Time
    if not timer5Running:
        timer5Running = True
        timer5Start = time()
        self.thread5 = TimerBackground(timer5Start, timer5Running, 5,
            movie5Time, self.textBrowser_4)
        self.thread5.index_finished.connect(self.updateGUITimers)

        # Re-arm the worker once a second until the global flag goes False.
        def loopThread():
            if timer5Running:
                self.thread5.start()
                threading.Timer(1, loopThread).start()
        loopThread()
    elif timer5Running:
        timer5Running = False
        movie5Time = timer5Time
def reset(self):
    """Reset every displayed and stored timer value to 00:00:00."""
    global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
    global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
    timerStartingValue = '00:00:00'
    for browser in (self.textBrowser_2, self.textBrowser_3,
                    self.textBrowser_4, self.textBrowser_5,
                    self.textBrowser_6):
        self.updateGUITimers(timerStartingValue, browser)
    movie1Time = movie2Time = movie3Time = movie4Time = movie5Time = \
        timerStartingValue
    # BUG FIX: the original assigned 'timer5time' (lowercase t), creating a
    # throwaway local and leaving the real timer5Time global un-reset.
    timer1Time = timer2Time = timer3Time = timer4Time = timer5Time = \
        timerStartingValue
def saveChanges(self):
    """Persist the five movie times to settings.ini, one 'hh:mm:ss' per line.

    Line order matches what __init__ reads back (movie 1 through movie 5).
    """
    # os.path.join instead of the original cwd + '\\settings.ini' so the
    # path is also correct on non-Windows platforms.
    settingsPath = os.path.join(os.getcwd(), 'settings.ini')
    with open(settingsPath, 'w') as var:
        for movieTime in (movie1Time, movie2Time, movie3Time,
                          movie4Time, movie5Time):
            var.write(movieTime + '\n')
def updateGUITimers(self, time, textBrowser):
    """Slot: push a freshly formatted time string into its QTextBrowser.

    The worker thread uses the sentinel string 'none' when a timer is no
    longer running; that case is deliberately ignored here.
    """
    if time != 'none':
        # Designer-generated rich-text wrapper; only the time digits vary.
        textBrowser.setHtml(_translate('Form',
            """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;">
<p align="right" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">"""
            + str(time) + '</span></p></body></html>', None))
<mask token>
| from time import time
import threading
import os
#hh:mm:ss
# Last saved/displayed accumulated watch time for each of the five movies.
movie1Time = "00:00:00"
movie2Time = "00:00:00"
movie3Time = "00:00:00"
movie4Time = "00:00:00"
movie5Time = "00:00:00"
# Per-timer runtime state:
#   timerNStart   - epoch seconds when the current run began (None = never run)
#   timerNTime    - live formatted value while the timer is counting
#   timerNRunning - whether timer N is currently counting
timer1Start = None
timer1Time = "00:00:00"
timer1Running = False
timer2Start = None
timer2Time = "00:00:00"
timer2Running = False
timer3Start = None
timer3Time = "00:00:00"
timer3Running = False
timer4Start = None
timer4Time = "00:00:00"
timer4Running = False
timer5Start = None
timer5Time = "00:00:00"
timer5Running = False
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created: Wed May 21 20:35:02 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import sys
# PyQt4 API-level compatibility shims: some bindings lack QString and the
# UnicodeUTF8 encoding flag, so fall back to plain-string equivalents.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class TimerBackground(QtCore.QThread):
    """Worker thread: recompute one timer's elapsed time and notify the GUI.

    One instance exists per running timer; Ui_Form1 restarts it once a
    second.  Results reach the GUI thread via the ``index_finished``
    signal instead of touching widgets directly.
    """

    # Payload: (formatted 'hh:mm:ss' string, target QTextBrowser widget).
    index_finished = QtCore.pyqtSignal([str, QtCore.QObject])

    def __init__(self, timerStart, timerRunning, timerNumber, movieTime,
                 textBrowser, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.timerStart = timerStart      # epoch seconds when this run began
        self.timerRunning = timerRunning  # snapshot of the running flag
        self.timerNumber = timerNumber    # which of the five timers (1-5)
        self.textBrowser = textBrowser    # widget displaying this timer
        self.movieTime = movieTime        # previously accumulated 'hh:mm:ss'

    def run(self):
        self.incrememnt(self.timerStart, self.timerRunning,
                        self.timerNumber, self.movieTime)

    def formatTime(self, time):
        """Render *time* (a number of seconds) as an 'hh:mm:ss' string.

        BUG FIX: the original hand-rolled string assembly dropped the ':'
        separator whenever hours or minutes reached two digits (36000 s
        rendered as '1000:00', 605 s as '00:1005') and relied on Python 2
        integer division.  divmod with zero-padded formatting covers every
        case and works on both Python 2 and 3.
        """
        hours, remainder = divmod(int(time), 3600)
        minutes, seconds = divmod(remainder, 60)
        return '%02d:%02d:%02d' % (hours, minutes, seconds)

    def deformatTime(self, time):
        """Convert an 'hh:mm:ss' display string back into total seconds."""
        timeInSecs = 0
        timeInSecs += int(time[0:2]) * 3600  # hours
        timeInSecs += int(time[3:5]) * 60  # minutes
        timeInSecs += int(time[6:8])  # seconds
        return timeInSecs

    def incrememnt(self, timerStart, timerRunning, timerNumber, movieTime):
        """Update the timerNTime global for *timerNumber* and emit the value.

        (Misspelled name kept: ``run()`` calls it by this name.)  Returns
        *timerStart* while running, or None once stopped.
        """
        global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
        if timerRunning:
            convertedTime = self.deformatTime(movieTime)
            timerTime = self.formatTime(int(time()) - int(timerStart) +
                                        convertedTime)
            if timerNumber == 1:
                timer1Time = timerTime
            elif timerNumber == 2:
                timer2Time = timerTime
            elif timerNumber == 3:
                timer3Time = timerTime
            elif timerNumber == 4:
                timer4Time = timerTime
            elif timerNumber == 5:
                timer5Time = timerTime
            if 1 <= timerNumber <= 5:
                self.index_finished.emit(timerTime, self.textBrowser)
        else:
            timerStart = None
            # BUG FIX: the signal carries (str, QObject); the original
            # single-argument emit('none') raised TypeError.
            # updateGUITimers() ignores the 'none' sentinel.
            self.index_finished.emit('none', self.textBrowser)
        return timerStart
class Ui_Form1(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setupUi(self)
if os.path.exists(os.getcwd() + '\\settings.ini') and os.path.getsize(os.getcwd() + '\\settings.ini') > 0:
with open(os.getcwd() + '\\settings.ini', 'r') as var:
global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
movie1Time = var.readline().strip()
self.updateGUITimers(movie1Time, self.textBrowser_6)
movie2Time = var.readline().strip()
self.updateGUITimers(movie2Time, self.textBrowser_2)
movie3Time = var.readline().strip()
self.updateGUITimers(movie3Time, self.textBrowser_5)
movie4Time = var.readline().strip()
self.updateGUITimers(movie4Time, self.textBrowser_3)
movie5Time = var.readline().strip()
self.updateGUITimers(movie5Time, self.textBrowser_4)
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(611, 289)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMinimumSize(QtCore.QSize(611, 289))
Form.setMaximumSize(QtCore.QSize(611, 289))
self.verticalLayoutWidget = QtGui.QWidget(Form)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 20, 61, 261))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.movieOne = QtGui.QLabel(self.verticalLayoutWidget)
self.movieOne.setObjectName(_fromUtf8("movieOne"))
self.verticalLayout.addWidget(self.movieOne)
self.movieTwo = QtGui.QLabel(self.verticalLayoutWidget)
self.movieTwo.setObjectName(_fromUtf8("movieTwo"))
self.verticalLayout.addWidget(self.movieTwo)
self.movieThree = QtGui.QLabel(self.verticalLayoutWidget)
self.movieThree.setObjectName(_fromUtf8("movieThree"))
self.verticalLayout.addWidget(self.movieThree)
self.movieFour = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFour.setObjectName(_fromUtf8("movieFour"))
self.verticalLayout.addWidget(self.movieFour)
self.movieFive = QtGui.QLabel(self.verticalLayoutWidget)
self.movieFive.setObjectName(_fromUtf8("movieFive"))
self.verticalLayout.addWidget(self.movieFive)
self.DesignedBy = QtGui.QLabel(Form)
self.DesignedBy.setGeometry(QtCore.QRect(440, 40, 111, 31))
self.DesignedBy.setAlignment(QtCore.Qt.AlignCenter)
self.DesignedBy.setObjectName(_fromUtf8("DesignedBy"))
self.sourceAt = QtGui.QLabel(Form)
self.sourceAt.setGeometry(QtCore.QRect(440, 170, 111, 20))
self.sourceAt.setObjectName(_fromUtf8("sourceAt"))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(580, 270, 31, 16))
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayoutWidget_2 = QtGui.QWidget(Form)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(210, 40, 101, 261))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.startTwo = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startTwo.setObjectName(_fromUtf8("startTwo"))
self.verticalLayout_2.addWidget(self.startTwo)
self.startOne = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startOne.setObjectName(_fromUtf8("startOne"))
self.verticalLayout_2.addWidget(self.startOne)
self.startThree = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startThree.setObjectName(_fromUtf8("startThree"))
self.verticalLayout_2.addWidget(self.startThree)
self.startFour = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFour.setObjectName(_fromUtf8("startFour"))
self.verticalLayout_2.addWidget(self.startFour)
self.startFive = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.startFive.setObjectName(_fromUtf8("startFive"))
self.verticalLayout_2.addWidget(self.startFive)
self.horizontalLayoutWidget = QtGui.QWidget(Form)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(400, 230, 160, 80))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.save = QtGui.QPushButton(self.horizontalLayoutWidget)
self.save.setObjectName(_fromUtf8("save"))
self.horizontalLayout.addWidget(self.save)
self.settings = QtGui.QPushButton(self.horizontalLayoutWidget)
self.settings.setObjectName(_fromUtf8("settings"))
self.horizontalLayout.addWidget(self.settings)
self.textBrowser_2 = QtGui.QTextBrowser(Form)
self.textBrowser_2.setGeometry(QtCore.QRect(90, 110, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_2.sizePolicy().hasHeightForWidth())
self.textBrowser_2.setSizePolicy(sizePolicy)
self.textBrowser_2.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_2.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textBrowser_2.setReadOnly(False)
self.textBrowser_2.setUndoRedoEnabled(True)
self.textBrowser_2.setObjectName(_fromUtf8("textBrowser_2"))
self.textBrowser_5 = QtGui.QTextBrowser(Form)
self.textBrowser_5.setGeometry(QtCore.QRect(90, 160, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_5.sizePolicy().hasHeightForWidth())
self.textBrowser_5.setSizePolicy(sizePolicy)
self.textBrowser_5.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_5.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textBrowser_5.setReadOnly(False)
self.textBrowser_5.setUndoRedoEnabled(True)
self.textBrowser_5.setObjectName(_fromUtf8("textBrowser_5"))
self.textBrowser_4 = QtGui.QTextBrowser(Form)
self.textBrowser_4.setGeometry(QtCore.QRect(90, 260, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_4.sizePolicy().hasHeightForWidth())
self.textBrowser_4.setSizePolicy(sizePolicy)
self.textBrowser_4.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_4.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textBrowser_4.setReadOnly(False)
self.textBrowser_4.setUndoRedoEnabled(True)
self.textBrowser_4.setObjectName(_fromUtf8("textBrowser_4"))
self.textBrowser_3 = QtGui.QTextBrowser(Form)
self.textBrowser_3.setGeometry(QtCore.QRect(90, 210, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_3.sizePolicy().hasHeightForWidth())
self.textBrowser_3.setSizePolicy(sizePolicy)
self.textBrowser_3.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_3.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textBrowser_3.setReadOnly(False)
self.textBrowser_3.setUndoRedoEnabled(True)
self.textBrowser_3.setObjectName(_fromUtf8("textBrowser_3"))
self.textBrowser_6 = QtGui.QTextBrowser(Form)
self.textBrowser_6.setGeometry(QtCore.QRect(90, 60, 113, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(113)
sizePolicy.setVerticalStretch(20)
sizePolicy.setHeightForWidth(self.textBrowser_6.sizePolicy().hasHeightForWidth())
self.textBrowser_6.setSizePolicy(sizePolicy)
self.textBrowser_6.setMinimumSize(QtCore.QSize(113, 20))
self.textBrowser_6.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textBrowser_6.setReadOnly(False)
self.textBrowser_6.setUndoRedoEnabled(True)
self.textBrowser_6.setObjectName(_fromUtf8("textBrowser_6"))
self.line = QtGui.QFrame(Form)
self.line.setGeometry(QtCore.QRect(340, 50, 20, 211))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(430, 190, 151, 20))
self.label_2.setOpenExternalLinks(True)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(420, 80, 161, 91))
self.label_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_3.setText(_fromUtf8(""))
self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8("logo.jpg")))
self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.retranslateUi(Form)
QtCore.QObject.connect(self.textBrowser_6, QtCore.SIGNAL(_fromUtf8("textChanged()")), Form.changeMovie1)
QtCore.QObject.connect(self.textBrowser_2, QtCore.SIGNAL(_fromUtf8("textChanged()")), Form.changeMovie2)
QtCore.QObject.connect(self.textBrowser_5, QtCore.SIGNAL(_fromUtf8("textChanged()")), Form.changeMovie3)
QtCore.QObject.connect(self.textBrowser_3, QtCore.SIGNAL(_fromUtf8("textChanged()")), Form.changeMovie4)
QtCore.QObject.connect(self.textBrowser_4, QtCore.SIGNAL(_fromUtf8("textChanged()")), Form.changeMovie5)
QtCore.QObject.connect(self.startTwo, QtCore.SIGNAL(_fromUtf8("pressed()")), Form.changeTimer1State)
QtCore.QObject.connect(self.startOne, QtCore.SIGNAL(_fromUtf8("pressed()")), Form.changeTimer2State)
QtCore.QObject.connect(self.startThree, QtCore.SIGNAL(_fromUtf8("pressed()")), Form.changeTimer3State)
QtCore.QObject.connect(self.startFour, QtCore.SIGNAL(_fromUtf8("pressed()")), Form.changeTimer4State)
QtCore.QObject.connect(self.startFive, QtCore.SIGNAL(_fromUtf8("pressed()")), Form.changeTimer5State)
QtCore.QObject.connect(self.save, QtCore.SIGNAL(_fromUtf8("pressed()")), Form.saveChanges)
QtCore.QObject.connect(self.settings, QtCore.SIGNAL(_fromUtf8("pressed()")), Form.reset)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Multiple Movie Timer", None))
self.movieOne.setText(_translate("Form", "Movie 1", None))
self.movieTwo.setText(_translate("Form", "Movie 2", None))
self.movieThree.setText(_translate("Form", "Movie 3", None))
self.movieFour.setText(_translate("Form", "Movie 4", None))
self.movieFive.setText(_translate("Form", "Movie 5", None))
self.DesignedBy.setText(_translate("Form", "This program was\n"
"designed by:", None))
self.sourceAt.setText(_translate("Form", " Source is available at:", None))
self.label.setText(_translate("Form", "V 1.2", None))
self.startTwo.setText(_translate("Form", "Start / Stop", None))
self.startOne.setText(_translate("Form", "Start / Stop", None))
self.startThree.setText(_translate("Form", "Start / Stop", None))
self.startFour.setText(_translate("Form", "Start / Stop", None))
self.startFive.setText(_translate("Form", "Start / Stop", None))
self.save.setToolTip(_translate("Form", "<html><head/><body><p>Save all the current times</p></body></html>", None))
self.save.setText(_translate("Form", "Save", None))
self.settings.setText(_translate("Form", "Reset timers", None))
self.textBrowser_2.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">00:00:00</span></p></body></html>", None))
self.textBrowser_5.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">00:00:00</span></p></body></html>", None))
self.textBrowser_4.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">00:00:00</span></p></body></html>", None))
self.textBrowser_3.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">00:00:00</span></p></body></html>", None))
self.textBrowser_6.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">00:00:00</span></p></body></html>", None))
self.label_2.setText(_translate("Form", "<html><head/><body><p><a href=\"https://github.com/tmwbook/small-projects/tree/Master/MultipleMovieTimer\"><span style=\" text-decoration: underline; color:#0000ff;\">https://github.com/tmwbook</span></a></p></body></html>", None))
def changeMovie1(self):
pass
def changeMovie2(self):
pass
def changeMovie3(self):
pass
def changeMovie4(self):
pass
def changeMovie5(self):
pass
def changeTimer1State(self):
global movie1Time, timer1Running, timer1Start, timer1Time
if not timer1Running:
timer1Running = True
timer1Start = time()
self.thread1 = TimerBackground(timer1Start, timer1Running, 1, movie1Time, self.textBrowser_6)
self.thread1.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer1Running:
self.thread1.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer1Running:
timer1Running = False
movie1Time = timer1Time
def changeTimer2State(self):
global movie2Time, timer2Running, timer2Start, timer2Time
if not timer2Running:
timer2Running = True
timer2Start = time()
self.thread2 = TimerBackground(timer2Start, timer2Running, 2, movie2Time, self.textBrowser_2)
self.thread2.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer2Running:
self.thread2.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer2Running:
timer2Running = False
movie2Time = timer2Time
def changeTimer3State(self):
global movie3Time, timer3Running, timer3Start, timer3Time
if not timer3Running:
timer3Running = True
timer3Start = time()
self.thread3 = TimerBackground(timer3Start, timer3Running, 3, movie3Time, self.textBrowser_5)
self.thread3.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer3Running:
self.thread3.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer3Running:
timer3Running = False
movie3Time = timer3Time
def changeTimer4State(self):
global movie4Time, timer4Running, timer4Start, timer4Time
if not timer4Running:
timer4Running = True
timer4Start = time()
self.thread4 = TimerBackground(timer4Start, timer4Running, 4, movie4Time, self.textBrowser_3)
self.thread4.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer4Running:
self.thread4.start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer4Running:
timer4Running = False
movie4Time = timer4Time
def changeTimer5State(self):
global movie5Time, timer5Running, timer5Start, timer5Time
if not timer5Running:
timer5Running = True
timer5Start = time()
self.thread5 = TimerBackground(timer5Start, timer5Running, 5, movie5Time, self.textBrowser_4)
self.thread5.index_finished.connect(self.updateGUITimers)
def loopThread():
if timer5Running:
self.thread5 .start()
threading.Timer(1, loopThread).start()
loopThread()
elif timer5Running:
timer5Running = False
movie5Time = timer5Time
def reset(self):
global movie1Time, movie2Time, movie3Time, movie4Time, movie5Time
global timer1Time, timer2Time, timer3Time, timer4Time, timer5Time
self.updateGUITimers('00:00:00', self.textBrowser_2)
self.updateGUITimers('00:00:00', self.textBrowser_3)
self.updateGUITimers('00:00:00', self.textBrowser_4)
self.updateGUITimers('00:00:00', self.textBrowser_5)
self.updateGUITimers('00:00:00', self.textBrowser_6)
timerStartingValue = '00:00:00'
movie1Time = timerStartingValue
movie2Time = timerStartingValue
movie3Time = timerStartingValue
movie4Time = timerStartingValue
movie5Time = timerStartingValue
timer1Time = timerStartingValue
timer2Time = timerStartingValue
timer3Time = timerStartingValue
timer4Time = timerStartingValue
timer5time = timerStartingValue
def saveChanges(self):
cwd = os.getcwd()
with open(cwd + '\\settings.ini', 'w') as var:
toWrite = [movie1Time, movie2Time, movie3Time, movie4Time, movie5Time]
for i in toWrite:
var.write(i + '\n')
def updateGUITimers(self, time, textBrowser):
if time != 'none':
textBrowser.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">" + str(time) + "</span></p></body></html>", None))
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
ex = Ui_Form1()
ex.show()
sys.exit(app.exec_()) | [
18,
20,
21,
24,
28
] |
148 | 051bd11c42815ec8f8ece8eae9d33890da77129c | <mask token>
| <mask token>
class GetCommunitiesByOffsetService(IService):
<mask token>
def run(self):
return DBService(self.core).getNextFields('Communities', self.
parameters['start'], self.parameters['offset'])
| <mask token>
class GetCommunitiesByOffsetService(IService):
def __init__(self, core, parameters):
super(GetCommunitiesByOffsetService, self).__init__(core, parameters)
def run(self):
return DBService(self.core).getNextFields('Communities', self.
parameters['start'], self.parameters['offset'])
| from services.interfaces.i_service import IService
from services.dbservices.db_service import DBService
class GetCommunitiesByOffsetService(IService):
def __init__(self, core, parameters):
super(GetCommunitiesByOffsetService, self).__init__(core, parameters)
def run(self):
return DBService(self.core).getNextFields('Communities', self.
parameters['start'], self.parameters['offset'])
| # -*- coding: utf-8 -*-
from services.interfaces.i_service import IService
from services.dbservices.db_service import DBService
class GetCommunitiesByOffsetService(IService):
def __init__(self, core, parameters):
super(GetCommunitiesByOffsetService, self).__init__(core, parameters)
def run(self):
return DBService(self.core).getNextFields("Communities", self.parameters["start"], self.parameters["offset"])
| [
0,
2,
3,
4,
5
] |
149 | 03f73a55e0a0773bbdbb0d5e29a2db598ba2e080 | <mask token>
| <mask token>
print(calculation)
print(calculation2)
print(calculation3)
<mask token>
print('Hi there, You are ' + myage + ' years old')
<mask token>
print('The result is ' + result)
print('average: %.2f' % ((3 + 11 + 78 + 112 + 4 + 18) / 6))
<mask token>
print(remainder)
<mask token>
print(remainder)
<mask token>
print(num3)
<mask token>
if userinput == 'Y':
print('Goodbye')
elif userinput == 'y':
print('Goodbye')
else:
print('Round 2 ~ Fight!')
<mask token>
if x > 0:
print(x)
if 1 + x > x ** sqrt(2):
y = y + x
<mask token>
if x == 1:
y += 1
print(x)
print(y)
<mask token>
if letterGrade >= 90:
print('A')
elif letterGrade >= 80:
print('B')
elif letterGrade >= 70:
print('C')
elif letterGrade >= 60:
print('D')
elif letterGrade <= 40:
print('F')
<mask token>
if richter >= 8.0:
print('Most structures fall')
elif richter >= 7.0:
print('many buildings destroyed')
elif richter >= 6.0:
print('Many buildings considerbly damaged, some collapse')
elif richter >= 4.5:
print('Damage to poorly constructed buildings.')
elif richter <= 4.4:
print('No destruction of buildings.')
<mask token>
print('Welcome ' + user + ' Please select a password')
<mask token>
while count <= 4:
if count == 4:
print(
'Access denied,Please press enter to exit and contact security to reset your password'
)
elif len(password) < 8:
input(
'Password needs to be more than 8 characters, Please try again : ')
elif len(password) >= 8:
print('Password changed successfully')
break
count += 1
for i in range(3):
for j in range(1, 4):
print(i + j, end='')
print()
for i in range(1, 6):
print('%d %d %d %d %d' % (i ** 1, i ** 2, i ** 3, i ** 4, i ** 5))
| a = 13
b = 14
calculation = a + 1 <= b
calculation2 = a + 1 >= b
calculation3 = a + 1 != b
print(calculation)
print(calculation2)
print(calculation3)
myage = input('How old are you : ')
print('Hi there, You are ' + myage + ' years old')
num1 = input('Enter the first number : ')
num2 = input('Enter the second number : ')
result = num1 + num2
print('The result is ' + result)
print('average: %.2f' % ((3 + 11 + 78 + 112 + 4 + 18) / 6))
num1 = int(input('Enter a number : '))
remainder = num1 % 7
print(remainder)
num1 = int(input('Enter a number : '))
remainder = num1 % 7
print(remainder)
num2 = 7
num3 = num1 / num2
print(num3)
userinput = input('Enter Y to quit : ')
if userinput == 'Y':
print('Goodbye')
elif userinput == 'y':
print('Goodbye')
else:
print('Round 2 ~ Fight!')
x = int(input('Enter a number : '))
if x > 0:
print(x)
if 1 + x > x ** sqrt(2):
y = y + x
x = 1
y = 5
if x == 1:
y += 1
print(x)
print(y)
letterGrade = int(input('Enter your grade : '))
if letterGrade >= 90:
print('A')
elif letterGrade >= 80:
print('B')
elif letterGrade >= 70:
print('C')
elif letterGrade >= 60:
print('D')
elif letterGrade <= 40:
print('F')
richter = float(input('Enter magnitude on richter scale : '))
if richter >= 8.0:
print('Most structures fall')
elif richter >= 7.0:
print('many buildings destroyed')
elif richter >= 6.0:
print('Many buildings considerbly damaged, some collapse')
elif richter >= 4.5:
print('Damage to poorly constructed buildings.')
elif richter <= 4.4:
print('No destruction of buildings.')
user = input('Enter a username : ')
print('Welcome ' + user + ' Please select a password')
password = input('Enter a password : ')
count = 0
while count <= 4:
if count == 4:
print(
'Access denied,Please press enter to exit and contact security to reset your password'
)
elif len(password) < 8:
input(
'Password needs to be more than 8 characters, Please try again : ')
elif len(password) >= 8:
print('Password changed successfully')
break
count += 1
for i in range(3):
for j in range(1, 4):
print(i + j, end='')
print()
for i in range(1, 6):
print('%d %d %d %d %d' % (i ** 1, i ** 2, i ** 3, i ** 4, i ** 5))
| #Week 5
#Task 1.1
a = 13
b = 14
calculation = a + 1 <=b
calculation2 = a + 1 >=b
calculation3 = a + 1 !=b
print (calculation)
print (calculation2)
print (calculation3)
#Task 1.2
myage = input("How old are you : ")
print ("Hi there, You are " +myage+ " years old")
#Task 1.3
num1 = input("Enter the first number : ")
num2 = input("Enter the second number : ")
result = num1 + num2
print ("The result is " +result)
#Task 1.4
print ("average: %.2f" % ((3 + 11 + 78 + 112 + 4 + 18) / 6))
#Task 1.5
num1 = int(input ("Enter a number : "))
remainder = num1 % 7
print (remainder)
#Task 1.6
num1 = int(input ("Enter a number : "))
remainder = num1 % 7
print (remainder)
num2 = 7
num3 = num1 / num2
print (num3)
#Task 1.8
userinput = input("Enter Y to quit : ")
if userinput == 'Y':
print ("Goodbye")
elif userinput == 'y':
print ("Goodbye")
else:
print ("Round 2 ~ Fight!")
#Task 1.9a
x = int(input ("Enter a number : "))
if (x) >0:
print(x)
#Task 1.9b
if 1 + x > x ** sqrt(2) : y = y + x
#Task 1.9c
x = 1
y = 5
if x == 1:
y += 1
print (x)
print (y)
#Task 1.9d
letterGrade = int(input("Enter your grade : "))
if letterGrade >= 90: print ("A")
elif letterGrade >= 80: print ("B")
elif letterGrade >= 70: print ("C")
elif letterGrade >= 60: print ("D")
elif letterGrade <= 40: print ("F")
#Task 1.10
richter = float(input ("Enter magnitude on richter scale : "))
if richter >= 8.0: print ("Most structures fall")
elif richter >= 7.0: print ("many buildings destroyed")
elif richter >= 6.0: print ("Many buildings considerbly damaged, some collapse")
elif richter >= 4.5: print ("Damage to poorly constructed buildings.")
elif richter <= 4.4: print ("No destruction of buildings.")
#Task 1.11
user = input("Enter a username : ")
print ("Welcome " + user + " Please select a password")
password = input("Enter a password : ")
count = 0
while count <= 4:
if count == 4:
print ("Access denied,Please press enter to exit and contact security to reset your password")
elif (len(password)<8):
input("Password needs to be more than 8 characters, Please try again : ")
elif (len(password)>=8):
print ("Password changed successfully")
break
count += 1
#Task 1.12
for i in range(3):
for j in range(1, 4):
print (i + j, end="")
print ()
#Task 1.13
for i in range (1,6):
print("%d %d %d %d %d" % ((i**1),(i**2),(i**3),(i**4),(i**5)))
| null | [
0,
1,
2,
3
] |
150 | ba72af921a9562d748bcd65f1837ea8eb5da5697 | from random import choice, random, randrange
from math import fsum
import os
import numpy as np
def mat17(N, ATOM_TYPES, ndenmax=0.04302, ndenmin=0.0000013905, xmax=51.2, xmin=25.6, ymax=51.2, ymin=25.6,
zmax=51.2, zmin=25.6, epmax=513.264, epmin=1.2580, sigmax=6.549291, sigmin=1.052342, qmax=0.0, qmin=0.0):
#epmax DEFINED WRT TO X-Y-Z LIMITS?
#max number density based on that of pure Iron
#max unit cell dimensions based on PCN-777 cages size
#max LJ parameters (for now using 1.5x highest values in GenericMOFs)
#max charge... UFF?
#ATOM_TYPES = 4
if type(N) != int:
print 'N must be an integer.'
Ntag = str(N)
ntag = str(ndenmax)
xtag = str(xmax)
ytag = str(xmax)
ztag = str(xmax)
eptag = str(xmax)
sigtag = str(xmax)
qtag = str(xmax)
top_path = ('materials' + '_' + Ntag + '.' + ntag + '_' + xtag + '.' + ytag
+ '.' + ztag + '_' + eptag + '.' + sigtag + '_' + qtag)
if not os.path.exists(top_path):
os.mkdir(top_path)
# def drange(start, stop, step):
# r = start
# while r < stop:
# yield r
# r+= step
# nden0 = drange(1, ndenmax*10000, ndenp*10000)
# ndendim = [nden for nden in nden0]
# x0 = drange(0, xmax + xp, xp)
# xdim = [x for x in x0]
# y0 = drange(0, ymax + yp, yp)
# ydim = [y for y in y0]
# z0 = drange(0, zmax + zp, zp)
# zdim = [z for z in z0]
# ep0 = drange(0, epmax + epp, epp)
# epdim = [ep for ep in ep0]
# sig0 = drange(0, sigmax + sigp, sigp)
# sigdim = [sig for sig in sig0]
#open mat_stats.txt, to track material data
mat_stats = open(os.path.abspath(top_path)+ '/mat_stats.txt', 'w')
mat_stat_heading = ('\nBOUNDARIES\nNumber of particles: ' + Ntag +
'\nnumber density: ' + ntag + '\nx-coordinate: ' +
xtag + '\ny-coordinate: ' + ytag + '\nz-coordinate: ' +
ztag + '\nEpsilon: ' + eptag + '\nSigma: ' + sigtag
+ '\nCharge: ' + qtag + '\n\n' +
'#name number density xdim ydim '+
'zdim total particles net charge\n')
mat_stats.write(mat_stat_heading)
#MAT-XXX loop...
for i in range(N + 1):
mat_name = 'MAT-' + str(i)
#make MAT-XXX directory
os.mkdir(top_path+'/'+mat_name)
#open .cif file
cif_file = open(os.path.abspath(top_path) + '/'+mat_name + '/' +
mat_name+'.cif', 'w')
#open force_field_mixing_rules.def
mixing_rules = open(os.path.abspath(top_path) + '/'+mat_name +
'/force_field_mixing_rules.def', 'w')
#open pseudo_atoms.def
pseudo_atoms = open(os.path.abspath(top_path) + '/'+mat_name +
'/pseudo_atoms.def', 'w')
#open force_field.def
force_field = open(os.path.abspath(top_path) + '/'+mat_name +
'/force_field.def', 'w')
#nden_ = choice(ndendim)/10000.
#xdim_ = choice(xdim)
#ydim_ = choice(ydim)
#zdim_ = choice(zdim)
#nden_ = randrange(0.0001, ndenmax, 1)
#xdim_ = randrange(15., xmax, 0.1)
#ydim_ = randrange(15., ymax, 0.1)
#zdim_ = randrange(15., zmax, 0.1)
#N_ = xdim_ * ydim_ * zdim_ * nden_
#n_ = int(N_)
nden_ = round(random() * (ndenmax - ndenmin) + ndenmin, 6)
xdim_ = round(random() * (xmax - xmin) + xmin, 4)
ydim_ = round(random() * (ymax - ymin) + ymin, 4)
zdim_ = round(random() * (zmax - zmin) + zmin, 4)
N_ = xdim_ * ydim_ * zdim_ * nden_
n_ = int(N_)
cif_heading = ('material' + str(i) +
'\n\nloop_\n' +
'_symmetry_equiv_pos_as_xyz\n' +
' x,y,z\n' +
'_cell_length_a ' + str(xdim_) +
'\n_cell_length_b ' + str(ydim_) +
'\n_cell_length_c ' + str(zdim_) +
'\n_cell_angle_alpha 90.0000\n' +
'_cell_angle_beta 90.0000\n' +
'_cell_angle_gamma 90.0000\n' +
'loop_\n' +
'_atom_site_label\n' +
'_atom_site_type_symbol\n' +
'_atom_site_fract_x\n' +
'_atom_site_fract_y\n' +
'_atom_site_fract_z\n' +
'_atom_site_charge\n')
cif_file.write(cif_heading)
# mixing_heading = ('# general rule for shifted vs truncated\nshifted\n' +
# '# general rule for tailcorrections\nno\n' +
# '# number of defined interactions\n' + str(108) + #check these + XXX values
# '\n# type interaction\n')
mixing_heading = ('# general rule for shifted vs truncated\n' +
'shifted\n' +
'# general rule tailcorrections\n' +
'no\n' +
'# number of defined interactions\n' +
str(ATOM_TYPES + 8) + '\n' +
'# type interaction, parameters. IMPORTANT: define shortest matches first, so that more specific ones overwrites these\n')
mixing_rules.write(mixing_heading)
pseudo_heading = ('#number of pseudo atoms\n' + str(ATOM_TYPES + 8) +
'\n#type print as chem oxidation' +
' mass charge polarization ' +
'B-factor radii connectivity anisotropic' +
' anisotrop-type tinker-type\n')
pseudo_atoms.write(pseudo_heading)
##make charges
#q = []
#for k in range(n_ + 1):
# q.append(0)
#for l in range(5*(n_ + 1)):
# m = choice(range(n_ + 1))
# n = choice(range(n_ + 1))
# if m == n:
# n = choice(range(n_ + 1))
# dq = random() * qmax
# if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:
# q[m] = float(float(q[m]) + dq)
# q[n] = float(float(q[n]) - dq)
# if q[m] > qmax or q[n] < -1 * qmax:
# q[m] = q[m] - dq
# q[n] = q[n] + dq
#for o in range(5*(n_ + 1)):
# m = choice(range(n_ + 1))
# n = choice(range(n_ + 1))
# if m == n:
# n = choice(range(n_ + 1))
# dq = random() * qmax
# if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:
# q[m] = float(float(q[m]) + dq)
# q[n] = float(float(q[n]) - dq)
# if q[m] > qmax or q[n] < -1 * qmax:
# q[m] = q[m] - dq
# q[n] = q[n] + dq
#p = choice(range(n_ + 1))
#q[p] = q[p] - sum(q)
#if sum(q) != 0.000000000000000000000:
# for l in range(5*(n_ + 1)):
# m = choice(range(n_ + 1))
# n = choice(range(n_ + 1))
# if m == n:
# n = choice(range(n_ + 1))
# dq = random() * qmax
# if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:
# q[m] = float(float(q[m]) + dq)
# q[n] = float(float(q[n]) - dq)
# if q[m] > qmax or q[n] < -1 * qmax:
# q[m] = q[m] - dq
# q[n] = q[n] + dq
# for o in range(5*(n_ + 1)):
# m = choice(range(n_ + 1))
# n = choice(range(n_ + 1))
# if m == n:
# n = choice(range(n_ + 1))
# dq = random() * qmax
# if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:
# q[m] = float(float(q[m]) + dq)
# q[n] = float(float(q[n]) - dq)
# if q[m] > qmax or q[n] < -1 * qmax:
# q[m] = q[m] - dq
# q[n] = q[n] + dq
# p = choice(range(n_ + 1))
# q[p] = q[p] - sum(q)
#LJ parameters
ep = []
sig = []
q = []
for i in range(ATOM_TYPES):
epsilon = round(random() * (epmax - epmin) + epmin, 4)
ep.append(epsilon)
sigma = round(random() * (sigmax -sigmin) + sigmin, 4)
sig.append(sigma)
charge = 0
q.append(charge)
ep_ = np.asarray(ep)
sig_ = np.asarray(sig)
q_ = np.asarray(q)
ID_ = np.asarray(range(0,ATOM_TYPES))
ep = ep_.reshape(-1,1)
sig = sig_.reshape(-1,1)
q = q_.reshape(-1,1)
ID = ID_.reshape(-1,1)
atoms = np.hstack((ID, ep, sig, q))
n_atoms = np.empty([0, 4])
for i in range(n_):
atomtype = choice(range(ATOM_TYPES))
n_atoms = np.vstack([n_atoms, atoms[atomtype, :]])
IDs = n_atoms[:,0]
for i in range(ATOM_TYPES):
if i in IDs:
charge = round(random() * (qmax - qmin) + qmin, 4)
weight_i = list(IDs).count(i)
k = choice(IDs)
weight_k = list(IDs).count(k)
for j in range(n_):
if n_atoms[j,0] == i:
n_atoms[j,3] = n_atoms[j,3] + charge * int(weight_k)
atoms[i,3] = n_atoms[j,3] + charge * int(weight_k)
if n_atoms[j,0] == k:
n_atoms[j,3] = n_atoms[j,3] - charge * int(weight_i)
atoms[k,3] = n_atoms[j,3] - charge * int(weight_i)
# for i in range(100):
# atoms[i,3] = round(atoms[i,3], 4)
# for i in range(n_):
# n_atoms[i,3] = round(n_atoms[i,3], 4)
# net_charge = sum(n_atoms[:,3])
# if net_charge != 0:
# atomID = choice(range(100))
# weight = list(IDs).count(atomID)
# atoms[atomID,3] = atoms[atomID,3] - net_charge/weight
# for i in range(n_):
# if n_atoms[i,0] == atomID:
# n_atoms[atomID,3] = n_atoms[atomID,3] - net_charge/weight
mat_charge = str(sum(n_atoms[:,3]))
cif_file.write('#NET CHARGE: ' + mat_charge + '\n')
mat_X_stats = (mat_name + ' ' + str(nden_) + ' ' + str(xdim_) + ' ' + str(ydim_) +
' ' + str(zdim_) + ' ' + str(n_) + ' ' +
mat_charge + '\n')
mat_stats.write(mat_X_stats)
eps = n_atoms[:,1]
sigs = n_atoms[:,2]
qs = n_atoms[:,3]
#writing mixing_rules, pseudo_atoms...
for i in range(ATOM_TYPES):
atom_X_pseudo = ('A_' + str(int(atoms[i,0])) + ' yes C C 0 ' +
'12.0 ' + str(atoms[i,3]) + ' 0.0 0.0 ' +
'1.0 1.00 0 0 absolute 0\n')
pseudo_atoms.write(atom_X_pseudo)
atom_X_mixing = ('A_' + str(int(atoms[i,0])) + ' ' +
'lennard-jones ' + str(atoms[i,1]) + ' '
+ str(atoms[i,2]) + '\n')
mixing_rules.write(atom_X_mixing)
#writing cif...
for i in range(n_):
#FIX THIS TO ALLOW FOR NON-INT VALUES?
x = choice(range(int(xdim_ + 1)))
y = choice(range(int(ydim_ + 1)))
z = choice(range(int(zdim_ + 1)))
atom_X_cif = ('A_' + str(int(n_atoms[i,0])) + ' ' + 'C ' +
str(round(x/xdim_, 4)) + ' ' + str(round(y/ydim_, 4)) +
' ' + str(round(z/zdim_, 4)) + ' ' +
str(n_atoms[i,3]) + '\n')
cif_file.write(atom_X_cif)
# #ep = choice(epdim)
# #sig = choice(sigdim)
# epval = ep[atomtype]
# sigval = sig[atomtype]
# charge = q[n_]
# #if charge < 0:
# atom_X_cif = ('A' + str(atomtype) + ' ' + 'C ' +
# str(x/xdim_) + ' ' + str(y/ydim_) +
# ' ' + str(z/zdim_) + ' ' +
# str(charge) + '\n')
# cif_file.write(atom_X_cif)
# for k in range(100):
# if k != atomtype:
# atom_X_pseudo = ('A' + str(k) + ' yes C C 0 12.0 0' +
# ' 0.0 0.0 1.0 1.00 0 ' +
# '0 absolute 0\n')
# if k == atomtype:
# atom_X_pseudo = ('A' + str(k) + ' yes C C 0 12.0 ' +
# str(q[n_]) + ' 0.0 0.0 1.0 1.00 0 ' +
# '0 absolute 0\n')
#
# pseudo_atoms.write(atom_X_pseudo)
#
# atom_X_mixing = ('A' + str(k) + ' LENNARD_JONES ' +
# str(ep[k]) + ' ' + str(sig[k]) + '\n')
# mixing_rules.write(atom_X_mixing)
#if charge >= 0:
# atom_X_cif = ('A' + str(atomtype) + ' ' + str(x) + ' ' +
# str(y) + ' ' + str(z) + ' ' +
# str(charge) + '\n')
#cif_file.write(atom_X_cif)
#for i in range(100):
# atom_X_mixing = ('A' + str(i) + ' LENNARD_JONES ' +
# str(ep[i]) + ' ' + str(sig[i]) + '\n')
# mixing_rules.write(atom_X_mixing)
#
# atom_X_pseudo = ('A' + str(i) + ' yes C C 0 12.0 ' +
# str(q[i]) + ' 0.0 0.0 1.0 1.00 0 ' +
# '0 absolute 0\n')
## pseudo_atoms.write(atom_X_pseudo)
#SUPPORTED ADSORBATES
# name pseudo-atoms
# N2 : N_n2; N_com
# CO2 : C_co2; O_co2
# methane : CH4_sp3
# helium : He
# hydrogen : H_h2; H_com
# H2 : H_h2; H_com
#adsorbate_mixing = ('N_n2 LENNARD_JONES 36.0 3.31\n' +
# 'N_com none\n' +
# 'C_co2 LENNARD_JONES 27.0 2.80\n' +
# 'O_co2 LENNARD_JONES 79.0 3.05\n' +
# 'CH4_sp3 LENNARD_JONES 158.5 3.72\n' +
# 'He LENNARD_JONES 10.9 2.64\n' +
# 'H_h2 none\n' +
# 'H_com LENNARD_JONES 36.7 2.958\n' +
# '# general mixing rule for Lennard-Jones\n' +
# 'Lorentz-Berthlot')
adsorbate_mixing = ('N_n2 lennard-jones 36.0 3.31\n' +
'N_com none\n' +
'C_co2 lennard-jones 27.0 2.80\n' +
'O_co2 lennard-jones 79.0 3.05\n' +
'CH4_sp3 lennard-jones 158.5 3.72\n' +
'He lennard-jones 10.9 2.64\n' +
'H_h2 none\n' +
'H_com lennard-jones 36.7 2.958\n' +
'# general mixing rule for Lennard-Jones\n' +
'Lorentz-Berthelot')
mixing_rules.write(adsorbate_mixing)
adsorbate_pseudo = ('N_n2 yes N N 0 14.00674 -0.4048' +
' 0.0 1.0 0.7 0 0 relative 0\n' +
'N_com no N - 0 0.0 0.8096' +
' 0.0 1.0 0.7 0 0 relative 0\n' +
'C_co2 yes C C 0 12.0 0.70' +
' 0.0 1.0 0.720 0 0 relative 0\n' +
'O_co2 yes O O 0 15.9994 -0.35' +
' 0.0 1.0 0.68 0 0 relative 0\n' +
'CH4_sp3 yes C C 0 16.04246 0.0' +
' 0.0 1.0 1.00 0 0 relative 0\n' +
'He yes He He 0 4.002602 0.0' +
' 0.0 1.0 1.0 0 0 relative 0\n' +
'H_h2 yes H H 0 1.00794 0.468' +
' 0.0 1.0 0.7 0 0 relative 0\n' +
'H_com no H H 0 0.0 - 0.936' +
' 0.0 1.0 0.7 0 0 relative 0\n')
pseudo_atoms.write(adsorbate_pseudo)
force_field_rules = ('# rules to overwrite\n0\n' +
'# number of defined interactions\n0\n' +
'# mixing rules to overwrite\n0')
force_field.write(force_field_rules)
cif_file.close()
mixing_rules.close()
pseudo_atoms.close()
force_field.close()
mat_stats.close()
| null | null | null | null | [
0
] |
151 | dab1adcd185092fc425b5d87150f27e7b67bff6c | <mask token>
| ba0563.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
,
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
]
| ba0563.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
]
| null | null | [
0,
1,
2
] |
152 | 924fd89a835528fa28e1226912a2e4be9c4e1d5d | <mask token>
class UppercaseBrandFeed(CSVMerchantFeed):
<mask token>
class CSVMerchantFeedTest(TestCase):
def test_csv_empty(self):
feed = CSVMerchantFeed([])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS)
def test_csv(self):
feed = CSVMerchantFeed([AttrNameFakeModel()])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS * 2)
def test_csv_missing_attribute(self):
feed = CSVMerchantFeed([EmptyFakeModel()])
output = feed.get_content()
empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\r\n'
self.assertEquals(output, CSV_HEADINGS + empty_data_row)
def test_csv_with_get_method(self):
feed = UppercaseBrandFeed([AttrNameFakeModel()])
output = feed.get_content()
data_row = CSV_HEADINGS.replace('brand', 'BRAND')
self.assertEquals(output, CSV_HEADINGS + data_row)
class CSVFeedViewTest(TestCase):
def test_view_empty(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEquals(response.content, CSV_HEADINGS)
def test_has_correct_headers(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'],
'attachment; filename="google.csv"')
| <mask token>
class EmptyFakeModel(object):
def __getattr__(self, name):
raise AttributeError
class UppercaseBrandFeed(CSVMerchantFeed):
def get_brand(self, obj):
return obj.brand.upper()
class CSVMerchantFeedTest(TestCase):
def test_csv_empty(self):
feed = CSVMerchantFeed([])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS)
def test_csv(self):
feed = CSVMerchantFeed([AttrNameFakeModel()])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS * 2)
def test_csv_missing_attribute(self):
feed = CSVMerchantFeed([EmptyFakeModel()])
output = feed.get_content()
empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\r\n'
self.assertEquals(output, CSV_HEADINGS + empty_data_row)
def test_csv_with_get_method(self):
feed = UppercaseBrandFeed([AttrNameFakeModel()])
output = feed.get_content()
data_row = CSV_HEADINGS.replace('brand', 'BRAND')
self.assertEquals(output, CSV_HEADINGS + data_row)
class CSVFeedViewTest(TestCase):
def test_view_empty(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEquals(response.content, CSV_HEADINGS)
def test_has_correct_headers(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'],
'attachment; filename="google.csv"')
| <mask token>
class AttrNameFakeModel(object):
<mask token>
class EmptyFakeModel(object):
def __getattr__(self, name):
raise AttributeError
class UppercaseBrandFeed(CSVMerchantFeed):
def get_brand(self, obj):
return obj.brand.upper()
class CSVMerchantFeedTest(TestCase):
def test_csv_empty(self):
feed = CSVMerchantFeed([])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS)
def test_csv(self):
feed = CSVMerchantFeed([AttrNameFakeModel()])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS * 2)
def test_csv_missing_attribute(self):
feed = CSVMerchantFeed([EmptyFakeModel()])
output = feed.get_content()
empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\r\n'
self.assertEquals(output, CSV_HEADINGS + empty_data_row)
def test_csv_with_get_method(self):
feed = UppercaseBrandFeed([AttrNameFakeModel()])
output = feed.get_content()
data_row = CSV_HEADINGS.replace('brand', 'BRAND')
self.assertEquals(output, CSV_HEADINGS + data_row)
class CSVFeedViewTest(TestCase):
def test_view_empty(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEquals(response.content, CSV_HEADINGS)
def test_has_correct_headers(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'],
'attachment; filename="google.csv"')
| from __future__ import unicode_literals
from django.test import TestCase
from django.core.urlresolvers import reverse
from google_product_feeder.feed import CSVMerchantFeed, MERCHANT_FEED_COLUMNS
CSV_HEADINGS = ','.join(MERCHANT_FEED_COLUMNS) + '\r\n'
class AttrNameFakeModel(object):
def __getattr__(self, name):
return name
class EmptyFakeModel(object):
def __getattr__(self, name):
raise AttributeError
class UppercaseBrandFeed(CSVMerchantFeed):
def get_brand(self, obj):
return obj.brand.upper()
class CSVMerchantFeedTest(TestCase):
def test_csv_empty(self):
feed = CSVMerchantFeed([])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS)
def test_csv(self):
feed = CSVMerchantFeed([AttrNameFakeModel()])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS * 2)
def test_csv_missing_attribute(self):
feed = CSVMerchantFeed([EmptyFakeModel()])
output = feed.get_content()
empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\r\n'
self.assertEquals(output, CSV_HEADINGS + empty_data_row)
def test_csv_with_get_method(self):
feed = UppercaseBrandFeed([AttrNameFakeModel()])
output = feed.get_content()
data_row = CSV_HEADINGS.replace('brand', 'BRAND')
self.assertEquals(output, CSV_HEADINGS + data_row)
class CSVFeedViewTest(TestCase):
def test_view_empty(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEquals(response.content, CSV_HEADINGS)
def test_has_correct_headers(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'],
'attachment; filename="google.csv"')
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.core.urlresolvers import reverse
from google_product_feeder.feed import CSVMerchantFeed, MERCHANT_FEED_COLUMNS
CSV_HEADINGS = ','.join(MERCHANT_FEED_COLUMNS) + '\r\n'
class AttrNameFakeModel(object):
# A fake model that returns the attribute name upon attribute access.
def __getattr__(self, name):
return name
class EmptyFakeModel(object):
# A fake model with no attributes.
def __getattr__(self, name):
raise AttributeError
class UppercaseBrandFeed(CSVMerchantFeed):
def get_brand(self, obj):
return obj.brand.upper()
class CSVMerchantFeedTest(TestCase):
def test_csv_empty(self):
feed = CSVMerchantFeed([])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS)
def test_csv(self):
feed = CSVMerchantFeed([AttrNameFakeModel()])
output = feed.get_content()
self.assertEquals(output, CSV_HEADINGS * 2)
def test_csv_missing_attribute(self):
feed = CSVMerchantFeed([EmptyFakeModel()])
output = feed.get_content()
empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\r\n'
self.assertEquals(output, CSV_HEADINGS + empty_data_row)
def test_csv_with_get_method(self):
feed = UppercaseBrandFeed([AttrNameFakeModel()])
output = feed.get_content()
data_row = CSV_HEADINGS.replace('brand', 'BRAND')
self.assertEquals(output, CSV_HEADINGS + data_row)
class CSVFeedViewTest(TestCase):
def test_view_empty(self):
url = reverse('google_feed')
response = self.client.get(url)
self.assertEquals(response.content, CSV_HEADINGS)
def test_has_correct_headers(self):
# content-type is 'text/csv', content-disposition is 'attachment',
# filename is 'google.csv'
url = reverse('google_feed')
response = self.client.get(url)
self.assertEqual(response['Content-Type'],
'text/csv')
self.assertEqual(response['Content-Disposition'],
'attachment; filename="google.csv"')
| [
9,
12,
13,
16,
17
] |
153 | 99a6b450792d434e18b8f9ff350c72abe5366d95 | <mask token>
| try:
alp = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
idx = eval(input('请输入一个整数'))
print(alp[idx])
except NameError:
print('输入错误,请输入一个整数')
except:
print('其他错误')
else:
print('没有发生错误')
finally:
print('程序执行完毕,不知道是否发生了异常')
| try:
alp="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
idx=eval(input("请输入一个整数"))
print(alp[idx])
except NameError:
print("输入错误,请输入一个整数")
except:
print("其他错误")
else:
print("没有发生错误")
finally:
print("程序执行完毕,不知道是否发生了异常")
| null | null | [
0,
1,
2
] |
154 | 3c9302b5cb92e5103ed16ec56e1b349f0662950c | <mask token>
class PostAdmin(admin.ModelAdmin):
class Media:
js = ('admin/js/tiny_mce/tiny_mce.js',
'admin/js/tiny_mce/basic_config.js')
class PhraseAdmin(admin.ModelAdmin):
pass
class TypeGaleryAdmin(admin.ModelAdmin):
pass
class ImageGaleryAdmin(admin.ModelAdmin):
pass
class SponsorAdmin(admin.ModelAdmin):
pass
<mask token>
| <mask token>
class VideoAdmin(admin.ModelAdmin):
pass
class TypePostAdmin(admin.ModelAdmin):
pass
class PostAdmin(admin.ModelAdmin):
class Media:
js = ('admin/js/tiny_mce/tiny_mce.js',
'admin/js/tiny_mce/basic_config.js')
class PhraseAdmin(admin.ModelAdmin):
pass
class TypeGaleryAdmin(admin.ModelAdmin):
pass
class ImageGaleryAdmin(admin.ModelAdmin):
pass
class SponsorAdmin(admin.ModelAdmin):
pass
<mask token>
| <mask token>
class BannerAdmin(admin.ModelAdmin):
pass
class CaricaturaAdmin(admin.ModelAdmin):
pass
class VideoAdmin(admin.ModelAdmin):
pass
class TypePostAdmin(admin.ModelAdmin):
pass
class PostAdmin(admin.ModelAdmin):
class Media:
js = ('admin/js/tiny_mce/tiny_mce.js',
'admin/js/tiny_mce/basic_config.js')
class PhraseAdmin(admin.ModelAdmin):
pass
class TypeGaleryAdmin(admin.ModelAdmin):
pass
class ImageGaleryAdmin(admin.ModelAdmin):
pass
class SponsorAdmin(admin.ModelAdmin):
pass
<mask token>
| <mask token>
class AutorAdmin(admin.ModelAdmin):
pass
class CategoryAdmin(admin.ModelAdmin):
pass
class AnnouncementAdmin(admin.ModelAdmin):
pass
class BannerAdmin(admin.ModelAdmin):
pass
class CaricaturaAdmin(admin.ModelAdmin):
pass
class VideoAdmin(admin.ModelAdmin):
pass
class TypePostAdmin(admin.ModelAdmin):
pass
class PostAdmin(admin.ModelAdmin):
class Media:
js = ('admin/js/tiny_mce/tiny_mce.js',
'admin/js/tiny_mce/basic_config.js')
class PhraseAdmin(admin.ModelAdmin):
pass
class TypeGaleryAdmin(admin.ModelAdmin):
pass
class ImageGaleryAdmin(admin.ModelAdmin):
pass
class SponsorAdmin(admin.ModelAdmin):
pass
<mask token>
| # -*- encoding: utf-8 -*-
from django.contrib import admin
from finish.wall.models import (Autor, Category, Announcement, Banner, Caricatura,
Video, TypePost, Post, Phrase, TypeGalery,
ImageGalery, Sponsor)
class AutorAdmin(admin.ModelAdmin):
pass
class CategoryAdmin(admin.ModelAdmin):
pass
class AnnouncementAdmin(admin.ModelAdmin):
pass
class BannerAdmin(admin.ModelAdmin):
pass
class CaricaturaAdmin(admin.ModelAdmin):
pass
class VideoAdmin(admin.ModelAdmin):
pass
class TypePostAdmin(admin.ModelAdmin):
pass
class PostAdmin(admin.ModelAdmin):
class Media:
js = ('admin/js/tiny_mce/tiny_mce.js',
'admin/js/tiny_mce/basic_config.js',)
class PhraseAdmin(admin.ModelAdmin):
pass
class TypeGaleryAdmin(admin.ModelAdmin):
pass
class ImageGaleryAdmin(admin.ModelAdmin):
pass
class SponsorAdmin(admin.ModelAdmin):
pass
admin.site.register(Autor, AutorAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Announcement, AnnouncementAdmin)
admin.site.register(Banner, BannerAdmin)
admin.site.register(Caricatura, CaricaturaAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(TypePost, TypePostAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Phrase, PhraseAdmin)
admin.site.register(TypeGalery, TypeGaleryAdmin)
admin.site.register(ImageGalery, ImageGaleryAdmin)
admin.site.register(Sponsor, SponsorAdmin)
| [
5,
7,
9,
12,
15
] |
155 | ab352c9431fda19bc21a9f7ffa075303641cca45 | <mask token>
class ListingCustomFieldsGet(Service):
<mask token>
def reply(self):
solr_fields = {}
storage = PropertySheetSchemaStorage()
if not storage:
return solr_fields
for listing_name, slot_provider in LISTING_TO_SLOTS.items():
fields_by_listing = {}
for slot_name in slot_provider():
definition = storage.query(slot_name)
if definition is not None:
fields_by_listing.update(definition.
get_solr_dynamic_field_schema())
if fields_by_listing:
solr_fields[listing_name] = {'properties': fields_by_listing}
return solr_fields
| <mask token>
class ListingCustomFieldsGet(Service):
"""API Endpoint which returns custom fields available for listings.
It returns a nested data structure with custom fields for each supported
listing, if available.
Custom fields are provided as follows:
- Custom field source are property sheets registerd for a type associated
with a listing
- Custom fields must be indexed in solr (i.e. everything but `Text`)
- If different sheets for the same type index to the same field, only the
last field is returned.
GET /@listing-custom-fields HTTP/1.1
"""
def reply(self):
solr_fields = {}
storage = PropertySheetSchemaStorage()
if not storage:
return solr_fields
for listing_name, slot_provider in LISTING_TO_SLOTS.items():
fields_by_listing = {}
for slot_name in slot_provider():
definition = storage.query(slot_name)
if definition is not None:
fields_by_listing.update(definition.
get_solr_dynamic_field_schema())
if fields_by_listing:
solr_fields[listing_name] = {'properties': fields_by_listing}
return solr_fields
| <mask token>
LISTING_TO_SLOTS = {u'dossiers': get_dossier_assignment_slots, u'documents':
get_document_assignment_slots}
class ListingCustomFieldsGet(Service):
"""API Endpoint which returns custom fields available for listings.
It returns a nested data structure with custom fields for each supported
listing, if available.
Custom fields are provided as follows:
- Custom field source are property sheets registerd for a type associated
with a listing
- Custom fields must be indexed in solr (i.e. everything but `Text`)
- If different sheets for the same type index to the same field, only the
last field is returned.
GET /@listing-custom-fields HTTP/1.1
"""
def reply(self):
solr_fields = {}
storage = PropertySheetSchemaStorage()
if not storage:
return solr_fields
for listing_name, slot_provider in LISTING_TO_SLOTS.items():
fields_by_listing = {}
for slot_name in slot_provider():
definition = storage.query(slot_name)
if definition is not None:
fields_by_listing.update(definition.
get_solr_dynamic_field_schema())
if fields_by_listing:
solr_fields[listing_name] = {'properties': fields_by_listing}
return solr_fields
| from opengever.propertysheets.assignment import get_document_assignment_slots
from opengever.propertysheets.assignment import get_dossier_assignment_slots
from opengever.propertysheets.storage import PropertySheetSchemaStorage
from plone.restapi.services import Service
LISTING_TO_SLOTS = {u'dossiers': get_dossier_assignment_slots, u'documents':
get_document_assignment_slots}
class ListingCustomFieldsGet(Service):
"""API Endpoint which returns custom fields available for listings.
It returns a nested data structure with custom fields for each supported
listing, if available.
Custom fields are provided as follows:
- Custom field source are property sheets registerd for a type associated
with a listing
- Custom fields must be indexed in solr (i.e. everything but `Text`)
- If different sheets for the same type index to the same field, only the
last field is returned.
GET /@listing-custom-fields HTTP/1.1
"""
def reply(self):
solr_fields = {}
storage = PropertySheetSchemaStorage()
if not storage:
return solr_fields
for listing_name, slot_provider in LISTING_TO_SLOTS.items():
fields_by_listing = {}
for slot_name in slot_provider():
definition = storage.query(slot_name)
if definition is not None:
fields_by_listing.update(definition.
get_solr_dynamic_field_schema())
if fields_by_listing:
solr_fields[listing_name] = {'properties': fields_by_listing}
return solr_fields
| from opengever.propertysheets.assignment import get_document_assignment_slots
from opengever.propertysheets.assignment import get_dossier_assignment_slots
from opengever.propertysheets.storage import PropertySheetSchemaStorage
from plone.restapi.services import Service
LISTING_TO_SLOTS = {
u'dossiers': get_dossier_assignment_slots,
u'documents': get_document_assignment_slots,
}
class ListingCustomFieldsGet(Service):
"""API Endpoint which returns custom fields available for listings.
It returns a nested data structure with custom fields for each supported
listing, if available.
Custom fields are provided as follows:
- Custom field source are property sheets registerd for a type associated
with a listing
- Custom fields must be indexed in solr (i.e. everything but `Text`)
- If different sheets for the same type index to the same field, only the
last field is returned.
GET /@listing-custom-fields HTTP/1.1
"""
def reply(self):
solr_fields = {}
storage = PropertySheetSchemaStorage()
if not storage:
return solr_fields
for listing_name, slot_provider in LISTING_TO_SLOTS.items():
fields_by_listing = {}
for slot_name in slot_provider():
definition = storage.query(slot_name)
if definition is not None:
fields_by_listing.update(
definition.get_solr_dynamic_field_schema()
)
if fields_by_listing:
solr_fields[listing_name] = {
'properties': fields_by_listing
}
return solr_fields
| [
2,
3,
4,
5,
6
] |
156 | 43e721ac45570e4f9ab9c1970abee3da6db40afa | <mask token>
@six.add_metaclass(abc.ABCMeta)
class ParallelMigrationStrategy(base.BaseStrategy):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, config, osc=None):
super(ParallelMigrationStrategy, self).__init__(config, osc)
def pre_execute(self):
pass
def do_execute(self):
params = self.input_parameters.params
for key, value in params.iteritems():
for resource_id, dict in value.items():
resource_status = dict.get(self.STATUS)
dst_hostname = dict.get(self.DST_HOSTNAME)
dst_type = dict.get(self.DST_TYPE)
if key == self.VM:
if resource_status == self.ACTIVE:
self._live_migration(resource_id, dst_hostname)
elif resource_status == self.SHUTOFF:
self._cold_migration(resource_id)
else:
raise Exception('Wrong status: %s.' % resource_status)
elif key == self.VOLUME:
if resource_status == self.IN_USE:
self._volume_update(resource_id, dst_type)
elif resource_status == self.AVAILABLE:
self._volume_retype(resource_id, dst_type)
else:
raise Exception('Wrong status: %s.' % resource_status)
else:
raise Exception('Wrong key: %s.' % key)
def _live_migration(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.LIVE_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _cold_migration(self, resource_id):
self.solution.add_action(action_type=self.COLD_MIGRATION,
resource_id=resource_id, input_parameters={})
def _volume_update(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_UPDATE,
resource_id=resource_id, input_parameters=parameters)
def _volume_migrate(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.VOLUME_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _volume_retype(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_RETYPE,
resource_id=resource_id, input_parameters=parameters)
def post_execute(self):
pass
@classmethod
def get_goal_name(cls):
return 'zone_migration'
@classmethod
def get_name(cls):
return 'parallel_migration'
@classmethod
def get_display_name(cls):
return _('Parallel migration strategy')
@classmethod
def get_translatable_display_name(cls):
return 'Parallel migration strategy'
@classmethod
def get_schema(cls):
return {'properties': {'params': {'description': '', 'type':
'object', 'default': {'vm': {'instance_id1': {'status':
'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':
{'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':
'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {
'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}
| <mask token>
@six.add_metaclass(abc.ABCMeta)
class ParallelMigrationStrategy(base.BaseStrategy):
VM = 'vm'
VOLUME = 'volume'
ACTIVE = 'active'
SHUTOFF = 'shutoff'
AVAILABLE = 'available'
IN_USE = 'in-use'
LIVE_MIGRATION = 'live_migration'
COLD_MIGRATION = 'cold_migration'
VOLUME_MIGRATION = 'volume_migration'
VOLUME_RETYPE = 'volume_retype'
VOLUME_UPDATE = 'volume_update'
STATUS = 'status'
DST_HOSTNAME = 'dst_hostname'
DST_TYPE = 'dst_type'
def __init__(self, config, osc=None):
super(ParallelMigrationStrategy, self).__init__(config, osc)
def pre_execute(self):
pass
def do_execute(self):
params = self.input_parameters.params
for key, value in params.iteritems():
for resource_id, dict in value.items():
resource_status = dict.get(self.STATUS)
dst_hostname = dict.get(self.DST_HOSTNAME)
dst_type = dict.get(self.DST_TYPE)
if key == self.VM:
if resource_status == self.ACTIVE:
self._live_migration(resource_id, dst_hostname)
elif resource_status == self.SHUTOFF:
self._cold_migration(resource_id)
else:
raise Exception('Wrong status: %s.' % resource_status)
elif key == self.VOLUME:
if resource_status == self.IN_USE:
self._volume_update(resource_id, dst_type)
elif resource_status == self.AVAILABLE:
self._volume_retype(resource_id, dst_type)
else:
raise Exception('Wrong status: %s.' % resource_status)
else:
raise Exception('Wrong key: %s.' % key)
def _live_migration(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.LIVE_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _cold_migration(self, resource_id):
self.solution.add_action(action_type=self.COLD_MIGRATION,
resource_id=resource_id, input_parameters={})
def _volume_update(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_UPDATE,
resource_id=resource_id, input_parameters=parameters)
def _volume_migrate(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.VOLUME_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _volume_retype(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_RETYPE,
resource_id=resource_id, input_parameters=parameters)
def post_execute(self):
pass
@classmethod
def get_goal_name(cls):
return 'zone_migration'
@classmethod
def get_name(cls):
return 'parallel_migration'
@classmethod
def get_display_name(cls):
return _('Parallel migration strategy')
@classmethod
def get_translatable_display_name(cls):
return 'Parallel migration strategy'
@classmethod
def get_schema(cls):
return {'properties': {'params': {'description': '', 'type':
'object', 'default': {'vm': {'instance_id1': {'status':
'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':
{'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':
'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {
'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}
| <mask token>
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ParallelMigrationStrategy(base.BaseStrategy):
VM = 'vm'
VOLUME = 'volume'
ACTIVE = 'active'
SHUTOFF = 'shutoff'
AVAILABLE = 'available'
IN_USE = 'in-use'
LIVE_MIGRATION = 'live_migration'
COLD_MIGRATION = 'cold_migration'
VOLUME_MIGRATION = 'volume_migration'
VOLUME_RETYPE = 'volume_retype'
VOLUME_UPDATE = 'volume_update'
STATUS = 'status'
DST_HOSTNAME = 'dst_hostname'
DST_TYPE = 'dst_type'
def __init__(self, config, osc=None):
super(ParallelMigrationStrategy, self).__init__(config, osc)
def pre_execute(self):
pass
def do_execute(self):
params = self.input_parameters.params
for key, value in params.iteritems():
for resource_id, dict in value.items():
resource_status = dict.get(self.STATUS)
dst_hostname = dict.get(self.DST_HOSTNAME)
dst_type = dict.get(self.DST_TYPE)
if key == self.VM:
if resource_status == self.ACTIVE:
self._live_migration(resource_id, dst_hostname)
elif resource_status == self.SHUTOFF:
self._cold_migration(resource_id)
else:
raise Exception('Wrong status: %s.' % resource_status)
elif key == self.VOLUME:
if resource_status == self.IN_USE:
self._volume_update(resource_id, dst_type)
elif resource_status == self.AVAILABLE:
self._volume_retype(resource_id, dst_type)
else:
raise Exception('Wrong status: %s.' % resource_status)
else:
raise Exception('Wrong key: %s.' % key)
def _live_migration(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.LIVE_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _cold_migration(self, resource_id):
self.solution.add_action(action_type=self.COLD_MIGRATION,
resource_id=resource_id, input_parameters={})
def _volume_update(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_UPDATE,
resource_id=resource_id, input_parameters=parameters)
def _volume_migrate(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.VOLUME_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _volume_retype(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_RETYPE,
resource_id=resource_id, input_parameters=parameters)
def post_execute(self):
pass
@classmethod
def get_goal_name(cls):
return 'zone_migration'
@classmethod
def get_name(cls):
return 'parallel_migration'
@classmethod
def get_display_name(cls):
return _('Parallel migration strategy')
@classmethod
def get_translatable_display_name(cls):
return 'Parallel migration strategy'
@classmethod
def get_schema(cls):
return {'properties': {'params': {'description': '', 'type':
'object', 'default': {'vm': {'instance_id1': {'status':
'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':
{'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':
'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {
'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}
| import abc
import six
from oslo_log import log
from watcher._i18n import _
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ParallelMigrationStrategy(base.BaseStrategy):
VM = 'vm'
VOLUME = 'volume'
ACTIVE = 'active'
SHUTOFF = 'shutoff'
AVAILABLE = 'available'
IN_USE = 'in-use'
LIVE_MIGRATION = 'live_migration'
COLD_MIGRATION = 'cold_migration'
VOLUME_MIGRATION = 'volume_migration'
VOLUME_RETYPE = 'volume_retype'
VOLUME_UPDATE = 'volume_update'
STATUS = 'status'
DST_HOSTNAME = 'dst_hostname'
DST_TYPE = 'dst_type'
def __init__(self, config, osc=None):
super(ParallelMigrationStrategy, self).__init__(config, osc)
def pre_execute(self):
pass
def do_execute(self):
params = self.input_parameters.params
for key, value in params.iteritems():
for resource_id, dict in value.items():
resource_status = dict.get(self.STATUS)
dst_hostname = dict.get(self.DST_HOSTNAME)
dst_type = dict.get(self.DST_TYPE)
if key == self.VM:
if resource_status == self.ACTIVE:
self._live_migration(resource_id, dst_hostname)
elif resource_status == self.SHUTOFF:
self._cold_migration(resource_id)
else:
raise Exception('Wrong status: %s.' % resource_status)
elif key == self.VOLUME:
if resource_status == self.IN_USE:
self._volume_update(resource_id, dst_type)
elif resource_status == self.AVAILABLE:
self._volume_retype(resource_id, dst_type)
else:
raise Exception('Wrong status: %s.' % resource_status)
else:
raise Exception('Wrong key: %s.' % key)
def _live_migration(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.LIVE_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _cold_migration(self, resource_id):
self.solution.add_action(action_type=self.COLD_MIGRATION,
resource_id=resource_id, input_parameters={})
def _volume_update(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_UPDATE,
resource_id=resource_id, input_parameters=parameters)
def _volume_migrate(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(action_type=self.VOLUME_MIGRATION,
resource_id=resource_id, input_parameters=parameters)
def _volume_retype(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(action_type=self.VOLUME_RETYPE,
resource_id=resource_id, input_parameters=parameters)
def post_execute(self):
pass
@classmethod
def get_goal_name(cls):
return 'zone_migration'
@classmethod
def get_name(cls):
return 'parallel_migration'
@classmethod
def get_display_name(cls):
return _('Parallel migration strategy')
@classmethod
def get_translatable_display_name(cls):
return 'Parallel migration strategy'
@classmethod
def get_schema(cls):
return {'properties': {'params': {'description': '', 'type':
'object', 'default': {'vm': {'instance_id1': {'status':
'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':
{'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':
'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {
'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from oslo_log import log
from watcher._i18n import _
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ParallelMigrationStrategy(base.BaseStrategy):
VM = "vm"
VOLUME = "volume"
ACTIVE = "active"
SHUTOFF = "shutoff"
AVAILABLE = "available"
IN_USE = "in-use"
LIVE_MIGRATION = "live_migration"
COLD_MIGRATION = "cold_migration"
VOLUME_MIGRATION = "volume_migration"
VOLUME_RETYPE = "volume_retype"
VOLUME_UPDATE = "volume_update"
STATUS = "status"
DST_HOSTNAME = "dst_hostname"
DST_TYPE = "dst_type"
def __init__(self, config, osc=None):
super(ParallelMigrationStrategy, self).__init__(config, osc)
def pre_execute(self):
pass
def do_execute(self):
params = self.input_parameters.params
for key, value in params.iteritems():
for resource_id, dict in value.items():
resource_status = dict.get(self.STATUS)
dst_hostname = dict.get(self.DST_HOSTNAME)
dst_type = dict.get(self.DST_TYPE)
if key == self.VM:
if resource_status == self.ACTIVE:
# do live migration
self._live_migration(resource_id, dst_hostname)
elif resource_status == self.SHUTOFF:
# do cold migration
# cold migration can not specify dest_hostname
self._cold_migration(resource_id)
else:
raise Exception("Wrong status: %s." % resource_status)
elif key == self.VOLUME:
if resource_status == self.IN_USE:
# do novavolume update
self._volume_update(resource_id, dst_type)
elif resource_status == self.AVAILABLE:
# detached volume with no snapshots
# do cinder migrate
self._volume_retype(resource_id, dst_type)
else:
raise Exception("Wrong status: %s." % resource_status)
else:
raise Exception("Wrong key: %s." % key)
def _live_migration(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(
action_type=self.LIVE_MIGRATION,
resource_id=resource_id,
input_parameters=parameters)
def _cold_migration(self, resource_id):
self.solution.add_action(
action_type=self.COLD_MIGRATION,
resource_id=resource_id,
input_parameters={})
def _volume_update(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(
action_type=self.VOLUME_UPDATE,
resource_id=resource_id,
input_parameters=parameters)
def _volume_migrate(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(
action_type=self.VOLUME_MIGRATION,
resource_id=resource_id,
input_parameters=parameters)
def _volume_retype(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(
action_type=self.VOLUME_RETYPE,
resource_id=resource_id,
input_parameters=parameters)
def post_execute(self):
pass
@classmethod
def get_goal_name(cls):
return "zone_migration"
@classmethod
def get_name(cls):
return "parallel_migration"
@classmethod
def get_display_name(cls):
return _("Parallel migration strategy")
@classmethod
def get_translatable_display_name(cls):
return "Parallel migration strategy"
@classmethod
def get_schema(cls):
return {
"properties": {
"params": {
"description": "",
"type": "object",
"default":
{"vm":
{"instance_id1":
{"status": "active",
"dst_hostname": "vm_dest_hostname1"},
"instance_id2":
{"status": "shutoff"}},
"volume":
{"cinder_id1":
{"status": "available",
"dst_type": "volume_dst_type"},
"cinder_id2":
{"status": "in-use",
"dst_type": "volume_dst_type"}}}
}
}
}
| [
15,
16,
17,
18,
19
] |
157 | 07ac061d7d1eaf23b6c95fbcbf6753f25e568188 | <mask token>
| <mask token>
def loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True
):
data, rate = librosa.load(filePath, sr=None)
if reduceNoise:
noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,
noise_clip=data[0:10000], verbose=False)
noiseRemovedData = noisereduce.reduce_noise(audio_clip=
noiseRemovedData, noise_clip=data[-10000:], verbose=False)
data = noiseRemovedData
maxDataLength = int(maxAudioLength * rate)
padding = []
if data.shape[0] > maxDataLength:
raise ValueError('Max audio length breached')
else:
paddingDataLength = maxDataLength - data.shape[0]
padding = [(0) for i in range(paddingDataLength)]
leftSpeakerSound = data
audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))
if savePlot:
fig, ax = plt.subplots()
ax.plot(audioWithPadding)
fig.suptitle(fileName)
fig.savefig('./output_img/wav/' + fileName + '_wav.png')
plt.close(fig)
return audioWithPadding, rate
| from scipy.io import wavfile
import numpy
from matplotlib import pyplot as plt
import librosa
import noisereduce
def loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True
):
data, rate = librosa.load(filePath, sr=None)
if reduceNoise:
noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,
noise_clip=data[0:10000], verbose=False)
noiseRemovedData = noisereduce.reduce_noise(audio_clip=
noiseRemovedData, noise_clip=data[-10000:], verbose=False)
data = noiseRemovedData
maxDataLength = int(maxAudioLength * rate)
padding = []
if data.shape[0] > maxDataLength:
raise ValueError('Max audio length breached')
else:
paddingDataLength = maxDataLength - data.shape[0]
padding = [(0) for i in range(paddingDataLength)]
leftSpeakerSound = data
audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))
if savePlot:
fig, ax = plt.subplots()
ax.plot(audioWithPadding)
fig.suptitle(fileName)
fig.savefig('./output_img/wav/' + fileName + '_wav.png')
plt.close(fig)
return audioWithPadding, rate
| from scipy.io import wavfile
import numpy
from matplotlib import pyplot as plt
import librosa
import noisereduce
def loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise = True):
# Read file
# rate, data = wavfile.read(filePath)
# print(filePath, rate, data.shape, "audio length", data.shape[0] / rate, data[0])
data, rate = librosa.load(filePath, sr=None)
# print(filePath, rate, data.shape, "librosa audio length", data.shape[0] / rate, data[0])
if reduceNoise:
noiseRemovedData = noisereduce.reduce_noise(audio_clip=data, noise_clip=data[0:10000], verbose=False)
noiseRemovedData = noisereduce.reduce_noise(audio_clip=noiseRemovedData, noise_clip=data[-10000:], verbose=False)
data = noiseRemovedData
maxDataLength = int(maxAudioLength * rate)
padding = []
if data.shape[0] > maxDataLength:
raise ValueError("Max audio length breached")
else:
paddingDataLength = maxDataLength - data.shape[0]
padding = [0 for i in range(paddingDataLength)]
# data is stereo sound. take left speaker only
leftSpeakerSound = data # data[:,0]
# print("leftSpeakerSound.shape", leftSpeakerSound.shape)
audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))
# print("audioWithPadding.shape", audioWithPadding.shape)
if savePlot:
fig, ax = plt.subplots()
ax.plot(audioWithPadding)
fig.suptitle(fileName)
fig.savefig("./output_img/wav/" + fileName + "_wav.png")
plt.close(fig)
return audioWithPadding, rate | null | [
0,
1,
2,
3
] |
158 | bb481fa038835abc6d61a4985b1e30c7c00bff96 | <mask token>
| def pixels_generator(w, h):
i = 0
while i < w * h:
yield divmod(i, w)
i = i + 1
| def pixels_generator(w, h):
i = 0
while i < (w * h):
yield divmod(i, w)
i = i + 1
| null | null | [
0,
1,
2
] |
159 | b6824251b1165ca6c66049d40c79fccee6bc7d3a | <mask token>
class Consignor(db.Model):
<mask token>
<mask token>
<mask token>
def __init__(self):
pass
<mask token>
class Convoy(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Convoy %s>' % str(self.id)
| <mask token>
class Transporter(db.Model):
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self):
pass
def __repr__(self):
return '<Transporter %s>' % str(self.id)
class Consignor(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
indents = db.relationship('Indent', lazy='dynamic')
def __init__(self):
pass
def __repr__(self):
return '<Consignor %s>' % str(self.id)
class Convoy(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Convoy %s>' % str(self.id)
| <mask token>
class Account(db.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, acc, pwd):
self.acc = acc
self.pwd = pwd
def __repr__(self):
return '<Account %s %s>' % (str(self.id), self.acc)
class Transporter(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
d_lic = db.Column(db.String(50))
v_lic = db.Column(db.String(50))
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Transporter %s>' % str(self.id)
class Consignor(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
indents = db.relationship('Indent', lazy='dynamic')
def __init__(self):
pass
def __repr__(self):
return '<Consignor %s>' % str(self.id)
class Convoy(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Convoy %s>' % str(self.id)
| <mask token>
class Account(db.Model):
id = db.Column(db.Integer, primary_key=True)
acc = db.Column(db.String(50), unique=True)
pwd = db.Column(db.String(50))
name = db.Column(db.String(20))
sex = db.Column(db.SmallInteger)
idno = db.Column(db.String(20))
phone = db.Column(db.String(20))
crttime = db.Column(db.TIMESTAMP)
crtip = db.Column(db.String(50))
crtmac = db.Column(db.String(50))
crtplat = db.Column(db.SmallInteger)
crtrole = db.Column(db.SmallInteger)
lasttime = db.Column(db.TIMESTAMP)
lastip = db.Column(db.String(50))
lastmac = db.Column(db.String(50))
lastplat = db.Column(db.SmallInteger)
lastrole = db.Column(db.SmallInteger)
transporter = db.relationship('Transporter', uselist=False)
consignor = db.relationship('Consignor', uselist=False)
def __init__(self, acc, pwd):
self.acc = acc
self.pwd = pwd
def __repr__(self):
return '<Account %s %s>' % (str(self.id), self.acc)
class Transporter(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
d_lic = db.Column(db.String(50))
v_lic = db.Column(db.String(50))
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Transporter %s>' % str(self.id)
class Consignor(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
indents = db.relationship('Indent', lazy='dynamic')
def __init__(self):
pass
def __repr__(self):
return '<Consignor %s>' % str(self.id)
class Convoy(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Convoy %s>' % str(self.id)
| from .. import db
class Account(db.Model):
id = db.Column(db.Integer, primary_key=True)
acc = db.Column(db.String(50), unique=True)#TODO 调整长度
pwd = db.Column(db.String(50))#TODO 调整长度
name = db.Column(db.String(20))
sex = db.Column(db.SmallInteger)
idno = db.Column(db.String(20))
phone = db.Column(db.String(20))
crttime = db.Column(db.TIMESTAMP)
crtip = db.Column(db.String(50))
crtmac = db.Column(db.String(50))
crtplat = db.Column(db.SmallInteger)
crtrole = db.Column(db.SmallInteger)
lasttime = db.Column(db.TIMESTAMP)
lastip = db.Column(db.String(50))
lastmac = db.Column(db.String(50))
lastplat = db.Column(db.SmallInteger)
lastrole = db.Column(db.SmallInteger)
transporter = db.relationship('Transporter', uselist=False)
consignor = db.relationship('Consignor', uselist=False)
def __init__(self, acc, pwd):
self.acc = acc
self.pwd = pwd
def __repr__(self):
return '<Account %s %s>'%(str(self.id), self.acc)
class Transporter(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
d_lic = db.Column(db.String(50)) #TODO 长度
v_lic = db.Column(db.String(50))
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Transporter %s>'%str(self.id)
class Consignor(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
indents = db.relationship('Indent', lazy='dynamic')
def __init__(self):
pass
def __repr__(self):
return '<Consignor %s>'%str(self.id)
class Convoy(db.Model):
id = db.Column(db.Integer, db.ForeignKey('account.id'), primary_key=True)
account = db.relationship('Account', uselist=False)
def __init__(self):
pass
def __repr__(self):
return '<Convoy %s>'%str(self.id)
| [
6,
11,
15,
16,
18
] |
160 | 485f85ec5e3f38148978453ea5e7f9a54eb310e1 | <mask token>
class Table(DashComponent):
def __init__(self, plot_factory, df, title='Table'):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div
(id='output-data-upload')], type='dot')])
<mask token>
<mask token>
| <mask token>
class Table(DashComponent):
def __init__(self, plot_factory, df, title='Table'):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div
(id='output-data-upload')], type='dot')])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
Display the selected data i the table.
:param graphPoints: Data that is currently displayed
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
<mask token>
| <mask token>
class Table(DashComponent):
def __init__(self, plot_factory, df, title='Table'):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div
(id='output-data-upload')], type='dot')])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
Display the selected data i the table.
:param graphPoints: Data that is currently displayed
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
def set_data(self, df):
"""
Loads in possible parameters for the x and y-axis in dropdown from the data.
:param dummy: dummy html property
:return: Possible options for dropdown x-axis.
"""
self.df = df
| import dash_table
import pandas as pd
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from dash_oop_components import DashComponent
import dash_table
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp
from src.main.python.oop.Dataframe import Dataframe
from src.main.python.oop.Figure_factories import VisualFactories
class Table(DashComponent):
def __init__(self, plot_factory, df, title='Table'):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div
(id='output-data-upload')], type='dot')])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
Display the selected data i the table.
:param graphPoints: Data that is currently displayed
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
def set_data(self, df):
"""
Loads in possible parameters for the x and y-axis in dropdown from the data.
:param dummy: dummy html property
:return: Possible options for dropdown x-axis.
"""
self.df = df
| import dash_table
import pandas as pd
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from dash_oop_components import DashComponent
import dash_table
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp
from src.main.python.oop.Dataframe import Dataframe
from src.main.python.oop.Figure_factories import VisualFactories
class Table(DashComponent):
def __init__(self, plot_factory, df, title="Table"):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([
dcc.Loading(
id="loading-icon3",
children=[html.Div(id='output-data-upload')],
type="dot",
)
])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(
Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
Display the selected data i the table.
:param graphPoints: Data that is currently displayed
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
def set_data(self, df):
"""
Loads in possible parameters for the x and y-axis in dropdown from the data.
:param dummy: dummy html property
:return: Possible options for dropdown x-axis.
"""
self.df = df | [
3,
4,
5,
6,
7
] |
161 | bdc9856bfc61127d6bca31658b1faf3da09f5b86 | <mask token>
| <mask token>
with open('ACI PostMan Variable Values.csv', encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print(row)
print("Let's configure the subnets on the Old BD")
print("First Let's log in")
print('What is the ip address of the APIC?')
<mask token>
print('whats the name of the tenant?')
<mask token>
print('what is the name of the app profile?')
<mask token>
print('what is the name of the old BD?')
<mask token>
print('what is the name of the network?')
<mask token>
print('what is the name of the network IP?')
<mask token>
print('what is the name of the netmask?')
<mask token>
print('what is the name of the epg?')
<mask token>
print(response.text.encode('utf8'))
<mask token>
print(response.text.encode('utf8'))
<mask token>
print(response.text.encode('utf8'))
| <mask token>
with open('ACI PostMan Variable Values.csv', encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print(row)
print("Let's configure the subnets on the Old BD")
print("First Let's log in")
print('What is the ip address of the APIC?')
apic = input()
user = getpass('What is you username?')
password = getpass('What is your password?')
print('whats the name of the tenant?')
tenant = input()
print('what is the name of the app profile?')
app_profile = input()
print('what is the name of the old BD?')
old_bd = input()
print('what is the name of the network?')
subnet_network = input()
print('what is the name of the network IP?')
subnet_ip = input()
print('what is the name of the netmask?')
subnet_mask = input()
print('what is the name of the epg?')
epg = input()
s = requests.session()
url = 'https://%s/api/aaaLogin.json' % apic
payload = (
'{\r\n\t"aaaUser":{\r\n\t\t"attributes":{\r\n\t\t\t"name": "%s",\r\n\t\t\t"pwd":"%s"\r\n\t\t}\r\n\t}\r\n}'
% (user, password))
headers = {'Content-Type': 'application/json'}
response = s.request('POST', url, headers=headers, data=payload, verify=False)
print(response.text.encode('utf8'))
url = 'https://%s/api/node/mo/uni/tn-%s/BD-%s/subnet-[%s.%s/%s].json' % (apic,
tenant, old_bd, subnet_network, subnet_ip, subnet_mask)
payload = (
'{"fvSubnet":{"attributes":{"dn":"uni/tn-%s/BD-%s/subnet-[%s.%s/%s]","ip":"%s.%s/%s","scope":"public","rn":"subnet-[%s.%s/%s]","status":"created"},"children":[]}}\r\n'
% (tenant, old_bd, subnet_network, subnet_ip, subnet_mask,
subnet_network, subnet_ip, subnet_mask, subnet_network, subnet_ip,
subnet_mask))
headers = {'Content-Type': 'application/json'}
response = s.request('POST', url, headers=headers, data=payload, verify=False)
print(response.text.encode('utf8'))
url = 'https://%s/api/node/mo/uni/tn-%s/ap-%s/epg-%s.json' % (apic, tenant,
app_profile, epg)
payload = (
'{"fvAEPg":{"attributes":{"dn":"uni/tn-%s/ap-%s/epg-%s","name":"%s","rn":"%s","status":"created"},"children":[{"fvRsBd":{"attributes":{"tnFvBDName":"%s","status":"created,modified"},"children":[]}}]}}\r\n'
% (tenant, app_profile, epg, epg, epg, old_bd))
headers = {'Content-Type': 'application/json'}
response = s.request('POST', url, headers=headers, data=payload, verify=False)
print(response.text.encode('utf8'))
| import requests
from getpass import getpass
import csv
with open('ACI PostMan Variable Values.csv', encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print(row)
print("Let's configure the subnets on the Old BD")
print("First Let's log in")
print('What is the ip address of the APIC?')
apic = input()
user = getpass('What is you username?')
password = getpass('What is your password?')
print('whats the name of the tenant?')
tenant = input()
print('what is the name of the app profile?')
app_profile = input()
print('what is the name of the old BD?')
old_bd = input()
print('what is the name of the network?')
subnet_network = input()
print('what is the name of the network IP?')
subnet_ip = input()
print('what is the name of the netmask?')
subnet_mask = input()
print('what is the name of the epg?')
epg = input()
s = requests.session()
url = 'https://%s/api/aaaLogin.json' % apic
payload = (
'{\r\n\t"aaaUser":{\r\n\t\t"attributes":{\r\n\t\t\t"name": "%s",\r\n\t\t\t"pwd":"%s"\r\n\t\t}\r\n\t}\r\n}'
% (user, password))
headers = {'Content-Type': 'application/json'}
response = s.request('POST', url, headers=headers, data=payload, verify=False)
print(response.text.encode('utf8'))
url = 'https://%s/api/node/mo/uni/tn-%s/BD-%s/subnet-[%s.%s/%s].json' % (apic,
tenant, old_bd, subnet_network, subnet_ip, subnet_mask)
payload = (
'{"fvSubnet":{"attributes":{"dn":"uni/tn-%s/BD-%s/subnet-[%s.%s/%s]","ip":"%s.%s/%s","scope":"public","rn":"subnet-[%s.%s/%s]","status":"created"},"children":[]}}\r\n'
% (tenant, old_bd, subnet_network, subnet_ip, subnet_mask,
subnet_network, subnet_ip, subnet_mask, subnet_network, subnet_ip,
subnet_mask))
headers = {'Content-Type': 'application/json'}
response = s.request('POST', url, headers=headers, data=payload, verify=False)
print(response.text.encode('utf8'))
url = 'https://%s/api/node/mo/uni/tn-%s/ap-%s/epg-%s.json' % (apic, tenant,
app_profile, epg)
payload = (
'{"fvAEPg":{"attributes":{"dn":"uni/tn-%s/ap-%s/epg-%s","name":"%s","rn":"%s","status":"created"},"children":[{"fvRsBd":{"attributes":{"tnFvBDName":"%s","status":"created,modified"},"children":[]}}]}}\r\n'
% (tenant, app_profile, epg, epg, epg, old_bd))
headers = {'Content-Type': 'application/json'}
response = s.request('POST', url, headers=headers, data=payload, verify=False)
print(response.text.encode('utf8'))
| #! /user/bin/env python
import requests
from getpass import getpass
import csv
# Set up the variables
with open("ACI PostMan Variable Values.csv", encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print(row)
print("Let's configure the subnets on the Old BD")
print("First Let's log in")
print('What is the ip address of the APIC?')
apic = input()
user = getpass('What is you username?')
password = getpass('What is your password?')
print('whats the name of the tenant?')
tenant = input()
print('what is the name of the app profile?')
app_profile = input()
print('what is the name of the old BD?')
old_bd = input()
print('what is the name of the network?')
subnet_network = input()
print('what is the name of the network IP?')
subnet_ip = input()
print('what is the name of the netmask?')
subnet_mask = input()
print('what is the name of the epg?')
epg = input()
# set session persistance for all the API calls
s = requests.session()
# first call to authenticate into the apic
url = "https://%s/api/aaaLogin.json" % (apic)
payload = "{\r\n\t\"aaaUser\":{\r\n\t\t\"attributes\":{\r\n\t\t\t\"name\": \"%s\",\r\n\t\t\t\"pwd\":\"%s\"\r\n\t\t}\r\n\t}\r\n}" % (user, password)
headers = {
'Content-Type': 'application/json'
}
response = s.request("POST", url, headers=headers, data = payload, verify = False)
print(response.text.encode('utf8'))
# Create Subnets under Old BD
url = "https://%s/api/node/mo/uni/tn-%s/BD-%s/subnet-[%s.%s/%s].json" % (apic, tenant, old_bd, subnet_network, subnet_ip, subnet_mask)
payload = "{\"fvSubnet\":{\"attributes\":{\"dn\":\"uni/tn-%s/BD-%s/subnet-[%s.%s/%s]\",\"ip\":\"%s.%s/%s\",\"scope\":\"public\",\"rn\":\"subnet-[%s.%s/%s]\",\"status\":\"created\"},\"children\":[]}}\r\n" % (tenant, old_bd, subnet_network, subnet_ip, subnet_mask, subnet_network, subnet_ip, subnet_mask, subnet_network, subnet_ip, subnet_mask)
headers = {
'Content-Type': 'application/json'
}
response = s.request("POST", url, headers=headers, data = payload, verify = False)
print(response.text.encode('utf8'))
# create EPG's for demo
url = "https://%s/api/node/mo/uni/tn-%s/ap-%s/epg-%s.json" % (apic, tenant, app_profile, epg)
payload = "{\"fvAEPg\":{\"attributes\":{\"dn\":\"uni/tn-%s/ap-%s/epg-%s\",\"name\":\"%s\",\"rn\":\"%s\",\"status\":\"created\"},\"children\":[{\"fvRsBd\":{\"attributes\":{\"tnFvBDName\":\"%s\",\"status\":\"created,modified\"},\"children\":[]}}]}}\r\n" % (tenant, app_profile, epg, epg, epg, old_bd)
headers = {
'Content-Type': 'application/json'
}
response = s.request("POST", url, headers=headers, data = payload, verify = False)
print(response.text.encode('utf8'))
| [
0,
1,
2,
3,
4
] |
162 | debd51b923a6fc3b278a5083478bfb271a5913a8 | <mask token>
class LocalizationTests(unittest.TestCase):
def test_default(self):
logging.info('*** default ***')
localization = Localization()
text = 'hello world'
self.assertEqual(localization._(text), text)
<mask token>
def test_global(self):
logging.info('*** global ***')
settings = {'localized': {'hello world': "What'up, Doc?",
'another string': 'Bye!'}, 'space': {'title': 'space name',
'participants': ['[email protected]']}, 'server': {'url':
'http://to.no.where', 'hook': '/hook', 'binding': '0.0.0.0',
'port': 8080}}
context = Context(settings)
l10n.set_context(context)
self.assertEqual(l10n.actual_strings, {})
self.assertEqual(_('hello world'), "What'up, Doc?")
self.assertEqual(l10n.actual_strings, {'hello world': "What'up, Doc?"})
self.assertEqual(_('not localized'), 'not localized')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'not localized': 'not localized'})
self.assertEqual(_('another string'), 'Bye!')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'another string': 'Bye!', 'not localized':
'not localized'})
<mask token>
| <mask token>
class LocalizationTests(unittest.TestCase):
def test_default(self):
logging.info('*** default ***')
localization = Localization()
text = 'hello world'
self.assertEqual(localization._(text), text)
def test_init(self):
logging.info('*** init ***')
settings = {'localized': {'hello world': "What'up, Doc?",
'another string': 'Bye!'}, 'space': {'title': 'space name',
'participants': ['[email protected]']}, 'server': {'url':
'http://to.no.where', 'hook': '/hook', 'binding': '0.0.0.0',
'port': 8080}}
context = Context(settings)
my_localization = Localization(context)
self.assertEqual(my_localization.context, context)
self.assertEqual(my_localization._('hello world'), "What'up, Doc?")
self.assertEqual(my_localization._('not localized'), 'not localized')
self.assertEqual(my_localization.actual_strings, {'hello world':
"What'up, Doc?", 'not localized': 'not localized'})
def test_global(self):
logging.info('*** global ***')
settings = {'localized': {'hello world': "What'up, Doc?",
'another string': 'Bye!'}, 'space': {'title': 'space name',
'participants': ['[email protected]']}, 'server': {'url':
'http://to.no.where', 'hook': '/hook', 'binding': '0.0.0.0',
'port': 8080}}
context = Context(settings)
l10n.set_context(context)
self.assertEqual(l10n.actual_strings, {})
self.assertEqual(_('hello world'), "What'up, Doc?")
self.assertEqual(l10n.actual_strings, {'hello world': "What'up, Doc?"})
self.assertEqual(_('not localized'), 'not localized')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'not localized': 'not localized'})
self.assertEqual(_('another string'), 'Bye!')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'another string': 'Bye!', 'not localized':
'not localized'})
<mask token>
| <mask token>
class LocalizationTests(unittest.TestCase):
def test_default(self):
logging.info('*** default ***')
localization = Localization()
text = 'hello world'
self.assertEqual(localization._(text), text)
def test_init(self):
logging.info('*** init ***')
settings = {'localized': {'hello world': "What'up, Doc?",
'another string': 'Bye!'}, 'space': {'title': 'space name',
'participants': ['[email protected]']}, 'server': {'url':
'http://to.no.where', 'hook': '/hook', 'binding': '0.0.0.0',
'port': 8080}}
context = Context(settings)
my_localization = Localization(context)
self.assertEqual(my_localization.context, context)
self.assertEqual(my_localization._('hello world'), "What'up, Doc?")
self.assertEqual(my_localization._('not localized'), 'not localized')
self.assertEqual(my_localization.actual_strings, {'hello world':
"What'up, Doc?", 'not localized': 'not localized'})
def test_global(self):
logging.info('*** global ***')
settings = {'localized': {'hello world': "What'up, Doc?",
'another string': 'Bye!'}, 'space': {'title': 'space name',
'participants': ['[email protected]']}, 'server': {'url':
'http://to.no.where', 'hook': '/hook', 'binding': '0.0.0.0',
'port': 8080}}
context = Context(settings)
l10n.set_context(context)
self.assertEqual(l10n.actual_strings, {})
self.assertEqual(_('hello world'), "What'up, Doc?")
self.assertEqual(l10n.actual_strings, {'hello world': "What'up, Doc?"})
self.assertEqual(_('not localized'), 'not localized')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'not localized': 'not localized'})
self.assertEqual(_('another string'), 'Bye!')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'another string': 'Bye!', 'not localized':
'not localized'})
if __name__ == '__main__':
Context.set_logger()
sys.exit(unittest.main())
| import unittest
import gc
import logging
import os
import mock
import sys
import time
from shellbot import Context, Engine
from shellbot.i18n import Localization, localization as l10n, _
class LocalizationTests(unittest.TestCase):
def test_default(self):
logging.info('*** default ***')
localization = Localization()
text = 'hello world'
self.assertEqual(localization._(text), text)
def test_init(self):
logging.info('*** init ***')
settings = {'localized': {'hello world': "What'up, Doc?",
'another string': 'Bye!'}, 'space': {'title': 'space name',
'participants': ['[email protected]']}, 'server': {'url':
'http://to.no.where', 'hook': '/hook', 'binding': '0.0.0.0',
'port': 8080}}
context = Context(settings)
my_localization = Localization(context)
self.assertEqual(my_localization.context, context)
self.assertEqual(my_localization._('hello world'), "What'up, Doc?")
self.assertEqual(my_localization._('not localized'), 'not localized')
self.assertEqual(my_localization.actual_strings, {'hello world':
"What'up, Doc?", 'not localized': 'not localized'})
def test_global(self):
logging.info('*** global ***')
settings = {'localized': {'hello world': "What'up, Doc?",
'another string': 'Bye!'}, 'space': {'title': 'space name',
'participants': ['[email protected]']}, 'server': {'url':
'http://to.no.where', 'hook': '/hook', 'binding': '0.0.0.0',
'port': 8080}}
context = Context(settings)
l10n.set_context(context)
self.assertEqual(l10n.actual_strings, {})
self.assertEqual(_('hello world'), "What'up, Doc?")
self.assertEqual(l10n.actual_strings, {'hello world': "What'up, Doc?"})
self.assertEqual(_('not localized'), 'not localized')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'not localized': 'not localized'})
self.assertEqual(_('another string'), 'Bye!')
self.assertEqual(l10n.actual_strings, {'hello world':
"What'up, Doc?", 'another string': 'Bye!', 'not localized':
'not localized'})
if __name__ == '__main__':
Context.set_logger()
sys.exit(unittest.main())
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import gc
import logging
import os
import mock
import sys
import time
from shellbot import Context, Engine
from shellbot.i18n import Localization, localization as l10n, _
class LocalizationTests(unittest.TestCase):
def test_default(self):
logging.info('*** default ***')
localization = Localization()
text = 'hello world'
self.assertEqual(localization._(text), text)
def test_init(self):
logging.info('*** init ***')
settings = {
'localized': {
'hello world': "What'up, Doc?",
'another string': 'Bye!',
},
'space': {
'title': 'space name',
'participants': ['[email protected]'],
},
'server': {
'url': 'http://to.no.where',
'hook': '/hook',
'binding': '0.0.0.0',
'port': 8080,
},
}
context=Context(settings)
my_localization = Localization(context)
self.assertEqual(my_localization.context, context)
self.assertEqual(my_localization._('hello world'), "What'up, Doc?")
self.assertEqual(my_localization._('not localized'), 'not localized')
self.assertEqual(my_localization.actual_strings,
{'hello world': "What'up, Doc?",
'not localized': 'not localized'})
def test_global(self):
logging.info('*** global ***')
settings = {
'localized': {
'hello world': "What'up, Doc?",
'another string': 'Bye!',
},
'space': {
'title': 'space name',
'participants': ['[email protected]'],
},
'server': {
'url': 'http://to.no.where',
'hook': '/hook',
'binding': '0.0.0.0',
'port': 8080,
},
}
context=Context(settings)
l10n.set_context(context)
self.assertEqual(l10n.actual_strings, {})
self.assertEqual(_('hello world'), "What'up, Doc?")
self.assertEqual(l10n.actual_strings,
{'hello world': "What'up, Doc?"})
self.assertEqual(_('not localized'), 'not localized')
self.assertEqual(l10n.actual_strings,
{'hello world': "What'up, Doc?",
'not localized': 'not localized'})
self.assertEqual(_('another string'), 'Bye!')
self.assertEqual(l10n.actual_strings,
{'hello world': "What'up, Doc?",
'another string': 'Bye!',
'not localized': 'not localized'})
if __name__ == '__main__':
Context.set_logger()
sys.exit(unittest.main())
| [
3,
4,
5,
6,
7
] |
163 | 5917c891d2885f779dc33f189f1a875efbd0c302 | <mask token>
class ModelBase:
<mask token>
<mask token>
<mask token>
def __init__(self, schema):
self.schema = schema
<mask token>
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
<mask token>
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
raise ResponseError(info='对应编号信息不存在')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
| <mask token>
class ModelBase:
<mask token>
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
<mask token>
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
<mask token>
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
raise ResponseError(info='对应编号信息不存在')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
| <mask token>
class ModelBase:
<mask token>
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
@property
def id(self):
return self.schema.id
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False
):
""" 把用 property 装饰的属性封装到一个 dict 中再返回
:param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性
:param exclude_keys, list, 指定需要排除的属性, 默认为 []
:param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层
:param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys
"""
return_dict = {}
attrs = self.__class__.__dict__
include_keys = include_keys or [name for name in attrs.keys() if
not name.startswith('_')]
exclude_keys = exclude_keys or []
if lite is True:
lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])
exclude_keys = exclude_keys + lite_exclude_keys
include_keys = [name for name in include_keys if name not in
exclude_keys]
if depth > 1:
return self.uid
for key, value in attrs.items():
if key not in include_keys:
continue
if not isinstance(value, property):
continue
value = getattr(self, key)
if isinstance(value, Enum):
return_dict[key] = value.value
elif isinstance(value, list):
list_values = []
for item in value:
if hasattr(item, 'to_dict'):
list_values.append(item.to_dict(depth=depth + 1,
lite=True))
else:
list_values.append(item)
return_dict[key] = list_values
elif isinstance(value, dict):
dict_values = {}
for k, v in value.items():
if hasattr(v, 'to_dict'):
dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
else:
dict_values[k] = v
return_dict[key] = dict_values
elif isinstance(value, datetime):
return_dict[key] = value.isoformat()
elif hasattr(value, 'to_dict'):
return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
else:
return_dict[key] = value
return return_dict
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
raise ResponseError(info='对应编号信息不存在')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
| <mask token>
class ModelBase:
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
@property
def id(self):
return self.schema.id
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False
):
""" 把用 property 装饰的属性封装到一个 dict 中再返回
:param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性
:param exclude_keys, list, 指定需要排除的属性, 默认为 []
:param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层
:param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys
"""
return_dict = {}
attrs = self.__class__.__dict__
include_keys = include_keys or [name for name in attrs.keys() if
not name.startswith('_')]
exclude_keys = exclude_keys or []
if lite is True:
lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])
exclude_keys = exclude_keys + lite_exclude_keys
include_keys = [name for name in include_keys if name not in
exclude_keys]
if depth > 1:
return self.uid
for key, value in attrs.items():
if key not in include_keys:
continue
if not isinstance(value, property):
continue
value = getattr(self, key)
if isinstance(value, Enum):
return_dict[key] = value.value
elif isinstance(value, list):
list_values = []
for item in value:
if hasattr(item, 'to_dict'):
list_values.append(item.to_dict(depth=depth + 1,
lite=True))
else:
list_values.append(item)
return_dict[key] = list_values
elif isinstance(value, dict):
dict_values = {}
for k, v in value.items():
if hasattr(v, 'to_dict'):
dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
else:
dict_values[k] = v
return_dict[key] = dict_values
elif isinstance(value, datetime):
return_dict[key] = value.isoformat()
elif hasattr(value, 'to_dict'):
return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
else:
return_dict[key] = value
return return_dict
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
raise ResponseError(info='对应编号信息不存在')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
| from abc import ABCMeta, abstractmethod
from datetime import datetime
from enum import Enum
from application.response import ResponseError
class ModelBase:
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
@property
def id(self):
return self.schema.id
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False):
""" 把用 property 装饰的属性封装到一个 dict 中再返回
:param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性
:param exclude_keys, list, 指定需要排除的属性, 默认为 []
:param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层
:param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys
"""
return_dict = {}
attrs = self.__class__.__dict__
include_keys = include_keys or [
name for name in attrs.keys() if not name.startswith("_")
]
exclude_keys = exclude_keys or []
if lite is True:
lite_exclude_keys = getattr(self, "lite_exclude_keys", [])
exclude_keys = exclude_keys + lite_exclude_keys
include_keys = [name for name in include_keys if name not in exclude_keys]
if depth > 1:
return self.uid
for key, value in attrs.items():
if key not in include_keys:
continue
if not isinstance(value, property):
continue
value = getattr(self, key)
if isinstance(value, Enum):
return_dict[key] = value.value
elif isinstance(value, list):
list_values = []
for item in value:
if hasattr(item, "to_dict"):
list_values.append(item.to_dict(depth=depth + 1, lite=True))
else:
list_values.append(item)
return_dict[key] = list_values
elif isinstance(value, dict):
dict_values = {}
for k, v in value.items():
if hasattr(v, "to_dict"):
dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
else:
dict_values[k] = v
return_dict[key] = dict_values
elif isinstance(value, datetime):
return_dict[key] = value.isoformat()
elif hasattr(value, "to_dict"):
return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
else:
return_dict[key] = value
return return_dict
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
raise ResponseError(info='对应编号信息不存在')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
| [
6,
8,
10,
11,
13
] |
164 | 41417e3ce52edf6aee432886bbab6d16ec5bc88d | <mask token>
class RNNClassifier(nn.Module):
<mask token>
<mask token>
| <mask token>
class RNNClassifier(nn.Module):
def __init__(self, batch_size, num_classes, hidden_size, vocab_size,
embed_size, weights):
super(RNNClassifier, self).__init__()
self.batch_size = batch_size
self.num_classes = num_classes
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed_size = embed_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False
)
self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,
bidirectional=True)
self.proj = nn.Linear(4 * hidden_size, num_classes)
<mask token>
| <mask token>
class RNNClassifier(nn.Module):
def __init__(self, batch_size, num_classes, hidden_size, vocab_size,
embed_size, weights):
super(RNNClassifier, self).__init__()
self.batch_size = batch_size
self.num_classes = num_classes
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed_size = embed_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False
)
self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,
bidirectional=True)
self.proj = nn.Linear(4 * hidden_size, num_classes)
def forward(self, input_sentence):
batch_size = input_sentence.size()[0]
input = self.word_embeddings(input_sentence)
input = input.permute(1, 0, 2).contiguous()
h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())
output, h_n = self.rnn(input, h_0)
h_n = h_n.permute(1, 0, 2).contiguous()
h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size
()[2])
logtis = self.proj(h_n)
return logtis
| <mask token>
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class RNNClassifier(nn.Module):
def __init__(self, batch_size, num_classes, hidden_size, vocab_size,
embed_size, weights):
super(RNNClassifier, self).__init__()
self.batch_size = batch_size
self.num_classes = num_classes
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed_size = embed_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False
)
self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,
bidirectional=True)
self.proj = nn.Linear(4 * hidden_size, num_classes)
def forward(self, input_sentence):
batch_size = input_sentence.size()[0]
input = self.word_embeddings(input_sentence)
input = input.permute(1, 0, 2).contiguous()
h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())
output, h_n = self.rnn(input, h_0)
h_n = h_n.permute(1, 0, 2).contiguous()
h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size
()[2])
logtis = self.proj(h_n)
return logtis
| """
Created on 01/10/18.
Author: morgan
Copyright defined in text_classification/LICENSE.txt
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class RNNClassifier(nn.Module):
def __init__(self, batch_size, num_classes, hidden_size, vocab_size, embed_size, weights):
super(RNNClassifier, self).__init__()
# weights: Pre-trained GloVe word_embeddings that we will use to create our word_embedding lookup table
self.batch_size = batch_size
self.num_classes = num_classes
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed_size = embed_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size) # initialize the lookup table
# Assigning the look-up table to the pre-trained GloVe word embedding.
self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)
self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2, bidirectional=True)
self.proj = nn.Linear(4*hidden_size, num_classes)
def forward(self, input_sentence):
batch_size = input_sentence.size()[0]
# input: [batch_size, seq_len], [64, 100]
# print('input 0:', input_sentence.size())
input = self.word_embeddings(input_sentence) # [batch_size, seq_len, embed_size]p
# print('input 1:', input.size())
input = input.permute(1, 0, 2).contiguous() # [seq_len, batch_size, embed_size]
# Initiate hidden/cell state of the LSTM
h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())
# [4, batch_size, hidden_size]
output, h_n = self.rnn(input, h_0)
# h_n: [4, batch_size, hidden_size]
# output: [max_len, batch_size, hidden]
# print('h_n:', h_n.size())
# print('output', output.size())
h_n = h_n.permute(1, 0, 2).contiguous() #[batch_size, 4, hidden_size]
# print('h_n1:', h_n.size())
h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1]*h_n.size()[2])
# [batch_size, 4*hidden_size]
# print('h_n2:', h_n.size())
# final_hidden_state: [1, batch_size, hidden_size]
logtis = self.proj(h_n)
# print('logtis:', logtis.size())
# final_output: [batch_size, num_classes]
return logtis
| [
1,
2,
3,
4,
5
] |
165 | 9a55ccf758b4b2cc440153ab3b1f97823863a848 | <mask token>
| <mask token>
class Logins(models.Model):
<mask token>
<mask token>
<mask token>
| <mask token>
class Logins(models.Model):
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
| from django.db import models
class Logins(models.Model):
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
| from django.db import models
# Create your models here.
class Logins(models.Model):
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
| [
0,
1,
2,
3,
4
] |
166 | 6c0b2fa8166bb21a514dc188858e1de285ad9b0a | <mask token>
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
<mask token>
<mask token>
implements(interfaces.IPatrimonyCertificate)
<mask token>
<mask token>
<mask token>
<mask token>
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
<mask token>
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
<mask token>
| <mask token>
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
<mask token>
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
schemata_order = ['urban_description', 'urban_road', 'urban_location']
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
<mask token>
| __author__ = """Gauthier BASTIEN <[email protected]>, Stephan GEULETTE
<[email protected]>, Jean-Michel Abe <[email protected]>"""
__docformat__ = 'plaintext'
<mask token>
optional_fields = ['architects']
schema = Schema((ReferenceField(name='architects', widget=
ReferenceBrowserWidget(allow_search=True, only_for_review_states=
'enabled', allow_browse=True, force_close_on_insert=True,
startup_directory='urban/architects',
restrict_browsing_to_startup_directory=True, wild_card_search=True,
show_index_selector=True, label=_('urban_label_architects', default=
'Architect(s)'), popup_name='contact_reference_popup'), required=False,
schemata='urban_description', multiValued=True, relationship=
'miscdemandarchitects', allowed_types='Architect'),))
setOptionalAttributes(schema, optional_fields)
PatrimonyCertificate_schema = BaseFolderSchema.copy() + getattr(GenericLicence,
'schema', Schema(())).copy() + getattr(Inquiry, 'schema', Schema(())).copy(
) + schema.copy()
setSchemataForInquiry(PatrimonyCertificate_schema)
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
schemata_order = ['urban_description', 'urban_road', 'urban_location']
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
registerType(PatrimonyCertificate, PROJECTNAME)
def finalizeSchema(schema, folderish=False, moveDiscussion=True):
"""
Finalizes the type schema to alter some fields
"""
schema.moveField('description', after='architects')
return schema
finalizeSchema(PatrimonyCertificate_schema)
| __author__ = """Gauthier BASTIEN <[email protected]>, Stephan GEULETTE
<[email protected]>, Jean-Michel Abe <[email protected]>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
from Products.urban import interfaces
from Products.urban.content.licence.GenericLicence import GenericLicence
from Products.urban.content.Inquiry import Inquiry
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.urban import UrbanMessage as _
from Products.urban.config import *
from Products.urban.utils import setOptionalAttributes
from Products.urban.utils import setSchemataForInquiry
from Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget
optional_fields = ['architects']
schema = Schema((ReferenceField(name='architects', widget=
ReferenceBrowserWidget(allow_search=True, only_for_review_states=
'enabled', allow_browse=True, force_close_on_insert=True,
startup_directory='urban/architects',
restrict_browsing_to_startup_directory=True, wild_card_search=True,
show_index_selector=True, label=_('urban_label_architects', default=
'Architect(s)'), popup_name='contact_reference_popup'), required=False,
schemata='urban_description', multiValued=True, relationship=
'miscdemandarchitects', allowed_types='Architect'),))
setOptionalAttributes(schema, optional_fields)
PatrimonyCertificate_schema = BaseFolderSchema.copy() + getattr(GenericLicence,
'schema', Schema(())).copy() + getattr(Inquiry, 'schema', Schema(())).copy(
) + schema.copy()
setSchemataForInquiry(PatrimonyCertificate_schema)
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
schemata_order = ['urban_description', 'urban_road', 'urban_location']
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
registerType(PatrimonyCertificate, PROJECTNAME)
def finalizeSchema(schema, folderish=False, moveDiscussion=True):
"""
Finalizes the type schema to alter some fields
"""
schema.moveField('description', after='architects')
return schema
finalizeSchema(PatrimonyCertificate_schema)
| # -*- coding: utf-8 -*-
#
# File: PatrimonyCertificate.py
#
# Copyright (c) 2015 by CommunesPlone
# Generator: ArchGenXML Version 2.7
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """Gauthier BASTIEN <[email protected]>, Stephan GEULETTE
<[email protected]>, Jean-Michel Abe <[email protected]>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
from Products.urban import interfaces
from Products.urban.content.licence.GenericLicence import GenericLicence
from Products.urban.content.Inquiry import Inquiry
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.urban import UrbanMessage as _
from Products.urban.config import *
##code-section module-header #fill in your manual code here
from Products.urban.utils import setOptionalAttributes
from Products.urban.utils import setSchemataForInquiry
from Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget
optional_fields = ['architects']
##/code-section module-header
schema = Schema((
ReferenceField(
name='architects',
widget=ReferenceBrowserWidget(
allow_search=True,
only_for_review_states='enabled',
allow_browse=True,
force_close_on_insert=True,
startup_directory='urban/architects',
restrict_browsing_to_startup_directory=True,
wild_card_search=True,
show_index_selector=True,
label=_('urban_label_architects', default='Architect(s)'),
popup_name='contact_reference_popup',
),
required=False,
schemata='urban_description',
multiValued=True,
relationship="miscdemandarchitects",
allowed_types='Architect',
),
),
)
##code-section after-local-schema #fill in your manual code here
setOptionalAttributes(schema, optional_fields)
##/code-section after-local-schema
PatrimonyCertificate_schema = BaseFolderSchema.copy() + \
getattr(GenericLicence, 'schema', Schema(())).copy() + \
getattr(Inquiry, 'schema', Schema(())).copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
#put the the fields coming from Inquiry in a specific schemata
setSchemataForInquiry(PatrimonyCertificate_schema)
##/code-section after-schema
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry, BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
##code-section class-header #fill in your manual code here
schemata_order = ['urban_description', 'urban_road', 'urban_location']
##/code-section class-header
# Methods
# Manually created methods
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
registerType(PatrimonyCertificate, PROJECTNAME)
# end of class PatrimonyCertificate
##code-section module-footer #fill in your manual code here
def finalizeSchema(schema, folderish=False, moveDiscussion=True):
"""
Finalizes the type schema to alter some fields
"""
schema.moveField('description', after='architects')
return schema
finalizeSchema(PatrimonyCertificate_schema)
##/code-section module-footer
| [
4,
6,
10,
11,
12
] |
167 | d327151c9659078e12e4aca46631de33e7ca4dcf | <mask token>
class TCRPowerCalculator:
<mask token>
def predict_detection_probability_2step(self, tcr_frequency, num_reads,
num_cells, detect_thresh=1):
"""
2-step detection probability model where
1) Num_cells_TCR is sampled first from the blood (Poisson model)
2) The RNA detection probability is calculated (Negbin model).
The num_cells_TCR is marginalized with the num_cells parameter as the upper limit
on the number of cells that could be sampled for a given TCR.
"""
mu_cells = tcr_frequency * num_cells
p0_poisson = stats.poisson.pmf(0, mu_cells)
num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]
p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)
num_cells_TCR = num_cells_TCR[p1 > 0]
p1 = p1[p1 > 0]
mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,
num_reads)
p2 = np.zeros(p1.shape)
for i in np.arange(detect_thresh):
p2 += self.pcmodel.pmf(mu_reads, count=i)
p0_2step = np.dot(p1.squeeze(), p2.squeeze())
return 1.0 - p0_poisson - p0_2step
<mask token>
def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):
opt_nreads = partial(self.pcmodel.predict_detection_probability,
tcr_frequencies=tcr_freq)
opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=
nreads) - conf_level, method='secant', x0=1e-16, x1=1)
return int(np.around(opt_res.root))
| <mask token>
class TCRPowerCalculator:
def __init__(self, pcmodel):
self.pcmodel = pcmodel
self.predict_variance = self.pcmodel.predict_variance
self.predict_mean = self.pcmodel.predict_mean
self.get_prediction_interval = self.pcmodel.get_prediction_interval
self.predict_detection_probability = (self.pcmodel.
predict_detection_probability)
def predict_detection_probability_2step(self, tcr_frequency, num_reads,
num_cells, detect_thresh=1):
"""
2-step detection probability model where
1) Num_cells_TCR is sampled first from the blood (Poisson model)
2) The RNA detection probability is calculated (Negbin model).
The num_cells_TCR is marginalized with the num_cells parameter as the upper limit
on the number of cells that could be sampled for a given TCR.
"""
mu_cells = tcr_frequency * num_cells
p0_poisson = stats.poisson.pmf(0, mu_cells)
num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]
p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)
num_cells_TCR = num_cells_TCR[p1 > 0]
p1 = p1[p1 > 0]
mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,
num_reads)
p2 = np.zeros(p1.shape)
for i in np.arange(detect_thresh):
p2 += self.pcmodel.pmf(mu_reads, count=i)
p0_2step = np.dot(p1.squeeze(), p2.squeeze())
return 1.0 - p0_poisson - p0_2step
<mask token>
def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):
opt_nreads = partial(self.pcmodel.predict_detection_probability,
tcr_frequencies=tcr_freq)
opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=
nreads) - conf_level, method='secant', x0=1e-16, x1=1)
return int(np.around(opt_res.root))
| <mask token>
class TCRPowerCalculator:
def __init__(self, pcmodel):
self.pcmodel = pcmodel
self.predict_variance = self.pcmodel.predict_variance
self.predict_mean = self.pcmodel.predict_mean
self.get_prediction_interval = self.pcmodel.get_prediction_interval
self.predict_detection_probability = (self.pcmodel.
predict_detection_probability)
def predict_detection_probability_2step(self, tcr_frequency, num_reads,
num_cells, detect_thresh=1):
"""
2-step detection probability model where
1) Num_cells_TCR is sampled first from the blood (Poisson model)
2) The RNA detection probability is calculated (Negbin model).
The num_cells_TCR is marginalized with the num_cells parameter as the upper limit
on the number of cells that could be sampled for a given TCR.
"""
mu_cells = tcr_frequency * num_cells
p0_poisson = stats.poisson.pmf(0, mu_cells)
num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]
p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)
num_cells_TCR = num_cells_TCR[p1 > 0]
p1 = p1[p1 > 0]
mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,
num_reads)
p2 = np.zeros(p1.shape)
for i in np.arange(detect_thresh):
p2 += self.pcmodel.pmf(mu_reads, count=i)
p0_2step = np.dot(p1.squeeze(), p2.squeeze())
return 1.0 - p0_poisson - p0_2step
def get_limit_of_detection_tcrfreq(self, num_reads, conf_level=0.95):
opt_f = partial(self.pcmodel.predict_detection_probability,
num_reads=num_reads)
opt_res = optimize.root_scalar(lambda freq: opt_f(freq) -
conf_level, method='brentq', bracket=[1e-16, 1])
return opt_res.root
def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):
opt_nreads = partial(self.pcmodel.predict_detection_probability,
tcr_frequencies=tcr_freq)
opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=
nreads) - conf_level, method='secant', x0=1e-16, x1=1)
return int(np.around(opt_res.root))
| import numpy as np
import numdifftools as nd
from scipy import stats
from scipy import optimize
from functools import partial
class TCRPowerCalculator:
def __init__(self, pcmodel):
self.pcmodel = pcmodel
self.predict_variance = self.pcmodel.predict_variance
self.predict_mean = self.pcmodel.predict_mean
self.get_prediction_interval = self.pcmodel.get_prediction_interval
self.predict_detection_probability = (self.pcmodel.
predict_detection_probability)
def predict_detection_probability_2step(self, tcr_frequency, num_reads,
num_cells, detect_thresh=1):
"""
2-step detection probability model where
1) Num_cells_TCR is sampled first from the blood (Poisson model)
2) The RNA detection probability is calculated (Negbin model).
The num_cells_TCR is marginalized with the num_cells parameter as the upper limit
on the number of cells that could be sampled for a given TCR.
"""
mu_cells = tcr_frequency * num_cells
p0_poisson = stats.poisson.pmf(0, mu_cells)
num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]
p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)
num_cells_TCR = num_cells_TCR[p1 > 0]
p1 = p1[p1 > 0]
mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,
num_reads)
p2 = np.zeros(p1.shape)
for i in np.arange(detect_thresh):
p2 += self.pcmodel.pmf(mu_reads, count=i)
p0_2step = np.dot(p1.squeeze(), p2.squeeze())
return 1.0 - p0_poisson - p0_2step
def get_limit_of_detection_tcrfreq(self, num_reads, conf_level=0.95):
opt_f = partial(self.pcmodel.predict_detection_probability,
num_reads=num_reads)
opt_res = optimize.root_scalar(lambda freq: opt_f(freq) -
conf_level, method='brentq', bracket=[1e-16, 1])
return opt_res.root
def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):
opt_nreads = partial(self.pcmodel.predict_detection_probability,
tcr_frequencies=tcr_freq)
opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=
nreads) - conf_level, method='secant', x0=1e-16, x1=1)
return int(np.around(opt_res.root))
| import numpy as np
import numdifftools as nd
from scipy import stats
from scipy import optimize
from functools import partial
class TCRPowerCalculator:
def __init__(self, pcmodel):
self.pcmodel = pcmodel
self.predict_variance = self.pcmodel.predict_variance
self.predict_mean = self.pcmodel.predict_mean
self.get_prediction_interval = self.pcmodel.get_prediction_interval
self.predict_detection_probability = self.pcmodel.predict_detection_probability
#possivle TODO: Parse this method out into a new 2-step model class
def predict_detection_probability_2step(self, tcr_frequency, num_reads, num_cells, detect_thresh = 1):
"""
2-step detection probability model where
1) Num_cells_TCR is sampled first from the blood (Poisson model)
2) The RNA detection probability is calculated (Negbin model).
The num_cells_TCR is marginalized with the num_cells parameter as the upper limit
on the number of cells that could be sampled for a given TCR.
"""
mu_cells = tcr_frequency*num_cells
p0_poisson = stats.poisson.pmf(0, mu_cells)
num_cells_TCR = np.arange(1, num_cells + 1)[:,np.newaxis]
#Step 1 Poisson
p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)
#Get rid of 0 probability cell counts
num_cells_TCR = num_cells_TCR[p1 >0]
p1 = p1[p1 >0]
#Step 2 Negbin
mu_reads = self.pcmodel.predict_mean(num_cells_TCR/num_cells, num_reads)
p2 = np.zeros(p1.shape)
for i in np.arange(detect_thresh):
p2 += self.pcmodel.pmf(mu_reads, count = i)
p0_2step = np.dot(p1.squeeze(), p2.squeeze())
#If 0 cells from Poisson model then automatically get 0 reads
return 1.0 - p0_poisson - p0_2step
def get_limit_of_detection_tcrfreq(self, num_reads, conf_level = 0.95):
opt_f = partial(self.pcmodel.predict_detection_probability, num_reads = num_reads)
opt_res = optimize.root_scalar(lambda freq: opt_f(freq) - conf_level,
method = "brentq",
bracket = [1.0e-16, 1])
return opt_res.root
def get_limit_of_detection_nreads(self, tcr_freq, conf_level = 0.95):
opt_nreads = partial(self.pcmodel.predict_detection_probability, tcr_frequencies = tcr_freq)
opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads = nreads) - conf_level,
method = "secant",
x0 = 1.0e-16,
x1 = 1)
return int(np.around(opt_res.root)) | [
3,
4,
5,
6,
7
] |
168 | 704b3c57ca080862bed7a4caa65d1c8d5a32fa0b | <mask token>
class CopyGenerator(nn.Module):
<mask token>
def __init__(self, opt, src_dict, tgt_dict):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(opt.rnn_size, tgt_dict.size())
self.linear_copy = nn.Linear(opt.rnn_size, 1)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def forward(self, hidden, attn, verbose=False):
"""
Computes p(w) = p(z=1) p_{copy}(w|z=0) + p(z=0) * p_{softmax}(w|z=0)
Args:
hidden (FloatTensor): (tgt_len*batch) x hidden
attn (FloatTensor): (tgt_len*batch) x src_len
Returns:
prob (FloatTensor): (tgt_len*batch) x vocab
attn (FloatTensor): (tgt_len*batch) x src_len
"""
logits = self.linear(hidden)
logits[:, onmt.Constants.UNK] = -float('inf')
logits[:, onmt.Constants.PAD] = -float('inf')
prob = F.softmax(logits)
copy = F.sigmoid(self.linear_copy(hidden))
out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
mul_attn = torch.mul(attn, copy.expand_as(attn))
return out_prob, mul_attn
def _debug_copy(self, src, copy, prob, out_prob, attn, mul_attn):
v, mid = prob[0].data.max(0)
print('Initial:', self.tgt_dict.getLabel(mid[0], 'FAIL'), v[0])
print('COPY %3f' % copy.data[0][0])
_, ids = attn[0].cpu().data.sort(0, descending=True)
for j in ids[:10].tolist():
src_idx = src[0, j].data[0]
print('\t%s\t\t%d\t%3f\t%3f' % (self.src_dict.getLabel(src_idx),
j, attn[0, j].data[0], mul_attn[0, j].data[0]))
<mask token>
| <mask token>
class CopyGenerator(nn.Module):
"""
Generator module that additionally considers copying
words directly from the source.
"""
def __init__(self, opt, src_dict, tgt_dict):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(opt.rnn_size, tgt_dict.size())
self.linear_copy = nn.Linear(opt.rnn_size, 1)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def forward(self, hidden, attn, verbose=False):
"""
Computes p(w) = p(z=1) p_{copy}(w|z=0) + p(z=0) * p_{softmax}(w|z=0)
Args:
hidden (FloatTensor): (tgt_len*batch) x hidden
attn (FloatTensor): (tgt_len*batch) x src_len
Returns:
prob (FloatTensor): (tgt_len*batch) x vocab
attn (FloatTensor): (tgt_len*batch) x src_len
"""
logits = self.linear(hidden)
logits[:, onmt.Constants.UNK] = -float('inf')
logits[:, onmt.Constants.PAD] = -float('inf')
prob = F.softmax(logits)
copy = F.sigmoid(self.linear_copy(hidden))
out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
mul_attn = torch.mul(attn, copy.expand_as(attn))
return out_prob, mul_attn
def _debug_copy(self, src, copy, prob, out_prob, attn, mul_attn):
v, mid = prob[0].data.max(0)
print('Initial:', self.tgt_dict.getLabel(mid[0], 'FAIL'), v[0])
print('COPY %3f' % copy.data[0][0])
_, ids = attn[0].cpu().data.sort(0, descending=True)
for j in ids[:10].tolist():
src_idx = src[0, j].data[0]
print('\t%s\t\t%d\t%3f\t%3f' % (self.src_dict.getLabel(src_idx),
j, attn[0, j].data[0], mul_attn[0, j].data[0]))
<mask token>
| <mask token>
class CopyGenerator(nn.Module):
"""
Generator module that additionally considers copying
words directly from the source.
"""
def __init__(self, opt, src_dict, tgt_dict):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(opt.rnn_size, tgt_dict.size())
self.linear_copy = nn.Linear(opt.rnn_size, 1)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def forward(self, hidden, attn, verbose=False):
"""
Computes p(w) = p(z=1) p_{copy}(w|z=0) + p(z=0) * p_{softmax}(w|z=0)
Args:
hidden (FloatTensor): (tgt_len*batch) x hidden
attn (FloatTensor): (tgt_len*batch) x src_len
Returns:
prob (FloatTensor): (tgt_len*batch) x vocab
attn (FloatTensor): (tgt_len*batch) x src_len
"""
logits = self.linear(hidden)
logits[:, onmt.Constants.UNK] = -float('inf')
logits[:, onmt.Constants.PAD] = -float('inf')
prob = F.softmax(logits)
copy = F.sigmoid(self.linear_copy(hidden))
out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
mul_attn = torch.mul(attn, copy.expand_as(attn))
return out_prob, mul_attn
def _debug_copy(self, src, copy, prob, out_prob, attn, mul_attn):
v, mid = prob[0].data.max(0)
print('Initial:', self.tgt_dict.getLabel(mid[0], 'FAIL'), v[0])
print('COPY %3f' % copy.data[0][0])
_, ids = attn[0].cpu().data.sort(0, descending=True)
for j in ids[:10].tolist():
src_idx = src[0, j].data[0]
print('\t%s\t\t%d\t%3f\t%3f' % (self.src_dict.getLabel(src_idx),
j, attn[0, j].data[0], mul_attn[0, j].data[0]))
def CopyCriterion(probs, attn, targ, align, eps=1e-12):
copies = attn.mul(Variable(align)).sum(-1).add(eps)
out = torch.log(probs.gather(1, targ.view(-1, 1)).view(-1) + copies + eps)
out = out.mul(targ.ne(onmt.Constants.PAD).float())
return -out.sum()
| import onmt
import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
from torch.autograd import Variable
class CopyGenerator(nn.Module):
"""
Generator module that additionally considers copying
words directly from the source.
"""
def __init__(self, opt, src_dict, tgt_dict):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(opt.rnn_size, tgt_dict.size())
self.linear_copy = nn.Linear(opt.rnn_size, 1)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def forward(self, hidden, attn, verbose=False):
"""
Computes p(w) = p(z=1) p_{copy}(w|z=0) + p(z=0) * p_{softmax}(w|z=0)
Args:
hidden (FloatTensor): (tgt_len*batch) x hidden
attn (FloatTensor): (tgt_len*batch) x src_len
Returns:
prob (FloatTensor): (tgt_len*batch) x vocab
attn (FloatTensor): (tgt_len*batch) x src_len
"""
logits = self.linear(hidden)
logits[:, onmt.Constants.UNK] = -float('inf')
logits[:, onmt.Constants.PAD] = -float('inf')
prob = F.softmax(logits)
copy = F.sigmoid(self.linear_copy(hidden))
out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
mul_attn = torch.mul(attn, copy.expand_as(attn))
return out_prob, mul_attn
def _debug_copy(self, src, copy, prob, out_prob, attn, mul_attn):
v, mid = prob[0].data.max(0)
print('Initial:', self.tgt_dict.getLabel(mid[0], 'FAIL'), v[0])
print('COPY %3f' % copy.data[0][0])
_, ids = attn[0].cpu().data.sort(0, descending=True)
for j in ids[:10].tolist():
src_idx = src[0, j].data[0]
print('\t%s\t\t%d\t%3f\t%3f' % (self.src_dict.getLabel(src_idx),
j, attn[0, j].data[0], mul_attn[0, j].data[0]))
def CopyCriterion(probs, attn, targ, align, eps=1e-12):
copies = attn.mul(Variable(align)).sum(-1).add(eps)
out = torch.log(probs.gather(1, targ.view(-1, 1)).view(-1) + copies + eps)
out = out.mul(targ.ne(onmt.Constants.PAD).float())
return -out.sum()
| import onmt
import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
from torch.autograd import Variable
class CopyGenerator(nn.Module):
"""
Generator module that additionally considers copying
words directly from the source.
"""
def __init__(self, opt, src_dict, tgt_dict):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(opt.rnn_size, tgt_dict.size())
self.linear_copy = nn.Linear(opt.rnn_size, 1)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def forward(self, hidden, attn, verbose=False):
"""
Computes p(w) = p(z=1) p_{copy}(w|z=0) + p(z=0) * p_{softmax}(w|z=0)
Args:
hidden (FloatTensor): (tgt_len*batch) x hidden
attn (FloatTensor): (tgt_len*batch) x src_len
Returns:
prob (FloatTensor): (tgt_len*batch) x vocab
attn (FloatTensor): (tgt_len*batch) x src_len
"""
# Original probabilities.
logits = self.linear(hidden)
logits[:, onmt.Constants.UNK] = -float('inf')
logits[:, onmt.Constants.PAD] = -float('inf')
prob = F.softmax(logits)
# Probability of copying p(z=1) batch
copy = F.sigmoid(self.linear_copy(hidden))
# Probibility of not copying: p_{word}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
mul_attn = torch.mul(attn, copy.expand_as(attn))
return out_prob, mul_attn
def _debug_copy(self, src, copy, prob, out_prob, attn, mul_attn):
v, mid = prob[0].data.max(0)
print("Initial:", self.tgt_dict.getLabel(mid[0], "FAIL"), v[0])
print("COPY %3f" % copy.data[0][0])
_, ids = attn[0].cpu().data.sort(0, descending=True)
for j in ids[:10].tolist():
src_idx = src[0, j].data[0]
print("\t%s\t\t%d\t%3f\t%3f" % (
self.src_dict.getLabel(src_idx),
j,
attn[0, j].data[0],
mul_attn[0, j].data[0]))
def CopyCriterion(probs, attn, targ, align, eps=1e-12):
copies = attn.mul(Variable(align)).sum(-1).add(eps)
# Can't use UNK, must copy.
out = torch.log(probs.gather(1, targ.view(-1, 1)).view(-1) + copies + eps)
out = out.mul(targ.ne(onmt.Constants.PAD).float())
return -out.sum()
| [
4,
5,
6,
7,
8
] |
169 | 630011b188548df9e55b6f1ddbefa08e322b9cba | <mask token>
class FeaturesBuilder(object):
def __init__(self, num_words, img_paths, dataset_matrix=None):
self.num_words = num_words
self.img_paths = img_paths
self.dataset_matrix = dataset_matrix
def getClusterCentures(self):
start_time = datetime.now()
feature_getter = FeatureGetter()
des_list = []
des_matrix = np.zeros((1, 128))
if self.img_paths != None:
for path in self.img_paths:
kp, des = feature_getter.get_feature(path)
if des.any() != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
elif self.dataset_matrix != None:
for gray in range(self.dataset_matrix.shape[0]):
sift_det = cv2.xfeatures2d.SIFT_create()
kp, des = sift_det.detectAndCompute(gray, None)
if des != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
else:
raise ValueError('输入不合法')
des_matrix = des_matrix[1:, :]
kmeans = MiniBatchKMeans(n_clusters=self.num_words, batch_size=200,
random_state=33)
kmeans.fit(des_matrix)
centres = kmeans.cluster_centers_
elapsed_time = datetime.now() - start_time
print(' 获取聚类中心total_time ', elapsed_time)
return centres, des_list
class GetFeatures(object):
def des2feature(self, des, NUM_WORDS, centures):
img_feature_vec = np.zeros((1, NUM_WORDS), 'float32')
for i in range(des.shape[0]):
feature_k_rows = np.ones((NUM_WORDS, 128), 'float32')
feature = des[i]
feature_k_rows = feature_k_rows * feature
feature_k_rows = np.sum((feature_k_rows - centures) ** 2, 1)
index = np.argmax(feature_k_rows)
img_feature_vec[0][index] += 1
return img_feature_vec
def get_all_features(self, des_list, num_word, centres):
allvec = np.zeros((len(des_list), num_word), 'float32')
for i in range(len(des_list)):
if des_list[i].any() != None:
allvec[i] = self.des2feature(des_list[i], num_word, centres)
return allvec
| <mask token>
class FeatureGetter(object):
<mask token>
<mask token>
<mask token>
class FeaturesBuilder(object):
def __init__(self, num_words, img_paths, dataset_matrix=None):
self.num_words = num_words
self.img_paths = img_paths
self.dataset_matrix = dataset_matrix
def getClusterCentures(self):
start_time = datetime.now()
feature_getter = FeatureGetter()
des_list = []
des_matrix = np.zeros((1, 128))
if self.img_paths != None:
for path in self.img_paths:
kp, des = feature_getter.get_feature(path)
if des.any() != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
elif self.dataset_matrix != None:
for gray in range(self.dataset_matrix.shape[0]):
sift_det = cv2.xfeatures2d.SIFT_create()
kp, des = sift_det.detectAndCompute(gray, None)
if des != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
else:
raise ValueError('输入不合法')
des_matrix = des_matrix[1:, :]
kmeans = MiniBatchKMeans(n_clusters=self.num_words, batch_size=200,
random_state=33)
kmeans.fit(des_matrix)
centres = kmeans.cluster_centers_
elapsed_time = datetime.now() - start_time
print(' 获取聚类中心total_time ', elapsed_time)
return centres, des_list
class GetFeatures(object):
def des2feature(self, des, NUM_WORDS, centures):
img_feature_vec = np.zeros((1, NUM_WORDS), 'float32')
for i in range(des.shape[0]):
feature_k_rows = np.ones((NUM_WORDS, 128), 'float32')
feature = des[i]
feature_k_rows = feature_k_rows * feature
feature_k_rows = np.sum((feature_k_rows - centures) ** 2, 1)
index = np.argmax(feature_k_rows)
img_feature_vec[0][index] += 1
return img_feature_vec
def get_all_features(self, des_list, num_word, centres):
allvec = np.zeros((len(des_list), num_word), 'float32')
for i in range(len(des_list)):
if des_list[i].any() != None:
allvec[i] = self.des2feature(des_list[i], num_word, centres)
return allvec
| <mask token>
class FeatureGetter(object):
<mask token>
def get_img(self, img_path):
img = cv2.imread(img_path)
return img
def get_feature(self, img_path):
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
kp, des = self.sift_det.detectAndCompute(gray, None)
return kp, des
class FeaturesBuilder(object):
def __init__(self, num_words, img_paths, dataset_matrix=None):
self.num_words = num_words
self.img_paths = img_paths
self.dataset_matrix = dataset_matrix
def getClusterCentures(self):
start_time = datetime.now()
feature_getter = FeatureGetter()
des_list = []
des_matrix = np.zeros((1, 128))
if self.img_paths != None:
for path in self.img_paths:
kp, des = feature_getter.get_feature(path)
if des.any() != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
elif self.dataset_matrix != None:
for gray in range(self.dataset_matrix.shape[0]):
sift_det = cv2.xfeatures2d.SIFT_create()
kp, des = sift_det.detectAndCompute(gray, None)
if des != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
else:
raise ValueError('输入不合法')
des_matrix = des_matrix[1:, :]
kmeans = MiniBatchKMeans(n_clusters=self.num_words, batch_size=200,
random_state=33)
kmeans.fit(des_matrix)
centres = kmeans.cluster_centers_
elapsed_time = datetime.now() - start_time
print(' 获取聚类中心total_time ', elapsed_time)
return centres, des_list
class GetFeatures(object):
def des2feature(self, des, NUM_WORDS, centures):
img_feature_vec = np.zeros((1, NUM_WORDS), 'float32')
for i in range(des.shape[0]):
feature_k_rows = np.ones((NUM_WORDS, 128), 'float32')
feature = des[i]
feature_k_rows = feature_k_rows * feature
feature_k_rows = np.sum((feature_k_rows - centures) ** 2, 1)
index = np.argmax(feature_k_rows)
img_feature_vec[0][index] += 1
return img_feature_vec
def get_all_features(self, des_list, num_word, centres):
allvec = np.zeros((len(des_list), num_word), 'float32')
for i in range(len(des_list)):
if des_list[i].any() != None:
allvec[i] = self.des2feature(des_list[i], num_word, centres)
return allvec
| <mask token>
class FeatureGetter(object):
def __init__(self):
self.sift_det = cv2.xfeatures2d.SIFT_create()
def get_img(self, img_path):
img = cv2.imread(img_path)
return img
def get_feature(self, img_path):
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
kp, des = self.sift_det.detectAndCompute(gray, None)
return kp, des
class FeaturesBuilder(object):
def __init__(self, num_words, img_paths, dataset_matrix=None):
self.num_words = num_words
self.img_paths = img_paths
self.dataset_matrix = dataset_matrix
def getClusterCentures(self):
start_time = datetime.now()
feature_getter = FeatureGetter()
des_list = []
des_matrix = np.zeros((1, 128))
if self.img_paths != None:
for path in self.img_paths:
kp, des = feature_getter.get_feature(path)
if des.any() != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
elif self.dataset_matrix != None:
for gray in range(self.dataset_matrix.shape[0]):
sift_det = cv2.xfeatures2d.SIFT_create()
kp, des = sift_det.detectAndCompute(gray, None)
if des != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
else:
raise ValueError('输入不合法')
des_matrix = des_matrix[1:, :]
kmeans = MiniBatchKMeans(n_clusters=self.num_words, batch_size=200,
random_state=33)
kmeans.fit(des_matrix)
centres = kmeans.cluster_centers_
elapsed_time = datetime.now() - start_time
print(' 获取聚类中心total_time ', elapsed_time)
return centres, des_list
class GetFeatures(object):
def des2feature(self, des, NUM_WORDS, centures):
img_feature_vec = np.zeros((1, NUM_WORDS), 'float32')
for i in range(des.shape[0]):
feature_k_rows = np.ones((NUM_WORDS, 128), 'float32')
feature = des[i]
feature_k_rows = feature_k_rows * feature
feature_k_rows = np.sum((feature_k_rows - centures) ** 2, 1)
index = np.argmax(feature_k_rows)
img_feature_vec[0][index] += 1
return img_feature_vec
def get_all_features(self, des_list, num_word, centres):
allvec = np.zeros((len(des_list), num_word), 'float32')
for i in range(len(des_list)):
if des_list[i].any() != None:
allvec[i] = self.des2feature(des_list[i], num_word, centres)
return allvec
| from datetime import datetime
import cv2
import numpy as np
from sklearn.cluster import KMeans,MiniBatchKMeans
class FeatureGetter(object):
def __init__(self):
self.sift_det = cv2.xfeatures2d.SIFT_create()
def get_img(self, img_path):
img = cv2.imread(img_path)
return img
def get_feature(self, img_path):
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
kp, des = self.sift_det.detectAndCompute(gray, None)
return kp, des
class FeaturesBuilder(object):
def __init__(self,num_words,img_paths,dataset_matrix=None):
self.num_words = num_words #聚类中心
self.img_paths =img_paths
self.dataset_matrix =dataset_matrix #dataset_matrix:图像数据的矩阵表示 注:img_paths dataset_matrix这两个参数只需要指定一个
def getClusterCentures(self):
start_time = datetime.now() # 测试时间
feature_getter = FeatureGetter()
des_list = [] # 特征描述
des_matrix = np.zeros((1, 128))
if self.img_paths != None:
for path in self.img_paths:
# kp表示输入的关键点,des表示输出的sift特征向量,通常是128维的。 检测发现是N*128,N是变动的
kp, des = feature_getter.get_feature(path)
if des.any() != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
elif self.dataset_matrix != None:
for gray in range(self.dataset_matrix.shape[0]):
sift_det = cv2.xfeatures2d.SIFT_create()
kp, des = sift_det.detectAndCompute(gray, None)
if des != None:
des_matrix = np.row_stack((des_matrix, des))
des_list.append(des)
else:
raise ValueError('输入不合法')
des_matrix = des_matrix[1:, :] # the des matrix of sift
# 计算聚类中心 构造视觉单词词典
# kmeans = KMeans(n_clusters=self.num_words , random_state=33)
kmeans = MiniBatchKMeans(n_clusters=self.num_words , batch_size=200, random_state= 33) #MiniBatchKMeans 加速优化
kmeans.fit(des_matrix)
centres = kmeans.cluster_centers_ # 视觉聚类中心
elapsed_time = datetime.now() - start_time # 需要的时间
print(" 获取聚类中心total_time ", elapsed_time, )
return centres, des_list
#
class GetFeatures(object):
# 将特征描述转换为特征向量
def des2feature(self,des, NUM_WORDS, centures):
# des:单幅图像的SIFT特征描述 centures:聚类中心坐标 centures:聚类中心坐标 NUM_WORDS*128
# return: feature vector 1*NUM_WORDS
img_feature_vec = np.zeros((1, NUM_WORDS), 'float32')
for i in range(des.shape[0]): # 遍历所有图片
feature_k_rows = np.ones((NUM_WORDS, 128), 'float32')
feature = des[i]
feature_k_rows = feature_k_rows * feature
feature_k_rows = np.sum((feature_k_rows - centures) ** 2, 1)
index = np.argmax(feature_k_rows)
img_feature_vec[0][index] += 1
return img_feature_vec
# 获取所有图片的特征向量
def get_all_features(self,des_list, num_word,centres):
# start_time = datetime.now() # 测试时间
allvec = np.zeros((len(des_list), num_word), 'float32')
for i in range(len(des_list)):
if des_list[i].any() != None:
allvec[i] = self.des2feature(des_list[i], num_word,centres)
# elapsed_time = datetime.now() - start_time # 需要的时间
# print(" 将特征描述转换为特征向量total_time ", elapsed_time, )
return allvec
| [
6,
7,
9,
10,
12
] |
170 | 7e1f77210b3beb4e496ff95686d65bd7d79561a3 | <mask token>
| <mask token>
while True:
if a == 'r':
print(random.randint(1, 6))
elif a == 'q':
print('bye!')
exit()
else:
print("give either 'r' or 'q'")
| <mask token>
a = input("enter 'r' to roll the dice and 'q' to quit")
while True:
if a == 'r':
print(random.randint(1, 6))
elif a == 'q':
print('bye!')
exit()
else:
print("give either 'r' or 'q'")
| import random
a = input("enter 'r' to roll the dice and 'q' to quit")
while True:
if a == 'r':
print(random.randint(1, 6))
elif a == 'q':
print('bye!')
exit()
else:
print("give either 'r' or 'q'")
| import random
a=input("enter 'r' to roll the dice and 'q' to quit")
while True:
if (a=="r"):
print(random.randint(1,6))
elif(a=="q"):
print("bye!")
exit()
else:
print("give either 'r' or 'q'")
| [
0,
1,
2,
3,
4
] |
171 | 9188d58a6d9e832b8908b823d57249fcdd80ff51 | <mask token>
| class Classifier(object):
<mask token>
<mask token>
| class Classifier(object):
<mask token>
def __init__(self, classifier, scaler, orient, color_space,
pix_per_cell, cell_per_block, spatial_size, hist_bins):
"""
Initializes an instance.
Parameters
----------
classifier : Trained SciPy classifier for detecting vehicles.
scaler : SciPy scaler to apply to X.
"""
self.classifier = classifier
self.scaler = scaler
self.color_space = color_space
self.orient = orient
self.pix_per_cell = pix_per_cell
self.cell_per_block = cell_per_block
self.spatial_size = spatial_size
self.hist_bins = hist_bins
| class Classifier(object):
"""
Trained classifier
"""
def __init__(self, classifier, scaler, orient, color_space,
pix_per_cell, cell_per_block, spatial_size, hist_bins):
"""
Initializes an instance.
Parameters
----------
classifier : Trained SciPy classifier for detecting vehicles.
scaler : SciPy scaler to apply to X.
"""
self.classifier = classifier
self.scaler = scaler
self.color_space = color_space
self.orient = orient
self.pix_per_cell = pix_per_cell
self.cell_per_block = cell_per_block
self.spatial_size = spatial_size
self.hist_bins = hist_bins
| class Classifier(object):
"""
Trained classifier
"""
def __init__(self, classifier, scaler, orient, color_space, pix_per_cell,
cell_per_block, spatial_size, hist_bins):
"""
Initializes an instance.
Parameters
----------
classifier : Trained SciPy classifier for detecting vehicles.
scaler : SciPy scaler to apply to X.
"""
self.classifier = classifier
self.scaler = scaler
self.color_space = color_space
self.orient = orient
self.pix_per_cell = pix_per_cell
self.cell_per_block = cell_per_block
self.spatial_size = spatial_size
self.hist_bins = hist_bins
| [
0,
1,
2,
3,
4
] |
172 | 2f6e0b6a7e14ac9c5a38db6fd2b1cf23cff7144e | <mask token>
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
<mask token>
<mask token>
<mask token>
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
<mask token>
<mask token>
<mask token>
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
<mask token>
| <mask token>
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
<mask token>
<mask token>
<mask token>
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
<mask token>
def size(self):
print(self.count)
<mask token>
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
<mask token>
| <mask token>
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if self.front == None:
self.front = self.last = new_nodef
self.count += 1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count += 1
<mask token>
def print_list(self):
if self.front == None:
return
temp = self.front
while temp != None:
print(temp.data)
temp = temp.next
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
def is_empty(self):
if self.count == 0:
return True
else:
return False
def size(self):
print(self.count)
def entry(self):
pal_to_check = str(input(
'Enter the string to check whether palindrome or not :'))
pal_list = [str(i) for i in pal_to_check]
print(pal_list)
pal_check_con = llist.pal_check(pal_list)
print('Is palindrome :', pal_check_con)
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
<mask token>
| class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if self.front == None:
self.front = self.last = new_nodef
self.count += 1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count += 1
def add_last(self, data):
new_nodeb = Node(data)
if self.last == None:
self.last = self.front = new_nodeb
self.count += 1
else:
new_nodeb.prev = self.last
self.last.next = new_nodeb
self.last = new_nodeb
self.count += 1
def print_list(self):
if self.front == None:
return
temp = self.front
while temp != None:
print(temp.data)
temp = temp.next
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
def is_empty(self):
if self.count == 0:
return True
else:
return False
def size(self):
print(self.count)
def entry(self):
pal_to_check = str(input(
'Enter the string to check whether palindrome or not :'))
pal_list = [str(i) for i in pal_to_check]
print(pal_list)
pal_check_con = llist.pal_check(pal_list)
print('Is palindrome :', pal_check_con)
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
if __name__ == '__main__':
llist = dequeue()
llist.entry()
| class Node:
def __init__(self,data):
self.data = data
self.next = None
self.prev = None
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if(self.front == None):
self.front = self.last = new_nodef
self.count +=1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count +=1
def add_last(self,data):
new_nodeb = Node(data)
if(self.last == None):
self.last = self.front = new_nodeb
self.count +=1
else:
new_nodeb.prev = self.last
self.last.next = new_nodeb
self.last = new_nodeb
self.count +=1
def print_list(self):
if(self.front == None):
return
temp = self.front
while(temp != None):
print(temp.data)
temp = temp.next
def remove_front(self):
if(self.front == None):
return
else:
self.front = self.front.next
if(self.front == None):
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if(self.last == None):
return
else:
self.last = self.last.prev
if(self.last == None):
self.front = None
return
self.count -= 1
self.last.next = None
def is_empty(self):
if(self.count == 0):
return True
else:
return False
def size(self):
print(self.count)
def entry(self):
pal_to_check = str(input("Enter the string to check whether palindrome or not :"))
pal_list = [str(i) for i in pal_to_check]
print(pal_list)
pal_check_con = llist.pal_check(pal_list)
print("Is palindrome :",pal_check_con)
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while(self.count != 0):
if(self.front.data == self.last.data):
llist.remove_front()
if(self.count > 1):
llist.remove_last()
else:
return False
if(self.count == 1):
break
return True
#Driver function
if __name__=="__main__":
llist = dequeue()
llist.entry()
| [
5,
6,
10,
14,
15
] |
173 | 95f7710fb0137617025819b6240312ce02915328 | #!/usr/bin/env python
from ROOT import TFileMerger
import subprocess
def MergeFiles(output, fileList, skipList=[], acceptList=[], n=20):
merger = TFileMerger(False)
merger.OutputFile(output);
merger.SetMaxOpenedFiles(n);
print "Total number of files is {0}".format(len(fileList))
for fileName in fileList:
print "Adding file {0}".format(fileName)
merger.AddFile(fileName)
mode = TFileMerger.kAllIncremental
if len(skipList) > 0:
mode = mode | TFileMerger.kSkipListed
if (len(acceptList) > 0):
print("Accept list is being ignored!!!")
for skipObject in skipList:
merger.AddObjectNames(skipObject)
elif len(acceptList) > 0:
mode = mode | TFileMerger.kAcceptListed
for acceptObject in acceptList:
merger.AddObjectNames(acceptObject)
merger.PrintFiles("");
r = merger.PartialMerge(mode);
if not r:
print "Merge error!"
return r
def MergeFilesHadd(output, fileList, n=20):
cmd = ["hadd", "-n", str(n), output]
cmd.extend(fileList)
subprocess.call(cmd)
| null | null | null | null | [
0
] |
174 | bfb2d7b811fd450b53493375fa130649349d308f | <mask token>
| for i in range(0, 20):
if i % 20 == 0:
print('Stop It')
else:
print('The For Loop Failed')
| for i in range(0,20):
if i % 20 == 0:
print('Stop It')
else:
print('The For Loop Failed') | null | null | [
0,
1,
2
] |
175 | 720ec6c222659a13d4a0f3cf9096b70ce6e2b2b3 | <mask token>
class JobGroupManager(object):
<mask token>
def GetJobGroup(self, group_id):
with self._lock:
for group in self.all_job_groups:
if group.id == group_id:
return group
return None
<mask token>
def AddJobGroup(self, group):
with self._lock:
group.id = self._id_producer.GetNextId()
self._logger.debug('Creating runtime environment for %r.', group)
CommandExecuter().RunCommand(cmd.Chain(cmd.RmTree(group.home_dir),
cmd.MakeDir(group.home_dir)))
with self._lock:
self.all_job_groups.append(group)
for job_ in group.jobs:
self.job_manager.AddJob(job_)
group.status = job_group.STATUS_EXECUTING
self._logger.info('Added %r to queue.', group)
return group.id
<mask token>
def NotifyJobComplete(self, job_):
self._logger.debug('Handling %r completion event.', job_)
group = job_.group
with self._lock:
if group.status != job_group.STATUS_FAILED:
if job_.status == job.STATUS_FAILED:
group.status = job_group.STATUS_FAILED
if group.cleanup_on_failure:
for job_ in group.jobs:
self.job_manager.KillJob(job_)
self.job_manager.CleanUpJob(job_)
else:
assert job_.status == job.STATUS_SUCCEEDED
finished = True
for other_job in group.jobs:
assert other_job.status != job.STATUS_FAILED
if other_job.status != job.STATUS_SUCCEEDED:
finished = False
break
if finished and group.status != job_group.STATUS_SUCCEEDED:
group.status = job_group.STATUS_SUCCEEDED
if group.cleanup_on_completion:
for job_ in group.jobs:
self.job_manager.CleanUpJob(job_)
self._job_group_finished.notifyAll()
| <mask token>
class JobGroupManager(object):
def __init__(self, job_manager):
self.all_job_groups = []
self.job_manager = job_manager
self.job_manager.AddListener(self)
self._lock = threading.Lock()
self._job_group_finished = threading.Condition(self._lock)
self._id_producer = IdProducerPolicy()
self._id_producer.Initialize(job_group.JobGroup.HOMEDIR_PREFIX,
'job-group-(?P<id>\\d+)')
self._logger = logging.getLogger(self.__class__.__name__)
def GetJobGroup(self, group_id):
with self._lock:
for group in self.all_job_groups:
if group.id == group_id:
return group
return None
<mask token>
def AddJobGroup(self, group):
with self._lock:
group.id = self._id_producer.GetNextId()
self._logger.debug('Creating runtime environment for %r.', group)
CommandExecuter().RunCommand(cmd.Chain(cmd.RmTree(group.home_dir),
cmd.MakeDir(group.home_dir)))
with self._lock:
self.all_job_groups.append(group)
for job_ in group.jobs:
self.job_manager.AddJob(job_)
group.status = job_group.STATUS_EXECUTING
self._logger.info('Added %r to queue.', group)
return group.id
<mask token>
def NotifyJobComplete(self, job_):
self._logger.debug('Handling %r completion event.', job_)
group = job_.group
with self._lock:
if group.status != job_group.STATUS_FAILED:
if job_.status == job.STATUS_FAILED:
group.status = job_group.STATUS_FAILED
if group.cleanup_on_failure:
for job_ in group.jobs:
self.job_manager.KillJob(job_)
self.job_manager.CleanUpJob(job_)
else:
assert job_.status == job.STATUS_SUCCEEDED
finished = True
for other_job in group.jobs:
assert other_job.status != job.STATUS_FAILED
if other_job.status != job.STATUS_SUCCEEDED:
finished = False
break
if finished and group.status != job_group.STATUS_SUCCEEDED:
group.status = job_group.STATUS_SUCCEEDED
if group.cleanup_on_completion:
for job_ in group.jobs:
self.job_manager.CleanUpJob(job_)
self._job_group_finished.notifyAll()
| <mask token>
class JobGroupManager(object):
def __init__(self, job_manager):
self.all_job_groups = []
self.job_manager = job_manager
self.job_manager.AddListener(self)
self._lock = threading.Lock()
self._job_group_finished = threading.Condition(self._lock)
self._id_producer = IdProducerPolicy()
self._id_producer.Initialize(job_group.JobGroup.HOMEDIR_PREFIX,
'job-group-(?P<id>\\d+)')
self._logger = logging.getLogger(self.__class__.__name__)
def GetJobGroup(self, group_id):
with self._lock:
for group in self.all_job_groups:
if group.id == group_id:
return group
return None
def GetAllJobGroups(self):
with self._lock:
return copy.deepcopy(self.all_job_groups)
def AddJobGroup(self, group):
with self._lock:
group.id = self._id_producer.GetNextId()
self._logger.debug('Creating runtime environment for %r.', group)
CommandExecuter().RunCommand(cmd.Chain(cmd.RmTree(group.home_dir),
cmd.MakeDir(group.home_dir)))
with self._lock:
self.all_job_groups.append(group)
for job_ in group.jobs:
self.job_manager.AddJob(job_)
group.status = job_group.STATUS_EXECUTING
self._logger.info('Added %r to queue.', group)
return group.id
<mask token>
def NotifyJobComplete(self, job_):
self._logger.debug('Handling %r completion event.', job_)
group = job_.group
with self._lock:
if group.status != job_group.STATUS_FAILED:
if job_.status == job.STATUS_FAILED:
group.status = job_group.STATUS_FAILED
if group.cleanup_on_failure:
for job_ in group.jobs:
self.job_manager.KillJob(job_)
self.job_manager.CleanUpJob(job_)
else:
assert job_.status == job.STATUS_SUCCEEDED
finished = True
for other_job in group.jobs:
assert other_job.status != job.STATUS_FAILED
if other_job.status != job.STATUS_SUCCEEDED:
finished = False
break
if finished and group.status != job_group.STATUS_SUCCEEDED:
group.status = job_group.STATUS_SUCCEEDED
if group.cleanup_on_completion:
for job_ in group.jobs:
self.job_manager.CleanUpJob(job_)
self._job_group_finished.notifyAll()
| <mask token>
class JobGroupManager(object):
def __init__(self, job_manager):
self.all_job_groups = []
self.job_manager = job_manager
self.job_manager.AddListener(self)
self._lock = threading.Lock()
self._job_group_finished = threading.Condition(self._lock)
self._id_producer = IdProducerPolicy()
self._id_producer.Initialize(job_group.JobGroup.HOMEDIR_PREFIX,
'job-group-(?P<id>\\d+)')
self._logger = logging.getLogger(self.__class__.__name__)
def GetJobGroup(self, group_id):
with self._lock:
for group in self.all_job_groups:
if group.id == group_id:
return group
return None
def GetAllJobGroups(self):
with self._lock:
return copy.deepcopy(self.all_job_groups)
def AddJobGroup(self, group):
with self._lock:
group.id = self._id_producer.GetNextId()
self._logger.debug('Creating runtime environment for %r.', group)
CommandExecuter().RunCommand(cmd.Chain(cmd.RmTree(group.home_dir),
cmd.MakeDir(group.home_dir)))
with self._lock:
self.all_job_groups.append(group)
for job_ in group.jobs:
self.job_manager.AddJob(job_)
group.status = job_group.STATUS_EXECUTING
self._logger.info('Added %r to queue.', group)
return group.id
def KillJobGroup(self, group):
with self._lock:
self._logger.debug('Killing all jobs that belong to %r.', group)
for job_ in group.jobs:
self.job_manager.KillJob(job_)
self._logger.debug('Waiting for jobs to quit.')
while group.status not in [job_group.STATUS_SUCCEEDED,
job_group.STATUS_FAILED]:
self._job_group_finished.wait()
def NotifyJobComplete(self, job_):
self._logger.debug('Handling %r completion event.', job_)
group = job_.group
with self._lock:
if group.status != job_group.STATUS_FAILED:
if job_.status == job.STATUS_FAILED:
group.status = job_group.STATUS_FAILED
if group.cleanup_on_failure:
for job_ in group.jobs:
self.job_manager.KillJob(job_)
self.job_manager.CleanUpJob(job_)
else:
assert job_.status == job.STATUS_SUCCEEDED
finished = True
for other_job in group.jobs:
assert other_job.status != job.STATUS_FAILED
if other_job.status != job.STATUS_SUCCEEDED:
finished = False
break
if finished and group.status != job_group.STATUS_SUCCEEDED:
group.status = job_group.STATUS_SUCCEEDED
if group.cleanup_on_completion:
for job_ in group.jobs:
self.job_manager.CleanUpJob(job_)
self._job_group_finished.notifyAll()
| # Copyright 2010 Google Inc. All Rights Reserved.
#
import copy
import logging
import threading
from automation.common import command as cmd
from automation.common import logger
from automation.common.command_executer import CommandExecuter
from automation.common import job
from automation.common import job_group
from automation.server.job_manager import IdProducerPolicy
class JobGroupManager(object):
def __init__(self, job_manager):
self.all_job_groups = []
self.job_manager = job_manager
self.job_manager.AddListener(self)
self._lock = threading.Lock()
self._job_group_finished = threading.Condition(self._lock)
self._id_producer = IdProducerPolicy()
self._id_producer.Initialize(job_group.JobGroup.HOMEDIR_PREFIX,
'job-group-(?P<id>\d+)')
self._logger = logging.getLogger(self.__class__.__name__)
def GetJobGroup(self, group_id):
with self._lock:
for group in self.all_job_groups:
if group.id == group_id:
return group
return None
def GetAllJobGroups(self):
with self._lock:
return copy.deepcopy(self.all_job_groups)
def AddJobGroup(self, group):
with self._lock:
group.id = self._id_producer.GetNextId()
self._logger.debug('Creating runtime environment for %r.', group)
CommandExecuter().RunCommand(cmd.Chain(
cmd.RmTree(group.home_dir), cmd.MakeDir(group.home_dir)))
with self._lock:
self.all_job_groups.append(group)
for job_ in group.jobs:
self.job_manager.AddJob(job_)
group.status = job_group.STATUS_EXECUTING
self._logger.info('Added %r to queue.', group)
return group.id
def KillJobGroup(self, group):
with self._lock:
self._logger.debug('Killing all jobs that belong to %r.', group)
for job_ in group.jobs:
self.job_manager.KillJob(job_)
self._logger.debug('Waiting for jobs to quit.')
# Lets block until the group is killed so we know it is completed
# when we return.
while group.status not in [job_group.STATUS_SUCCEEDED,
job_group.STATUS_FAILED]:
self._job_group_finished.wait()
def NotifyJobComplete(self, job_):
self._logger.debug('Handling %r completion event.', job_)
group = job_.group
with self._lock:
# We need to perform an action only if the group hasn't already failed.
if group.status != job_group.STATUS_FAILED:
if job_.status == job.STATUS_FAILED:
# We have a failed job, abort the job group
group.status = job_group.STATUS_FAILED
if group.cleanup_on_failure:
for job_ in group.jobs:
# TODO(bjanakiraman): We should probably only kill dependent jobs
# instead of the whole job group.
self.job_manager.KillJob(job_)
self.job_manager.CleanUpJob(job_)
else:
# The job succeeded successfully -- lets check to see if we are done.
assert job_.status == job.STATUS_SUCCEEDED
finished = True
for other_job in group.jobs:
assert other_job.status != job.STATUS_FAILED
if other_job.status != job.STATUS_SUCCEEDED:
finished = False
break
if finished and group.status != job_group.STATUS_SUCCEEDED:
# TODO(kbaclawski): Without check performed above following code
# could be called more than once. This would trigger StateMachine
# crash, because it cannot transition from STATUS_SUCCEEDED to
# STATUS_SUCCEEDED. Need to address that bug in near future.
group.status = job_group.STATUS_SUCCEEDED
if group.cleanup_on_completion:
for job_ in group.jobs:
self.job_manager.CleanUpJob(job_)
self._job_group_finished.notifyAll()
| [
4,
5,
6,
7,
9
] |
176 | 700d35f9e941fe9325821a377ec1ca1c245ddaec | <mask token>
| <mask token>
def write_file(path, text):
path.write_text(text)
return path
<mask token>
| <mask token>
def write_file(path, text):
path.write_text(text)
return path
def test_argparse(tmp_path):
tmpl = write_file(tmp_path / 't.yaml', 'key: {{ var }}')
inp = write_file(tmp_path / 'i.json', '{"var": "Hello!"}')
out = tmp_path / 'o.json'
jiml.cli.main(jiml.cli.parse_args('-t', str(tmpl), '-i', str(inp), '-o',
str(out)))
| import jiml.cli
def write_file(path, text):
path.write_text(text)
return path
def test_argparse(tmp_path):
tmpl = write_file(tmp_path / 't.yaml', 'key: {{ var }}')
inp = write_file(tmp_path / 'i.json', '{"var": "Hello!"}')
out = tmp_path / 'o.json'
jiml.cli.main(jiml.cli.parse_args('-t', str(tmpl), '-i', str(inp), '-o',
str(out)))
| import jiml.cli
def write_file(path, text):
path.write_text(text)
return path
def test_argparse(tmp_path):
tmpl = write_file(tmp_path / 't.yaml', 'key: {{ var }}')
inp = write_file(tmp_path / 'i.json', '{"var": "Hello!"}')
out = tmp_path / 'o.json'
jiml.cli.main(jiml.cli.parse_args(
'-t', str(tmpl),
'-i', str(inp),
'-o', str(out),
))
| [
0,
1,
2,
3,
4
] |
177 | e868998833774c829b05ae8da3280bed61363be1 | <mask token>
| <mask token>
compiler.generate_proto(PROTO, '.')
compiler.generate_proto(PROTO, '.', with_plugin='python_rpcz', suffix=
'_rpcz.py')
| <mask token>
PROTO = '../index_server.proto'
compiler.generate_proto(PROTO, '.')
compiler.generate_proto(PROTO, '.', with_plugin='python_rpcz', suffix=
'_rpcz.py')
| from rpcz import compiler
PROTO = '../index_server.proto'
compiler.generate_proto(PROTO, '.')
compiler.generate_proto(PROTO, '.', with_plugin='python_rpcz', suffix=
'_rpcz.py')
| #!/usr/bin/env python2
from rpcz import compiler
PROTO = '../index_server.proto'
compiler.generate_proto(PROTO, '.')
compiler.generate_proto(
PROTO, '.',
with_plugin='python_rpcz', suffix='_rpcz.py')
| [
0,
1,
2,
3,
4
] |
178 | 848e4abcd0b4f118030fc62f1272a19bfce9db4e | <mask token>
def interpolate_images(baseline, image, alphas):
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(image, axis=0)
delta = input_x - baseline_x
images = baseline_x + alphas_x * delta
return images
def compute_gradients(model, images, target_class):
with tf.GradientTape() as tape:
tape.watch(images)
raw_probs = model(images)
probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class
gradients = tape.gradient(probs, images)
return gradients
def integral_approximation(gradients):
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
return tf.math.reduce_mean(grads, axis=0)
<mask token>
| <mask token>
def interpolate_images(baseline, image, alphas):
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(image, axis=0)
delta = input_x - baseline_x
images = baseline_x + alphas_x * delta
return images
def compute_gradients(model, images, target_class):
with tf.GradientTape() as tape:
tape.watch(images)
raw_probs = model(images)
probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class
gradients = tape.gradient(probs, images)
return gradients
def integral_approximation(gradients):
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
return tf.math.reduce_mean(grads, axis=0)
@tf.function
def integrated_gradients(model, baseline, image, target_class, m_steps=50,
batch_size=32):
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)
gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)
for alpha in tf.range(0, len(alphas), batch_size):
from_ = alpha
to = tf.minimum(from_ + batch_size, len(alphas))
alpha_batch = alphas[from_:to]
interpolated_path_input_batch = interpolate_images(baseline=
baseline, image=image, alphas=alpha_batch)
gradient_batch = compute_gradients(model=model, images=
interpolated_path_input_batch, target_class=target_class)
gradient_batches = gradient_batches.scatter(tf.range(from_, to),
gradient_batch)
total_gradients = gradient_batches.stack()
avg_gradients = integral_approximation(gradients=total_gradients)
return (image - baseline) * avg_gradients
def main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',
model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,
confusion_class='true_pos'):
print('Setting gpu thread mode to gpu_private.')
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
print('Configuring distribution strategy')
use_tpu = False
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
use_tpu = True
print('TPU detected.')
print('All devices: ', tf.config.list_logical_devices('TPU'))
except ValueError:
strategy = tf.distribute.MirroredStrategy()
print('GPUs detected.')
print('Number of accelerators: ', strategy.num_replicas_in_sync)
tf.keras.mixed_precision.set_global_policy('mixed_float16')
scope = strategy.scope()
GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'
GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')
GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')
fs = gcsfs.GCSFileSystem()
LOCAL_SAVED_MODEL_DIR = 'saved_model'
LOCAL_OUTPUT_DIR = 'output'
os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)
os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)
fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)
GCS_DATA_PATH = f'gs://{gcs_bucket}'
GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'
)
if use_tpu:
device_alldata_dir = GCS_ALLDATA_DIR
else:
LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')
os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)
fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)
device_alldata_dir = LOCAL_ALLDATA_DIR
volume_shape = 128, 128, 128, n_channels
element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.
TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None
), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))
dataset = tf.data.experimental.load(op.join(device_alldata_dir,
confusion_class), element_spec=element_spec)
volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]
baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)
print('Computing integrated gradients')
with scope:
model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)
ig_attributions = [integrated_gradients(model=model, baseline=
baseline, image=volume, target_class=target_class, m_steps=128,
batch_size=1) for volume in volumes]
if target_class == 1:
postfix = 'attribution_pass'
else:
postfix = 'attribution_fail'
ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))
tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,
f'ig_{confusion_class}_{postfix}'))
affine = np.diag([1, 1, 1, 1])
volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),
affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),
affine)} for volume in volumes]
ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),
affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy
(), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[
:, :, :, :4], axis=-1).numpy(), affine)} for attribution in
ig_attributions]
for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,
ig_niftis)):
for key, value in volume_nifti.items():
nib.save(value, op.join(LOCAL_OUTPUT_DIR,
f'{confusion_class}_{key}_{idx}.nii.gz'))
for key, value in ig_nifti.items():
nib.save(value, op.join(LOCAL_OUTPUT_DIR,
f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))
fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)
<mask token>
| <mask token>
def interpolate_images(baseline, image, alphas):
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(image, axis=0)
delta = input_x - baseline_x
images = baseline_x + alphas_x * delta
return images
def compute_gradients(model, images, target_class):
with tf.GradientTape() as tape:
tape.watch(images)
raw_probs = model(images)
probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class
gradients = tape.gradient(probs, images)
return gradients
def integral_approximation(gradients):
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
return tf.math.reduce_mean(grads, axis=0)
@tf.function
def integrated_gradients(model, baseline, image, target_class, m_steps=50,
batch_size=32):
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)
gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)
for alpha in tf.range(0, len(alphas), batch_size):
from_ = alpha
to = tf.minimum(from_ + batch_size, len(alphas))
alpha_batch = alphas[from_:to]
interpolated_path_input_batch = interpolate_images(baseline=
baseline, image=image, alphas=alpha_batch)
gradient_batch = compute_gradients(model=model, images=
interpolated_path_input_batch, target_class=target_class)
gradient_batches = gradient_batches.scatter(tf.range(from_, to),
gradient_batch)
total_gradients = gradient_batches.stack()
avg_gradients = integral_approximation(gradients=total_gradients)
return (image - baseline) * avg_gradients
def main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',
model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,
confusion_class='true_pos'):
print('Setting gpu thread mode to gpu_private.')
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
print('Configuring distribution strategy')
use_tpu = False
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
use_tpu = True
print('TPU detected.')
print('All devices: ', tf.config.list_logical_devices('TPU'))
except ValueError:
strategy = tf.distribute.MirroredStrategy()
print('GPUs detected.')
print('Number of accelerators: ', strategy.num_replicas_in_sync)
tf.keras.mixed_precision.set_global_policy('mixed_float16')
scope = strategy.scope()
GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'
GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')
GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')
fs = gcsfs.GCSFileSystem()
LOCAL_SAVED_MODEL_DIR = 'saved_model'
LOCAL_OUTPUT_DIR = 'output'
os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)
os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)
fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)
GCS_DATA_PATH = f'gs://{gcs_bucket}'
GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'
)
if use_tpu:
device_alldata_dir = GCS_ALLDATA_DIR
else:
LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')
os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)
fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)
device_alldata_dir = LOCAL_ALLDATA_DIR
volume_shape = 128, 128, 128, n_channels
element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.
TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None
), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))
dataset = tf.data.experimental.load(op.join(device_alldata_dir,
confusion_class), element_spec=element_spec)
volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]
baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)
print('Computing integrated gradients')
with scope:
model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)
ig_attributions = [integrated_gradients(model=model, baseline=
baseline, image=volume, target_class=target_class, m_steps=128,
batch_size=1) for volume in volumes]
if target_class == 1:
postfix = 'attribution_pass'
else:
postfix = 'attribution_fail'
ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))
tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,
f'ig_{confusion_class}_{postfix}'))
affine = np.diag([1, 1, 1, 1])
volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),
affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),
affine)} for volume in volumes]
ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),
affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy
(), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[
:, :, :, :4], axis=-1).numpy(), affine)} for attribution in
ig_attributions]
for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,
ig_niftis)):
for key, value in volume_nifti.items():
nib.save(value, op.join(LOCAL_OUTPUT_DIR,
f'{confusion_class}_{key}_{idx}.nii.gz'))
for key, value in ig_nifti.items():
nib.save(value, op.join(LOCAL_OUTPUT_DIR,
f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))
fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gcs_bucket', type=str, help=
'The name of the gcs bucket that will contain the saved models, checkpoints, etc.'
)
parser.add_argument('--n_channels', type=int, help=
'The number of channels in the data.', default=5)
parser.add_argument('--dataset_name', type=str, help=
'The name of the dataset in the tfrecs folder of the GCS bucket.',
default='b0-tensorfa-dwiqc')
parser.add_argument('--model_dir', type=str, help=
'The name of the GCS directory in which the tensorflow model is saved.'
, default='b0_tensorfa_dwiqc')
parser.add_argument('--dataset_seed', type=int, help=
'The seed for the dataset', default=8)
parser.add_argument('--target_class', type=int, help=
'The target class for the integrated gradients.', default=1)
parser.add_argument('--confusion_class', type=str, help=
'The confusion class for which to compute integrated gradients',
default='true_pos')
args = parser.parse_args()
main(gcs_bucket=args.gcs_bucket, n_channels=args.n_channels,
dataset_name=args.dataset_name, model_dir=args.model_dir,
dataset_seed=args.dataset_seed, target_class=args.target_class,
confusion_class=args.confusion_class)
| import argparse
import gc
import gcsfs
import nibabel as nib
import nilearn
import nobrainer
import numpy as np
import os
import os.path as op
import pandas as pd
import tensorflow as tf
def interpolate_images(baseline, image, alphas):
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(image, axis=0)
delta = input_x - baseline_x
images = baseline_x + alphas_x * delta
return images
def compute_gradients(model, images, target_class):
with tf.GradientTape() as tape:
tape.watch(images)
raw_probs = model(images)
probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class
gradients = tape.gradient(probs, images)
return gradients
def integral_approximation(gradients):
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
return tf.math.reduce_mean(grads, axis=0)
@tf.function
def integrated_gradients(model, baseline, image, target_class, m_steps=50,
batch_size=32):
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)
gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)
for alpha in tf.range(0, len(alphas), batch_size):
from_ = alpha
to = tf.minimum(from_ + batch_size, len(alphas))
alpha_batch = alphas[from_:to]
interpolated_path_input_batch = interpolate_images(baseline=
baseline, image=image, alphas=alpha_batch)
gradient_batch = compute_gradients(model=model, images=
interpolated_path_input_batch, target_class=target_class)
gradient_batches = gradient_batches.scatter(tf.range(from_, to),
gradient_batch)
total_gradients = gradient_batches.stack()
avg_gradients = integral_approximation(gradients=total_gradients)
return (image - baseline) * avg_gradients
def main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',
model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,
confusion_class='true_pos'):
print('Setting gpu thread mode to gpu_private.')
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
print('Configuring distribution strategy')
use_tpu = False
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
use_tpu = True
print('TPU detected.')
print('All devices: ', tf.config.list_logical_devices('TPU'))
except ValueError:
strategy = tf.distribute.MirroredStrategy()
print('GPUs detected.')
print('Number of accelerators: ', strategy.num_replicas_in_sync)
tf.keras.mixed_precision.set_global_policy('mixed_float16')
scope = strategy.scope()
GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'
GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')
GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')
fs = gcsfs.GCSFileSystem()
LOCAL_SAVED_MODEL_DIR = 'saved_model'
LOCAL_OUTPUT_DIR = 'output'
os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)
os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)
fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)
GCS_DATA_PATH = f'gs://{gcs_bucket}'
GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'
)
if use_tpu:
device_alldata_dir = GCS_ALLDATA_DIR
else:
LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')
os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)
fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)
device_alldata_dir = LOCAL_ALLDATA_DIR
volume_shape = 128, 128, 128, n_channels
element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.
TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None
), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))
dataset = tf.data.experimental.load(op.join(device_alldata_dir,
confusion_class), element_spec=element_spec)
volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]
baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)
print('Computing integrated gradients')
with scope:
model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)
ig_attributions = [integrated_gradients(model=model, baseline=
baseline, image=volume, target_class=target_class, m_steps=128,
batch_size=1) for volume in volumes]
if target_class == 1:
postfix = 'attribution_pass'
else:
postfix = 'attribution_fail'
ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))
tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,
f'ig_{confusion_class}_{postfix}'))
affine = np.diag([1, 1, 1, 1])
volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),
affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),
affine)} for volume in volumes]
ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),
affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy
(), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[
:, :, :, :4], axis=-1).numpy(), affine)} for attribution in
ig_attributions]
for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,
ig_niftis)):
for key, value in volume_nifti.items():
nib.save(value, op.join(LOCAL_OUTPUT_DIR,
f'{confusion_class}_{key}_{idx}.nii.gz'))
for key, value in ig_nifti.items():
nib.save(value, op.join(LOCAL_OUTPUT_DIR,
f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))
fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gcs_bucket', type=str, help=
'The name of the gcs bucket that will contain the saved models, checkpoints, etc.'
)
parser.add_argument('--n_channels', type=int, help=
'The number of channels in the data.', default=5)
parser.add_argument('--dataset_name', type=str, help=
'The name of the dataset in the tfrecs folder of the GCS bucket.',
default='b0-tensorfa-dwiqc')
parser.add_argument('--model_dir', type=str, help=
'The name of the GCS directory in which the tensorflow model is saved.'
, default='b0_tensorfa_dwiqc')
parser.add_argument('--dataset_seed', type=int, help=
'The seed for the dataset', default=8)
parser.add_argument('--target_class', type=int, help=
'The target class for the integrated gradients.', default=1)
parser.add_argument('--confusion_class', type=str, help=
'The confusion class for which to compute integrated gradients',
default='true_pos')
args = parser.parse_args()
main(gcs_bucket=args.gcs_bucket, n_channels=args.n_channels,
dataset_name=args.dataset_name, model_dir=args.model_dir,
dataset_seed=args.dataset_seed, target_class=args.target_class,
confusion_class=args.confusion_class)
| import argparse
import gc
import gcsfs
import nibabel as nib
import nilearn
import nobrainer
import numpy as np
import os
import os.path as op
import pandas as pd
import tensorflow as tf
def interpolate_images(baseline, image, alphas):
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(image, axis=0)
delta = input_x - baseline_x
images = baseline_x + alphas_x * delta
return images
def compute_gradients(model, images, target_class):
with tf.GradientTape() as tape:
tape.watch(images)
raw_probs = model(images)
probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class
gradients = tape.gradient(probs, images)
return gradients
def integral_approximation(gradients):
# riemann_trapezoidal
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
return tf.math.reduce_mean(grads, axis=0)
@tf.function
def integrated_gradients(
model, baseline, image, target_class, m_steps=50, batch_size=32
):
# 1. Generate alphas.
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)
# Initialize TensorArray outside loop to collect gradients.
gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)
# Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.
for alpha in tf.range(0, len(alphas), batch_size):
from_ = alpha
to = tf.minimum(from_ + batch_size, len(alphas))
alpha_batch = alphas[from_:to]
# 2. Generate interpolated inputs between baseline and input.
interpolated_path_input_batch = interpolate_images(
baseline=baseline, image=image, alphas=alpha_batch
)
# 3. Compute gradients between model outputs and interpolated inputs.
gradient_batch = compute_gradients(
model=model,
images=interpolated_path_input_batch,
target_class=target_class,
)
# Write batch indices and gradients to extend TensorArray.
gradient_batches = gradient_batches.scatter(tf.range(from_, to), gradient_batch)
# Stack path gradients together row-wise into single tensor.
total_gradients = gradient_batches.stack()
# 4. Integral approximation through averaging gradients.
avg_gradients = integral_approximation(gradients=total_gradients)
# 5. Scale integrated gradients with respect to input.
return (image - baseline) * avg_gradients
def main(
gcs_bucket,
n_channels=5,
dataset_name="b0-tensorfa-dwiqc",
model_dir="b0_tensorfa_dwiqc",
dataset_seed=8,
target_class=1,
confusion_class="true_pos",
):
print("Setting gpu thread mode to gpu_private.")
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
print("Configuring distribution strategy")
use_tpu = False
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
use_tpu = True
print("TPU detected.")
print("All devices: ", tf.config.list_logical_devices("TPU"))
except ValueError:
strategy = tf.distribute.MirroredStrategy()
print("GPUs detected.")
print("Number of accelerators: ", strategy.num_replicas_in_sync)
# Train using mixed-precision policy
tf.keras.mixed_precision.set_global_policy("mixed_float16")
scope = strategy.scope()
# Setting location were training logs and checkpoints will be stored
GCS_BASE_PATH = f"gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}"
GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, "saved_model")
GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, "integrated_gradients")
fs = gcsfs.GCSFileSystem()
LOCAL_SAVED_MODEL_DIR = "saved_model"
LOCAL_OUTPUT_DIR = "output"
os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)
os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)
fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)
# Specify the datasets on GCP storage
GCS_DATA_PATH = f"gs://{gcs_bucket}"
GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, "tfrecs", dataset_name, "all-data")
if use_tpu:
device_alldata_dir = GCS_ALLDATA_DIR
else:
LOCAL_ALLDATA_DIR = op.join(".", "tfrecs", dataset_name, "all-data")
os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)
fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)
device_alldata_dir = LOCAL_ALLDATA_DIR
volume_shape = (128, 128, 128, n_channels)
element_spec = (
tf.TensorSpec(shape=(), dtype=tf.int64, name=None),
(
tf.TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None),
tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None),
),
)
dataset = tf.data.experimental.load(
op.join(device_alldata_dir, confusion_class),
element_spec=element_spec,
)
volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]
baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)
print("Computing integrated gradients")
with scope:
model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)
ig_attributions = [
integrated_gradients(
model=model,
baseline=baseline,
image=volume,
target_class=target_class,
m_steps=128,
batch_size=1,
)
for volume in volumes
]
if target_class == 1:
postfix = "attribution_pass"
else:
postfix = "attribution_fail"
ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))
tf.data.experimental.save(
ig_dataset,
op.join(LOCAL_OUTPUT_DIR, f"ig_{confusion_class}_{postfix}"),
)
affine = np.diag([1, 1, 1, 1])
volume_niftis = [
{
"b0": nib.Nifti1Image(volume[:, :, :, 3].numpy(), affine),
"color_fa": nib.Nifti1Image(volume[:, :, :, :3].numpy(), affine),
}
for volume in volumes
]
ig_niftis = [
{
"b0": nib.Nifti1Image(attribution[:, :, :, 3].numpy(), affine),
"color_fa": nib.Nifti1Image(attribution[:, :, :, :3].numpy(), affine),
"sum": nib.Nifti1Image(
tf.math.reduce_sum(attribution[:, :, :, :4], axis=-1).numpy(), affine
),
}
for attribution in ig_attributions
]
for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis, ig_niftis)):
for key, value in volume_nifti.items():
nib.save(
value,
op.join(LOCAL_OUTPUT_DIR, f"{confusion_class}_{key}_{idx}.nii.gz"),
)
for key, value in ig_nifti.items():
nib.save(
value,
op.join(
LOCAL_OUTPUT_DIR, f"{confusion_class}_{postfix}_{key}_{idx}.nii.gz"
),
)
fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--gcs_bucket",
type=str,
help=(
"The name of the gcs bucket that will contain the saved models, "
"checkpoints, etc."
),
)
parser.add_argument(
"--n_channels",
type=int,
help="The number of channels in the data.",
default=5,
)
parser.add_argument(
"--dataset_name",
type=str,
help="The name of the dataset in the tfrecs folder of the GCS bucket.",
default="b0-tensorfa-dwiqc",
)
parser.add_argument(
"--model_dir",
type=str,
help="The name of the GCS directory in which the tensorflow model is saved.",
default="b0_tensorfa_dwiqc",
)
parser.add_argument(
"--dataset_seed",
type=int,
help="The seed for the dataset",
default=8,
)
parser.add_argument(
"--target_class",
type=int,
help="The target class for the integrated gradients.",
default=1,
)
parser.add_argument(
"--confusion_class",
type=str,
help="The confusion class for which to compute integrated gradients",
default="true_pos",
)
args = parser.parse_args()
main(
gcs_bucket=args.gcs_bucket,
n_channels=args.n_channels,
dataset_name=args.dataset_name,
model_dir=args.model_dir,
dataset_seed=args.dataset_seed,
target_class=args.target_class,
confusion_class=args.confusion_class,
)
| [
3,
5,
6,
7,
8
] |
179 | 3817770a80f8ab16322485522be18edd6b3f5516 | <mask token>
| <mask token>
@app.route('/my/<name>/<age>')
def my(name, age):
name = 'saral'
age = '20'
return 'my name is {} and age is {}'.format(name, age)
| <mask token>
app = Flask('__name__')
@app.route('/my/<name>/<age>')
def my(name, age):
name = 'saral'
age = '20'
return 'my name is {} and age is {}'.format(name, age)
| from flask import Flask
app = Flask('__name__')
@app.route('/my/<name>/<age>')
def my(name, age):
name = 'saral'
age = '20'
return 'my name is {} and age is {}'.format(name, age)
| from flask import Flask
app=Flask('__name__')
@app.route("/my/<name>/<age>")
def my(name,age):
name ="saral"
age="20"
return "my name is {} and age is {}".format(name,age) | [
0,
1,
2,
3,
4
] |
180 | 9931fc25118981bcce80cffd3fda9dc99d951bf5 | <mask token>
| <mask token>
if len(s) < 26:
for i in range(26):
c = chr(ord('a') + i)
if c not in s:
print(s + c)
exit()
else:
for i in reversed(range(1, 26)):
if s[i - 1] < s[i]:
s1 = s[0:i - 1]
for j in range(26):
c = chr(ord('a') + j)
if c > s[i - 1] and c not in s1:
print(s1 + c)
exit()
print(-1)
| s = input()
if len(s) < 26:
for i in range(26):
c = chr(ord('a') + i)
if c not in s:
print(s + c)
exit()
else:
for i in reversed(range(1, 26)):
if s[i - 1] < s[i]:
s1 = s[0:i - 1]
for j in range(26):
c = chr(ord('a') + j)
if c > s[i - 1] and c not in s1:
print(s1 + c)
exit()
print(-1)
| s = input()
if len(s) < 26:
for i in range(26):
c = chr(ord("a")+i)
if c not in s:
print(s+c)
exit()
else:
for i in reversed(range(1,26)):
if s[i-1] < s[i]:
s1 = s[0:i-1]
for j in range(26):
c = chr(ord("a")+j)
if c > s[i-1] and c not in s1:
print(s1+c)
exit()
print(-1) | null | [
0,
1,
2,
3
] |
181 | e30bd33ae18881307e7cf4f60d3c60eae91573bc | <mask token>
class MultiSpeakerBRIR(SimpleFreeFieldHRIR):
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class MultiSpeakerBRIR(SimpleFreeFieldHRIR):
<mask token>
<mask token>
def __init__(self):
super().__init__()
self.default_objects['Receiver']['count'] = 2
self.conditions['must have 2 Receivers'] = (lambda name, fixed,
variances, count: name != 'Receiver' or count == 2)
self.conditions['must have Listener Up and View'] = (lambda name,
fixed, variances, count: name != 'Listener' or 'Up' in fixed +
variances and 'View' in fixed + variances)
(self.conditions['must have both Emitter View and Up or neither']) = (
lambda name, fixed, variances, count: name != 'Emitter' or
'View' not in fixed + variances or 'Up' in fixed + variances and
'View' in fixed + variances)
def add_metadata(self, database):
super().add_metadata(database)
database.Data.Type = 'FIRE'
database.Room.Type = 'reverberant'
return
| <mask token>
class MultiSpeakerBRIR(SimpleFreeFieldHRIR):
name = 'MultiSpeakerBRIR'
version = '0.3'
def __init__(self):
super().__init__()
self.default_objects['Receiver']['count'] = 2
self.conditions['must have 2 Receivers'] = (lambda name, fixed,
variances, count: name != 'Receiver' or count == 2)
self.conditions['must have Listener Up and View'] = (lambda name,
fixed, variances, count: name != 'Listener' or 'Up' in fixed +
variances and 'View' in fixed + variances)
(self.conditions['must have both Emitter View and Up or neither']) = (
lambda name, fixed, variances, count: name != 'Emitter' or
'View' not in fixed + variances or 'Up' in fixed + variances and
'View' in fixed + variances)
def add_metadata(self, database):
super().add_metadata(database)
database.Data.Type = 'FIRE'
database.Room.Type = 'reverberant'
return
| from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR
class MultiSpeakerBRIR(SimpleFreeFieldHRIR):
name = 'MultiSpeakerBRIR'
version = '0.3'
def __init__(self):
super().__init__()
self.default_objects['Receiver']['count'] = 2
self.conditions['must have 2 Receivers'] = (lambda name, fixed,
variances, count: name != 'Receiver' or count == 2)
self.conditions['must have Listener Up and View'] = (lambda name,
fixed, variances, count: name != 'Listener' or 'Up' in fixed +
variances and 'View' in fixed + variances)
(self.conditions['must have both Emitter View and Up or neither']) = (
lambda name, fixed, variances, count: name != 'Emitter' or
'View' not in fixed + variances or 'Up' in fixed + variances and
'View' in fixed + variances)
def add_metadata(self, database):
super().add_metadata(database)
database.Data.Type = 'FIRE'
database.Room.Type = 'reverberant'
return
| # Copyright (c) 2019 Jannika Lossner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR
class MultiSpeakerBRIR(SimpleFreeFieldHRIR):
name = "MultiSpeakerBRIR"
version = "0.3"
def __init__(self):
super().__init__()
self.default_objects["Receiver"]["count"] = 2
#self.default_data["IR"] = 1
self.conditions["must have 2 Receivers"] = lambda name, fixed, variances, count: name != "Receiver" or count == 2
self.conditions["must have Listener Up and View"] = lambda name, fixed, variances, count: name != "Listener" or ("Up" in fixed + variances and "View" in fixed + variances)
self.conditions["must have both Emitter View and Up or neither"] = lambda name, fixed, variances, count: name != "Emitter" or "View" not in fixed + variances or ("Up" in fixed + variances and "View" in fixed + variances)
def add_metadata(self, database):
super().add_metadata(database)
database.Data.Type = "FIRE"
database.Room.Type = "reverberant"
return
| [
1,
3,
4,
5,
6
] |
182 | ba8b46f830abaaaedf1730cba2f04fd677f11da4 | <mask token>
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
pass
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
if isinstance(level, int) or not type:
level = int(level)
if level in _levelToName(level):
rv = level
else:
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or not type and int(level
) in _levelToName:
rv = NOTSET if level < NOTSET else level
if rv is None and map:
raise ValueError
else:
rv = level
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
raise TypeError('Level not an integer or a valid string: %r' %
level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
| <mask token>
def getLevelName(level, format='%s', no_match=None):
"""Return the textual representation of 'level'.
Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via
addLevelName(), the string associated with 'level' is chosen.
Otherwise, 'level' (no_match == NONE) or 'no_match' is returned
subject to formatting per 'format'.
In the spirit of "be liberal in what you accept", any value of 'level'
that survives int() will be accepted (FUTURE: subject to 'strict').
Issue #29220 introduced the BAD IDEA that passing an empty string
(an obvious TypeError) would return same. This was requested in order
to squash the fall-thru behavior of returning "Level %s", when the
multi-word response was itself the actual ERROR since it broke all
field-based log processing! The astute reader will note that an empty
string causes the same pathology...
DEPRECATION WARNING:
This function WRONGLY returned the mapped Integer if a String form
was provided. This violates the clearly stated purpose and forces
the caller into defensive Type checks or suffer future TypeErrors.
NOTE:
Does no bounds or validity checks. Use _checkLevel().
FUTURE:
In strict mode, enforce parameter dataType, case, or membership.
"""
try:
if level in logging._nameToLevel:
return format % level
result = logging._levelToName.get(int(level))
if result is not None:
return format % result
except TypeError:
if raiseExceptions:
raise "parameter 'level' must reduce to an Integer"
except ValueError:
pass
return format % level if no_match is None else format % no_match
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
pass
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
if isinstance(level, int) or not type:
level = int(level)
if level in _levelToName(level):
rv = level
else:
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or not type and int(level
) in _levelToName:
rv = NOTSET if level < NOTSET else level
if rv is None and map:
raise ValueError
else:
rv = level
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
raise TypeError('Level not an integer or a valid string: %r' %
level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
| <mask token>
__all__ = ['getLevelName', 'getLevel']
<mask token>
def getLevelName(level, format='%s', no_match=None):
"""Return the textual representation of 'level'.
Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via
addLevelName(), the string associated with 'level' is chosen.
Otherwise, 'level' (no_match == NONE) or 'no_match' is returned
subject to formatting per 'format'.
In the spirit of "be liberal in what you accept", any value of 'level'
that survives int() will be accepted (FUTURE: subject to 'strict').
Issue #29220 introduced the BAD IDEA that passing an empty string
(an obvious TypeError) would return same. This was requested in order
to squash the fall-thru behavior of returning "Level %s", when the
multi-word response was itself the actual ERROR since it broke all
field-based log processing! The astute reader will note that an empty
string causes the same pathology...
DEPRECATION WARNING:
This function WRONGLY returned the mapped Integer if a String form
was provided. This violates the clearly stated purpose and forces
the caller into defensive Type checks or suffer future TypeErrors.
NOTE:
Does no bounds or validity checks. Use _checkLevel().
FUTURE:
In strict mode, enforce parameter dataType, case, or membership.
"""
try:
if level in logging._nameToLevel:
return format % level
result = logging._levelToName.get(int(level))
if result is not None:
return format % result
except TypeError:
if raiseExceptions:
raise "parameter 'level' must reduce to an Integer"
except ValueError:
pass
return format % level if no_match is None else format % no_match
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
pass
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
if isinstance(level, int) or not type:
level = int(level)
if level in _levelToName(level):
rv = level
else:
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or not type and int(level
) in _levelToName:
rv = NOTSET if level < NOTSET else level
if rv is None and map:
raise ValueError
else:
rv = level
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
raise TypeError('Level not an integer or a valid string: %r' %
level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
| from __future__ import print_function, absolute_import, unicode_literals, division
__all__ = ['getLevelName', 'getLevel']
import logging
def getLevelName(level, format='%s', no_match=None):
"""Return the textual representation of 'level'.
Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via
addLevelName(), the string associated with 'level' is chosen.
Otherwise, 'level' (no_match == NONE) or 'no_match' is returned
subject to formatting per 'format'.
In the spirit of "be liberal in what you accept", any value of 'level'
that survives int() will be accepted (FUTURE: subject to 'strict').
Issue #29220 introduced the BAD IDEA that passing an empty string
(an obvious TypeError) would return same. This was requested in order
to squash the fall-thru behavior of returning "Level %s", when the
multi-word response was itself the actual ERROR since it broke all
field-based log processing! The astute reader will note that an empty
string causes the same pathology...
DEPRECATION WARNING:
This function WRONGLY returned the mapped Integer if a String form
was provided. This violates the clearly stated purpose and forces
the caller into defensive Type checks or suffer future TypeErrors.
NOTE:
Does no bounds or validity checks. Use _checkLevel().
FUTURE:
In strict mode, enforce parameter dataType, case, or membership.
"""
try:
if level in logging._nameToLevel:
return format % level
result = logging._levelToName.get(int(level))
if result is not None:
return format % result
except TypeError:
if raiseExceptions:
raise "parameter 'level' must reduce to an Integer"
except ValueError:
pass
return format % level if no_match is None else format % no_match
def getLevel(levelName, no_match=logging.NOTSET):
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise "parameter 'levelName' must be a defined String"
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
pass
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
if isinstance(level, int) or not type:
level = int(level)
if level in _levelToName(level):
rv = level
else:
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or not type and int(level
) in _levelToName:
rv = NOTSET if level < NOTSET else level
if rv is None and map:
raise ValueError
else:
rv = level
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
raise TypeError('Level not an integer or a valid string: %r' %
level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
| # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals, division
__all__ = ['getLevelName', 'getLevel'] #, 'getLevelOrName', '_checkLevel']
import logging
# private re-implementations till Python Core fixes Lib/logging
# XXX bug numbers here
def getLevelName(level, format='%s', no_match=None):
# strict={'case': False, 'type': False, 'map': False},
# fixup=False
"""Return the textual representation of 'level'.
Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via
addLevelName(), the string associated with 'level' is chosen.
Otherwise, 'level' (no_match == NONE) or 'no_match' is returned
subject to formatting per 'format'.
In the spirit of "be liberal in what you accept", any value of 'level'
that survives int() will be accepted (FUTURE: subject to 'strict').
Issue #29220 introduced the BAD IDEA that passing an empty string
(an obvious TypeError) would return same. This was requested in order
to squash the fall-thru behavior of returning "Level %s", when the
multi-word response was itself the actual ERROR since it broke all
field-based log processing! The astute reader will note that an empty
string causes the same pathology...
DEPRECATION WARNING:
This function WRONGLY returned the mapped Integer if a String form
was provided. This violates the clearly stated purpose and forces
the caller into defensive Type checks or suffer future TypeErrors.
NOTE:
Does no bounds or validity checks. Use _checkLevel().
FUTURE:
In strict mode, enforce parameter dataType, case, or membership.
"""
try:
# check Name->Level in case called incorrectly (backward compat)
if level in logging._nameToLevel:
return format % level
# retval = _checkLevel(level, flags, fix=T/F)
# if isinstance(retval, bool) then handle pass/fail, else update level with fixed value
result = logging._levelToName.get(int(level))
if result is not None:
return format % result
except TypeError:
if raiseExceptions:
raise("parameter 'level' must reduce to an Integer")
except ValueError:
pass
return format % level if no_match is None else format % no_match
def getLevel(levelName, no_match=logging.NOTSET):
# strict={'case': False, 'type': False, 'map': False},
# fixup=False
"""Return the numeric representation of levelName.
see getLevelName() for background
"""
try:
result = logging._nameToLevel.get(levelName)
if result is not None:
return result
return int(levelName)
except ValueError:
if raiseExceptions:
raise("parameter 'levelName' must be a defined String")
return no_match
def getLevelOrName(level):
pass
def _checkLevel(level, case=False, type=False, map=False):
#TODO define check as dictionary
pass
# """Check parameter against defined values
#
# Returns corresponding or original Integer, or NOTSET if no-match.
# Will raise TypeError or ValueError as applicable.
#
# NOTE: Since all logging.$level() functions choose to emit based on
# numeric comparison, a default of ERROR would be more logical.
# """
try:
if isinstance(level, str):
if not case:
level = str.upper(level)
rv = _nameToLevel.get(level)
# if rv is None:
# XXX what now?
if isinstance(level, int) or not type:
# flip negative values
level = int(level)
if level in _levelToName(level):
rv = level
else:
# tolerate any Integer value
rv = NOTSET if map else level
if rv is None:
level = str(level)
if rv is None:
if level in _levelToName or (not type and int(level) in _levelToName):
rv = NOTSET if level < NOTSET else level
# rv = level
if rv is None and map:
raise ValueError
else:
# return parameter even though invalid
rv = level
# sor level < NOTSET or level > ???:
# #raise ValueError
# if isinstance(level, int):
# XXX check >NOTSET
# else:
# raise TypeError
#FIXME - test harness injects '+1', so tolerating
# arbitrary integers is expected behavior. Why?
# raise ValueError
rv = int(level)
except (TypeError, ValueError, KeyError) as err:
if raiseExceptions:
# test harness (../test/test_logging) expects 'TypeError' ONLY
raise TypeError("Level not an integer or a valid string: %r" % level) from err
except Exception:
pass
return NOTSET - 1 if rv is None else rv
| [
3,
4,
5,
6,
7
] |
183 | 88c304f224ab60062582abbfa1146a651e1233e6 | def missing_value_count_and_percent(df):
"""
Return the number and percent of missing values for each column.
Args:
df (Dataframe): A dataframe with many columns
Return:
df (Dataframe): A dataframe with one column showing number of missing values, one column showing percentage of missing values with 4 digits
"""
df = pd.concat({'num_missing_values':df.isnull().sum(), 'pct_missing_values':df.isnull().mean().round(4)}, axis=1)
)
return df | null | null | null | null | [
0
] |
184 | e5d31a2ea4a8615d24626be2414f5ae49b9cd6a1 | <mask token>
| <mask token>
class Solution:
<mask token>
<mask token>
| <mask token>
class Solution:
def maximalSquare(self, matrix: List[List[str]]) ->int:
if not matrix:
return 0
dp = [0] * (len(matrix[0]) + 1)
longestSide = 0
for i in range(len(matrix)):
prevSquare = 0
for j in range(len(matrix[0])):
temp = dp[j]
if matrix[i][j] == '1':
dp[j] = 1 + min(dp[j], dp[j - 1], prevSquare)
longestSide = max(longestSide, dp[j])
else:
dp[j] = 0
prevSquare = temp
return longestSide * longestSide
<mask token>
| """
Given a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.
Example:
Input:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
Output: 4
"""
# 196ms. 98 percentile
class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
if not matrix:
return 0
dp = [0]*(len(matrix[0]) + 1)
longestSide = 0
for i in range(len(matrix)):
prevSquare =0
for j in range(len(matrix[0])):
temp = dp[j]
if matrix[i][j] == '1':
dp[j] = 1 + min(dp[j], dp[j-1], prevSquare)
longestSide = max(longestSide, dp[j])
else:
dp[j] = 0
prevSquare = temp
return longestSide*longestSide
"""
Notes:
Two hard things in this problem. The first is the logic for the dp, although after the fact
it seems pretty straightforward imo.
At any element you can check if you have a 2 by 2 square by looking at its neighbors. So anywhere you see
1 1
1 1
you're going to replace the bottom right corner with a 2. Note we're going top down and left to right...
So if you see
2 2
2 1
...then you know that you actually have
1 1 1
1 2 2
1 2 1
meaning you can actually put 3 in the corner.
On the other hand, if any of the neighbors are 1's, then you won't have the full cube. Implying that
at each spot, if it's a 1, you take the min of the three neighbors + 1.
The second hard thing is just dealing with the fact the input is characters not ints...annoying imo. The second
solution up there just uses a standard 1d dp array to keep track of the last row processed in terms of ints...which
is all we need. So we can avoid casting anything.
The first solution only casts the first first row and the first column.
Most of it is straightforwards. The one thing I want to note is that temp variable switch. Basically because our dp is
a single array, when we're processing element i, we've already replaced element i-1 with an updated value. That's a problem
because the i-1 value represents the j-1,i-1 value for the ith element in the dp. So we use that little temp switch to
skirt the issue.
""" | null | [
0,
1,
2,
3
] |
185 | 7e2bf898eb1c0118205042797e6dac535342979b | <mask token>
class MainApplication(tk.Frame):
def __init__(self, root: tk.Tk):
super().__init__(root)
self.root = root
self.pack(padx=32, pady=32, expand=True)
self.root.option_add('*tearOff', False)
self.root.title('Counter')
frm_buttons = tk.Frame(self)
frm_buttons.grid(row=0, column=0)
self.var_count = tk.IntVar(frm_buttons)
tk.Button(frm_buttons, textvariable=self.var_count, command=self.
count_up, font='Times, 60').grid(row=0, column=0, columnspan=3,
sticky=tk.NSEW)
tk.Button(frm_buttons, text='-1', command=self.count_down).grid(row
=1, column=0)
tk.Button(frm_buttons, text='Reset', command=self.reset).grid(row=1,
column=1)
tk.Button(frm_buttons, text='Save', command=self.save).grid(row=1,
column=2)
self.selected_count = ''
self.lst_counts = tk.Listbox(self)
self.lst_counts.grid(row=0, column=1)
self.lst_counts.bind('<<ListboxSelect>>', self.listbox_select)
self.lst_counts.bind('<Button-3>', self.right_click)
self.men_list = tk.Menu(self)
self.men_list.add_command(label='Delete Selected', command=self.
delete_save)
self.root.bind('<Key>', self.key_press)
try:
saves = os.listdir('data')
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
for count_save in saves:
self.lst_counts.insert(tk.END, count_save)
listener = keyboard.Listener(on_release=self.on_key_release)
listener.start()
def count_up(self):
if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:
SaveCount(tk.Toplevel(), self.save_to_listbox)
return
elif not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() + 1)
self.save_to_file(self.selected_count)
def count_down(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() - 1)
self.save_to_file(self.selected_count)
def reset(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
choice = messagebox.askyesno('Reset',
'Are you sure you want to reset the count?')
if choice:
self.var_count.set(0)
self.save_to_file(self.selected_count)
def save(self):
SaveCount(tk.Toplevel(), self.save_to_listbox)
<mask token>
def save_to_file(self, name: str) ->bool:
try:
with open(join('data', name), 'w') as file:
file.write(str(self.var_count.get()))
return True
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return False
<mask token>
def listbox_select(self, event):
widget = event.widget
try:
name_of_selected_count = widget.get(int(widget.curselection()[0]))
except IndexError:
return
try:
with open(join('data', name_of_selected_count), 'r') as file:
count = int(file.read())
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
self.var_count.set(count)
self.selected_count = name_of_selected_count
<mask token>
def key_press(self, event):
if event.char == ' ':
self.count_up()
<mask token>
<mask token>
| <mask token>
class MainApplication(tk.Frame):
def __init__(self, root: tk.Tk):
super().__init__(root)
self.root = root
self.pack(padx=32, pady=32, expand=True)
self.root.option_add('*tearOff', False)
self.root.title('Counter')
frm_buttons = tk.Frame(self)
frm_buttons.grid(row=0, column=0)
self.var_count = tk.IntVar(frm_buttons)
tk.Button(frm_buttons, textvariable=self.var_count, command=self.
count_up, font='Times, 60').grid(row=0, column=0, columnspan=3,
sticky=tk.NSEW)
tk.Button(frm_buttons, text='-1', command=self.count_down).grid(row
=1, column=0)
tk.Button(frm_buttons, text='Reset', command=self.reset).grid(row=1,
column=1)
tk.Button(frm_buttons, text='Save', command=self.save).grid(row=1,
column=2)
self.selected_count = ''
self.lst_counts = tk.Listbox(self)
self.lst_counts.grid(row=0, column=1)
self.lst_counts.bind('<<ListboxSelect>>', self.listbox_select)
self.lst_counts.bind('<Button-3>', self.right_click)
self.men_list = tk.Menu(self)
self.men_list.add_command(label='Delete Selected', command=self.
delete_save)
self.root.bind('<Key>', self.key_press)
try:
saves = os.listdir('data')
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
for count_save in saves:
self.lst_counts.insert(tk.END, count_save)
listener = keyboard.Listener(on_release=self.on_key_release)
listener.start()
def count_up(self):
if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:
SaveCount(tk.Toplevel(), self.save_to_listbox)
return
elif not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() + 1)
self.save_to_file(self.selected_count)
def count_down(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() - 1)
self.save_to_file(self.selected_count)
def reset(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
choice = messagebox.askyesno('Reset',
'Are you sure you want to reset the count?')
if choice:
self.var_count.set(0)
self.save_to_file(self.selected_count)
def save(self):
SaveCount(tk.Toplevel(), self.save_to_listbox)
def save_to_listbox(self, name: str):
if self.save_to_file(name):
self.lst_counts.insert(tk.END, name)
def save_to_file(self, name: str) ->bool:
try:
with open(join('data', name), 'w') as file:
file.write(str(self.var_count.get()))
return True
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return False
<mask token>
def listbox_select(self, event):
widget = event.widget
try:
name_of_selected_count = widget.get(int(widget.curselection()[0]))
except IndexError:
return
try:
with open(join('data', name_of_selected_count), 'r') as file:
count = int(file.read())
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
self.var_count.set(count)
self.selected_count = name_of_selected_count
<mask token>
def key_press(self, event):
if event.char == ' ':
self.count_up()
<mask token>
<mask token>
| <mask token>
class MainApplication(tk.Frame):
def __init__(self, root: tk.Tk):
super().__init__(root)
self.root = root
self.pack(padx=32, pady=32, expand=True)
self.root.option_add('*tearOff', False)
self.root.title('Counter')
frm_buttons = tk.Frame(self)
frm_buttons.grid(row=0, column=0)
self.var_count = tk.IntVar(frm_buttons)
tk.Button(frm_buttons, textvariable=self.var_count, command=self.
count_up, font='Times, 60').grid(row=0, column=0, columnspan=3,
sticky=tk.NSEW)
tk.Button(frm_buttons, text='-1', command=self.count_down).grid(row
=1, column=0)
tk.Button(frm_buttons, text='Reset', command=self.reset).grid(row=1,
column=1)
tk.Button(frm_buttons, text='Save', command=self.save).grid(row=1,
column=2)
self.selected_count = ''
self.lst_counts = tk.Listbox(self)
self.lst_counts.grid(row=0, column=1)
self.lst_counts.bind('<<ListboxSelect>>', self.listbox_select)
self.lst_counts.bind('<Button-3>', self.right_click)
self.men_list = tk.Menu(self)
self.men_list.add_command(label='Delete Selected', command=self.
delete_save)
self.root.bind('<Key>', self.key_press)
try:
saves = os.listdir('data')
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
for count_save in saves:
self.lst_counts.insert(tk.END, count_save)
listener = keyboard.Listener(on_release=self.on_key_release)
listener.start()
def count_up(self):
if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:
SaveCount(tk.Toplevel(), self.save_to_listbox)
return
elif not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() + 1)
self.save_to_file(self.selected_count)
def count_down(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() - 1)
self.save_to_file(self.selected_count)
def reset(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
choice = messagebox.askyesno('Reset',
'Are you sure you want to reset the count?')
if choice:
self.var_count.set(0)
self.save_to_file(self.selected_count)
def save(self):
SaveCount(tk.Toplevel(), self.save_to_listbox)
def save_to_listbox(self, name: str):
if self.save_to_file(name):
self.lst_counts.insert(tk.END, name)
def save_to_file(self, name: str) ->bool:
try:
with open(join('data', name), 'w') as file:
file.write(str(self.var_count.get()))
return True
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return False
<mask token>
def listbox_select(self, event):
widget = event.widget
try:
name_of_selected_count = widget.get(int(widget.curselection()[0]))
except IndexError:
return
try:
with open(join('data', name_of_selected_count), 'r') as file:
count = int(file.read())
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
self.var_count.set(count)
self.selected_count = name_of_selected_count
def right_click(self, event):
self.men_list.tk_popup(event.x_root, event.y_root)
def key_press(self, event):
if event.char == ' ':
self.count_up()
<mask token>
<mask token>
| <mask token>
class MainApplication(tk.Frame):
def __init__(self, root: tk.Tk):
super().__init__(root)
self.root = root
self.pack(padx=32, pady=32, expand=True)
self.root.option_add('*tearOff', False)
self.root.title('Counter')
frm_buttons = tk.Frame(self)
frm_buttons.grid(row=0, column=0)
self.var_count = tk.IntVar(frm_buttons)
tk.Button(frm_buttons, textvariable=self.var_count, command=self.
count_up, font='Times, 60').grid(row=0, column=0, columnspan=3,
sticky=tk.NSEW)
tk.Button(frm_buttons, text='-1', command=self.count_down).grid(row
=1, column=0)
tk.Button(frm_buttons, text='Reset', command=self.reset).grid(row=1,
column=1)
tk.Button(frm_buttons, text='Save', command=self.save).grid(row=1,
column=2)
self.selected_count = ''
self.lst_counts = tk.Listbox(self)
self.lst_counts.grid(row=0, column=1)
self.lst_counts.bind('<<ListboxSelect>>', self.listbox_select)
self.lst_counts.bind('<Button-3>', self.right_click)
self.men_list = tk.Menu(self)
self.men_list.add_command(label='Delete Selected', command=self.
delete_save)
self.root.bind('<Key>', self.key_press)
try:
saves = os.listdir('data')
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
for count_save in saves:
self.lst_counts.insert(tk.END, count_save)
listener = keyboard.Listener(on_release=self.on_key_release)
listener.start()
def count_up(self):
if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:
SaveCount(tk.Toplevel(), self.save_to_listbox)
return
elif not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() + 1)
self.save_to_file(self.selected_count)
def count_down(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
self.var_count.set(self.var_count.get() - 1)
self.save_to_file(self.selected_count)
def reset(self):
if not self.selected_count:
messagebox.showerror('No Configuration Selected',
'Please select a configuration.')
return
choice = messagebox.askyesno('Reset',
'Are you sure you want to reset the count?')
if choice:
self.var_count.set(0)
self.save_to_file(self.selected_count)
def save(self):
SaveCount(tk.Toplevel(), self.save_to_listbox)
def save_to_listbox(self, name: str):
if self.save_to_file(name):
self.lst_counts.insert(tk.END, name)
def save_to_file(self, name: str) ->bool:
try:
with open(join('data', name), 'w') as file:
file.write(str(self.var_count.get()))
return True
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return False
def delete_save(self):
try:
name_of_selected_count = self.lst_counts.get(int(self.
lst_counts.curselection()[0]))
except IndexError:
return
os.remove(join('data', name_of_selected_count))
for i in range(self.lst_counts.size()):
if self.lst_counts.get(i) == name_of_selected_count:
self.lst_counts.delete(i)
def listbox_select(self, event):
widget = event.widget
try:
name_of_selected_count = widget.get(int(widget.curselection()[0]))
except IndexError:
return
try:
with open(join('data', name_of_selected_count), 'r') as file:
count = int(file.read())
except FileNotFoundError:
os.mkdir('data')
messagebox.showerror('Save Error',
'No data folder was found; created one now.')
return
self.var_count.set(count)
self.selected_count = name_of_selected_count
def right_click(self, event):
self.men_list.tk_popup(event.x_root, event.y_root)
def key_press(self, event):
if event.char == ' ':
self.count_up()
def on_key_release(self, key):
if key == keyboard.KeyCode.from_char('+'):
self.count_up()
def main():
root = tk.Tk()
MainApplication(root)
root.mainloop()
| import os
import tkinter as tk
from tkinter import messagebox
from os.path import join
from pynput import keyboard
from src.save_count import SaveCount
class MainApplication(tk.Frame):
def __init__(self, root: tk.Tk):
super().__init__(root)
self.root = root
self.pack(padx=32, pady=32, expand=True)
self.root.option_add("*tearOff", False)
self.root.title("Counter")
frm_buttons = tk.Frame(self)
frm_buttons.grid(row=0, column=0)
self.var_count = tk.IntVar(frm_buttons)
tk.Button(frm_buttons, textvariable=self.var_count, command=self.count_up, font="Times, 60") \
.grid(row=0, column=0, columnspan=3, sticky=tk.NSEW)
tk.Button(frm_buttons, text="-1", command=self.count_down).grid(row=1, column=0)
tk.Button(frm_buttons, text="Reset", command=self.reset).grid(row=1, column=1)
tk.Button(frm_buttons, text="Save", command=self.save).grid(row=1, column=2)
# tk.Button(frm_buttons, text="Undecorated Window", command=None).grid(row=2, column=0, columnspan=3)
self.selected_count = ""
self.lst_counts = tk.Listbox(self)
self.lst_counts.grid(row=0, column=1)
self.lst_counts.bind("<<ListboxSelect>>", self.listbox_select)
self.lst_counts.bind("<Button-3>", self.right_click)
self.men_list = tk.Menu(self)
self.men_list.add_command(label="Delete Selected", command=self.delete_save)
self.root.bind("<Key>", self.key_press)
try:
saves = os.listdir("data")
except FileNotFoundError:
os.mkdir("data")
messagebox.showerror("Save Error", "No data folder was found; created one now.")
return
for count_save in saves:
self.lst_counts.insert(tk.END, count_save)
listener = keyboard.Listener(on_release=self.on_key_release)
listener.start()
def count_up(self):
# Save to entry, if it's the first one
if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:
SaveCount(tk.Toplevel(), self.save_to_listbox)
return
else:
if not self.selected_count:
messagebox.showerror("No Configuration Selected", "Please select a configuration.")
return
self.var_count.set(self.var_count.get() + 1)
self.save_to_file(self.selected_count)
def count_down(self):
if not self.selected_count:
messagebox.showerror("No Configuration Selected", "Please select a configuration.")
return
self.var_count.set(self.var_count.get() - 1)
self.save_to_file(self.selected_count)
def reset(self):
if not self.selected_count:
messagebox.showerror("No Configuration Selected", "Please select a configuration.")
return
choice = messagebox.askyesno("Reset", "Are you sure you want to reset the count?")
if choice:
self.var_count.set(0)
self.save_to_file(self.selected_count)
def save(self):
SaveCount(tk.Toplevel(), self.save_to_listbox)
def save_to_listbox(self, name: str):
if self.save_to_file(name): # If save is successful
self.lst_counts.insert(tk.END, name)
def save_to_file(self, name: str) -> bool:
try:
with open(join("data", name), "w") as file:
file.write(str(self.var_count.get()))
return True
except FileNotFoundError:
os.mkdir("data")
messagebox.showerror("Save Error", "No data folder was found; created one now.")
return False
def delete_save(self):
try:
name_of_selected_count = self.lst_counts.get(int(self.lst_counts.curselection()[0]))
except IndexError:
return
os.remove(join("data", name_of_selected_count))
for i in range(self.lst_counts.size()):
if self.lst_counts.get(i) == name_of_selected_count:
self.lst_counts.delete(i)
def listbox_select(self, event):
widget = event.widget
try:
name_of_selected_count = widget.get(int(widget.curselection()[0]))
except IndexError:
return
try:
with open(join("data", name_of_selected_count), "r") as file:
count = int(file.read())
except FileNotFoundError:
os.mkdir("data")
messagebox.showerror("Save Error", "No data folder was found; created one now.")
return
self.var_count.set(count)
self.selected_count = name_of_selected_count
def right_click(self, event):
self.men_list.tk_popup(event.x_root, event.y_root)
def key_press(self, event):
if event.char == " ":
self.count_up()
def on_key_release(self, key):
if key == keyboard.KeyCode.from_char("+"):
self.count_up()
def main():
root = tk.Tk()
MainApplication(root)
root.mainloop()
| [
9,
10,
11,
14,
16
] |
186 | f8601ed7ba7c2b8d2dd8d5f74f7b5ae8e99dad78 | <mask token>
| <mask token>
print((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:
MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(
n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))
| IS_ZERO = lambda x: x == 0
ONE = 1
SUB1 = lambda x: x - 1
MULT = lambda x: lambda y: x * y
IF = lambda cond: lambda t_func: lambda f_func: t_func(None
) if cond else f_func(None)
print((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:
MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(
n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))
| IS_ZERO = lambda x: x == 0
ONE = 1
SUB1 = lambda x: x - 1
MULT = lambda x: lambda y: x * y
IF = lambda cond: lambda t_func: lambda f_func: t_func(None) if cond else f_func(None)
print(
(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)
(6)
) | null | [
0,
1,
2,
3
] |
187 | 8e854398084e89b0b8436d6b0a2bf8f36a9c7bd5 | <mask token>
class TestNetworkSimulatorService(TestCase):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
| <mask token>
class TestNetworkSimulatorService(TestCase):
@patch(
'network_simulator.service.network_topology_handler.write_network_topology_to_file'
)
def setUp(self, write_network_topology_to_file_mock):
self.device_id = 'testid'
self.device_type = 'vm'
self.tap_if_name = 'testtap'
self.device_data_dict = {'device_id': self.device_id, 'device_type':
self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,
'ypos': 3.0}
self.test_net_namespace = 'testns'
self.network_svc = NetworkSimulatorService(self.test_net_namespace)
def test_deviceStrRepresentation(self):
device = Device(self.device_data_dict)
str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.
tap_if_name)
self.assertEqual(str_rep, str(device))
<mask token>
<mask token>
<mask token>
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
| <mask token>
class TestNetworkSimulatorService(TestCase):
@patch(
'network_simulator.service.network_topology_handler.write_network_topology_to_file'
)
def setUp(self, write_network_topology_to_file_mock):
self.device_id = 'testid'
self.device_type = 'vm'
self.tap_if_name = 'testtap'
self.device_data_dict = {'device_id': self.device_id, 'device_type':
self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,
'ypos': 3.0}
self.test_net_namespace = 'testns'
self.network_svc = NetworkSimulatorService(self.test_net_namespace)
def test_deviceStrRepresentation(self):
device = Device(self.device_data_dict)
str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.
tap_if_name)
self.assertEqual(str_rep, str(device))
def test_registerDeviceTwice(self):
self.network_svc.devices[self.device_id] = ''
with self.assertRaises(DeviceAlreadyRegisteredException):
self.network_svc.register_new_device(self.device_data_dict)
<mask token>
def test_deregisterDevice(self):
self.network_svc.devices[self.device_id] = ''
self.network_svc.deregister_device(self.device_id)
self.assertNotIn(self.device_id, self.network_svc.devices.keys())
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
| <mask token>
class TestNetworkSimulatorService(TestCase):
@patch(
'network_simulator.service.network_topology_handler.write_network_topology_to_file'
)
def setUp(self, write_network_topology_to_file_mock):
self.device_id = 'testid'
self.device_type = 'vm'
self.tap_if_name = 'testtap'
self.device_data_dict = {'device_id': self.device_id, 'device_type':
self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,
'ypos': 3.0}
self.test_net_namespace = 'testns'
self.network_svc = NetworkSimulatorService(self.test_net_namespace)
def test_deviceStrRepresentation(self):
device = Device(self.device_data_dict)
str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.
tap_if_name)
self.assertEqual(str_rep, str(device))
def test_registerDeviceTwice(self):
self.network_svc.devices[self.device_id] = ''
with self.assertRaises(DeviceAlreadyRegisteredException):
self.network_svc.register_new_device(self.device_data_dict)
def test_registerNewDevice(self):
self.network_svc.register_new_device(self.device_data_dict)
self.assertIn(self.device_id, self.network_svc.devices.keys())
def test_deregisterDevice(self):
self.network_svc.devices[self.device_id] = ''
self.network_svc.deregister_device(self.device_id)
self.assertNotIn(self.device_id, self.network_svc.devices.keys())
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
| from unittest import TestCase
from unittest.mock import patch, mock_open, call
from network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException
from network_simulator.service import NetworkSimulatorService
from network_simulator.service.network_simulator_service import Device
class TestNetworkSimulatorService(TestCase):
@patch("network_simulator.service.network_topology_handler.write_network_topology_to_file")
def setUp(self, write_network_topology_to_file_mock):
self.device_id = "testid"
self.device_type = "vm"
self.tap_if_name = "testtap"
self.device_data_dict = {
"device_id": self.device_id,
"device_type": self.device_type,
"tap_if_name": self.tap_if_name,
"xpos": 5.0,
"ypos": 3.0
}
self.test_net_namespace = "testns"
self.network_svc = NetworkSimulatorService(self.test_net_namespace)
def test_deviceStrRepresentation(self):
device = Device(self.device_data_dict)
str_rep = "{},{},{}".format(self.device_id, self.device_type, self.tap_if_name)
self.assertEqual(str_rep, str(device))
def test_registerDeviceTwice(self):
self.network_svc.devices[self.device_id] = ""
with self.assertRaises(DeviceAlreadyRegisteredException):
self.network_svc.register_new_device(self.device_data_dict)
def test_registerNewDevice(self):
self.network_svc.register_new_device(self.device_data_dict)
self.assertIn(self.device_id, self.network_svc.devices.keys())
def test_deregisterDevice(self):
self.network_svc.devices[self.device_id] = ""
self.network_svc.deregister_device(self.device_id)
self.assertNotIn(self.device_id, self.network_svc.devices.keys())
def test_deregisterInvalidDevice(self):
with self.assertRaises(UnknownDeviceException):
self.network_svc.deregister_device(self.device_id)
# helper
def create_device(self, device_id):
device = Device(self.device_data_dict)
device.device_id = device_id
device.xpos = 4.0
device.ypos = 3.0
return device
| [
3,
5,
7,
8,
10
] |
188 | fcd3e4c0d42649833e6c5ff6414c993654691d16 | <mask token>
| <mask token>
admin.site.register(Category, MPTTModelAdmin)
admin.site.register(Item)
admin.site.register(Product)
| from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from product.models import Item, Product, Category
admin.site.register(Category, MPTTModelAdmin)
admin.site.register(Item)
admin.site.register(Product)
| from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from product.models import Item,Product,Category
# Register your models here.
admin.site.register(Category,MPTTModelAdmin)
admin.site.register(Item)
admin.site.register(Product) | null | [
0,
1,
2,
3
] |
189 | 565888d771f53934805555390e48d4886a43bdb6 | <mask token>
class D3D12_Resource_Mapping_Zoo(rdtest.TestCase):
<mask token>
<mask token>
def check_capture(self):
if not self.controller.GetAPIProperties().shaderDebugging:
rdtest.log.success('Shader debugging not enabled, skipping test')
return
failed = False
test_marker: rd.ActionDescription = self.find_action('sm_5_0')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed
test_marker: rd.ActionDescription = self.find_action('sm_5_1')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed
rdtest.log.begin_section('Resource array tests')
test_marker: rd.ActionDescription = self.find_action('ResArray')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'ResArray({},{})'.format(x, y)) or failed
rdtest.log.end_section('Resource array tests')
rdtest.log.begin_section('Bindless tests')
test_marker: rd.ActionDescription = self.find_action('Bindless')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'Bindless({},{})'.format(x, y)) or failed
rdtest.log.end_section('Bindless tests')
if failed:
raise rdtest.TestFailureException('Some tests were not as expected'
)
rdtest.log.success('All tests matched')
| <mask token>
class D3D12_Resource_Mapping_Zoo(rdtest.TestCase):
<mask token>
def test_debug_pixel(self, x, y, test_name):
pipe: rd.PipeState = self.controller.GetPipelineState()
if not pipe.GetShaderReflection(rd.ShaderStage.Pixel
).debugInfo.debuggable:
rdtest.log.print('Skipping undebuggable shader at {}.'.format(
test_name))
return
trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.
ReplayController.NoPreference, rd.ReplayController.NoPreference)
cycles, variables = self.process_trace(trace)
output = self.find_output_source_var(trace, rd.ShaderBuiltin.
ColorOutput, 0)
debugged = self.evaluate_source_var(output, variables)
try:
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x,
y, debugged.value.f32v[0:4])
except rdtest.TestFailureException as ex:
rdtest.log.error('Test {} did not match. {}'.format(test_name,
str(ex)))
return False
finally:
self.controller.FreeTrace(trace)
rdtest.log.success('Test {} matched as expected'.format(test_name))
return True
def check_capture(self):
if not self.controller.GetAPIProperties().shaderDebugging:
rdtest.log.success('Shader debugging not enabled, skipping test')
return
failed = False
test_marker: rd.ActionDescription = self.find_action('sm_5_0')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed
test_marker: rd.ActionDescription = self.find_action('sm_5_1')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed
rdtest.log.begin_section('Resource array tests')
test_marker: rd.ActionDescription = self.find_action('ResArray')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'ResArray({},{})'.format(x, y)) or failed
rdtest.log.end_section('Resource array tests')
rdtest.log.begin_section('Bindless tests')
test_marker: rd.ActionDescription = self.find_action('Bindless')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'Bindless({},{})'.format(x, y)) or failed
rdtest.log.end_section('Bindless tests')
if failed:
raise rdtest.TestFailureException('Some tests were not as expected'
)
rdtest.log.success('All tests matched')
| <mask token>
class D3D12_Resource_Mapping_Zoo(rdtest.TestCase):
demos_test_name = 'D3D12_Resource_Mapping_Zoo'
def test_debug_pixel(self, x, y, test_name):
pipe: rd.PipeState = self.controller.GetPipelineState()
if not pipe.GetShaderReflection(rd.ShaderStage.Pixel
).debugInfo.debuggable:
rdtest.log.print('Skipping undebuggable shader at {}.'.format(
test_name))
return
trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.
ReplayController.NoPreference, rd.ReplayController.NoPreference)
cycles, variables = self.process_trace(trace)
output = self.find_output_source_var(trace, rd.ShaderBuiltin.
ColorOutput, 0)
debugged = self.evaluate_source_var(output, variables)
try:
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x,
y, debugged.value.f32v[0:4])
except rdtest.TestFailureException as ex:
rdtest.log.error('Test {} did not match. {}'.format(test_name,
str(ex)))
return False
finally:
self.controller.FreeTrace(trace)
rdtest.log.success('Test {} matched as expected'.format(test_name))
return True
def check_capture(self):
if not self.controller.GetAPIProperties().shaderDebugging:
rdtest.log.success('Shader debugging not enabled, skipping test')
return
failed = False
test_marker: rd.ActionDescription = self.find_action('sm_5_0')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed
test_marker: rd.ActionDescription = self.find_action('sm_5_1')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed
rdtest.log.begin_section('Resource array tests')
test_marker: rd.ActionDescription = self.find_action('ResArray')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'ResArray({},{})'.format(x, y)) or failed
rdtest.log.end_section('Resource array tests')
rdtest.log.begin_section('Bindless tests')
test_marker: rd.ActionDescription = self.find_action('Bindless')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'Bindless({},{})'.format(x, y)) or failed
rdtest.log.end_section('Bindless tests')
if failed:
raise rdtest.TestFailureException('Some tests were not as expected'
)
rdtest.log.success('All tests matched')
| import renderdoc as rd
from typing import List
import rdtest
class D3D12_Resource_Mapping_Zoo(rdtest.TestCase):
demos_test_name = 'D3D12_Resource_Mapping_Zoo'
def test_debug_pixel(self, x, y, test_name):
pipe: rd.PipeState = self.controller.GetPipelineState()
if not pipe.GetShaderReflection(rd.ShaderStage.Pixel
).debugInfo.debuggable:
rdtest.log.print('Skipping undebuggable shader at {}.'.format(
test_name))
return
trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.
ReplayController.NoPreference, rd.ReplayController.NoPreference)
cycles, variables = self.process_trace(trace)
output = self.find_output_source_var(trace, rd.ShaderBuiltin.
ColorOutput, 0)
debugged = self.evaluate_source_var(output, variables)
try:
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x,
y, debugged.value.f32v[0:4])
except rdtest.TestFailureException as ex:
rdtest.log.error('Test {} did not match. {}'.format(test_name,
str(ex)))
return False
finally:
self.controller.FreeTrace(trace)
rdtest.log.success('Test {} matched as expected'.format(test_name))
return True
def check_capture(self):
if not self.controller.GetAPIProperties().shaderDebugging:
rdtest.log.success('Shader debugging not enabled, skipping test')
return
failed = False
test_marker: rd.ActionDescription = self.find_action('sm_5_0')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_0') or failed
test_marker: rd.ActionDescription = self.find_action('sm_5_1')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, 'sm_5_1') or failed
rdtest.log.begin_section('Resource array tests')
test_marker: rd.ActionDescription = self.find_action('ResArray')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'ResArray({},{})'.format(x, y)) or failed
rdtest.log.end_section('Resource array tests')
rdtest.log.begin_section('Bindless tests')
test_marker: rd.ActionDescription = self.find_action('Bindless')
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y,
'Bindless({},{})'.format(x, y)) or failed
rdtest.log.end_section('Bindless tests')
if failed:
raise rdtest.TestFailureException('Some tests were not as expected'
)
rdtest.log.success('All tests matched')
| import renderdoc as rd
from typing import List
import rdtest
class D3D12_Resource_Mapping_Zoo(rdtest.TestCase):
demos_test_name = 'D3D12_Resource_Mapping_Zoo'
def test_debug_pixel(self, x, y, test_name):
pipe: rd.PipeState = self.controller.GetPipelineState()
if not pipe.GetShaderReflection(rd.ShaderStage.Pixel).debugInfo.debuggable:
rdtest.log.print("Skipping undebuggable shader at {}.".format(test_name))
return
# Debug the shader
trace: rd.ShaderDebugTrace = self.controller.DebugPixel(x, y, rd.ReplayController.NoPreference,
rd.ReplayController.NoPreference)
cycles, variables = self.process_trace(trace)
output = self.find_output_source_var(trace, rd.ShaderBuiltin.ColorOutput, 0)
debugged = self.evaluate_source_var(output, variables)
try:
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, x, y, debugged.value.f32v[0:4])
except rdtest.TestFailureException as ex:
rdtest.log.error("Test {} did not match. {}".format(test_name, str(ex)))
return False
finally:
self.controller.FreeTrace(trace)
rdtest.log.success("Test {} matched as expected".format(test_name))
return True
def check_capture(self):
if not self.controller.GetAPIProperties().shaderDebugging:
rdtest.log.success("Shader debugging not enabled, skipping test")
return
failed = False
test_marker: rd.ActionDescription = self.find_action("sm_5_0")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, "sm_5_0") or failed
test_marker: rd.ActionDescription = self.find_action("sm_5_1")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
failed = not self.test_debug_pixel(200, 200, "sm_5_1") or failed
rdtest.log.begin_section("Resource array tests")
test_marker: rd.ActionDescription = self.find_action("ResArray")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y, "ResArray({},{})".format(x, y)) or failed
rdtest.log.end_section("Resource array tests")
rdtest.log.begin_section("Bindless tests")
test_marker: rd.ActionDescription = self.find_action("Bindless")
action = test_marker.next
self.controller.SetFrameEvent(action.eventId, False)
for y in range(4):
for x in range(4):
failed = not self.test_debug_pixel(200 + x, 200 + y, "Bindless({},{})".format(x, y)) or failed
rdtest.log.end_section("Bindless tests")
if failed:
raise rdtest.TestFailureException("Some tests were not as expected")
rdtest.log.success("All tests matched")
| [
2,
3,
4,
5,
6
] |
190 | 607fc97c4520c7f54ee44e768776ceae2b70c378 | import os
from flask import Flask
from flask import request
result=""
app = Flask(__name__)
@app.route('/postjson', methods = ['POST'])
def postJsonHandler():
global result
#print (request.is_json)
content = request.get_json()
#print (content)
#print ("true")
#print (content["encode"])
#print (content["aaa"])
result=(content["aaa"])
os.chdir("/home/ec2-user/sdpd")
with open("image.jpg", "wb") as fh:
fh.write(content["encode"].decode('base64'))
return 'JSON posted'
@app.route('/getjson')
def getJsonHandler():
global result
print result
if (result == "tomato"):
os.chdir("/home/ec2-user/sdpd/tomato")
os.system("python -m scripts.label_image --graph=tf_files/retrained_graph.pb --image=/home/ec2-user/sdpd/image.jpg > a.txt")
elif (result == "potato"):
os.chdir("/home/ec2-user/sdpd/tensor")
os.system("python -m scripts.label_image --graph=tf_files/retrained_graph.pb --image=/home/ec2-user/sdpd/image.jpg > a.txt")
elif (result == "corn"):
os.chdir("/home/ec2-user/sdpd/corn")
os.system("python -m scripts.label_image --graph=tf_files/retrained_graph.pb --image=/home/ec2-user/sdpd/image.jpg > a.txt")
elif (result == "grape"):
os.chdir("/home/ec2-user/sdpd/grape")
os.system("python -m scripts.label_image --graph=tf_files/retrained_graph.pb --image=/home/ec2-user/sdpd/image.jpg > a.txt")
file = open("a.txt", "r")
aa=""
for i in file.readline():
if (i.isdigit()):
break
aa= aa+i
baa = aa.replace(" ","")
os.chdir("/home/ec2-user/sdpd")
file1 = open(baa + ".txt","r")
aa = aa + " \n \n \n \n" + file1.read()
return aa
#return 'string posted'
app.run(host='ec2-13-127-4-47.ap-south-1.compute.amazonaws.com', port= 8090)
| null | null | null | null | [
0
] |
191 | 247e352b7772a1da74a26f007228355f5af8d3b3 | <mask token>
| def lcs(X, Y, m, n):
dp = [([0] * (n + 1)) for i in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if X[i - 1] == Y[j - 1]:
dp[i][j] = 1 + dp[i - 1][j - 1]
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
index = dp[m][n]
s = ''
i = m
j = n
while i > 0 and j > 0:
if X[i - 1] == Y[j - 1]:
s += X[i - 1]
i -= 1
j -= 1
elif dp[i - 1][j] > dp[i][j - 1]:
i -= 1
else:
j -= 1
return s
<mask token>
| def lcs(X, Y, m, n):
dp = [([0] * (n + 1)) for i in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if X[i - 1] == Y[j - 1]:
dp[i][j] = 1 + dp[i - 1][j - 1]
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
index = dp[m][n]
s = ''
i = m
j = n
while i > 0 and j > 0:
if X[i - 1] == Y[j - 1]:
s += X[i - 1]
i -= 1
j -= 1
elif dp[i - 1][j] > dp[i][j - 1]:
i -= 1
else:
j -= 1
return s
<mask token>
print('Length of LCS is ', lcs(X, Y, len(X), len(Y)))
| def lcs(X, Y, m, n):
dp = [([0] * (n + 1)) for i in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if X[i - 1] == Y[j - 1]:
dp[i][j] = 1 + dp[i - 1][j - 1]
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
index = dp[m][n]
s = ''
i = m
j = n
while i > 0 and j > 0:
if X[i - 1] == Y[j - 1]:
s += X[i - 1]
i -= 1
j -= 1
elif dp[i - 1][j] > dp[i][j - 1]:
i -= 1
else:
j -= 1
return s
X = 'AGGTAB'
Y = 'GXTXAYB'
print('Length of LCS is ', lcs(X, Y, len(X), len(Y)))
| def lcs(X, Y, m, n):
dp = [[0]*(n+1) for i in range(m+1)]
for i in range(1,m+1):
for j in range(1,n+1):
if X[i-1] == Y[j-1]:
dp[i][j] = 1 + dp[i-1][j-1]
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
index = dp[m][n]
s = ""
i = m
j = n
while i > 0 and j > 0:
if X[i-1] == Y[j-1]:
s += X[i-1]
i -= 1
j -= 1
elif dp[i-1][j] > dp[i][j-1]:
i -= 1
else:
j -= 1
return s
X = "AGGTAB"
Y = "GXTXAYB"
print("Length of LCS is ", lcs(X , Y, len(X), len(Y)))
| [
0,
1,
2,
3,
4
] |
192 | 5ac4dd62d8e56c7baf38f9fe9f8b4a5034f1cb80 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
# (119ms)
def isSubtree(self, s, t):
"""
:type s: TreeNode
:type t: TreeNode
:rtype: bool
"""
def traverse(root, now):
if not root:
now.append("$")
return
now.append(`root.val`)
traverse(root.left, now)
traverse(root.right, now)
s_list, t_list = [], []
traverse(s, s_list)
traverse(t, t_list)
s_str, t_str= "," + ",".join(s_list), "," + ",".join(t_list)
return t_str in s_str | null | null | null | null | [
0
] |
193 | daccc5aafb3e250e7fa7ac9db69a147b7e916736 | <mask token>
def f(x: float, y: np.ndarray) ->np.ndarray:
"""
Работает с вектором { y , y'}
"""
return np.array([y[1], np.sqrt(abs(-np.exp(y[1]) * y[0] + 2.71 * y[0] **
2 / np.log(x) + 1 / x ** 2))])
<mask token>
| <mask token>
def f(x: float, y: np.ndarray) ->np.ndarray:
"""
Работает с вектором { y , y'}
"""
return np.array([y[1], np.sqrt(abs(-np.exp(y[1]) * y[0] + 2.71 * y[0] **
2 / np.log(x) + 1 / x ** 2))])
def dormand_prince(x_0, Y_0, h, N):
"""
https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method <- таблица Бутчера
x_0: точка, где заданы функция и производная
Y_0: {y(x_0), y'(x_0)}
"""
x_n = x_0
Y_n = Y_0.copy()
xes, yes = [], []
xes.append(x_n)
yes.append(Y_n[0])
for _ in range(int(N)):
k_1 = f(x_n, Y_n)
k_2 = f(x_n + h / 5, Y_n + h * k_1 / 5)
k_3 = f(x_n + 3 * h / 10, Y_n + h * k_1 * 3 / 40 + h * k_2 * 9 / 40)
k_4 = f(x_n + 4 / 5 * h, Y_n + 44 * h * k_1 / 55 - 56 * h * k_2 /
15 + 32 * h * k_3 / 9)
k_5 = f(x_n + 8 / 9 * h, Y_n + 19372 * h * k_1 / 6561 - 25360 /
2187 * h * k_2 + 64448 / 6561 * h * k_3 - 212 / 729 * h * k_4)
k_6 = f(x_n + h, Y_n + 9017 / 3168 * k_1 * h - 355 / 33 * k_2 * h +
46732 / 5247 * k_3 * h + 49 / 176 * k_4 * h - 5103 / 18656 * h *
k_5)
k_7 = f(x_n + h, Y_n + 35 / 384 * k_1 * h + 0 + 500 / 1113 * k_3 *
h + 125 / 192 * k_4 * h - 2187 / 6784 * k_5 * h + 11 / 84 * h * k_6
)
Y_n += h * (35 / 384 * k_1 + 500 / 1113 * k_3 + 125 / 192 * k_4 -
2187 / 6784 * k_5 + 11 / 84 * k_6)
x_n += h
xes.append(x_n)
yes.append(Y_n[0])
return np.array(xes), yes
<mask token>
plt.scatter(xes_1, yes_1)
<mask token>
for i, x in enumerate(xes_2):
if abs(x - 0.5) < 0.001:
print(x, yes_2[i])
if abs(x - 1) < 0.001:
print(x, yes_2[i])
if abs(x - 1.5) < 0.001:
print(x, yes_2[i])
if abs(x - 2) < 0.001:
print(x, yes_2[i])
if abs(x - 2.5) < 0.001:
print(x, yes_2[i])
plt.scatter(xes_2, yes_2)
plt.show()
| <mask token>
def f(x: float, y: np.ndarray) ->np.ndarray:
"""
Работает с вектором { y , y'}
"""
return np.array([y[1], np.sqrt(abs(-np.exp(y[1]) * y[0] + 2.71 * y[0] **
2 / np.log(x) + 1 / x ** 2))])
def dormand_prince(x_0, Y_0, h, N):
"""
https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method <- таблица Бутчера
x_0: точка, где заданы функция и производная
Y_0: {y(x_0), y'(x_0)}
"""
x_n = x_0
Y_n = Y_0.copy()
xes, yes = [], []
xes.append(x_n)
yes.append(Y_n[0])
for _ in range(int(N)):
k_1 = f(x_n, Y_n)
k_2 = f(x_n + h / 5, Y_n + h * k_1 / 5)
k_3 = f(x_n + 3 * h / 10, Y_n + h * k_1 * 3 / 40 + h * k_2 * 9 / 40)
k_4 = f(x_n + 4 / 5 * h, Y_n + 44 * h * k_1 / 55 - 56 * h * k_2 /
15 + 32 * h * k_3 / 9)
k_5 = f(x_n + 8 / 9 * h, Y_n + 19372 * h * k_1 / 6561 - 25360 /
2187 * h * k_2 + 64448 / 6561 * h * k_3 - 212 / 729 * h * k_4)
k_6 = f(x_n + h, Y_n + 9017 / 3168 * k_1 * h - 355 / 33 * k_2 * h +
46732 / 5247 * k_3 * h + 49 / 176 * k_4 * h - 5103 / 18656 * h *
k_5)
k_7 = f(x_n + h, Y_n + 35 / 384 * k_1 * h + 0 + 500 / 1113 * k_3 *
h + 125 / 192 * k_4 * h - 2187 / 6784 * k_5 * h + 11 / 84 * h * k_6
)
Y_n += h * (35 / 384 * k_1 + 500 / 1113 * k_3 + 125 / 192 * k_4 -
2187 / 6784 * k_5 + 11 / 84 * k_6)
x_n += h
xes.append(x_n)
yes.append(Y_n[0])
return np.array(xes), yes
x_0 = 2.71
Y_0 = np.array([2.71, 2.009], dtype=float)
<mask token>
L = [2.71, 7.34]
h_1 = 0.03
N_1 = (L[1] - L[0]) / h_1
h_2 = 0.0005
N_2 = (L[1] - L[0]) / h_2
xes_1, yes_1 = dormand_prince(x_0, Y_0, h_2, N_2)
plt.scatter(xes_1, yes_1)
<mask token>
x_0 = 2.71
Y_0 = np.array([2.71, 2.009], dtype=float)
L_3 = [0.49, 2.71]
h_3 = -0.005
N_3 = (L_3[0] - L_3[1]) / h_3
xes_2, yes_2 = dormand_prince(x_0, Y_0, h_3, N_3)
for i, x in enumerate(xes_2):
if abs(x - 0.5) < 0.001:
print(x, yes_2[i])
if abs(x - 1) < 0.001:
print(x, yes_2[i])
if abs(x - 1.5) < 0.001:
print(x, yes_2[i])
if abs(x - 2) < 0.001:
print(x, yes_2[i])
if abs(x - 2.5) < 0.001:
print(x, yes_2[i])
plt.scatter(xes_2, yes_2)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
def f(x: float, y: np.ndarray) ->np.ndarray:
"""
Работает с вектором { y , y'}
"""
return np.array([y[1], np.sqrt(abs(-np.exp(y[1]) * y[0] + 2.71 * y[0] **
2 / np.log(x) + 1 / x ** 2))])
def dormand_prince(x_0, Y_0, h, N):
"""
https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method <- таблица Бутчера
x_0: точка, где заданы функция и производная
Y_0: {y(x_0), y'(x_0)}
"""
x_n = x_0
Y_n = Y_0.copy()
xes, yes = [], []
xes.append(x_n)
yes.append(Y_n[0])
for _ in range(int(N)):
k_1 = f(x_n, Y_n)
k_2 = f(x_n + h / 5, Y_n + h * k_1 / 5)
k_3 = f(x_n + 3 * h / 10, Y_n + h * k_1 * 3 / 40 + h * k_2 * 9 / 40)
k_4 = f(x_n + 4 / 5 * h, Y_n + 44 * h * k_1 / 55 - 56 * h * k_2 /
15 + 32 * h * k_3 / 9)
k_5 = f(x_n + 8 / 9 * h, Y_n + 19372 * h * k_1 / 6561 - 25360 /
2187 * h * k_2 + 64448 / 6561 * h * k_3 - 212 / 729 * h * k_4)
k_6 = f(x_n + h, Y_n + 9017 / 3168 * k_1 * h - 355 / 33 * k_2 * h +
46732 / 5247 * k_3 * h + 49 / 176 * k_4 * h - 5103 / 18656 * h *
k_5)
k_7 = f(x_n + h, Y_n + 35 / 384 * k_1 * h + 0 + 500 / 1113 * k_3 *
h + 125 / 192 * k_4 * h - 2187 / 6784 * k_5 * h + 11 / 84 * h * k_6
)
Y_n += h * (35 / 384 * k_1 + 500 / 1113 * k_3 + 125 / 192 * k_4 -
2187 / 6784 * k_5 + 11 / 84 * k_6)
x_n += h
xes.append(x_n)
yes.append(Y_n[0])
return np.array(xes), yes
x_0 = 2.71
Y_0 = np.array([2.71, 2.009], dtype=float)
<mask token>
L = [2.71, 7.34]
h_1 = 0.03
N_1 = (L[1] - L[0]) / h_1
h_2 = 0.0005
N_2 = (L[1] - L[0]) / h_2
xes_1, yes_1 = dormand_prince(x_0, Y_0, h_2, N_2)
plt.scatter(xes_1, yes_1)
<mask token>
x_0 = 2.71
Y_0 = np.array([2.71, 2.009], dtype=float)
L_3 = [0.49, 2.71]
h_3 = -0.005
N_3 = (L_3[0] - L_3[1]) / h_3
xes_2, yes_2 = dormand_prince(x_0, Y_0, h_3, N_3)
for i, x in enumerate(xes_2):
if abs(x - 0.5) < 0.001:
print(x, yes_2[i])
if abs(x - 1) < 0.001:
print(x, yes_2[i])
if abs(x - 1.5) < 0.001:
print(x, yes_2[i])
if abs(x - 2) < 0.001:
print(x, yes_2[i])
if abs(x - 2.5) < 0.001:
print(x, yes_2[i])
plt.scatter(xes_2, yes_2)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
def f(x:float,y:np.ndarray) -> np.ndarray:
"""
Работает с вектором { y , y'}
"""
# return some function result
return np.array([y[1], np.sqrt(abs(-np.exp(y[1])*y[0] + 2.71*y[0]**2/np.log(x)+1/x**2))])
# return np.array([y[1], -y[0]])
def dormand_prince(x_0,Y_0,h,N):
"""
https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method <- таблица Бутчера
x_0: точка, где заданы функция и производная
Y_0: {y(x_0), y'(x_0)}
"""
x_n = x_0
Y_n = Y_0.copy()
xes, yes = [],[]
xes.append(x_n)
yes.append(Y_n[0])
for _ in range(int(N)):
k_1 = f(x_n, Y_n)
k_2 = f(x_n+h/5, Y_n+h*k_1/5)
k_3 = f(x_n+3*h/10, Y_n+h*k_1*3/40+h*k_2*9/40)
k_4 = f(x_n+4/5*h, Y_n+44*h*k_1/55 - 56*h*k_2/15 + 32*h*k_3/9)
k_5 = f(x_n+8/9*h, Y_n+19372*h*k_1/6561 - 25360/2187*h*k_2+ 64448/6561*h*k_3 - 212/729*h*k_4)
k_6 = f(x_n+h, Y_n+9017/3168*k_1*h - 355/33*k_2*h + 46732/5247*k_3*h +49/176*k_4*h - 5103/18656*h*k_5)
k_7 = f(x_n+h, Y_n+35/384*k_1*h +0+ 500/1113*k_3*h + 125/192*k_4*h-2187/6784*k_5*h + 11/84*h*k_6)
# print(k_1, k_2, k_3, k_4, k_5, k_6, k_7)
Y_n += h*(35/384*k_1 + 500/1113*k_3 + 125/192*k_4 -2187/6784*k_5 + 11/84*k_6)
x_n += h
xes.append(x_n)
yes.append(Y_n[0])
return np.array(xes), yes
x_0 = 2.71
Y_0 = np.array([2.71, 2.009],dtype = float) # функция и производная в точке х_0
"""
Из-за особенностей заданя, не представляется возмоность увеличить значение производной в начальной точке, поэтому 2
Так же, не стоит менять шаг, иначе все перестает работать ¯\_(ツ)_/¯
"""
L = [2.71, 7.34]
h_1 = 0.03
N_1 = (L[1]-L[0])/h_1
h_2 = 0.0005
N_2 = (L[1]-L[0])/h_2
# N = 100
xes_1 , yes_1 = dormand_prince(x_0,Y_0,h_2,N_2)
plt.scatter(xes_1, yes_1)
"""
Осталось задать значения функции в требуемых точках
"""
x_0 = 2.71
Y_0 = np.array([2.71, 2.009],dtype = float)
L_3 = [0.49, 2.71]
h_3 = -0.005
N_3 = (L_3[0]-L_3[1])/h_3
xes_2, yes_2 = dormand_prince(x_0, Y_0, h_3, N_3)
for i,x in enumerate(xes_2):
if abs(x-0.5)<1e-3:
print(x,yes_2[i])
if abs(x-1)<1e-3:
print(x,yes_2[i])
if abs(x-1.5)<1e-3:
print(x,yes_2[i])
if abs(x-2)<1e-3:
print(x,yes_2[i])
if abs(x-2.5)<1e-3:
print(x,yes_2[i])
plt.scatter(xes_2, yes_2)
plt.show()
| [
1,
3,
4,
5,
6
] |
194 | 18b43ea8696e2e54f4c1cbbece4cde1fd3130145 | <mask token>
class RobotFrameworkServerApi(PythonLanguageServer):
<mask token>
def __init__(self, read_from, write_to, libspec_manager=None, observer:
Optional[IFSObserver]=None):
from robotframework_ls.impl.libspec_manager import LibspecManager
if libspec_manager is None:
try:
libspec_manager = LibspecManager(observer=observer)
except:
log.exception(
'Unable to properly initialize the LibspecManager.')
raise
self.libspec_manager = libspec_manager
PythonLanguageServer.__init__(self, read_from, write_to)
self._version = None
self._next_time = partial(next, itertools.count(0))
<mask token>
<mask token>
def _check_min_version(self, min_version):
from robocorp_ls_core.basic import check_min_version
version = self.m_version()
return check_min_version(version, min_version)
@overrides(PythonLanguageServer.m_workspace__did_change_configuration)
def m_workspace__did_change_configuration(self, **kwargs):
PythonLanguageServer.m_workspace__did_change_configuration(self, **
kwargs)
self.libspec_manager.config = self.config
@overrides(PythonLanguageServer.lint)
def lint(self, *args, **kwargs):
pass
@overrides(PythonLanguageServer.cancel_lint)
def cancel_lint(self, *args, **kwargs):
pass
<mask token>
@overrides(PythonLanguageServer._create_workspace)
def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,
workspace_folders) ->IWorkspace:
from robotframework_ls.impl.robot_workspace import RobotWorkspace
return RobotWorkspace(root_uri, fs_observer, workspace_folders,
libspec_manager=self.libspec_manager)
<mask token>
<mask token>
def m_complete_all(self, doc_uri, line, col):
func = partial(self._threaded_complete_all, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return []
return self._complete_from_completion_context(completion_context)
def _complete_from_completion_context(self, completion_context):
from robotframework_ls.impl import section_name_completions
from robotframework_ls.impl import keyword_completions
from robotframework_ls.impl import variable_completions
from robotframework_ls.impl import dictionary_completions
from robotframework_ls.impl import filesystem_section_completions
from robotframework_ls.impl import keyword_parameter_completions
from robotframework_ls.impl import auto_import_completions
from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found
from robotframework_ls.impl import ast_utils
ret = section_name_completions.complete(completion_context)
if not ret:
ret.extend(filesystem_section_completions.complete(
completion_context))
if not ret:
token_info = completion_context.get_current_token()
if token_info is not None:
token = ast_utils.get_keyword_name_token(token_info.node,
token_info.token)
if token is not None:
keyword_name_to_keyword_found: Dict[str, List[
IKeywordFound]
] = collect_keyword_name_to_keyword_found(
completion_context)
ret.extend(keyword_completions.complete(completion_context)
)
ret.extend(auto_import_completions.complete(
completion_context, keyword_name_to_keyword_found))
return ret
if not ret:
ret.extend(variable_completions.complete(completion_context))
if not ret:
ret.extend(dictionary_completions.complete(completion_context))
if not ret:
ret.extend(keyword_parameter_completions.complete(
completion_context))
return ret
def m_section_name_complete(self, doc_uri, line, col):
from robotframework_ls.impl import section_name_completions
completion_context = self._create_completion_context(doc_uri, line,
col, None)
if completion_context is None:
return []
return section_name_completions.complete(completion_context)
<mask token>
def m_find_definition(self, doc_uri, line, col):
func = partial(self._threaded_find_definition, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_find_definition(self, doc_uri, line, col, monitor
) ->Optional[list]:
from robotframework_ls.impl.find_definition import find_definition
import os.path
from robocorp_ls_core.lsp import Location, Range
from robocorp_ls_core import uris
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
definitions = find_definition(completion_context)
ret = []
for definition in definitions:
if not definition.source:
log.info('Found definition with empty source (%s).', definition
)
continue
if not os.path.exists(definition.source):
log.info('Found definition: %s (but source does not exist).',
definition)
continue
lineno = definition.lineno
if lineno is None or lineno < 0:
lineno = 0
end_lineno = definition.end_lineno
if end_lineno is None or end_lineno < 0:
end_lineno = 0
col_offset = definition.col_offset
end_col_offset = definition.end_col_offset
ret.append(Location(uris.from_fs_path(definition.source), Range
((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()
)
return ret
<mask token>
def _threaded_code_format(self, text_document, options, monitor: IMonitor):
from robotframework_ls.impl.formatting import create_text_edit_from_diff
from robocorp_ls_core.lsp import TextDocumentItem
import os.path
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY
text_document_item = TextDocumentItem(**text_document)
text = text_document_item.text
if not text:
completion_context = self._create_completion_context(
text_document_item.uri, 0, 0, monitor)
if completion_context is None:
return []
text = completion_context.doc.source
if not text:
return []
if options is None:
options = {}
tab_size = options.get('tabSize', 4)
formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,
str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)
if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):
log.critical(
f'Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'
)
return []
if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:
from robotframework_ls.impl.formatting import robot_source_format
new_contents = robot_source_format(text, space_count=tab_size)
else:
if not self._check_min_version((4, 0)):
log.critical(
f'To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}'
)
return []
from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format
ast = completion_context.get_ast()
path = completion_context.doc.path
dirname = '.'
try:
os.stat(path)
except:
ws = self._workspace
if ws is not None:
dirname = ws.root_path
else:
dirname = os.path.dirname(path)
new_contents = robot_tidy_source_format(ast, dirname)
if new_contents is None or new_contents == text:
return []
return [x.to_dict() for x in create_text_edit_from_diff(text,
new_contents)]
<mask token>
<mask token>
def _threaded_signature_help(self, doc_uri: str, line: int, col: int,
monitor: IMonitor) ->Optional[dict]:
from robotframework_ls.impl.signature_help import signature_help
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return signature_help(completion_context)
def m_folding_range(self, doc_uri: str):
func = partial(self._threaded_folding_range, doc_uri)
func = require_monitor(func)
return func
def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[
FoldingRangeTypedDict]:
from robotframework_ls.impl.folding_range import folding_range
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return folding_range(completion_context)
def m_code_lens(self, doc_uri: str):
func = partial(self._threaded_code_lens, doc_uri)
func = require_monitor(func)
return func
def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[
CodeLensTypedDict]:
from robotframework_ls.impl.code_lens import code_lens
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return code_lens(completion_context)
def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):
func = partial(self._threaded_resolve_code_lens, code_lens)
func = require_monitor(func)
return func
<mask token>
<mask token>
def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor
) ->List[DocumentSymbolTypedDict]:
from robotframework_ls.impl.document_symbol import document_symbol
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return document_symbol(completion_context)
<mask token>
def _threaded_list_tests(self, doc_uri: str, monitor: IMonitor) ->List[
ITestInfoTypedDict]:
from robotframework_ls.impl.code_lens import list_tests
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return list_tests(completion_context)
<mask token>
def _threaded_hover(self, doc_uri: str, line, col, monitor: IMonitor
) ->Optional[HoverTypedDict]:
from robotframework_ls.impl.hover import hover
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return hover(completion_context)
def m_workspace_symbols(self, query: Optional[str]=None):
func = partial(self._threaded_workspace_symbols, query)
func = require_monitor(func)
return func
def _threaded_workspace_symbols(self, query: Optional[str], monitor:
IMonitor) ->Optional[List[SymbolInformationTypedDict]]:
from robotframework_ls.impl.workspace_symbols import workspace_symbols
from robotframework_ls.impl.completion_context import BaseContext
from robotframework_ls.impl.protocols import IRobotWorkspace
from typing import cast
workspace = self._workspace
if not workspace:
return []
robot_workspace = cast(IRobotWorkspace, workspace)
return workspace_symbols(query, BaseContext(workspace=
robot_workspace, config=self.config, monitor=monitor))
<mask token>
def m_text_document__semantic_tokens__full(self, textDocument=None):
func = partial(self.threaded_semantic_tokens_full, textDocument=
textDocument)
func = require_monitor(func)
return func
def threaded_semantic_tokens_full(self, textDocument:
TextDocumentTypedDict, monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
doc_uri = textDocument['uri']
context = self._create_completion_context(doc_uri, -1, -1, monitor)
if context is None:
return {'resultId': None, 'data': []}
return {'resultId': None, 'data': semantic_tokens_full(context)}
def m_monaco_completions_from_code_full(self, prefix: str='', full_code:
str='', position=PositionTypedDict, uri: str='', indent: str=''):
func = partial(self.threaded_monaco_completions_from_code_full,
prefix=prefix, full_code=full_code, position=position, uri=uri,
indent=indent)
func = require_monitor(func)
return func
def threaded_monaco_completions_from_code_full(self, prefix: str,
full_code: str, position: PositionTypedDict, uri: str, indent: str,
monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.robot_workspace import RobotDocument
from robotframework_ls.impl.completion_context import CompletionContext
from robocorp_ls_core.workspace import Document
from robotframework_ls.impl import section_completions
from robotframework_ls.impl import snippets_completions
from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion
from robotframework_ls.impl.completion_context import CompletionType
d = Document(uri, prefix)
last_line, _last_col = d.get_last_line_col()
line = last_line + position['line']
col = position['character']
col += len(indent)
document = RobotDocument(uri, full_code)
completion_context = CompletionContext(document, line, col, config=
self.config, monitor=monitor, workspace=self.workspace)
completion_context.type = CompletionType.shell
completions = self._complete_from_completion_context(completion_context
)
completions.extend(section_completions.complete(completion_context))
completions.extend(snippets_completions.complete(completion_context))
return {'suggestions': [convert_to_monaco_completion(c, line_delta=
last_line, col_delta=len(indent), uri=uri) for c in completions]}
<mask token>
def threaded_semantic_tokens_from_code_full(self, prefix: str,
full_code: str, indent: str, monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast
try:
from robotframework_ls.impl.robot_workspace import RobotDocument
doc = RobotDocument('')
doc.source = full_code
ast = doc.get_ast()
data = semantic_tokens_full_from_ast(ast, monitor)
if not prefix:
return {'resultId': None, 'data': data}
prefix_doc = RobotDocument('')
prefix_doc.source = prefix
last_line, last_col = prefix_doc.get_last_line_col()
ints_iter = iter(data)
line = 0
col = 0
new_data = []
indent_len = len(indent)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
line += line_delta
if line_delta == 0:
col += col_delta
else:
col = col_delta
if line >= last_line:
new_data.append(line - last_line)
new_data.append(col_delta - indent_len)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
new_data.append(line_delta)
if line_delta > 0:
new_data.append(col_delta - indent_len)
else:
new_data.append(col_delta)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
break
return {'resultId': None, 'data': new_data}
except:
log.exception('Error computing semantic tokens from code.')
return {'resultId': None, 'data': []}
<mask token>
<mask token>
| <mask token>
class RobotFrameworkServerApi(PythonLanguageServer):
<mask token>
def __init__(self, read_from, write_to, libspec_manager=None, observer:
Optional[IFSObserver]=None):
from robotframework_ls.impl.libspec_manager import LibspecManager
if libspec_manager is None:
try:
libspec_manager = LibspecManager(observer=observer)
except:
log.exception(
'Unable to properly initialize the LibspecManager.')
raise
self.libspec_manager = libspec_manager
PythonLanguageServer.__init__(self, read_from, write_to)
self._version = None
self._next_time = partial(next, itertools.count(0))
<mask token>
def m_version(self):
if self._version is not None:
return self._version
try:
import robot
except:
log.exception("Unable to import 'robot'.")
version = 'NO_ROBOT'
else:
try:
from robot import get_version
version = get_version(naked=True)
except:
log.exception('Unable to get version.')
version = 'N/A'
self._version = version
return self._version
def _check_min_version(self, min_version):
from robocorp_ls_core.basic import check_min_version
version = self.m_version()
return check_min_version(version, min_version)
@overrides(PythonLanguageServer.m_workspace__did_change_configuration)
def m_workspace__did_change_configuration(self, **kwargs):
PythonLanguageServer.m_workspace__did_change_configuration(self, **
kwargs)
self.libspec_manager.config = self.config
@overrides(PythonLanguageServer.lint)
def lint(self, *args, **kwargs):
pass
@overrides(PythonLanguageServer.cancel_lint)
def cancel_lint(self, *args, **kwargs):
pass
<mask token>
@overrides(PythonLanguageServer._create_workspace)
def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,
workspace_folders) ->IWorkspace:
from robotframework_ls.impl.robot_workspace import RobotWorkspace
return RobotWorkspace(root_uri, fs_observer, workspace_folders,
libspec_manager=self.libspec_manager)
def m_lint(self, doc_uri):
if not self._check_min_version((3, 2)):
from robocorp_ls_core.lsp import Error
msg = (
"""robotframework version (%s) too old for linting.
Please install a newer version and restart the language server."""
% (self.m_version(),))
log.info(msg)
return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]
func = partial(self._threaded_lint, doc_uri)
func = require_monitor(func)
return func
<mask token>
def m_complete_all(self, doc_uri, line, col):
func = partial(self._threaded_complete_all, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return []
return self._complete_from_completion_context(completion_context)
def _complete_from_completion_context(self, completion_context):
from robotframework_ls.impl import section_name_completions
from robotframework_ls.impl import keyword_completions
from robotframework_ls.impl import variable_completions
from robotframework_ls.impl import dictionary_completions
from robotframework_ls.impl import filesystem_section_completions
from robotframework_ls.impl import keyword_parameter_completions
from robotframework_ls.impl import auto_import_completions
from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found
from robotframework_ls.impl import ast_utils
ret = section_name_completions.complete(completion_context)
if not ret:
ret.extend(filesystem_section_completions.complete(
completion_context))
if not ret:
token_info = completion_context.get_current_token()
if token_info is not None:
token = ast_utils.get_keyword_name_token(token_info.node,
token_info.token)
if token is not None:
keyword_name_to_keyword_found: Dict[str, List[
IKeywordFound]
] = collect_keyword_name_to_keyword_found(
completion_context)
ret.extend(keyword_completions.complete(completion_context)
)
ret.extend(auto_import_completions.complete(
completion_context, keyword_name_to_keyword_found))
return ret
if not ret:
ret.extend(variable_completions.complete(completion_context))
if not ret:
ret.extend(dictionary_completions.complete(completion_context))
if not ret:
ret.extend(keyword_parameter_completions.complete(
completion_context))
return ret
def m_section_name_complete(self, doc_uri, line, col):
from robotframework_ls.impl import section_name_completions
completion_context = self._create_completion_context(doc_uri, line,
col, None)
if completion_context is None:
return []
return section_name_completions.complete(completion_context)
def m_keyword_complete(self, doc_uri, line, col):
from robotframework_ls.impl import keyword_completions
completion_context = self._create_completion_context(doc_uri, line,
col, None)
if completion_context is None:
return []
return keyword_completions.complete(completion_context)
def m_find_definition(self, doc_uri, line, col):
func = partial(self._threaded_find_definition, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_find_definition(self, doc_uri, line, col, monitor
) ->Optional[list]:
from robotframework_ls.impl.find_definition import find_definition
import os.path
from robocorp_ls_core.lsp import Location, Range
from robocorp_ls_core import uris
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
definitions = find_definition(completion_context)
ret = []
for definition in definitions:
if not definition.source:
log.info('Found definition with empty source (%s).', definition
)
continue
if not os.path.exists(definition.source):
log.info('Found definition: %s (but source does not exist).',
definition)
continue
lineno = definition.lineno
if lineno is None or lineno < 0:
lineno = 0
end_lineno = definition.end_lineno
if end_lineno is None or end_lineno < 0:
end_lineno = 0
col_offset = definition.col_offset
end_col_offset = definition.end_col_offset
ret.append(Location(uris.from_fs_path(definition.source), Range
((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()
)
return ret
<mask token>
def _threaded_code_format(self, text_document, options, monitor: IMonitor):
from robotframework_ls.impl.formatting import create_text_edit_from_diff
from robocorp_ls_core.lsp import TextDocumentItem
import os.path
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY
text_document_item = TextDocumentItem(**text_document)
text = text_document_item.text
if not text:
completion_context = self._create_completion_context(
text_document_item.uri, 0, 0, monitor)
if completion_context is None:
return []
text = completion_context.doc.source
if not text:
return []
if options is None:
options = {}
tab_size = options.get('tabSize', 4)
formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,
str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)
if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):
log.critical(
f'Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'
)
return []
if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:
from robotframework_ls.impl.formatting import robot_source_format
new_contents = robot_source_format(text, space_count=tab_size)
else:
if not self._check_min_version((4, 0)):
log.critical(
f'To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}'
)
return []
from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format
ast = completion_context.get_ast()
path = completion_context.doc.path
dirname = '.'
try:
os.stat(path)
except:
ws = self._workspace
if ws is not None:
dirname = ws.root_path
else:
dirname = os.path.dirname(path)
new_contents = robot_tidy_source_format(ast, dirname)
if new_contents is None or new_contents == text:
return []
return [x.to_dict() for x in create_text_edit_from_diff(text,
new_contents)]
<mask token>
<mask token>
def _threaded_signature_help(self, doc_uri: str, line: int, col: int,
monitor: IMonitor) ->Optional[dict]:
from robotframework_ls.impl.signature_help import signature_help
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return signature_help(completion_context)
def m_folding_range(self, doc_uri: str):
func = partial(self._threaded_folding_range, doc_uri)
func = require_monitor(func)
return func
def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[
FoldingRangeTypedDict]:
from robotframework_ls.impl.folding_range import folding_range
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return folding_range(completion_context)
def m_code_lens(self, doc_uri: str):
func = partial(self._threaded_code_lens, doc_uri)
func = require_monitor(func)
return func
def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[
CodeLensTypedDict]:
from robotframework_ls.impl.code_lens import code_lens
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return code_lens(completion_context)
def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):
func = partial(self._threaded_resolve_code_lens, code_lens)
func = require_monitor(func)
return func
def _threaded_resolve_code_lens(self, code_lens: CodeLensTypedDict,
monitor: IMonitor) ->CodeLensTypedDict:
from robotframework_ls.impl.code_lens import code_lens_resolve
data = code_lens.get('data')
if not isinstance(data, dict):
return code_lens
doc_uri = data.get('uri')
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return code_lens
return code_lens_resolve(completion_context, code_lens)
def m_document_symbol(self, doc_uri: str):
func = partial(self._threaded_document_symbol, doc_uri)
func = require_monitor(func)
return func
def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor
) ->List[DocumentSymbolTypedDict]:
from robotframework_ls.impl.document_symbol import document_symbol
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return document_symbol(completion_context)
<mask token>
def _threaded_list_tests(self, doc_uri: str, monitor: IMonitor) ->List[
ITestInfoTypedDict]:
from robotframework_ls.impl.code_lens import list_tests
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return list_tests(completion_context)
<mask token>
def _threaded_hover(self, doc_uri: str, line, col, monitor: IMonitor
) ->Optional[HoverTypedDict]:
from robotframework_ls.impl.hover import hover
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return hover(completion_context)
def m_workspace_symbols(self, query: Optional[str]=None):
func = partial(self._threaded_workspace_symbols, query)
func = require_monitor(func)
return func
def _threaded_workspace_symbols(self, query: Optional[str], monitor:
IMonitor) ->Optional[List[SymbolInformationTypedDict]]:
from robotframework_ls.impl.workspace_symbols import workspace_symbols
from robotframework_ls.impl.completion_context import BaseContext
from robotframework_ls.impl.protocols import IRobotWorkspace
from typing import cast
workspace = self._workspace
if not workspace:
return []
robot_workspace = cast(IRobotWorkspace, workspace)
return workspace_symbols(query, BaseContext(workspace=
robot_workspace, config=self.config, monitor=monitor))
<mask token>
def m_text_document__semantic_tokens__full(self, textDocument=None):
func = partial(self.threaded_semantic_tokens_full, textDocument=
textDocument)
func = require_monitor(func)
return func
def threaded_semantic_tokens_full(self, textDocument:
TextDocumentTypedDict, monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
doc_uri = textDocument['uri']
context = self._create_completion_context(doc_uri, -1, -1, monitor)
if context is None:
return {'resultId': None, 'data': []}
return {'resultId': None, 'data': semantic_tokens_full(context)}
def m_monaco_completions_from_code_full(self, prefix: str='', full_code:
str='', position=PositionTypedDict, uri: str='', indent: str=''):
func = partial(self.threaded_monaco_completions_from_code_full,
prefix=prefix, full_code=full_code, position=position, uri=uri,
indent=indent)
func = require_monitor(func)
return func
def threaded_monaco_completions_from_code_full(self, prefix: str,
full_code: str, position: PositionTypedDict, uri: str, indent: str,
monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.robot_workspace import RobotDocument
from robotframework_ls.impl.completion_context import CompletionContext
from robocorp_ls_core.workspace import Document
from robotframework_ls.impl import section_completions
from robotframework_ls.impl import snippets_completions
from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion
from robotframework_ls.impl.completion_context import CompletionType
d = Document(uri, prefix)
last_line, _last_col = d.get_last_line_col()
line = last_line + position['line']
col = position['character']
col += len(indent)
document = RobotDocument(uri, full_code)
completion_context = CompletionContext(document, line, col, config=
self.config, monitor=monitor, workspace=self.workspace)
completion_context.type = CompletionType.shell
completions = self._complete_from_completion_context(completion_context
)
completions.extend(section_completions.complete(completion_context))
completions.extend(snippets_completions.complete(completion_context))
return {'suggestions': [convert_to_monaco_completion(c, line_delta=
last_line, col_delta=len(indent), uri=uri) for c in completions]}
def m_semantic_tokens_from_code_full(self, prefix: str='', full_code:
str='', indent: str=''):
func = partial(self.threaded_semantic_tokens_from_code_full, prefix
=prefix, full_code=full_code, indent=indent)
func = require_monitor(func)
return func
def threaded_semantic_tokens_from_code_full(self, prefix: str,
full_code: str, indent: str, monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast
try:
from robotframework_ls.impl.robot_workspace import RobotDocument
doc = RobotDocument('')
doc.source = full_code
ast = doc.get_ast()
data = semantic_tokens_full_from_ast(ast, monitor)
if not prefix:
return {'resultId': None, 'data': data}
prefix_doc = RobotDocument('')
prefix_doc.source = prefix
last_line, last_col = prefix_doc.get_last_line_col()
ints_iter = iter(data)
line = 0
col = 0
new_data = []
indent_len = len(indent)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
line += line_delta
if line_delta == 0:
col += col_delta
else:
col = col_delta
if line >= last_line:
new_data.append(line - last_line)
new_data.append(col_delta - indent_len)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
new_data.append(line_delta)
if line_delta > 0:
new_data.append(col_delta - indent_len)
else:
new_data.append(col_delta)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
break
return {'resultId': None, 'data': new_data}
except:
log.exception('Error computing semantic tokens from code.')
return {'resultId': None, 'data': []}
def m_shutdown(self, **_kwargs):
PythonLanguageServer.m_shutdown(self, **_kwargs)
self.libspec_manager.dispose()
def m_exit(self, **_kwargs):
PythonLanguageServer.m_exit(self, **_kwargs)
self.libspec_manager.dispose()
| <mask token>
class RobotFrameworkServerApi(PythonLanguageServer):
<mask token>
def __init__(self, read_from, write_to, libspec_manager=None, observer:
Optional[IFSObserver]=None):
from robotframework_ls.impl.libspec_manager import LibspecManager
if libspec_manager is None:
try:
libspec_manager = LibspecManager(observer=observer)
except:
log.exception(
'Unable to properly initialize the LibspecManager.')
raise
self.libspec_manager = libspec_manager
PythonLanguageServer.__init__(self, read_from, write_to)
self._version = None
self._next_time = partial(next, itertools.count(0))
<mask token>
def m_version(self):
if self._version is not None:
return self._version
try:
import robot
except:
log.exception("Unable to import 'robot'.")
version = 'NO_ROBOT'
else:
try:
from robot import get_version
version = get_version(naked=True)
except:
log.exception('Unable to get version.')
version = 'N/A'
self._version = version
return self._version
def _check_min_version(self, min_version):
from robocorp_ls_core.basic import check_min_version
version = self.m_version()
return check_min_version(version, min_version)
@overrides(PythonLanguageServer.m_workspace__did_change_configuration)
def m_workspace__did_change_configuration(self, **kwargs):
PythonLanguageServer.m_workspace__did_change_configuration(self, **
kwargs)
self.libspec_manager.config = self.config
@overrides(PythonLanguageServer.lint)
def lint(self, *args, **kwargs):
pass
@overrides(PythonLanguageServer.cancel_lint)
def cancel_lint(self, *args, **kwargs):
pass
<mask token>
@overrides(PythonLanguageServer._create_workspace)
def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,
workspace_folders) ->IWorkspace:
from robotframework_ls.impl.robot_workspace import RobotWorkspace
return RobotWorkspace(root_uri, fs_observer, workspace_folders,
libspec_manager=self.libspec_manager)
def m_lint(self, doc_uri):
if not self._check_min_version((3, 2)):
from robocorp_ls_core.lsp import Error
msg = (
"""robotframework version (%s) too old for linting.
Please install a newer version and restart the language server."""
% (self.m_version(),))
log.info(msg)
return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]
func = partial(self._threaded_lint, doc_uri)
func = require_monitor(func)
return func
<mask token>
def m_complete_all(self, doc_uri, line, col):
func = partial(self._threaded_complete_all, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return []
return self._complete_from_completion_context(completion_context)
def _complete_from_completion_context(self, completion_context):
from robotframework_ls.impl import section_name_completions
from robotframework_ls.impl import keyword_completions
from robotframework_ls.impl import variable_completions
from robotframework_ls.impl import dictionary_completions
from robotframework_ls.impl import filesystem_section_completions
from robotframework_ls.impl import keyword_parameter_completions
from robotframework_ls.impl import auto_import_completions
from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found
from robotframework_ls.impl import ast_utils
ret = section_name_completions.complete(completion_context)
if not ret:
ret.extend(filesystem_section_completions.complete(
completion_context))
if not ret:
token_info = completion_context.get_current_token()
if token_info is not None:
token = ast_utils.get_keyword_name_token(token_info.node,
token_info.token)
if token is not None:
keyword_name_to_keyword_found: Dict[str, List[
IKeywordFound]
] = collect_keyword_name_to_keyword_found(
completion_context)
ret.extend(keyword_completions.complete(completion_context)
)
ret.extend(auto_import_completions.complete(
completion_context, keyword_name_to_keyword_found))
return ret
if not ret:
ret.extend(variable_completions.complete(completion_context))
if not ret:
ret.extend(dictionary_completions.complete(completion_context))
if not ret:
ret.extend(keyword_parameter_completions.complete(
completion_context))
return ret
def m_section_name_complete(self, doc_uri, line, col):
from robotframework_ls.impl import section_name_completions
completion_context = self._create_completion_context(doc_uri, line,
col, None)
if completion_context is None:
return []
return section_name_completions.complete(completion_context)
def m_keyword_complete(self, doc_uri, line, col):
from robotframework_ls.impl import keyword_completions
completion_context = self._create_completion_context(doc_uri, line,
col, None)
if completion_context is None:
return []
return keyword_completions.complete(completion_context)
def m_find_definition(self, doc_uri, line, col):
func = partial(self._threaded_find_definition, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_find_definition(self, doc_uri, line, col, monitor
) ->Optional[list]:
from robotframework_ls.impl.find_definition import find_definition
import os.path
from robocorp_ls_core.lsp import Location, Range
from robocorp_ls_core import uris
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
definitions = find_definition(completion_context)
ret = []
for definition in definitions:
if not definition.source:
log.info('Found definition with empty source (%s).', definition
)
continue
if not os.path.exists(definition.source):
log.info('Found definition: %s (but source does not exist).',
definition)
continue
lineno = definition.lineno
if lineno is None or lineno < 0:
lineno = 0
end_lineno = definition.end_lineno
if end_lineno is None or end_lineno < 0:
end_lineno = 0
col_offset = definition.col_offset
end_col_offset = definition.end_col_offset
ret.append(Location(uris.from_fs_path(definition.source), Range
((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()
)
return ret
def m_code_format(self, text_document, options):
func = partial(self._threaded_code_format, text_document, options)
func = require_monitor(func)
return func
def _threaded_code_format(self, text_document, options, monitor: IMonitor):
from robotframework_ls.impl.formatting import create_text_edit_from_diff
from robocorp_ls_core.lsp import TextDocumentItem
import os.path
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY
text_document_item = TextDocumentItem(**text_document)
text = text_document_item.text
if not text:
completion_context = self._create_completion_context(
text_document_item.uri, 0, 0, monitor)
if completion_context is None:
return []
text = completion_context.doc.source
if not text:
return []
if options is None:
options = {}
tab_size = options.get('tabSize', 4)
formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,
str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)
if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):
log.critical(
f'Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'
)
return []
if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:
from robotframework_ls.impl.formatting import robot_source_format
new_contents = robot_source_format(text, space_count=tab_size)
else:
if not self._check_min_version((4, 0)):
log.critical(
f'To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}'
)
return []
from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format
ast = completion_context.get_ast()
path = completion_context.doc.path
dirname = '.'
try:
os.stat(path)
except:
ws = self._workspace
if ws is not None:
dirname = ws.root_path
else:
dirname = os.path.dirname(path)
new_contents = robot_tidy_source_format(ast, dirname)
if new_contents is None or new_contents == text:
return []
return [x.to_dict() for x in create_text_edit_from_diff(text,
new_contents)]
<mask token>
<mask token>
def _threaded_signature_help(self, doc_uri: str, line: int, col: int,
monitor: IMonitor) ->Optional[dict]:
from robotframework_ls.impl.signature_help import signature_help
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return signature_help(completion_context)
def m_folding_range(self, doc_uri: str):
func = partial(self._threaded_folding_range, doc_uri)
func = require_monitor(func)
return func
def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[
FoldingRangeTypedDict]:
from robotframework_ls.impl.folding_range import folding_range
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return folding_range(completion_context)
def m_code_lens(self, doc_uri: str):
func = partial(self._threaded_code_lens, doc_uri)
func = require_monitor(func)
return func
def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[
CodeLensTypedDict]:
from robotframework_ls.impl.code_lens import code_lens
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return code_lens(completion_context)
def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):
func = partial(self._threaded_resolve_code_lens, code_lens)
func = require_monitor(func)
return func
def _threaded_resolve_code_lens(self, code_lens: CodeLensTypedDict,
monitor: IMonitor) ->CodeLensTypedDict:
from robotframework_ls.impl.code_lens import code_lens_resolve
data = code_lens.get('data')
if not isinstance(data, dict):
return code_lens
doc_uri = data.get('uri')
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return code_lens
return code_lens_resolve(completion_context, code_lens)
def m_document_symbol(self, doc_uri: str):
func = partial(self._threaded_document_symbol, doc_uri)
func = require_monitor(func)
return func
def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor
) ->List[DocumentSymbolTypedDict]:
from robotframework_ls.impl.document_symbol import document_symbol
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return document_symbol(completion_context)
<mask token>
def _threaded_list_tests(self, doc_uri: str, monitor: IMonitor) ->List[
ITestInfoTypedDict]:
from robotframework_ls.impl.code_lens import list_tests
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return list_tests(completion_context)
def m_hover(self, doc_uri: str, line: int, col: int):
func = partial(self._threaded_hover, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_hover(self, doc_uri: str, line, col, monitor: IMonitor
) ->Optional[HoverTypedDict]:
from robotframework_ls.impl.hover import hover
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return hover(completion_context)
def m_workspace_symbols(self, query: Optional[str]=None):
func = partial(self._threaded_workspace_symbols, query)
func = require_monitor(func)
return func
def _threaded_workspace_symbols(self, query: Optional[str], monitor:
IMonitor) ->Optional[List[SymbolInformationTypedDict]]:
from robotframework_ls.impl.workspace_symbols import workspace_symbols
from robotframework_ls.impl.completion_context import BaseContext
from robotframework_ls.impl.protocols import IRobotWorkspace
from typing import cast
workspace = self._workspace
if not workspace:
return []
robot_workspace = cast(IRobotWorkspace, workspace)
return workspace_symbols(query, BaseContext(workspace=
robot_workspace, config=self.config, monitor=monitor))
<mask token>
def m_text_document__semantic_tokens__full(self, textDocument=None):
func = partial(self.threaded_semantic_tokens_full, textDocument=
textDocument)
func = require_monitor(func)
return func
def threaded_semantic_tokens_full(self, textDocument:
TextDocumentTypedDict, monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
doc_uri = textDocument['uri']
context = self._create_completion_context(doc_uri, -1, -1, monitor)
if context is None:
return {'resultId': None, 'data': []}
return {'resultId': None, 'data': semantic_tokens_full(context)}
def m_monaco_completions_from_code_full(self, prefix: str='', full_code:
str='', position=PositionTypedDict, uri: str='', indent: str=''):
func = partial(self.threaded_monaco_completions_from_code_full,
prefix=prefix, full_code=full_code, position=position, uri=uri,
indent=indent)
func = require_monitor(func)
return func
    def threaded_monaco_completions_from_code_full(self, prefix: str,
        full_code: str, position: PositionTypedDict, uri: str, indent: str,
        monitor: Optional[IMonitor]=None):
        """Compute completions for code edited in a Monaco (web) editor.

        :param prefix: text that conceptually precedes the edited snippet
            (the snippet's positions are relative to the end of this prefix).
        :param full_code: the full document text (prefix + snippet).
        :param position: cursor position relative to the snippet.
        :param uri: uri used to identify the document.
        :param indent: indentation prepended to each snippet line in the
            editor (column values are shifted by its length).
        :return: a Monaco-style dict: {'suggestions': [...]}.
        """
        from robotframework_ls.impl.robot_workspace import RobotDocument
        from robotframework_ls.impl.completion_context import CompletionContext
        from robocorp_ls_core.workspace import Document
        from robotframework_ls.impl import section_completions
        from robotframework_ls.impl import snippets_completions
        from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion
        from robotframework_ls.impl.completion_context import CompletionType
        # Translate the snippet-relative position into a position in full_code:
        # lines are offset by the prefix's last line, columns by the indent.
        d = Document(uri, prefix)
        last_line, _last_col = d.get_last_line_col()
        line = last_line + position['line']
        col = position['character']
        col += len(indent)
        document = RobotDocument(uri, full_code)
        completion_context = CompletionContext(document, line, col, config=
            self.config, monitor=monitor, workspace=self.workspace)
        completion_context.type = CompletionType.shell
        completions = self._complete_from_completion_context(completion_context
            )
        # Shell-type completions additionally offer sections and snippets.
        completions.extend(section_completions.complete(completion_context))
        completions.extend(snippets_completions.complete(completion_context))
        # Convert back to snippet-relative coordinates for the client.
        return {'suggestions': [convert_to_monaco_completion(c, line_delta=
            last_line, col_delta=len(indent), uri=uri) for c in completions]}
def m_semantic_tokens_from_code_full(self, prefix: str='', full_code:
str='', indent: str=''):
func = partial(self.threaded_semantic_tokens_from_code_full, prefix
=prefix, full_code=full_code, indent=indent)
func = require_monitor(func)
return func
    def threaded_semantic_tokens_from_code_full(self, prefix: str,
        full_code: str, indent: str, monitor: Optional[IMonitor]=None):
        """Compute LSP semantic tokens for an editor-provided snippet.

        Tokens are computed for *full_code* (prefix + snippet) and then the
        entries belonging to the prefix are dropped, with the remaining
        line/column deltas rebased so they are relative to the snippet
        (and shifted left by the indent width).

        :return: {'resultId': None, 'data': [ints]} in the LSP
            semantic-tokens relative-delta encoding (5 ints per token).
        """
        from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast
        try:
            from robotframework_ls.impl.robot_workspace import RobotDocument
            doc = RobotDocument('')
            doc.source = full_code
            ast = doc.get_ast()
            data = semantic_tokens_full_from_ast(ast, monitor)
            if not prefix:
                # No prefix: the data already matches the snippet.
                return {'resultId': None, 'data': data}
            # Find where the snippet starts (last line of the prefix).
            prefix_doc = RobotDocument('')
            prefix_doc.source = prefix
            last_line, last_col = prefix_doc.get_last_line_col()
            ints_iter = iter(data)
            # Absolute position tracked while scanning the delta stream.
            line = 0
            col = 0
            new_data = []
            indent_len = len(indent)
            while True:
                try:
                    line_delta = next(ints_iter)
                except StopIteration:
                    break
                # Each token is encoded as 5 consecutive ints.
                col_delta = next(ints_iter)
                token_len = next(ints_iter)
                token_type = next(ints_iter)
                token_modifier = next(ints_iter)
                line += line_delta
                if line_delta == 0:
                    col += col_delta
                else:
                    col = col_delta
                if line >= last_line:
                    # First token inside the snippet: rebase its line delta to
                    # the snippet start and unshift the indent from its column.
                    new_data.append(line - last_line)
                    new_data.append(col_delta - indent_len)
                    new_data.append(token_len)
                    new_data.append(token_type)
                    new_data.append(token_modifier)
                    # All remaining tokens are inside the snippet; copy them,
                    # only adjusting the column of each line's first token.
                    while True:
                        try:
                            line_delta = next(ints_iter)
                        except StopIteration:
                            break
                        col_delta = next(ints_iter)
                        token_len = next(ints_iter)
                        token_type = next(ints_iter)
                        token_modifier = next(ints_iter)
                        new_data.append(line_delta)
                        if line_delta > 0:
                            # New line: column is absolute, remove the indent.
                            new_data.append(col_delta - indent_len)
                        else:
                            # Same line: column is relative, keep as-is.
                            new_data.append(col_delta)
                        new_data.append(token_len)
                        new_data.append(token_type)
                        new_data.append(token_modifier)
                    break
            return {'resultId': None, 'data': new_data}
        except:
            # Best-effort: an empty token list degrades gracefully on clients.
            log.exception('Error computing semantic tokens from code.')
            return {'resultId': None, 'data': []}
    def m_shutdown(self, **_kwargs):
        """LSP shutdown: also release resources held by the libspec manager."""
        PythonLanguageServer.m_shutdown(self, **_kwargs)
        self.libspec_manager.dispose()
    def m_exit(self, **_kwargs):
        """LSP exit: also release resources held by the libspec manager."""
        PythonLanguageServer.m_exit(self, **_kwargs)
        self.libspec_manager.dispose()
| <mask token>
class RobotFrameworkServerApi(PythonLanguageServer):
"""
This is a custom server. It uses the same message-format used in the language
server but with custom messages (i.e.: this is not the language server, but
an API to use the bits we need from robotframework in a separate process).
"""
    def __init__(self, read_from, write_to, libspec_manager=None, observer:
        Optional[IFSObserver]=None):
        """
        :param read_from: stream the JSON-RPC messages are read from.
        :param write_to: stream the JSON-RPC messages are written to.
        :param libspec_manager: shared LibspecManager instance; created on
            demand when not given.
        :param observer: filesystem observer forwarded to a newly created
            LibspecManager (unused when one is passed in).
        """
        from robotframework_ls.impl.libspec_manager import LibspecManager
        if libspec_manager is None:
            try:
                libspec_manager = LibspecManager(observer=observer)
            except:
                # Without the libspec manager the server cannot work: log and
                # re-raise so startup fails loudly.
                log.exception(
                    'Unable to properly initialize the LibspecManager.')
                raise
        self.libspec_manager = libspec_manager
        PythonLanguageServer.__init__(self, read_from, write_to)
        # Cached result of m_version() (computed lazily).
        self._version = None
        # Monotonically increasing counter (thread-safe via the GIL).
        self._next_time = partial(next, itertools.count(0))
@overrides(PythonLanguageServer._create_config)
def _create_config(self) ->IConfig:
from robotframework_ls.robot_config import RobotConfig
return RobotConfig()
def m_version(self):
if self._version is not None:
return self._version
try:
import robot
except:
log.exception("Unable to import 'robot'.")
version = 'NO_ROBOT'
else:
try:
from robot import get_version
version = get_version(naked=True)
except:
log.exception('Unable to get version.')
version = 'N/A'
self._version = version
return self._version
def _check_min_version(self, min_version):
from robocorp_ls_core.basic import check_min_version
version = self.m_version()
return check_min_version(version, min_version)
    @overrides(PythonLanguageServer.m_workspace__did_change_configuration)
    def m_workspace__did_change_configuration(self, **kwargs):
        """Apply configuration changes and propagate them to the libspec manager."""
        PythonLanguageServer.m_workspace__did_change_configuration(self, **
            kwargs)
        self.libspec_manager.config = self.config
    @overrides(PythonLanguageServer.lint)
    def lint(self, *args, **kwargs):
        # Intentional no-op: this API process lints only when explicitly
        # asked through m_lint (the client side drives lint scheduling).
        pass
    @overrides(PythonLanguageServer.cancel_lint)
    def cancel_lint(self, *args, **kwargs):
        # Intentional no-op: see lint() above -- there is no automatic lint
        # to cancel in this process.
        pass
    @overrides(PythonLanguageServer._obtain_fs_observer)
    def _obtain_fs_observer(self) ->IFSObserver:
        # Reuse the observer owned by the libspec manager so the workspace
        # and libspec tracking share a single filesystem observer.
        return self.libspec_manager.fs_observer
@overrides(PythonLanguageServer._create_workspace)
def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,
workspace_folders) ->IWorkspace:
from robotframework_ls.impl.robot_workspace import RobotWorkspace
return RobotWorkspace(root_uri, fs_observer, workspace_folders,
libspec_manager=self.libspec_manager)
    def m_lint(self, doc_uri):
        """Entry point for lint requests.

        Returns either a list of LSP diagnostics directly (when linting is
        not possible) or a monitored callable computing them in a thread.
        """
        # The AST-based lint needs Robot Framework >= 3.2.
        if not self._check_min_version((3, 2)):
            from robocorp_ls_core.lsp import Error
            msg = (
                """robotframework version (%s) too old for linting.
                Please install a newer version and restart the language server."""
                 % (self.m_version(),))
            log.info(msg)
            return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]
        func = partial(self._threaded_lint, doc_uri)
        func = require_monitor(func)
        return func
def _threaded_lint(self, doc_uri, monitor: IMonitor):
from robocorp_ls_core.jsonrpc.exceptions import JsonRpcRequestCancelled
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_LINT_ROBOCOP_ENABLED
from robocorp_ls_core import uris
from robocorp_ls_core.lsp import Error
try:
from robotframework_ls.impl.ast_utils import collect_errors
from robotframework_ls.impl import code_analysis
import os.path
log.debug('Lint: starting (in thread).')
completion_context = self._create_completion_context(doc_uri, 0,
0, monitor)
if completion_context is None:
return []
config = completion_context.config
robocop_enabled = config is None or config.get_setting(
OPTION_ROBOT_LINT_ROBOCOP_ENABLED, bool, False)
ast = completion_context.get_ast()
source = completion_context.doc.source
monitor.check_cancelled()
errors = collect_errors(ast)
log.debug('Collected AST errors (in thread): %s', len(errors))
monitor.check_cancelled()
analysis_errors = code_analysis.collect_analysis_errors(
completion_context)
monitor.check_cancelled()
log.debug('Collected analysis errors (in thread): %s', len(
analysis_errors))
errors.extend(analysis_errors)
lsp_diagnostics = [error.to_lsp_diagnostic() for error in errors]
try:
if robocop_enabled:
from robocorp_ls_core.robocop_wrapper import collect_robocop_diagnostics
workspace = completion_context.workspace
if workspace is not None:
project_root = workspace.root_path
else:
project_root = os.path.abspath('.')
monitor.check_cancelled()
lsp_diagnostics.extend(collect_robocop_diagnostics(
project_root, ast, uris.to_fs_path(doc_uri), source))
except Exception as e:
log.exception(
'Error collecting Robocop errors (possibly an unsupported Robocop version is installed).'
)
lsp_diagnostics.append(Error(
f'Error collecting Robocop errors: {e}', (0, 0), (1, 0)
).to_lsp_diagnostic())
return lsp_diagnostics
except JsonRpcRequestCancelled:
raise JsonRpcRequestCancelled('Lint cancelled (inside lint)')
except Exception as e:
log.exception('Error collecting errors.')
ret = [Error(f'Error collecting Robocop errors: {e}', (0, 0), (
1, 0)).to_lsp_diagnostic()]
return ret
def m_complete_all(self, doc_uri, line, col):
func = partial(self._threaded_complete_all, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return []
return self._complete_from_completion_context(completion_context)
    def _complete_from_completion_context(self, completion_context):
        """Gather completions from the applicable providers.

        Providers are tried in priority order and, except for the keyword
        case, a non-empty result short-circuits the remaining providers.
        """
        from robotframework_ls.impl import section_name_completions
        from robotframework_ls.impl import keyword_completions
        from robotframework_ls.impl import variable_completions
        from robotframework_ls.impl import dictionary_completions
        from robotframework_ls.impl import filesystem_section_completions
        from robotframework_ls.impl import keyword_parameter_completions
        from robotframework_ls.impl import auto_import_completions
        from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found
        from robotframework_ls.impl import ast_utils
        ret = section_name_completions.complete(completion_context)
        if not ret:
            ret.extend(filesystem_section_completions.complete(
                completion_context))
        if not ret:
            token_info = completion_context.get_current_token()
            if token_info is not None:
                token = ast_utils.get_keyword_name_token(token_info.node,
                    token_info.token)
                if token is not None:
                    # On a keyword-name token: offer known keywords plus
                    # auto-import suggestions, and return immediately so the
                    # lower-priority providers don't run.
                    keyword_name_to_keyword_found: Dict[str, List[
                        IKeywordFound]
                        ] = collect_keyword_name_to_keyword_found(
                        completion_context)
                    ret.extend(keyword_completions.complete(completion_context)
                        )
                    ret.extend(auto_import_completions.complete(
                        completion_context, keyword_name_to_keyword_found))
                    return ret
        if not ret:
            ret.extend(variable_completions.complete(completion_context))
        if not ret:
            ret.extend(dictionary_completions.complete(completion_context))
        if not ret:
            ret.extend(keyword_parameter_completions.complete(
                completion_context))
        return ret
def m_section_name_complete(self, doc_uri, line, col):
from robotframework_ls.impl import section_name_completions
completion_context = self._create_completion_context(doc_uri, line,
col, None)
if completion_context is None:
return []
return section_name_completions.complete(completion_context)
def m_keyword_complete(self, doc_uri, line, col):
from robotframework_ls.impl import keyword_completions
completion_context = self._create_completion_context(doc_uri, line,
col, None)
if completion_context is None:
return []
return keyword_completions.complete(completion_context)
def m_find_definition(self, doc_uri, line, col):
func = partial(self._threaded_find_definition, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_find_definition(self, doc_uri, line, col, monitor
) ->Optional[list]:
from robotframework_ls.impl.find_definition import find_definition
import os.path
from robocorp_ls_core.lsp import Location, Range
from robocorp_ls_core import uris
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
definitions = find_definition(completion_context)
ret = []
for definition in definitions:
if not definition.source:
log.info('Found definition with empty source (%s).', definition
)
continue
if not os.path.exists(definition.source):
log.info('Found definition: %s (but source does not exist).',
definition)
continue
lineno = definition.lineno
if lineno is None or lineno < 0:
lineno = 0
end_lineno = definition.end_lineno
if end_lineno is None or end_lineno < 0:
end_lineno = 0
col_offset = definition.col_offset
end_col_offset = definition.end_col_offset
ret.append(Location(uris.from_fs_path(definition.source), Range
((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()
)
return ret
def m_code_format(self, text_document, options):
func = partial(self._threaded_code_format, text_document, options)
func = require_monitor(func)
return func
def _threaded_code_format(self, text_document, options, monitor: IMonitor):
from robotframework_ls.impl.formatting import create_text_edit_from_diff
from robocorp_ls_core.lsp import TextDocumentItem
import os.path
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY
text_document_item = TextDocumentItem(**text_document)
text = text_document_item.text
if not text:
completion_context = self._create_completion_context(
text_document_item.uri, 0, 0, monitor)
if completion_context is None:
return []
text = completion_context.doc.source
if not text:
return []
if options is None:
options = {}
tab_size = options.get('tabSize', 4)
formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,
str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)
if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):
log.critical(
f'Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'
)
return []
if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:
from robotframework_ls.impl.formatting import robot_source_format
new_contents = robot_source_format(text, space_count=tab_size)
else:
if not self._check_min_version((4, 0)):
log.critical(
f'To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}'
)
return []
from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format
ast = completion_context.get_ast()
path = completion_context.doc.path
dirname = '.'
try:
os.stat(path)
except:
ws = self._workspace
if ws is not None:
dirname = ws.root_path
else:
dirname = os.path.dirname(path)
new_contents = robot_tidy_source_format(ast, dirname)
if new_contents is None or new_contents == text:
return []
return [x.to_dict() for x in create_text_edit_from_diff(text,
new_contents)]
    def _create_completion_context(self, doc_uri, line, col, monitor:
        Optional[IMonitor]):
        """Create a CompletionContext for *doc_uri* at (line, col).

        Returns None when the Robot Framework version is too old, the
        workspace is not yet initialized or the document cannot be loaded
        (each case is logged).
        """
        from robotframework_ls.impl.completion_context import CompletionContext
        if not self._check_min_version((3, 2)):
            log.info('robotframework version too old.')
            return None
        workspace = self.workspace
        if not workspace:
            log.info('Workspace still not initialized.')
            return None
        # Fall back to reading from disk when the doc isn't open in memory.
        document = workspace.get_document(doc_uri, accept_from_file=True)
        if document is None:
            log.info('Unable to get document for uri: %s.', doc_uri)
            return None
        return CompletionContext(document, line, col, workspace=workspace,
            config=self.config, monitor=monitor)
def m_signature_help(self, doc_uri: str, line: int, col: int):
func = partial(self._threaded_signature_help, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_signature_help(self, doc_uri: str, line: int, col: int,
monitor: IMonitor) ->Optional[dict]:
from robotframework_ls.impl.signature_help import signature_help
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return signature_help(completion_context)
def m_folding_range(self, doc_uri: str):
func = partial(self._threaded_folding_range, doc_uri)
func = require_monitor(func)
return func
def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[
FoldingRangeTypedDict]:
from robotframework_ls.impl.folding_range import folding_range
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return folding_range(completion_context)
def m_code_lens(self, doc_uri: str):
func = partial(self._threaded_code_lens, doc_uri)
func = require_monitor(func)
return func
def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[
CodeLensTypedDict]:
from robotframework_ls.impl.code_lens import code_lens
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return code_lens(completion_context)
def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):
func = partial(self._threaded_resolve_code_lens, code_lens)
func = require_monitor(func)
return func
def _threaded_resolve_code_lens(self, code_lens: CodeLensTypedDict,
monitor: IMonitor) ->CodeLensTypedDict:
from robotframework_ls.impl.code_lens import code_lens_resolve
data = code_lens.get('data')
if not isinstance(data, dict):
return code_lens
doc_uri = data.get('uri')
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return code_lens
return code_lens_resolve(completion_context, code_lens)
def m_document_symbol(self, doc_uri: str):
func = partial(self._threaded_document_symbol, doc_uri)
func = require_monitor(func)
return func
def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor
) ->List[DocumentSymbolTypedDict]:
from robotframework_ls.impl.document_symbol import document_symbol
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return document_symbol(completion_context)
def m_list_tests(self, doc_uri: str):
func = partial(self._threaded_list_tests, doc_uri)
func = require_monitor(func)
return func
def _threaded_list_tests(self, doc_uri: str, monitor: IMonitor) ->List[
ITestInfoTypedDict]:
from robotframework_ls.impl.code_lens import list_tests
completion_context = self._create_completion_context(doc_uri, 0, 0,
monitor)
if completion_context is None:
return []
return list_tests(completion_context)
def m_hover(self, doc_uri: str, line: int, col: int):
func = partial(self._threaded_hover, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_hover(self, doc_uri: str, line, col, monitor: IMonitor
) ->Optional[HoverTypedDict]:
from robotframework_ls.impl.hover import hover
completion_context = self._create_completion_context(doc_uri, line,
col, monitor)
if completion_context is None:
return None
return hover(completion_context)
def m_workspace_symbols(self, query: Optional[str]=None):
func = partial(self._threaded_workspace_symbols, query)
func = require_monitor(func)
return func
def _threaded_workspace_symbols(self, query: Optional[str], monitor:
IMonitor) ->Optional[List[SymbolInformationTypedDict]]:
from robotframework_ls.impl.workspace_symbols import workspace_symbols
from robotframework_ls.impl.completion_context import BaseContext
from robotframework_ls.impl.protocols import IRobotWorkspace
from typing import cast
workspace = self._workspace
if not workspace:
return []
robot_workspace = cast(IRobotWorkspace, workspace)
return workspace_symbols(query, BaseContext(workspace=
robot_workspace, config=self.config, monitor=monitor))
def m_text_document__semantic_tokens__range(self, textDocument=None,
range=None):
raise RuntimeError('Not currently implemented!')
def m_text_document__semantic_tokens__full(self, textDocument=None):
func = partial(self.threaded_semantic_tokens_full, textDocument=
textDocument)
func = require_monitor(func)
return func
def threaded_semantic_tokens_full(self, textDocument:
TextDocumentTypedDict, monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
doc_uri = textDocument['uri']
context = self._create_completion_context(doc_uri, -1, -1, monitor)
if context is None:
return {'resultId': None, 'data': []}
return {'resultId': None, 'data': semantic_tokens_full(context)}
def m_monaco_completions_from_code_full(self, prefix: str='', full_code:
str='', position=PositionTypedDict, uri: str='', indent: str=''):
func = partial(self.threaded_monaco_completions_from_code_full,
prefix=prefix, full_code=full_code, position=position, uri=uri,
indent=indent)
func = require_monitor(func)
return func
def threaded_monaco_completions_from_code_full(self, prefix: str,
full_code: str, position: PositionTypedDict, uri: str, indent: str,
monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.robot_workspace import RobotDocument
from robotframework_ls.impl.completion_context import CompletionContext
from robocorp_ls_core.workspace import Document
from robotframework_ls.impl import section_completions
from robotframework_ls.impl import snippets_completions
from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion
from robotframework_ls.impl.completion_context import CompletionType
d = Document(uri, prefix)
last_line, _last_col = d.get_last_line_col()
line = last_line + position['line']
col = position['character']
col += len(indent)
document = RobotDocument(uri, full_code)
completion_context = CompletionContext(document, line, col, config=
self.config, monitor=monitor, workspace=self.workspace)
completion_context.type = CompletionType.shell
completions = self._complete_from_completion_context(completion_context
)
completions.extend(section_completions.complete(completion_context))
completions.extend(snippets_completions.complete(completion_context))
return {'suggestions': [convert_to_monaco_completion(c, line_delta=
last_line, col_delta=len(indent), uri=uri) for c in completions]}
def m_semantic_tokens_from_code_full(self, prefix: str='', full_code:
str='', indent: str=''):
func = partial(self.threaded_semantic_tokens_from_code_full, prefix
=prefix, full_code=full_code, indent=indent)
func = require_monitor(func)
return func
def threaded_semantic_tokens_from_code_full(self, prefix: str,
full_code: str, indent: str, monitor: Optional[IMonitor]=None):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast
try:
from robotframework_ls.impl.robot_workspace import RobotDocument
doc = RobotDocument('')
doc.source = full_code
ast = doc.get_ast()
data = semantic_tokens_full_from_ast(ast, monitor)
if not prefix:
return {'resultId': None, 'data': data}
prefix_doc = RobotDocument('')
prefix_doc.source = prefix
last_line, last_col = prefix_doc.get_last_line_col()
ints_iter = iter(data)
line = 0
col = 0
new_data = []
indent_len = len(indent)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
line += line_delta
if line_delta == 0:
col += col_delta
else:
col = col_delta
if line >= last_line:
new_data.append(line - last_line)
new_data.append(col_delta - indent_len)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
new_data.append(line_delta)
if line_delta > 0:
new_data.append(col_delta - indent_len)
else:
new_data.append(col_delta)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
break
return {'resultId': None, 'data': new_data}
except:
log.exception('Error computing semantic tokens from code.')
return {'resultId': None, 'data': []}
def m_shutdown(self, **_kwargs):
PythonLanguageServer.m_shutdown(self, **_kwargs)
self.libspec_manager.dispose()
def m_exit(self, **_kwargs):
PythonLanguageServer.m_exit(self, **_kwargs)
self.libspec_manager.dispose()
| from robocorp_ls_core.python_ls import PythonLanguageServer
from robocorp_ls_core.basic import overrides
from robocorp_ls_core.robotframework_log import get_logger
from typing import Optional, List, Dict
from robocorp_ls_core.protocols import IConfig, IMonitor, ITestInfoTypedDict, IWorkspace
from functools import partial
from robocorp_ls_core.jsonrpc.endpoint import require_monitor
from robocorp_ls_core.lsp import (
SymbolInformationTypedDict,
FoldingRangeTypedDict,
HoverTypedDict,
TextDocumentTypedDict,
CodeLensTypedDict,
DocumentSymbolTypedDict,
PositionTypedDict,
)
from robotframework_ls.impl.protocols import IKeywordFound
from robocorp_ls_core.watchdog_wrapper import IFSObserver
import itertools
log = get_logger(__name__)
class RobotFrameworkServerApi(PythonLanguageServer):
"""
This is a custom server. It uses the same message-format used in the language
server but with custom messages (i.e.: this is not the language server, but
an API to use the bits we need from robotframework in a separate process).
"""
def __init__(
self,
read_from,
write_to,
libspec_manager=None,
observer: Optional[IFSObserver] = None,
):
from robotframework_ls.impl.libspec_manager import LibspecManager
if libspec_manager is None:
try:
libspec_manager = LibspecManager(observer=observer)
except:
log.exception("Unable to properly initialize the LibspecManager.")
raise
self.libspec_manager = libspec_manager
PythonLanguageServer.__init__(self, read_from, write_to)
self._version = None
self._next_time = partial(next, itertools.count(0))
@overrides(PythonLanguageServer._create_config)
def _create_config(self) -> IConfig:
from robotframework_ls.robot_config import RobotConfig
return RobotConfig()
def m_version(self):
if self._version is not None:
return self._version
try:
import robot # noqa
except:
log.exception("Unable to import 'robot'.")
version = "NO_ROBOT"
else:
try:
from robot import get_version
version = get_version(naked=True)
except:
log.exception("Unable to get version.")
version = "N/A" # Too old?
self._version = version
return self._version
def _check_min_version(self, min_version):
from robocorp_ls_core.basic import check_min_version
version = self.m_version()
return check_min_version(version, min_version)
@overrides(PythonLanguageServer.m_workspace__did_change_configuration)
def m_workspace__did_change_configuration(self, **kwargs):
PythonLanguageServer.m_workspace__did_change_configuration(self, **kwargs)
self.libspec_manager.config = self.config
@overrides(PythonLanguageServer.lint)
def lint(self, *args, **kwargs):
pass # No-op for this server.
@overrides(PythonLanguageServer.cancel_lint)
def cancel_lint(self, *args, **kwargs):
pass # No-op for this server.
@overrides(PythonLanguageServer._obtain_fs_observer)
def _obtain_fs_observer(self) -> IFSObserver:
return self.libspec_manager.fs_observer
@overrides(PythonLanguageServer._create_workspace)
def _create_workspace(
self, root_uri: str, fs_observer: IFSObserver, workspace_folders
) -> IWorkspace:
from robotframework_ls.impl.robot_workspace import RobotWorkspace
return RobotWorkspace(
root_uri,
fs_observer,
workspace_folders,
libspec_manager=self.libspec_manager,
)
def m_lint(self, doc_uri):
if not self._check_min_version((3, 2)):
from robocorp_ls_core.lsp import Error
msg = (
"robotframework version (%s) too old for linting.\n"
"Please install a newer version and restart the language server."
% (self.m_version(),)
)
log.info(msg)
return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]
func = partial(self._threaded_lint, doc_uri)
func = require_monitor(func)
return func
def _threaded_lint(self, doc_uri, monitor: IMonitor):
from robocorp_ls_core.jsonrpc.exceptions import JsonRpcRequestCancelled
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_LINT_ROBOCOP_ENABLED,
)
from robocorp_ls_core import uris
from robocorp_ls_core.lsp import Error
try:
from robotframework_ls.impl.ast_utils import collect_errors
from robotframework_ls.impl import code_analysis
import os.path
log.debug("Lint: starting (in thread).")
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
config = completion_context.config
robocop_enabled = config is None or config.get_setting(
OPTION_ROBOT_LINT_ROBOCOP_ENABLED, bool, False
)
ast = completion_context.get_ast()
source = completion_context.doc.source
monitor.check_cancelled()
errors = collect_errors(ast)
log.debug("Collected AST errors (in thread): %s", len(errors))
monitor.check_cancelled()
analysis_errors = code_analysis.collect_analysis_errors(completion_context)
monitor.check_cancelled()
log.debug("Collected analysis errors (in thread): %s", len(analysis_errors))
errors.extend(analysis_errors)
lsp_diagnostics = [error.to_lsp_diagnostic() for error in errors]
try:
if robocop_enabled:
from robocorp_ls_core.robocop_wrapper import (
collect_robocop_diagnostics,
)
workspace = completion_context.workspace
if workspace is not None:
project_root = workspace.root_path
else:
project_root = os.path.abspath(".")
monitor.check_cancelled()
lsp_diagnostics.extend(
collect_robocop_diagnostics(
project_root, ast, uris.to_fs_path(doc_uri), source
)
)
except Exception as e:
log.exception(
"Error collecting Robocop errors (possibly an unsupported Robocop version is installed)."
)
lsp_diagnostics.append(
Error(
f"Error collecting Robocop errors: {e}", (0, 0), (1, 0)
).to_lsp_diagnostic()
)
return lsp_diagnostics
except JsonRpcRequestCancelled:
raise JsonRpcRequestCancelled("Lint cancelled (inside lint)")
except Exception as e:
log.exception("Error collecting errors.")
ret = [
Error(
f"Error collecting Robocop errors: {e}", (0, 0), (1, 0)
).to_lsp_diagnostic()
]
return ret
def m_complete_all(self, doc_uri, line, col):
func = partial(self._threaded_complete_all, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return []
return self._complete_from_completion_context(completion_context)
def _complete_from_completion_context(self, completion_context):
from robotframework_ls.impl import section_name_completions
from robotframework_ls.impl import keyword_completions
from robotframework_ls.impl import variable_completions
from robotframework_ls.impl import dictionary_completions
from robotframework_ls.impl import filesystem_section_completions
from robotframework_ls.impl import keyword_parameter_completions
from robotframework_ls.impl import auto_import_completions
from robotframework_ls.impl.collect_keywords import (
collect_keyword_name_to_keyword_found,
)
from robotframework_ls.impl import ast_utils
ret = section_name_completions.complete(completion_context)
if not ret:
ret.extend(filesystem_section_completions.complete(completion_context))
if not ret:
token_info = completion_context.get_current_token()
if token_info is not None:
token = ast_utils.get_keyword_name_token(
token_info.node, token_info.token
)
if token is not None:
keyword_name_to_keyword_found: Dict[
str, List[IKeywordFound]
] = collect_keyword_name_to_keyword_found(completion_context)
ret.extend(keyword_completions.complete(completion_context))
ret.extend(
auto_import_completions.complete(
completion_context, keyword_name_to_keyword_found
)
)
return ret
if not ret:
ret.extend(variable_completions.complete(completion_context))
if not ret:
ret.extend(dictionary_completions.complete(completion_context))
if not ret:
ret.extend(keyword_parameter_completions.complete(completion_context))
return ret
def m_section_name_complete(self, doc_uri, line, col):
from robotframework_ls.impl import section_name_completions
completion_context = self._create_completion_context(doc_uri, line, col, None)
if completion_context is None:
return []
return section_name_completions.complete(completion_context)
def m_keyword_complete(self, doc_uri, line, col):
from robotframework_ls.impl import keyword_completions
completion_context = self._create_completion_context(doc_uri, line, col, None)
if completion_context is None:
return []
return keyword_completions.complete(completion_context)
def m_find_definition(self, doc_uri, line, col):
func = partial(self._threaded_find_definition, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_find_definition(self, doc_uri, line, col, monitor) -> Optional[list]:
from robotframework_ls.impl.find_definition import find_definition
import os.path
from robocorp_ls_core.lsp import Location, Range
from robocorp_ls_core import uris
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return None
definitions = find_definition(completion_context)
ret = []
for definition in definitions:
if not definition.source:
log.info("Found definition with empty source (%s).", definition)
continue
if not os.path.exists(definition.source):
log.info(
"Found definition: %s (but source does not exist).", definition
)
continue
lineno = definition.lineno
if lineno is None or lineno < 0:
lineno = 0
end_lineno = definition.end_lineno
if end_lineno is None or end_lineno < 0:
end_lineno = 0
col_offset = definition.col_offset
end_col_offset = definition.end_col_offset
ret.append(
Location(
uris.from_fs_path(definition.source),
Range((lineno, col_offset), (end_lineno, end_col_offset)),
).to_dict()
)
return ret
def m_code_format(self, text_document, options):
func = partial(self._threaded_code_format, text_document, options)
func = require_monitor(func)
return func
def _threaded_code_format(self, text_document, options, monitor: IMonitor):
from robotframework_ls.impl.formatting import create_text_edit_from_diff
from robocorp_ls_core.lsp import TextDocumentItem
import os.path
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_CODE_FORMATTER,
)
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
)
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY,
)
text_document_item = TextDocumentItem(**text_document)
text = text_document_item.text
if not text:
completion_context = self._create_completion_context(
text_document_item.uri, 0, 0, monitor
)
if completion_context is None:
return []
text = completion_context.doc.source
if not text:
return []
if options is None:
options = {}
tab_size = options.get("tabSize", 4)
# Default for now is the builtin. This will probably be changed in the future.
formatter = self._config.get_setting(
OPTION_ROBOT_CODE_FORMATTER, str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY
)
if formatter not in (
OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY,
):
log.critical(
f"Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}."
)
return []
if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:
from robotframework_ls.impl.formatting import robot_source_format
new_contents = robot_source_format(text, space_count=tab_size)
else:
if not self._check_min_version((4, 0)):
log.critical(
f"To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}"
)
return []
from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format
ast = completion_context.get_ast()
path = completion_context.doc.path
dirname = "."
try:
os.stat(path)
except:
# It doesn't exist
ws = self._workspace
if ws is not None:
dirname = ws.root_path
else:
dirname = os.path.dirname(path)
new_contents = robot_tidy_source_format(ast, dirname)
if new_contents is None or new_contents == text:
return []
return [x.to_dict() for x in create_text_edit_from_diff(text, new_contents)]
def _create_completion_context(
self, doc_uri, line, col, monitor: Optional[IMonitor]
):
from robotframework_ls.impl.completion_context import CompletionContext
if not self._check_min_version((3, 2)):
log.info("robotframework version too old.")
return None
workspace = self.workspace
if not workspace:
log.info("Workspace still not initialized.")
return None
document = workspace.get_document(doc_uri, accept_from_file=True)
if document is None:
log.info("Unable to get document for uri: %s.", doc_uri)
return None
return CompletionContext(
document,
line,
col,
workspace=workspace,
config=self.config,
monitor=monitor,
)
def m_signature_help(self, doc_uri: str, line: int, col: int):
func = partial(self._threaded_signature_help, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_signature_help(
self, doc_uri: str, line: int, col: int, monitor: IMonitor
) -> Optional[dict]:
from robotframework_ls.impl.signature_help import signature_help
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return None
return signature_help(completion_context)
def m_folding_range(self, doc_uri: str):
func = partial(self._threaded_folding_range, doc_uri)
func = require_monitor(func)
return func
def _threaded_folding_range(
self, doc_uri: str, monitor: IMonitor
) -> List[FoldingRangeTypedDict]:
from robotframework_ls.impl.folding_range import folding_range
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return folding_range(completion_context)
def m_code_lens(self, doc_uri: str):
func = partial(self._threaded_code_lens, doc_uri)
func = require_monitor(func)
return func
def _threaded_code_lens(
self, doc_uri: str, monitor: IMonitor
) -> List[CodeLensTypedDict]:
from robotframework_ls.impl.code_lens import code_lens
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return code_lens(completion_context)
def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):
func = partial(self._threaded_resolve_code_lens, code_lens)
func = require_monitor(func)
return func
def _threaded_resolve_code_lens(
self, code_lens: CodeLensTypedDict, monitor: IMonitor
) -> CodeLensTypedDict:
from robotframework_ls.impl.code_lens import code_lens_resolve
data = code_lens.get("data")
if not isinstance(data, dict):
return code_lens
doc_uri = data.get("uri")
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return code_lens
return code_lens_resolve(completion_context, code_lens)
def m_document_symbol(self, doc_uri: str):
func = partial(self._threaded_document_symbol, doc_uri)
func = require_monitor(func)
return func
def _threaded_document_symbol(
self, doc_uri: str, monitor: IMonitor
) -> List[DocumentSymbolTypedDict]:
from robotframework_ls.impl.document_symbol import document_symbol
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return document_symbol(completion_context)
def m_list_tests(self, doc_uri: str):
func = partial(self._threaded_list_tests, doc_uri)
func = require_monitor(func)
return func
def _threaded_list_tests(
self, doc_uri: str, monitor: IMonitor
) -> List[ITestInfoTypedDict]:
from robotframework_ls.impl.code_lens import list_tests
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return list_tests(completion_context)
def m_hover(self, doc_uri: str, line: int, col: int):
func = partial(self._threaded_hover, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_hover(
self, doc_uri: str, line, col, monitor: IMonitor
) -> Optional[HoverTypedDict]:
from robotframework_ls.impl.hover import hover
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return None
return hover(completion_context)
def m_workspace_symbols(self, query: Optional[str] = None):
func = partial(self._threaded_workspace_symbols, query)
func = require_monitor(func)
return func
def _threaded_workspace_symbols(
self, query: Optional[str], monitor: IMonitor
) -> Optional[List[SymbolInformationTypedDict]]:
from robotframework_ls.impl.workspace_symbols import workspace_symbols
from robotframework_ls.impl.completion_context import BaseContext
from robotframework_ls.impl.protocols import IRobotWorkspace
from typing import cast
workspace = self._workspace
if not workspace:
return []
robot_workspace = cast(IRobotWorkspace, workspace)
return workspace_symbols(
query,
BaseContext(workspace=robot_workspace, config=self.config, monitor=monitor),
)
def m_text_document__semantic_tokens__range(self, textDocument=None, range=None):
raise RuntimeError("Not currently implemented!")
def m_text_document__semantic_tokens__full(self, textDocument=None):
func = partial(self.threaded_semantic_tokens_full, textDocument=textDocument)
func = require_monitor(func)
return func
def threaded_semantic_tokens_full(
self, textDocument: TextDocumentTypedDict, monitor: Optional[IMonitor] = None
):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
doc_uri = textDocument["uri"]
context = self._create_completion_context(doc_uri, -1, -1, monitor)
if context is None:
return {"resultId": None, "data": []}
return {"resultId": None, "data": semantic_tokens_full(context)}
def m_monaco_completions_from_code_full(
self,
prefix: str = "",
full_code: str = "",
position=PositionTypedDict,
uri: str = "",
indent: str = "",
):
func = partial(
self.threaded_monaco_completions_from_code_full,
prefix=prefix,
full_code=full_code,
position=position,
uri=uri,
indent=indent,
)
func = require_monitor(func)
return func
def threaded_monaco_completions_from_code_full(
self,
prefix: str,
full_code: str,
position: PositionTypedDict,
uri: str,
indent: str,
monitor: Optional[IMonitor] = None,
):
from robotframework_ls.impl.robot_workspace import RobotDocument
from robotframework_ls.impl.completion_context import CompletionContext
from robocorp_ls_core.workspace import Document
from robotframework_ls.impl import section_completions
from robotframework_ls.impl import snippets_completions
from robotframework_ls.server_api.monaco_conversions import (
convert_to_monaco_completion,
)
from robotframework_ls.impl.completion_context import CompletionType
d = Document(uri, prefix)
last_line, _last_col = d.get_last_line_col()
line = last_line + position["line"]
col = position["character"]
col += len(indent)
document = RobotDocument(uri, full_code)
completion_context = CompletionContext(
document,
line,
col,
config=self.config,
monitor=monitor,
workspace=self.workspace,
)
completion_context.type = CompletionType.shell
completions = self._complete_from_completion_context(completion_context)
completions.extend(section_completions.complete(completion_context))
completions.extend(snippets_completions.complete(completion_context))
return {
"suggestions": [
convert_to_monaco_completion(
c, line_delta=last_line, col_delta=len(indent), uri=uri
)
for c in completions
]
}
def m_semantic_tokens_from_code_full(
self, prefix: str = "", full_code: str = "", indent: str = ""
):
func = partial(
self.threaded_semantic_tokens_from_code_full,
prefix=prefix,
full_code=full_code,
indent=indent,
)
func = require_monitor(func)
return func
def threaded_semantic_tokens_from_code_full(
self,
prefix: str,
full_code: str,
indent: str,
monitor: Optional[IMonitor] = None,
):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast
try:
from robotframework_ls.impl.robot_workspace import RobotDocument
doc = RobotDocument("")
doc.source = full_code
ast = doc.get_ast()
data = semantic_tokens_full_from_ast(ast, monitor)
if not prefix:
return {"resultId": None, "data": data}
# We have to exclude the prefix from the coloring...
# debug info...
# import io
# from robotframework_ls.impl.semantic_tokens import decode_semantic_tokens
# stream = io.StringIO()
# decode_semantic_tokens(data, doc, stream)
# found = stream.getvalue()
prefix_doc = RobotDocument("")
prefix_doc.source = prefix
last_line, last_col = prefix_doc.get_last_line_col()
# Now we have the data from the full code, but we need to remove whatever
# we have in the prefix from the result...
ints_iter = iter(data)
line = 0
col = 0
new_data = []
indent_len = len(indent)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
line += line_delta
if line_delta == 0:
col += col_delta
else:
col = col_delta
if line >= last_line:
new_data.append(line - last_line)
new_data.append(col_delta - indent_len)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
# Ok, now, we have to add the indent_len to all the
# next lines
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
new_data.append(line_delta)
if line_delta > 0:
new_data.append(col_delta - indent_len)
else:
new_data.append(col_delta)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
break
# Approach changed so that we always have a new line
# i.e.:
# \n<indent><code>
#
# so, the condition below no longer applies.
# elif line == last_line and col >= last_col:
# new_data.append(0)
# new_data.append(col - last_col)
# new_data.append(token_len)
# new_data.append(token_type)
# new_data.append(token_modifier)
# new_data.extend(ints_iter)
# break
# debug info...
# temp_stream = io.StringIO()
# temp_doc = RobotDocument("")
# temp_doc.source = full_code[len(prefix) :]
# decode_semantic_tokens(new_data, temp_doc, temp_stream)
# temp_found = temp_stream.getvalue()
return {"resultId": None, "data": new_data}
except:
log.exception("Error computing semantic tokens from code.")
return {"resultId": None, "data": []}
def m_shutdown(self, **_kwargs):
PythonLanguageServer.m_shutdown(self, **_kwargs)
self.libspec_manager.dispose()
def m_exit(self, **_kwargs):
PythonLanguageServer.m_exit(self, **_kwargs)
self.libspec_manager.dispose()
| [
30,
38,
40,
48,
51
] |
195 | 6ad36f2b115c822a50a38e88a8d7d524fc5b045b | <mask token>
| <mask token>
for i in range(b - 1):
if (i + 1) * a % b == c:
frag = 'YES'
break
print(frag)
| a, b, c = map(int, input().split())
frag = 'NO'
for i in range(b - 1):
if (i + 1) * a % b == c:
frag = 'YES'
break
print(frag)
| a,b,c=map(int,input().split())
frag='NO'
for i in range(b-1):
if (i+1)*a%b==c:
frag='YES'
break
print(frag)
| null | [
0,
1,
2,
3
] |
196 | 02182f0379e58b64bbe17cc5f433e8aae7814976 | <mask token>
| <mask token>
web = Blueprint('web', __name__)
<mask token>
| from flask import Blueprint
web = Blueprint('web', __name__)
from app.web import auth
from app.web import user
from app.web import book
| from flask import Blueprint
web = Blueprint('web', __name__)
from app.web import auth
from app.web import user
from app.web import book
| null | [
0,
1,
2,
3
] |
197 | 074defa92c8bc5afc221c9c19842d808fbf1e112 | <mask token>
def randomWord():
id = random.randint(0, len(WORDS) - 1)
return WORDS[id]
def displayBoard(hiddenWord, tries):
print(IMAGES[tries] + '\n')
print(hiddenWord)
print('--- * --- * --- * --- * --- * ---')
def run():
word = randomWord()
hiddenWord = ['-'] * len(word)
tries = 0
while True:
displayBoard(hiddenWord, tries)
currentLetter = str(raw_input('Escoge una letra: '))
letterIndexes = []
for i in range(len(word)):
if word[i] == currentLetter:
letterIndexes.append(i)
if len(letterIndexes) == 0:
tries += 1
if tries == len(IMAGES) - 2:
displayBoard(hiddenWord, tries)
print('\nLo sentimos, perdiste. La palabra correcta era {}'
.format(word))
break
else:
for id in letterIndexes:
hiddenWord[id] = currentLetter
letterIndexes = []
try:
hiddenWord.index('-')
except ValueError:
print('\nFelicidades. Ganaste. La palabra es: {}'.format(word))
break
<mask token>
| <mask token>
def randomWord():
id = random.randint(0, len(WORDS) - 1)
return WORDS[id]
def displayBoard(hiddenWord, tries):
print(IMAGES[tries] + '\n')
print(hiddenWord)
print('--- * --- * --- * --- * --- * ---')
def run():
word = randomWord()
hiddenWord = ['-'] * len(word)
tries = 0
while True:
displayBoard(hiddenWord, tries)
currentLetter = str(raw_input('Escoge una letra: '))
letterIndexes = []
for i in range(len(word)):
if word[i] == currentLetter:
letterIndexes.append(i)
if len(letterIndexes) == 0:
tries += 1
if tries == len(IMAGES) - 2:
displayBoard(hiddenWord, tries)
print('\nLo sentimos, perdiste. La palabra correcta era {}'
.format(word))
break
else:
for id in letterIndexes:
hiddenWord[id] = currentLetter
letterIndexes = []
try:
hiddenWord.index('-')
except ValueError:
print('\nFelicidades. Ganaste. La palabra es: {}'.format(word))
break
if __name__ == '__main__':
print('B I E N V E N I D O S A A H O R C A D O S')
run()
| <mask token>
IMAGES = [
"""
+---+
| |
|
|
|
|
========="""
,
"""
+---+
| |
O |
|
|
|
========="""
,
"""
+---+
| |
O |
| |
|
|
========="""
,
"""
+---+
| |
O |
/| |
|
|
========="""
,
"""
+---+
| |
O |
/|\\ |
|
|
========="""
,
"""
+---+
| |
O |
/|\\ |
| |
|
========="""
,
"""
+---+
| |
O |
/|\\ |
| |
/ |
========="""
,
"""
+---+
| |
O |
/|\\ |
| |
/ \\ |
========="""
, '\n']
WORDS = ['lavadora', 'secadora', 'sofa', 'gobierno', 'diputado',
'democracia', 'computadora', 'teclado']
def randomWord():
id = random.randint(0, len(WORDS) - 1)
return WORDS[id]
def displayBoard(hiddenWord, tries):
print(IMAGES[tries] + '\n')
print(hiddenWord)
print('--- * --- * --- * --- * --- * ---')
def run():
word = randomWord()
hiddenWord = ['-'] * len(word)
tries = 0
while True:
displayBoard(hiddenWord, tries)
currentLetter = str(raw_input('Escoge una letra: '))
letterIndexes = []
for i in range(len(word)):
if word[i] == currentLetter:
letterIndexes.append(i)
if len(letterIndexes) == 0:
tries += 1
if tries == len(IMAGES) - 2:
displayBoard(hiddenWord, tries)
print('\nLo sentimos, perdiste. La palabra correcta era {}'
.format(word))
break
else:
for id in letterIndexes:
hiddenWord[id] = currentLetter
letterIndexes = []
try:
hiddenWord.index('-')
except ValueError:
print('\nFelicidades. Ganaste. La palabra es: {}'.format(word))
break
if __name__ == '__main__':
print('B I E N V E N I D O S A A H O R C A D O S')
run()
| import random
IMAGES = [
"""
+---+
| |
|
|
|
|
========="""
,
"""
+---+
| |
O |
|
|
|
========="""
,
"""
+---+
| |
O |
| |
|
|
========="""
,
"""
+---+
| |
O |
/| |
|
|
========="""
,
"""
+---+
| |
O |
/|\\ |
|
|
========="""
,
"""
+---+
| |
O |
/|\\ |
| |
|
========="""
,
"""
+---+
| |
O |
/|\\ |
| |
/ |
========="""
,
"""
+---+
| |
O |
/|\\ |
| |
/ \\ |
========="""
, '\n']
WORDS = ['lavadora', 'secadora', 'sofa', 'gobierno', 'diputado',
'democracia', 'computadora', 'teclado']
def randomWord():
id = random.randint(0, len(WORDS) - 1)
return WORDS[id]
def displayBoard(hiddenWord, tries):
print(IMAGES[tries] + '\n')
print(hiddenWord)
print('--- * --- * --- * --- * --- * ---')
def run():
word = randomWord()
hiddenWord = ['-'] * len(word)
tries = 0
while True:
displayBoard(hiddenWord, tries)
currentLetter = str(raw_input('Escoge una letra: '))
letterIndexes = []
for i in range(len(word)):
if word[i] == currentLetter:
letterIndexes.append(i)
if len(letterIndexes) == 0:
tries += 1
if tries == len(IMAGES) - 2:
displayBoard(hiddenWord, tries)
print('\nLo sentimos, perdiste. La palabra correcta era {}'
.format(word))
break
else:
for id in letterIndexes:
hiddenWord[id] = currentLetter
letterIndexes = []
try:
hiddenWord.index('-')
except ValueError:
print('\nFelicidades. Ganaste. La palabra es: {}'.format(word))
break
if __name__ == '__main__':
print('B I E N V E N I D O S A A H O R C A D O S')
run()
| # -*- coding: utf-8 -*-
import random
IMAGES = ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
| |
|
=========''', '''
+---+
| |
O |
/|\ |
| |
/ |
=========''', '''
+---+
| |
O |
/|\ |
| |
/ \ |
=========''', '''
''']
WORDS = [
'lavadora',
'secadora',
'sofa',
'gobierno',
'diputado',
'democracia',
'computadora',
'teclado'
]
# Funcion que regresa una palabra aleatoria
def randomWord():
id = random.randint(0, len(WORDS) - 1)
return WORDS[id]
def displayBoard(hiddenWord, tries):
print(IMAGES[tries] + '\n')
print(hiddenWord)
print('--- * --- * --- * --- * --- * ---')
def run():
word = randomWord()
hiddenWord = ['-'] * len(word)
tries = 0
while True:
displayBoard(hiddenWord, tries)
currentLetter = str(raw_input('Escoge una letra: '))
letterIndexes = []
for i in range(len(word)):
if word[i] == currentLetter:
letterIndexes.append(i)
if len(letterIndexes) == 0:
tries += 1
# Checa si perdio el jugador
if tries == len(IMAGES) - 2:
displayBoard(hiddenWord, tries)
print('\nLo sentimos, perdiste. La palabra correcta era {}'.format(word))
break
else:
for id in letterIndexes:
hiddenWord[id] = currentLetter
letterIndexes = []
# Chea si gano el jugador
try:
hiddenWord.index('-')
except ValueError:
print('\nFelicidades. Ganaste. La palabra es: {}'.format(word))
break
if __name__ == '__main__':
print('B I E N V E N I D O S A A H O R C A D O S')
run() | [
3,
4,
5,
6,
7
] |
198 | c879230efe12bde9042159da221a2b9b4c1d8349 | <mask token>
def load_data_from_csv(file_name, header=0, encoding='utf-8'):
data_df = pd.read_csv(file_name, header=header, encoding=encoding)
return data_df
<mask token>
| <mask token>
def load_data_from_csv(file_name, header=0, encoding='utf-8'):
data_df = pd.read_csv(file_name, header=header, encoding=encoding)
return data_df
<mask token>
out.write(data['content'][0].encode('utf-8'))
inp.close()
out.close()
| <mask token>
outf = 'test.txt'
inf = 'remove_items.txt'
out = open(outf, 'w')
inp = open(inf, 'r')
validate_data_path = (
'/data1/hjw/fine_grit_emotion_analysis/validation/ai_challenger_sentiment_analysis_validationset_20180816/sentiment_analysis_validationset.csv'
)
train_data_path = (
'/data1/hjw/fine_grit_emotion_analysis/train/ai_challenger_sentiment_analysis_trainingset_20180816/sentiment_analysis_trainingset.csv'
)
def load_data_from_csv(file_name, header=0, encoding='utf-8'):
data_df = pd.read_csv(file_name, header=header, encoding=encoding)
return data_df
data = load_data_from_csv(validate_data_path)
out.write(data['content'][0].encode('utf-8'))
inp.close()
out.close()
| import re
import sys
import os
import pandas as pd
import jieba
import logging
import argparse
from sklearn.externals import joblib
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import f1_score, accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from sklearn.externals import joblib
import os
import argparse
import keras as ks
from sklearn.model_selection import train_test_split
import pdb
import logging
from pyfasttext import FastText
outf = 'test.txt'
inf = 'remove_items.txt'
out = open(outf, 'w')
inp = open(inf, 'r')
validate_data_path = (
'/data1/hjw/fine_grit_emotion_analysis/validation/ai_challenger_sentiment_analysis_validationset_20180816/sentiment_analysis_validationset.csv'
)
train_data_path = (
'/data1/hjw/fine_grit_emotion_analysis/train/ai_challenger_sentiment_analysis_trainingset_20180816/sentiment_analysis_trainingset.csv'
)
def load_data_from_csv(file_name, header=0, encoding='utf-8'):
data_df = pd.read_csv(file_name, header=header, encoding=encoding)
return data_df
data = load_data_from_csv(validate_data_path)
out.write(data['content'][0].encode('utf-8'))
inp.close()
out.close()
| #-*- coding: utf-8 -*-
import re
import sys
import os
import pandas as pd
import jieba
import logging
import argparse
from sklearn.externals import joblib
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import f1_score,accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from sklearn.externals import joblib
import os
import argparse
import keras as ks
from sklearn.model_selection import train_test_split
#from keras.utils.np_utils import to_categorical
#from keras.models import Sequential
#from keras import layers
import pdb
import logging
from pyfasttext import FastText
outf = "test.txt"
inf = "remove_items.txt"
out = open(outf,'w')
inp = open(inf,'r')
#i = inp.readline()
#print(type(i))
#out.write(inp.readline())
validate_data_path = "/data1/hjw/fine_grit_emotion_analysis/validation/ai_challenger_sentiment_analysis_validationset_20180816/sentiment_analysis_validationset.csv"
train_data_path = "/data1/hjw/fine_grit_emotion_analysis/train/ai_challenger_sentiment_analysis_trainingset_20180816/sentiment_analysis_trainingset.csv"
#load the data
def load_data_from_csv(file_name, header=0, encoding="utf-8"):
data_df = pd.read_csv(file_name, header=header, encoding=encoding)
return data_df
#train = load_data_from(train_data_path)
data = load_data_from_csv(validate_data_path)
out.write(data['content'][0].encode('utf-8'))
inp.close()
out.close()
| [
1,
2,
3,
4,
5
] |
199 | 7525691ece4fe66bb175e470db3ac78f701e3730 | <mask token>
| <mask token>
api.add_resource(Store, '/store/<string:name>')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(StoreList, '/stores')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(debug=True)
| <mask token>
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',
'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'key123'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(Store, '/store/<string:name>')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(StoreList, '/stores')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(debug=True)
| import os
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT, timedelta
from security import authenticate, identity
from resources.user import UserRegister
from resources.item import Item, ItemList
from resources.store import Store, StoreList
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',
'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'key123'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(Store, '/store/<string:name>')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(StoreList, '/stores')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(debug=True)
| # create item based on name using post method, get specific item or list of items using get method, update item using put and delete item using del method.
import os
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT, timedelta
from security import authenticate, identity
from resources.user import UserRegister
from resources.item import Item,ItemList
from resources.store import Store, StoreList
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off flask SQLAlchemy modification.
app.secret_key = 'key123'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(Store,'/store/<string:name>')
api.add_resource(Item,'/item/<string:name>') # http://localhost:5000/student/Rolf
api.add_resource(ItemList,'/items')
api.add_resource(StoreList,'/stores')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(debug=True)
| [
0,
1,
2,
3,
4
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.